Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 23:48:15 -05:00)
Compare commits
12 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 1f62596b0a | |
| | a2b9878733 | |
| | e166dfba48 | |
| | d7d62eb8e6 | |
| | 61eb75fbb6 | |
| | e9af5912ac | |
| | c3b2f13fe0 | |
| | 0d847e0427 | |
| | 7de221ba89 | |
| | 16fd840896 | |
| | 20fb098a01 | |
| | b734282a44 | |
4  .github/workflows/bridge.yml  (vendored)
@@ -43,8 +43,6 @@ jobs:
    version: '0.8.16'
- name: Install Geth Tools
  uses: gacts/install-geth-tools@v1
  with:
    version: 1.10.19
- name: Lint
  working-directory: 'bridge'
  run: |
@@ -94,8 +92,6 @@ jobs:
    version: '0.8.16'
- name: Install Geth Tools
  uses: gacts/install-geth-tools@v1
  with:
    version: 1.10.19
- name: Build prerequisites
  run: |
    make dev_docker
2  .github/workflows/common.yml  (vendored)
@@ -88,8 +88,6 @@ jobs:
    version: '0.8.16'
- name: Install Geth Tools
  uses: gacts/install-geth-tools@v1
  with:
    version: 1.10.19
- name: Build prerequisites
  run: |
    make dev_docker
2  .github/workflows/coordinator.yml  (vendored)
@@ -104,8 +104,6 @@ jobs:
    version: '0.8.16'
- name: Install Geth Tools
  uses: gacts/install-geth-tools@v1
  with:
    version: 1.10.19
- name: Build prerequisites
  run: |
    make dev_docker
2  .github/workflows/database.yml  (vendored)
@@ -81,8 +81,6 @@ jobs:
    version: '0.8.16'
- name: Install Geth Tools
  uses: gacts/install-geth-tools@v1
  with:
    version: 1.10.19
- name: Build prerequisites
  run: |
    make dev_docker
2  .github/workflows/integration.yaml  (vendored)
@@ -33,8 +33,6 @@ jobs:
    version: '0.8.16'
- name: Install Geth Tools
  uses: gacts/install-geth-tools@v1
  with:
    version: 1.10.19
- name: Build prerequisites
  run: |
    make dev_docker
@@ -64,11 +64,9 @@
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
},
"chunk_proposer_config": {
"max_tx_gas_per_chunk": 1123456,
"max_l2_tx_num_per_chunk": 1123,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"min_l1_commit_calldata_size_per_chunk": 11234,
"chunk_timeout_sec": 300,
"max_row_consumption_per_chunk": 1048319,
"gas_cost_increase_multiplier": 1.2
@@ -77,7 +75,6 @@
"max_chunk_num_per_batch": 112,
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"min_chunk_num_per_batch": 11,
"batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
}
@@ -28,11 +28,9 @@ type L2Config struct {

// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
MaxTxGasPerChunk uint64 `json:"max_tx_gas_per_chunk"`
MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
@@ -43,7 +41,6 @@ type BatchProposerConfig struct {
MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
}
@@ -430,10 +430,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
// The proof for this block is not ready yet.
return
case types.ProvingTaskProved:
// It's an intermediate state. The prover manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
@@ -346,8 +346,15 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)

txInfo := map[string]interface{}{
"tx_hash": tx.Hash().String(),
"tx_type": s.config.TxType,
"from": auth.From.String(),
}

switch s.config.TxType {
case LegacyTxType, AccessListTxType: // `LegacyTxType` is for ganache mock node
originalGasPrice := feeData.gasPrice
gasPrice := escalateMultipleNum.Mul(escalateMultipleNum, big.NewInt(feeData.gasPrice.Int64()))
gasPrice = gasPrice.Div(gasPrice, escalateMultipleDen)
if gasPrice.Cmp(feeData.gasPrice) < 0 {
@@ -357,7 +364,13 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
gasPrice = maxGasPrice
}
feeData.gasPrice = gasPrice

txInfo["original_gas_price"] = originalGasPrice
txInfo["adjusted_gas_price"] = gasPrice
default:
originalGasTipCap := big.NewInt(feeData.gasTipCap.Int64())
originalGasFeeCap := big.NewInt(feeData.gasFeeCap.Int64())

gasTipCap := big.NewInt(feeData.gasTipCap.Int64())
gasTipCap = gasTipCap.Mul(gasTipCap, escalateMultipleNum)
gasTipCap = gasTipCap.Div(gasTipCap, escalateMultipleDen)
@@ -389,8 +402,15 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap

txInfo["original_gas_tip_cap"] = originalGasTipCap
txInfo["adjusted_gas_tip_cap"] = gasTipCap
txInfo["original_gas_fee_cap"] = originalGasFeeCap
txInfo["adjusted_gas_fee_cap"] = gasFeeCap
}

log.Debug("Transaction gas adjustment details", txInfo)

nonce := tx.Nonce()
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
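For reference, the fee escalation these resubmission hunks perform reduces to one piece of big-integer arithmetic: multiply the previous price by EscalateMultipleNum/EscalateMultipleDen, never go below the previous price, and cap the result at MaxGasPrice. The sketch below is illustrative only; the helper name and the sample numbers (an 11/10 bump, a 500 gwei cap) are assumptions, not values from the repository.

```go
package main

import (
	"fmt"
	"math/big"
)

// escalateGasPrice is a hypothetical helper mirroring the arithmetic in the
// resubmitTransaction hunk: escalated = current * num / den, floored at the
// current price and capped at maxGasPrice.
func escalateGasPrice(current, num, den, maxGasPrice *big.Int) *big.Int {
	escalated := new(big.Int).Mul(current, num)
	escalated.Div(escalated, den)
	if escalated.Cmp(current) < 0 { // never resubmit with a lower price
		escalated.Set(current)
	}
	if escalated.Cmp(maxGasPrice) > 0 { // respect the configured ceiling
		escalated.Set(maxGasPrice)
	}
	return escalated
}

func main() {
	// Example values only: 30 gwei bumped by 11/10, capped at 500 gwei.
	out := escalateGasPrice(
		big.NewInt(30_000_000_000),
		big.NewInt(11),
		big.NewInt(10),
		big.NewInt(500_000_000_000),
	)
	fmt.Println(out) // 33000000000
}
```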
@@ -429,6 +449,12 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
}
}
} else if s.config.EscalateBlocks+pending.submitAt < number {
log.Debug("resubmit transaction",
"tx hash", pending.tx.Hash().String(),
"submit block number", pending.submitAt,
"current block number", number,
"escalateBlocks", s.config.EscalateBlocks)

var tx *types.Transaction
tx, err := s.resubmitTransaction(pending.feeData, pending.signer, pending.tx)
if err != nil {
@@ -485,8 +511,8 @@ func (s *Sender) checkBalance(ctx context.Context) error {
}

if bls.Cmp(s.minBalance) < 0 {
return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s",
bls.String(), s.minBalance.String())
return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s, address: %s",
bls.String(), s.minBalance.String(), s.auth.From.String())
}

return nil
@@ -28,7 +28,6 @@ type BatchProposer struct {
maxChunkNumPerBatch uint64
maxL1CommitGasPerBatch uint64
maxL1CommitCalldataSizePerBatch uint32
minChunkNumPerBatch uint64
batchTimeoutSec uint64
gasCostIncreaseMultiplier float64
@@ -39,12 +38,18 @@ type BatchProposer struct {
totalL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
batchChunksNum prometheus.Gauge
batchFirstChunkTimeoutReached prometheus.Counter
batchChunksSuperposeNotEnoughTotal prometheus.Counter
batchFirstBlockTimeoutReached prometheus.Counter
batchChunksProposeNotEnoughTotal prometheus.Counter
}

// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
log.Debug("new batch proposer",
"maxChunkNumPerBatch", cfg.MaxChunkNumPerBatch,
"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
"batchTimeoutSec", cfg.BatchTimeoutSec)

return &BatchProposer{
ctx: ctx,
db: db,
@@ -54,7 +59,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch,
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
@@ -86,13 +90,13 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
Name: "bridge_propose_batch_chunks_number",
Help: "The number of chunks in the batch",
}),
batchFirstChunkTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_first_chunk_timeout_reached_total",
Help: "Total times of batch's first chunk timeout reached",
batchFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_first_block_timeout_reached_total",
Help: "Total times of batch's first block timeout reached",
}),
batchChunksSuperposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_chunks_superpose_not_enough_total",
Help: "Total number of batch chunk superpose not enough",
batchChunksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_chunks_propose_not_enough_total",
Help: "Total number of batch chunk propose not enough",
}),
}
}
@@ -153,87 +157,86 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
return nil, nil
}

firstChunk := dbChunks[0]
totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
totalL1CommitGas := firstChunk.TotalL1CommitGas
totalChunks := uint64(1)
totalL1MessagePopped := firstChunk.TotalL1MessagesPoppedBefore + uint64(firstChunk.TotalL1MessagesPoppedInChunk)
var totalL1CommitCalldataSize uint32
var totalL1CommitGas uint64
var totalChunks uint64
var totalL1MessagePopped uint64

parentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
if err != nil {
return nil, err
}

getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}

// Add extra gas costs
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += 16 // version in calldata
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256) // _skippedL1MessageBitmap in calldata
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += types.CalldataNonZeroByteGas // version in calldata

// adjusting gas:
// add 1 time cold sload (2100 gas) for L1MessageQueue
// add 1 time cold address access (2600 gas) for L1MessageQueue
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
totalL1CommitGas += (2100 + 2600 - 100 - 100)
totalL1CommitGas += getKeccakGas(32 * totalChunks) // batch data hash
if parentBatch != nil {
totalL1CommitGas += getKeccakGas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
totalL1CommitGas += 16 * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
}
// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)

p.totalL1CommitGas.Set(float64(totalL1CommitGas))
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
totalL1CommitGas += types.GetKeccak256Gas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
totalL1CommitGas += types.CalldataNonZeroByteGas * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
}

p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}

for i, chunk := range dbChunks[1:] {
for i, chunk := range dbChunks {
totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
totalL1CommitGas += chunk.TotalL1CommitGas
// adjust batch data hash gas cost
totalL1CommitGas -= getKeccakGas(32 * totalChunks)
totalL1CommitGas -= types.GetKeccak256Gas(32 * totalChunks)
totalChunks++
totalL1CommitGas += getKeccakGas(32 * totalChunks)
// adjust batch header hash gas cost
totalL1CommitGas -= getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= 16 * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += types.GetKeccak256Gas(32 * totalChunks)
// adjust batch header hash gas cost, batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas -= types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1MessagePopped += uint64(chunk.TotalL1MessagesPoppedInChunk)
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas += types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if totalChunks > p.maxChunkNumPerBatch ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return dbChunks[:i+1], nil
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if i == 0 {
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
}

log.Debug("breaking limit condition in batching",
"currentTotalChunks", totalChunks,
"maxChunkNumPerBatch", p.maxChunkNumPerBatch,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerBatch", p.maxL1CommitCalldataSizePerBatch,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)

p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
return dbChunks[:i], nil
}
}

p.batchChunksNum.Set(float64(len(dbChunks)))

var hasChunkTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
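The gas bookkeeping in this hunk repeatedly adds and removes two terms that depend on totalL1MessagePopped: the keccak cost of the batch header (size 89 + 32 * ceil(l1MessagePopped / 256)) and the calldata cost of the skipped-L1-message bitmap, and it then compares the 1.2x over-estimate against maxL1CommitGasPerBatch. The sketch below restates just those formulas with the constants shown in the diff; the function name and the sample inputs are hypothetical.

```go
package main

import "fmt"

const calldataNonZeroByteGas = 16 // mirrors types.CalldataNonZeroByteGas

// getKeccak256Gas mirrors the helper in the hunk: 30 + 6 * ceil(size / 32).
func getKeccak256Gas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

// batchHeaderGas returns the two header-related terms the proposer keeps
// adjusting while it appends chunks: the keccak cost of hashing the batch
// header and the calldata cost of the skipped-L1-message bitmap.
func batchHeaderGas(totalL1MessagePopped uint64) (headerKeccakGas, bitmapCalldataGas uint64) {
	headerKeccakGas = getKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
	bitmapCalldataGas = calldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
	return
}

func main() {
	k, b := batchHeaderGas(300) // hypothetical popped-message count
	fmt.Println("header keccak gas:", k, "bitmap calldata gas:", b)

	// Over-estimate check used against maxL1CommitGasPerBatch.
	gasCostIncreaseMultiplier := 1.2
	totalL1CommitGas := uint64(1_000_000)
	fmt.Println("over-estimate:", uint64(gasCostIncreaseMultiplier*float64(totalL1CommitGas)))
}
```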
@@ -241,18 +244,16 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
"first block timestamp", dbChunks[0].StartBlockTime,
"chunk outdated time threshold", currentTimeSec,
)
hasChunkTimeout = true
p.batchFirstChunkTimeoutReached.Inc()
p.batchFirstBlockTimeoutReached.Inc()
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
return dbChunks, nil
}

if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
log.Warn("The chunk number of the batch is less than the minimum limit",
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
)
p.batchChunksSuperposeNotEnoughTotal.Inc()
return nil, nil
}
return dbChunks, nil
log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
p.batchChunksProposeNotEnoughTotal.Inc()
return nil, nil
}

func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
@@ -23,11 +23,9 @@ func testBatchProposer(t *testing.T) {
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
@@ -37,7 +35,6 @@ func testBatchProposer(t *testing.T) {
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db, nil)
bp.TryProposeBatch()
@@ -51,11 +51,9 @@ type ChunkProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block

maxTxGasPerChunk uint64
maxL2TxNumPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
minL1CommitCalldataSizePerChunk uint64
maxRowConsumptionPerChunk uint64
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
@@ -70,22 +68,27 @@ type ChunkProposer struct {
totalTxGasUsed prometheus.Gauge
maxTxConsumption prometheus.Gauge
chunkBlocksNum prometheus.Gauge
chunkBlockTimeoutReached prometheus.Counter
chunkBlocksSuperposeNotEnoughTotal prometheus.Counter
chunkFirstBlockTimeoutReached prometheus.Counter
chunkBlocksProposeNotEnoughTotal prometheus.Counter
}

// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
log.Debug("new chunk proposer",
"maxL2TxNumPerChunk", cfg.MaxL2TxNumPerChunk,
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
"chunkTimeoutSec", cfg.ChunkTimeoutSec)

return &ChunkProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxTxGasPerChunk: cfg.MaxTxGasPerChunk,
maxL2TxNumPerChunk: cfg.MaxL2TxNumPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
@@ -130,13 +133,13 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
Name: "bridge_propose_chunk_chunk_block_number",
Help: "The number of blocks in the chunk",
}),
chunkBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
chunkFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_first_block_timeout_reached_total",
Help: "Total times of chunk's first block timeout reached",
}),
chunkBlocksSuperposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_blocks_superpose_not_enough_total",
Help: "Total number of chunk block superpose not enough",
chunkBlocksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_blocks_propose_not_enough_total",
Help: "Total number of chunk block propose not enough",
}),
}
}
@@ -188,97 +191,91 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
return nil, nil
}

chunk := &types.Chunk{Blocks: blocks[:1]}
firstBlock := chunk.Blocks[0]
totalTxGasUsed := firstBlock.Header.GasUsed
totalL2TxNum := firstBlock.L2TxsNum()
totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
var chunk types.Chunk
var totalTxGasUsed uint64
var totalL2TxNum uint64
var totalL1CommitCalldataSize uint64
var totalL1CommitGas uint64
crc := chunkRowConsumption{}
totalL1CommitGas := chunk.EstimateL1CommitGas()

if err := crc.add(firstBlock.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}

p.chunkL2TxNum.Set(float64(totalL2TxNum))
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
firstBlock.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}

p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
firstBlock.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}

p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
firstBlock.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}

p.totalTxGasUsed.Set(float64(totalTxGasUsed))
// Check if the first block breaks any soft limits.
if totalTxGasUsed > p.maxTxGasPerChunk {
log.Warn(
"The first block in chunk exceeds l2 tx gas limit",
"block number", firstBlock.Header.Number,
"gas used", totalTxGasUsed,
"max gas limit", p.maxTxGasPerChunk,
)
}

max := crc.max()
p.maxTxConsumption.Set(float64(max))
if max > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
firstBlock.Header.Number,
crc,
max,
p.maxRowConsumptionPerChunk,
)
}

for _, block := range blocks[1:] {
chunk.Blocks = append(chunk.Blocks, block)
for i, block := range blocks {
totalTxGasUsed += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas = chunk.EstimateL1CommitGas()

totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if err := crc.add(block.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}
crcMax := crc.max()

if totalTxGasUsed > p.maxTxGasPerChunk ||
totalL2TxNum > p.maxL2TxNumPerChunk ||
if totalL2TxNum > p.maxL2TxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) ||
crc.max() > p.maxRowConsumptionPerChunk {
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // remove the last block from chunk
break
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
crcMax > p.maxRowConsumptionPerChunk {
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if i == 0 {
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
block.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}

if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
block.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}

if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
block.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}

if crcMax > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
block.Header.Number,
crc,
crcMax,
p.maxRowConsumptionPerChunk,
)
}
}

log.Debug("breaking limit condition in chunking",
"totalL2TxNum", totalL2TxNum,
"maxL2TxNumPerChunk", p.maxL2TxNumPerChunk,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
"chunkRowConsumptionMax", crcMax,
"chunkRowConsumption", crc,
"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)

p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crcMax))
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
}
chunk.Blocks = append(chunk.Blocks, block)
}

p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))

var hasBlockTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
@@ -286,17 +283,17 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"block timestamp", blocks[0].Header.Time,
"block outdated time threshold", currentTimeSec,
)
p.chunkBlockTimeoutReached.Inc()
hasBlockTimeout = true
p.chunkFirstBlockTimeoutReached.Inc()
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crc.max()))
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
}

if !hasBlockTimeout && totalL1CommitCalldataSize < p.minL1CommitCalldataSizePerChunk {
log.Warn("The calldata size of the chunk is less than the minimum limit",
"totalL1CommitCalldataSize", totalL1CommitCalldataSize,
"minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
)
p.chunkBlocksSuperposeNotEnoughTotal.Inc()
return nil, nil
}
return chunk, nil
log.Debug("pending blocks do not reach one of the constraints or contain a timeout block")
p.chunkBlocksProposeNotEnoughTotal.Inc()
return nil, nil
}
@@ -23,11 +23,9 @@ func testChunkProposer(t *testing.T) {
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
@@ -55,11 +53,9 @@ func testChunkProposerRowConsumption(t *testing.T) {
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 0, // !
ChunkTimeoutSec: 300,
}, db, nil)
@@ -311,7 +311,7 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified:
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
@@ -201,7 +201,7 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified:
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
@@ -58,11 +58,9 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NoError(t, err)

cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
@@ -77,7 +75,6 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db, nil)
bp.TryProposeBatch()
28  common/libzkp/impl/Cargo.lock  (generated)
@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
dependencies = [
"ark-std",
"env_logger 0.10.0",
@@ -433,7 +433,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
dependencies = [
"eth-types",
"ethers-core",
@@ -1049,7 +1049,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
dependencies = [
"ethers-core",
"ethers-signers",
@@ -1226,7 +1226,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
dependencies = [
"eth-types",
"geth-utils",
@@ -1439,7 +1439,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
dependencies = [
"digest 0.7.6",
"eth-types",
@@ -1479,7 +1479,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -2077,7 +2077,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
dependencies = [
"env_logger 0.9.3",
"eth-types",
@@ -2264,7 +2264,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
dependencies = [
"eth-types",
"ethers-core",
@@ -2279,7 +2279,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
dependencies = [
"bus-mapping",
"eth-types",
@@ -2754,8 +2754,8 @@ dependencies = [

[[package]]
name = "prover"
version = "0.6.4"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.5#ccb3cd457080dcfdf8ae59416eb47ae9b6b6ddd6"
version = "0.6.6"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.6#282ea596fd6c7a113cfb3a3ef3bf3f6d5f1b0768"
dependencies = [
"aggregator",
"anyhow",
@@ -4039,8 +4039,8 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"

[[package]]
name = "types"
version = "0.6.4"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.5#ccb3cd457080dcfdf8ae59416eb47ae9b6b6ddd6"
version = "0.6.6"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.6#282ea596fd6c7a113cfb3a3ef3bf3f6d5f1b0768"
dependencies = [
"base64 0.13.1",
"blake2",
@@ -4491,7 +4491,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
dependencies = [
"array-init",
"bus-mapping",
@@ -24,8 +24,8 @@ snark-verifier = { git = "https://github.com/scroll-tech//snark-verifier", tag =
snark-verifier-sdk = { git = "https://github.com/scroll-tech//snark-verifier", tag = "v0.1.1" }

[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.5" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.5" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.6" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.6" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }

log = "0.4"
@@ -10,6 +10,14 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
)

// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
const CalldataNonZeroByteGas = 16

// GetKeccak256Gas calculates keccak256 hash gas.
func GetKeccak256Gas(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}

// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
@@ -94,10 +102,6 @@ func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {

// EstimateL1CommitGas calculates the total L1 commit gas for this block approximately.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}

var total uint64
var numL1Messages uint64
for _, txData := range w.Transactions {
@@ -120,9 +124,9 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
})
rlpTxData, _ := tx.MarshalBinary()
txPayloadLength := uint64(len(rlpTxData))
total += 16 * txPayloadLength // an over-estimate: treat each byte as non-zero
total += 16 * 4 // size of a uint32 field
total += getKeccakGas(txPayloadLength) // l2 tx hash
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
total += CalldataNonZeroByteGas * 4 // size of a uint32 field
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
}

// sload
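With the two helpers introduced above (CalldataNonZeroByteGas and GetKeccak256Gas), the per-transaction part of EstimateL1CommitGas reduces to three terms. The sketch below evaluates them for a hypothetical RLP payload length; in the real code the length comes from tx.MarshalBinary().

```go
package main

import "fmt"

const CalldataNonZeroByteGas = 16

// GetKeccak256Gas reproduces the helper from the hunk above.
func GetKeccak256Gas(size uint64) uint64 {
	return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}

func main() {
	txPayloadLength := uint64(180) // hypothetical RLP-encoded tx size in bytes

	var total uint64
	total += CalldataNonZeroByteGas * txPayloadLength // over-estimate: every byte priced as non-zero
	total += CalldataNonZeroByteGas * 4               // the uint32 size field
	total += GetKeccak256Gas(txPayloadLength)         // hashing the L2 tx
	fmt.Println("per-tx commit gas estimate:", total)
}
```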
@@ -146,14 +146,10 @@ func (c *Chunk) EstimateL1CommitGas() uint64 {
}

numBlocks := uint64(len(c.Blocks))
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += 16 // numBlocks field of chunk encoding in calldata
totalL1CommitGas += 16 * 60 * numBlocks // BlockContext in chunk
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk

getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}

totalL1CommitGas += getKeccakGas(58*numBlocks + 32*totalTxNum) // chunk hash
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
return totalL1CommitGas
}
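The chunk-level estimate in this hunk adds a fixed set of per-block terms on top of the per-transaction ones: one warm sload per block, one calldata byte for the numBlocks field, 60 calldata bytes of BlockContext per block, and the keccak cost of the chunk hash preimage (58 bytes per block plus 32 per transaction). A small restatement with hypothetical counts, not a value from the repository:

```go
package main

import "fmt"

const CalldataNonZeroByteGas = 16

func GetKeccak256Gas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

// chunkStaticCommitGas restates the block-level terms added in the hunk above.
func chunkStaticCommitGas(numBlocks, totalTxNum uint64) uint64 {
	var gas uint64
	gas += 100 * numBlocks                               // numBlocks warm sloads
	gas += CalldataNonZeroByteGas                        // numBlocks field of the chunk encoding
	gas += CalldataNonZeroByteGas * numBlocks * 60       // BlockContext per block
	gas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
	return gas
}

func main() {
	fmt.Println(chunkStaticCommitGas(10, 250)) // hypothetical chunk: 10 blocks, 250 txs
}
```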
@@ -126,8 +126,8 @@ const (
ProvingTaskUnassigned
// ProvingTaskAssigned : proving_task is assigned to be proved
ProvingTaskAssigned
// ProvingTaskProved DEPRECATED: proof has been returned by prover
ProvingTaskProved
// ProvingTaskProvedDEPRECATED DEPRECATED: proof has been returned by prover
ProvingTaskProvedDEPRECATED
// ProvingTaskVerified : proof is valid
ProvingTaskVerified
// ProvingTaskFailed : fail to generate proof
@@ -140,7 +140,7 @@ func (ps ProvingStatus) String() string {
return "unassigned"
case ProvingTaskAssigned:
return "assigned"
case ProvingTaskProved:
case ProvingTaskProvedDEPRECATED:
return "proved"
case ProvingTaskVerified:
return "verified"
@@ -58,8 +58,8 @@ func TestProvingStatus(t *testing.T) {
"assigned",
},
{
"ProvingTaskProved",
ProvingTaskProved,
"ProvingTaskProvedDEPRECATED",
ProvingTaskProvedDEPRECATED,
"proved",
},
{
@@ -6,7 +6,7 @@ import (
"strings"
)

var tag = "v4.1.77"
var tag = "v4.1.89"

var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -44,5 +44,7 @@ func CheckScrollProverVersion(proverVersion string) bool {
return false
}
// compare the `scroll_prover` version
return remote[2] == local[2]
return remote[2] == local[2] || // libzkp v0.6.6
remote[2] == "ccb3cd4" || // libzkp v0.6.5
remote[2] == "8c439b1" // libzkp v0.6.2
}
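For orientation, the hunk above widens the compatibility check so that provers built against the two previous libzkp commits are still accepted. The sketch below is an assumption-laden restatement: it assumes the version string is dash-separated and that field index 2 carries the scroll_prover commit, which is what the remote[2]/local[2] indexing implies; the exact version layout is not shown in this diff, and the example input strings are invented.

```go
package main

import (
	"fmt"
	"strings"
)

// proverCompatible mirrors the acceptance rule in the hunk, under the stated
// assumptions about the version string layout.
func proverCompatible(remoteVersion, localVersion string) bool {
	remote := strings.Split(remoteVersion, "-")
	local := strings.Split(localVersion, "-")
	if len(remote) < 3 || len(local) < 3 {
		return false
	}
	return remote[2] == local[2] || // same libzkp as the coordinator (v0.6.6)
		remote[2] == "ccb3cd4" || // libzkp v0.6.5 still accepted
		remote[2] == "8c439b1" // libzkp v0.6.2 still accepted
}

func main() {
	// Illustrative inputs only; real strings come from the version package.
	fmt.Println(proverCompatible("v4.1.89-aaaaaaa-ccb3cd4-x", "v4.1.89-bbbbbbb-282ea59-y"))
}
```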
@@ -128,7 +128,7 @@ contract L2USDCGateway is L2ERC20Gateway {
require(_token == l2USDC, "only USDC is allowed");
require(!withdrawPaused, "withdraw paused");

// 1. Extract real sender if this call is from L1GatewayRouter.
// 1. Extract real sender if this call is from L2GatewayRouter.
address _from = msg.sender;
if (router == msg.sender) {
(_from, _data) = abi.decode(_data, (address, bytes));
@@ -149,7 +149,7 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
// here not update the block batch proving status failed, because the collector loop will check
// the attempt times. if reach the times, the collector will set the block batch proving status.
for _, assignedProverTask := range assignedProverTasks {
if c.proverTaskOrm.TaskTimeoutMoreThanOnce(c.ctx, assignedProverTask.TaskID) {
if c.proverTaskOrm.TaskTimeoutMoreThanOnce(c.ctx, message.ProofType(assignedProverTask.TaskType), assignedProverTask.TaskID) {
log.Warn("Task timeout more than once", "taskType", message.ProofType(assignedProverTask.TaskType).String(), "hash", assignedProverTask.TaskID)
}
@@ -69,7 +69,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("get prover version from context failed")
}
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", proverVersion.(string), version.Version)
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}

isAssigned, err := bp.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
@@ -5,6 +5,7 @@ import (
"encoding/json"
"errors"
"fmt"
"strings"
"time"

"github.com/gin-gonic/gin"
@@ -132,7 +133,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
return fmt.Errorf("get ProverVersion from context failed")
}

proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndProver(ctx, proofMsg.ID, pk, pv)
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndProver(ctx, proofMsg.Type, proofMsg.ID, pk, pv)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
@@ -231,12 +232,19 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
proofTimeSec := uint64(proofTime.Seconds())

if proofMsg.Status != message.StatusOk {
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
// Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)
// Verify if the proving task has already been assigned to another prover.
// Upon receiving an error message, it's possible the proving status has been reset by another prover
// and the task has been reassigned. In this case, the coordinator should avoid resetting the proving status.
m.processProverErr(ctx, proofMsg.ID, pk, proofMsg.Type)

m.validateFailureProverTaskStatusNotOk.Inc()

log.Info("proof generated by prover failed",
"taskType", proofMsg.Type, "hash", proofMsg.ID,
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
"proverPublicKey", pk, "failureType", proofParameter.FailureType, "failureMessage", proofParameter.FailureMsg)
"taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
"proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
"failureMessage", failureMsg)
return ErrValidatorFailureProofMsgStatusNotOk
}
@@ -264,15 +272,6 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
return nil
}

//func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
// log.Info("proof failure update proof status", "hash", hash, "public key", pubKey,
// "proof type", proofMsg.Type.String(), "status", types.ProvingTaskFailed.String())
//
// if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskFailed, 0); err != nil {
// log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
// }
//}

func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", hash, "proverPublicKey", pubKey,
"taskType", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())
@@ -377,6 +376,33 @@ func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string
return provingStatus == types.ProvingTaskVerified
}

func (m *ProofReceiverLogic) processProverErr(ctx context.Context, taskID, pk string, taskType message.ProofType) {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, taskType, taskID, pk, types.ProverProofInvalid); updateErr != nil {
log.Error("update prover task proving status failure", "taskID", taskID, "proverPublicKey", pk, "taskType", taskType, "error", updateErr)
}

proverTasks, err := m.proverTaskOrm.GetValidOrAssignedTaskOfOtherProvers(ctx, taskType, taskID, pk)
if err != nil {
log.Warn("checkIsAssignedToOtherProver failure", "taskID", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
return
}

if len(proverTasks) > 0 {
return
}

switch taskType {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatusFromProverError(ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update chunk proving_status as failed", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
}
case message.ProofTypeBatch:
if err := m.batchOrm.UpdateProvingStatusFromProverError(ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update batch proving_status as failed", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
}
}
}

func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, pk string, proofMsg *message.ProofMsg) error {
// store the proof to prover task
var proofBytes []byte
@@ -91,11 +91,11 @@ func (o *Batch) GetUnassignedBatches(ctx context.Context, limit int) ([]*Batch,
return batches, nil
}

// GetAssignedBatches retrieves all batches whose proving_status is either types.ProvingTaskAssigned or types.ProvingTaskProved.
// GetAssignedBatches retrieves all batches whose proving_status is types.ProvingTaskAssigned.
func (o *Batch) GetAssignedBatches(ctx context.Context) ([]*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("proving_status IN (?)", []int{int(types.ProvingTaskAssigned), int(types.ProvingTaskProved)})
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))

var assignedBatches []*Batch
if err := db.Find(&assignedBatches).Error; err != nil {
@@ -236,7 +236,7 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified:
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
@@ -250,6 +250,30 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
return nil
}

// UpdateProvingStatusFromProverError updates batch proving status when prover prove failed
func (o *Batch) UpdateProvingStatusFromProverError(ctx context.Context, hash string, status types.ProvingStatus) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)

switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}

db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash).Where("proving_status", types.ProvingTaskAssigned)

if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatusOptimistic error: %w, batch hash: %v, status: %v", err, hash, status.String())
}
return nil
}

// UpdateProofByHash updates the batch proof by hash.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.BatchProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
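The new UpdateProvingStatusFromProverError differs from the plain UpdateProvingStatus above in one respect: the WHERE clause also pins proving_status to ProvingTaskAssigned, so the reset only lands if no other prover has already moved the task forward. A minimal standalone sketch of that guarded-update pattern with GORM, using a stub model and a stand-in status constant rather than the repository's types:

```go
package ormsketch

import "gorm.io/gorm"

// Batch is a stub with only the columns the guarded update touches; the real
// model lives in the repository's orm package.
type Batch struct {
	Hash          string
	ProvingStatus int
}

// provingTaskAssigned is a stand-in for types.ProvingTaskAssigned; the real
// value is defined in common/types.
const provingTaskAssigned = 2

// updateStatusIfStillAssigned mirrors the guarded update in the hunk above:
// the row is only updated while it is still in the "assigned" state, so a
// late prover error cannot overwrite a status another prover has advanced.
func updateStatusIfStillAssigned(db *gorm.DB, hash string, fields map[string]interface{}) error {
	return db.Model(&Batch{}).
		Where("hash = ?", hash).
		Where("proving_status = ?", provingTaskAssigned).
		Updates(fields).Error
}
```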
@@ -155,11 +155,11 @@ func (o *Chunk) GetProvingStatusByHash(ctx context.Context, hash string) (types.
return types.ProvingStatus(chunk.ProvingStatus), nil
}

// GetAssignedChunks retrieves all chunks whose proving_status is either types.ProvingTaskAssigned or types.ProvingTaskProved.
// GetAssignedChunks retrieves all chunks whose proving_status is types.ProvingTaskAssigned.
func (o *Chunk) GetAssignedChunks(ctx context.Context) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("proving_status IN (?)", []int{int(types.ProvingTaskAssigned), int(types.ProvingTaskProved)})
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))

var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
@@ -285,7 +285,7 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified:
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db
@@ -302,6 +302,29 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
return nil
}

// UpdateProvingStatusFromProverError updates the chunk proving status when a prover reports a proving failure
func (o *Chunk) UpdateProvingStatusFromProverError(ctx context.Context, hash string, status types.ProvingStatus) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)

switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash).Where("proving_status", types.ProvingTaskAssigned)

if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatusOptimistic error: %w, chunk hash: %v, status: %v", err, hash, status.String())
}
return nil
}
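GORM's Updates call reports success even when the status guard filters out every row. If a caller needs to know whether the transition actually happened, it can check RowsAffected on the result; the variant below is a sketch under that assumption and is not part of this change.

// Sketch only: the same guarded update, but surfacing whether a row was actually transitioned.
result := o.db.WithContext(ctx).
	Model(&Chunk{}).
	Where("hash", hash).
	Where("proving_status", types.ProvingTaskAssigned).
	Updates(updateFields)
if result.Error != nil {
	return result.Error
}
if result.RowsAffected == 0 {
	log.Warn("chunk proving status unchanged; it was no longer in the assigned state", "hash", hash)
}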

// UpdateProofByHash updates the chunk proof by hash.
func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *message.ChunkProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
@@ -12,6 +12,7 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"

"scroll-tech/database/migrate"
@@ -67,6 +68,7 @@ func TestProverTaskOrm(t *testing.T) {
reward.SetString("18446744073709551616", 10) // 1 << 64, uint64 maximum 1<<64 -1

proverTask := ProverTask{
TaskType: int16(message.ProofTypeChunk),
TaskID: "test-hash",
ProverName: "prover-0",
ProverPublicKey: "0",
@@ -77,7 +79,7 @@ func TestProverTaskOrm(t *testing.T) {

err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasks, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{"test-hash"})
proverTasks, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasks))
assert.Equal(t, proverTask.ProverName, proverTasks[0].ProverName)
@@ -91,7 +93,7 @@ func TestProverTaskOrm(t *testing.T) {
proverTask.AssignedAt = utils.NowUTC()
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasks, err = proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{"test-hash"})
proverTasks, err = proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasks))
assert.Equal(t, proverTask.ProvingStatus, proverTasks[0].ProvingStatus)
@@ -106,6 +108,7 @@ func TestProverTaskOrmUint256(t *testing.T) {
rewardUint256 := big.NewInt(0)
rewardUint256.SetString("115792089237316195423570985008687907853269984665640564039457584007913129639935", 10)
proverTask := ProverTask{
TaskType: int16(message.ProofTypeChunk),
TaskID: "test-hash",
ProverName: "prover-0",
ProverPublicKey: "0",
@@ -116,7 +119,7 @@ func TestProverTaskOrmUint256(t *testing.T) {

err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasksUint256, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{"test-hash"})
proverTasksUint256, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasksUint256))
resultRewardUint256 := proverTasksUint256[0].Reward.BigInt()
@@ -96,13 +96,14 @@ func (o *ProverTask) GetProverTasks(ctx context.Context, fields map[string]inter

// GetProverTasksByHashes retrieves the ProverTask records associated with the specified hashes.
// The returned prover task objects are sorted in ascending order by their ids.
func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string) ([]*ProverTask, error) {
func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, taskType message.ProofType, hashes []string) ([]*ProverTask, error) {
if len(hashes) == 0 {
return nil, nil
}

db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id IN ?", hashes)
db = db.Order("id asc")
@@ -114,9 +115,10 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string
}
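Outside the tests, the extra taskType argument scopes the lookup to a single proof type, so chunk and batch prover tasks can no longer be mixed in one result set. A hedged usage sketch, with the surrounding variable names assumed for illustration:

// Sketch: list the chunk prover tasks recorded for a set of chunk hashes.
tasks, err := proverTaskOrm.GetProverTasksByHashes(ctx, message.ProofTypeChunk, chunkHashes)
if err != nil {
	return fmt.Errorf("query prover tasks: %w", err)
}
for _, task := range tasks {
	log.Info("prover task", "task_id", task.TaskID, "prover", task.ProverName, "status", task.ProvingStatus)
}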

// GetProverTaskByTaskIDAndProver gets the prover task by task ID, prover public key, and prover version
func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", taskID)
db = db.Where("prover_public_key", proverPublicKey)
db = db.Where("prover_version", proverVersion)
@@ -129,11 +131,28 @@ func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskID,
return &proverTask, nil
}

// GetValidOrAssignedTaskOfOtherProvers gets the chunk/batch tasks of the same task ID that other provers are still assigned to or have already proved valid
func (o *ProverTask) GetValidOrAssignedTaskOfOtherProvers(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey string) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", taskID)
db = db.Where("prover_public_key != ?", proverPublicKey)
db = db.Where("proving_status in (?)", []int{int(types.ProverAssigned), int(types.ProverProofValid)})

var proverTasks []ProverTask
if err := db.Find(&proverTasks).Error; err != nil {
return nil, fmt.Errorf("ProverTask.GetAssignedProverTask error: %w, taskID: %v", err, taskID)
}
return proverTasks, nil
}
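This query gives the coordinator a way to check, before releasing or reassigning a task after one prover's failure, whether another prover still holds the same task or has already produced a valid proof for it. A caller sketch follows; the decision logic and variable names are illustrative assumptions, not taken from this diff.

// Sketch: only put the chunk back into the unassigned pool when no other prover
// is still assigned to it or has already proved it.
others, err := proverTaskOrm.GetValidOrAssignedTaskOfOtherProvers(ctx, message.ProofTypeChunk, taskID, failingProverPublicKey)
if err != nil {
	return fmt.Errorf("check other provers: %w", err)
}
if len(others) == 0 {
	if err := chunkOrm.UpdateProvingStatusFromProverError(ctx, taskID, types.ProvingTaskUnassigned); err != nil {
		return err
	}
}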

// GetProvingStatusByTaskID retrieves the proving status of a prover task
func (o *ProverTask) GetProvingStatusByTaskID(ctx context.Context, taskID string) (types.ProverProveStatus, error) {
func (o *ProverTask) GetProvingStatusByTaskID(ctx context.Context, taskType message.ProofType, taskID string) (types.ProverProveStatus, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Select("proving_status")
db = db.Where("task_type", int(taskType))
db = db.Where("task_id = ?", taskID)

var proverTask ProverTask
@@ -161,9 +180,10 @@ func (o *ProverTask) GetTimeoutAssignedProverTasks(ctx context.Context, limit in
}

// TaskTimeoutMoreThanOnce reports whether the task has already timed out more than once (a temporary design)
func (o *ProverTask) TaskTimeoutMoreThanOnce(ctx context.Context, taskID string) bool {
func (o *ProverTask) TaskTimeoutMoreThanOnce(ctx context.Context, taskType message.ProofType, taskID string) bool {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", taskID)
db = db.Where("proving_status", int(types.ProverProofInvalid))
@@ -386,9 +386,9 @@ func testProofGeneratedFailed(t *testing.T) {
return
}

chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), dbChunk.Hash)
chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeChunk, dbChunk.Hash)
assert.NoError(t, err)
batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), batch.Hash)
batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeBatch, batch.Hash)
assert.NoError(t, err)
if chunkProverTaskProvingStatus == types.ProverProofInvalid && batchProverTaskProvingStatus == types.ProverProofInvalid {
return
@@ -39,7 +39,7 @@ create table chunk
);

comment
on column chunk.proving_status is 'undefined, unassigned, assigned, proved, verified, failed';
on column chunk.proving_status is 'undefined, unassigned, assigned, proved (deprecated), verified, failed';

create unique index chunk_index_uindex
on chunk (index) where deleted_at IS NULL;
@@ -50,7 +50,7 @@ comment
on column batch.chunk_proofs_status is 'undefined, pending, ready';

comment
on column batch.proving_status is 'undefined, unassigned, assigned, proved, verified, failed';
on column batch.proving_status is 'undefined, unassigned, assigned, proved (deprecated), verified, failed';

comment
on column batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, commit_failed, finalize_failed';
@@ -37,8 +37,11 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
SetBaseURL(cfg.BaseURL).
AddRetryCondition(func(r *resty.Response, _ error) bool {
// Check for HTTP 5xx errors, e.g., coordinator is restarting.
log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
return r.StatusCode() >= http.StatusInternalServerError
if r.StatusCode() >= http.StatusInternalServerError {
log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
return true
}
return false
})
|