Compare commits


5 Commits

Author     SHA1        Message                                      Date
georgehao  2764018553  feat: update                                 2023-08-18 16:46:24 +08:00
georgehao  4180105c45  feat: lint                                   2023-08-18 16:28:14 +08:00
georgehao  0292a772fd  chore: auto version bump [bot]               2023-08-18 08:24:13 +00:00
georgehao  d2d75d8ee9  Merge branch 'develop' into fix/prover_bug   2023-08-18 16:23:55 +08:00
georgehao  fca6fc1579  feat: fix bug                                2023-08-18 16:21:39 +08:00
89 changed files with 468 additions and 2042 deletions

View File

@@ -43,6 +43,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Lint
working-directory: 'bridge'
run: |
@@ -92,6 +94,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -15,8 +15,6 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
ref: ${{ github.head_ref }}
- name: check diff
id: check_diff
run: |
@@ -51,11 +49,7 @@ jobs:
- name: bump version in common/version/version.go
if: steps.check_diff.outputs.result == 'bump'
run: node .github/scripts/bump_version_dot_go.mjs
# Commits made by this Action do not trigger new Workflow runs
- uses: stefanzweifel/git-auto-commit-action@3ea6ae190baf489ba007f7c92608f33ce20ef04a
if: steps.check_diff.outputs.result == 'bump'
with:
skip_fetch: true # already did fetch in check diff
file_pattern: "common/version/version.go"
commit_message: "chore: auto version bump[bot]"

View File

@@ -88,6 +88,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -104,6 +104,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -81,6 +81,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -33,6 +33,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -107,7 +107,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk)
go utils.Loop(subCtx, 10*time.Second, batchProposer.TryProposeBatch)
go utils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)
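For context, a minimal sketch of what a polling helper with the call shape `utils.Loop(ctx, period, fn)` presumably does; the actual implementation is not part of this diff, so everything below is an assumption drawn only from the call sites above.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Loop calls fn every period until ctx is cancelled (a plausible shape for
// utils.Loop as used above; the real helper may differ).
func Loop(ctx context.Context, period time.Duration, fn func()) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			fn()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	Loop(ctx, 10*time.Millisecond, func() { fmt.Println("tick") })
}
```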

View File

@@ -64,9 +64,11 @@
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
},
"chunk_proposer_config": {
"max_tx_gas_per_chunk": 1123456,
"max_l2_tx_num_per_chunk": 1123,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"min_l1_commit_calldata_size_per_chunk": 11234,
"chunk_timeout_sec": 300,
"max_row_consumption_per_chunk": 1048319,
"gas_cost_increase_multiplier": 1.2
@@ -75,6 +77,7 @@
"max_chunk_num_per_batch": 112,
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"min_chunk_num_per_batch": 11,
"batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
}

View File

@@ -28,9 +28,11 @@ type L2Config struct {
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
MaxTxGasPerChunk uint64 `json:"max_tx_gas_per_chunk"`
MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
@@ -41,6 +43,7 @@ type BatchProposerConfig struct {
MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
}

View File

@@ -226,7 +226,7 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
case cfm := <-r.gasOracleSender.ConfirmChan():
r.metrics.bridgeL1GasOraclerConfirmedTotal.Inc()
r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())

View File

@@ -430,6 +430,10 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
// The proof for this block is not ready yet.
return
case types.ProvingTaskProved:
// It's an intermediate state. The prover manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()

View File

@@ -346,15 +346,8 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
txInfo := map[string]interface{}{
"tx_hash": tx.Hash().String(),
"tx_type": s.config.TxType,
"from": auth.From.String(),
}
switch s.config.TxType {
case LegacyTxType, AccessListTxType: // `LegacyTxType`is for ganache mock node
originalGasPrice := feeData.gasPrice
gasPrice := escalateMultipleNum.Mul(escalateMultipleNum, big.NewInt(feeData.gasPrice.Int64()))
gasPrice = gasPrice.Div(gasPrice, escalateMultipleDen)
if gasPrice.Cmp(feeData.gasPrice) < 0 {
@@ -364,13 +357,7 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
gasPrice = maxGasPrice
}
feeData.gasPrice = gasPrice
txInfo["original_gas_price"] = originalGasPrice
txInfo["adjusted_gas_price"] = gasPrice
default:
originalGasTipCap := big.NewInt(feeData.gasTipCap.Int64())
originalGasFeeCap := big.NewInt(feeData.gasFeeCap.Int64())
gasTipCap := big.NewInt(feeData.gasTipCap.Int64())
gasTipCap = gasTipCap.Mul(gasTipCap, escalateMultipleNum)
gasTipCap = gasTipCap.Div(gasTipCap, escalateMultipleDen)
@@ -402,15 +389,8 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
txInfo["original_gas_tip_cap"] = originalGasTipCap
txInfo["adjusted_gas_tip_cap"] = gasTipCap
txInfo["original_gas_fee_cap"] = originalGasFeeCap
txInfo["adjusted_gas_fee_cap"] = gasFeeCap
}
log.Debug("Transaction gas adjustment details", txInfo)
nonce := tx.Nonce()
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
@@ -449,12 +429,6 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
}
}
} else if s.config.EscalateBlocks+pending.submitAt < number {
log.Debug("resubmit transaction",
"tx hash", pending.tx.Hash().String(),
"submit block number", pending.submitAt,
"current block number", number,
"escalateBlocks", s.config.EscalateBlocks)
var tx *types.Transaction
tx, err := s.resubmitTransaction(pending.feeData, pending.signer, pending.tx)
if err != nil {
@@ -511,8 +485,8 @@ func (s *Sender) checkBalance(ctx context.Context) error {
}
if bls.Cmp(s.minBalance) < 0 {
return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s, address: %s",
bls.String(), s.minBalance.String(), s.auth.From.String())
return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s",
bls.String(), s.minBalance.String())
}
return nil
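The hunks above remove the gas-adjustment logging but keep the escalation arithmetic. A standalone sketch of that arithmetic, assuming illustrative numerator/denominator and cap values (the sketch is not the sender's own code):

```go
package main

import (
	"fmt"
	"math/big"
)

// escalate follows the pattern in resubmitTransaction: new = old * num / den,
// never lower than the old price and never above the configured maximum.
func escalate(old, num, den, max *big.Int) *big.Int {
	next := new(big.Int).Mul(old, num)
	next.Div(next, den)
	if next.Cmp(old) < 0 {
		next.Set(old)
	}
	if next.Cmp(max) > 0 {
		next.Set(max)
	}
	return next
}

func main() {
	// e.g. EscalateMultipleNum=11, EscalateMultipleDen=10 -> +10% per resubmission
	old := big.NewInt(2_000_000_000) // 2 gwei
	fmt.Println(escalate(old, big.NewInt(11), big.NewInt(10), big.NewInt(100_000_000_000)))
	// Output: 2200000000
}
```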

View File

@@ -28,6 +28,7 @@ type BatchProposer struct {
maxChunkNumPerBatch uint64
maxL1CommitGasPerBatch uint64
maxL1CommitCalldataSizePerBatch uint32
minChunkNumPerBatch uint64
batchTimeoutSec uint64
gasCostIncreaseMultiplier float64
@@ -38,18 +39,12 @@ type BatchProposer struct {
totalL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
batchChunksNum prometheus.Gauge
batchFirstBlockTimeoutReached prometheus.Counter
batchChunksProposeNotEnoughTotal prometheus.Counter
batchFirstChunkTimeoutReached prometheus.Counter
batchChunksSuperposeNotEnoughTotal prometheus.Counter
}
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
log.Debug("new batch proposer",
"maxChunkNumPerBatch", cfg.MaxChunkNumPerBatch,
"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
"batchTimeoutSec", cfg.BatchTimeoutSec)
return &BatchProposer{
ctx: ctx,
db: db,
@@ -59,6 +54,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch,
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
@@ -90,13 +86,13 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
Name: "bridge_propose_batch_chunks_number",
Help: "The number of chunks in the batch",
}),
batchFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_first_block_timeout_reached_total",
Help: "Total times of batch's first block timeout reached",
batchFirstChunkTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_first_chunk_timeout_reached_total",
Help: "Total times of batch's first chunk timeout reached",
}),
batchChunksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_chunks_propose_not_enough_total",
Help: "Total number of batch chunk propose not enough",
batchChunksSuperposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_chunks_superpose_not_enough_total",
Help: "Total number of batch chunk superpose not enough",
}),
}
}
@@ -157,86 +153,87 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
return nil, nil
}
var totalL1CommitCalldataSize uint32
var totalL1CommitGas uint64
var totalChunks uint64
var totalL1MessagePopped uint64
firstChunk := dbChunks[0]
totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
totalL1CommitGas := firstChunk.TotalL1CommitGas
totalChunks := uint64(1)
totalL1MessagePopped := firstChunk.TotalL1MessagesPoppedBefore + uint64(firstChunk.TotalL1MessagesPoppedInChunk)
parentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
if err != nil {
return nil, err
}
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
// Add extra gas costs
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += types.CalldataNonZeroByteGas // version in calldata
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += 16 // version in calldata
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256) // _skippedL1MessageBitmap in calldata
// adjusting gas:
// add 1 time cold sload (2100 gas) for L1MessageQueue
// add 1 time cold address access (2600 gas) for L1MessageQueue
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
totalL1CommitGas += (2100 + 2600 - 100 - 100)
totalL1CommitGas += getKeccakGas(32 * totalChunks) // batch data hash
if parentBatch != nil {
totalL1CommitGas += types.GetKeccak256Gas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
totalL1CommitGas += types.CalldataNonZeroByteGas * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
totalL1CommitGas += getKeccakGas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
totalL1CommitGas += 16 * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
}
// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
for i, chunk := range dbChunks {
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
for i, chunk := range dbChunks[1:] {
totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
totalL1CommitGas += chunk.TotalL1CommitGas
// adjust batch data hash gas cost
totalL1CommitGas -= types.GetKeccak256Gas(32 * totalChunks)
totalL1CommitGas -= getKeccakGas(32 * totalChunks)
totalChunks++
totalL1CommitGas += types.GetKeccak256Gas(32 * totalChunks)
// adjust batch header hash gas cost, batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas -= types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += getKeccakGas(32 * totalChunks)
// adjust batch header hash gas cost
totalL1CommitGas -= getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= 16 * (32 * (totalL1MessagePopped + 255) / 256)
totalL1MessagePopped += uint64(chunk.TotalL1MessagesPoppedInChunk)
totalL1CommitGas += types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
if totalChunks > p.maxChunkNumPerBatch ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if i == 0 {
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
}
log.Debug("breaking limit condition in batching",
"currentTotalChunks", totalChunks,
"maxChunkNumPerBatch", p.maxChunkNumPerBatch,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerBatch", p.maxL1CommitCalldataSizePerBatch,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
return dbChunks[:i], nil
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return dbChunks[:i+1], nil
}
}
p.batchChunksNum.Set(float64(len(dbChunks)))
var hasChunkTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
@@ -244,16 +241,18 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
"first block timestamp", dbChunks[0].StartBlockTime,
"chunk outdated time threshold", currentTimeSec,
)
p.batchFirstBlockTimeoutReached.Inc()
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
return dbChunks, nil
hasChunkTimeout = true
p.batchFirstChunkTimeoutReached.Inc()
}
log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
p.batchChunksProposeNotEnoughTotal.Inc()
return nil, nil
if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
log.Warn("The chunk number of the batch is less than the minimum limit",
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
)
p.batchChunksSuperposeNotEnoughTotal.Inc()
return nil, nil
}
return dbChunks, nil
}
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
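For reference, a standalone sketch (not the proposer's code) of the fixed commitBatch overhead this hunk adds on top of the summed per-chunk estimates. The inputs in the example — one chunk, 10 popped L1 messages, an 89-byte parent batch header — are made up for illustration.

```go
package main

import "fmt"

// keccakGas is the 30 + 6*ceil(size/32) formula used throughout this diff.
func keccakGas(size uint64) uint64 { return 30 + 6*((size+31)/32) }

// batchOverheadGas mirrors the constant terms added in proposeBatchChunks:
// cold sloads, one sstore, the version byte, the skipped-L1-message bitmap,
// the L1MessageQueue access adjustment, the batch data hash, the parent batch
// header (hash + calldata), and the batch header hash.
func batchOverheadGas(totalChunks, totalL1MessagePopped, parentHeaderLen uint64) uint64 {
	bitmapBytes := 32 * ((totalL1MessagePopped + 255) / 256)
	gas := uint64(4*2100) + 20000 + 16                      // 4 cold sloads, 1 sstore, version byte
	gas += 16 * bitmapBytes                                 // _skippedL1MessageBitmap in calldata
	gas += 2100 + 2600 - 100 - 100                          // L1MessageQueue cold access adjustment
	gas += keccakGas(32 * totalChunks)                      // batch data hash
	gas += keccakGas(parentHeaderLen) + 16*parentHeaderLen  // parent batch header
	gas += keccakGas(89 + bitmapBytes)                      // batch header hash
	return gas
}

func main() {
	// 1 chunk, 10 popped L1 messages, 89-byte parent header -> 34990 gas
	fmt.Println(batchOverheadGas(1, 10, 89))
}
```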

View File

@@ -23,9 +23,11 @@ func testBatchProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
@@ -35,6 +37,7 @@ func testBatchProposer(t *testing.T) {
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db, nil)
bp.TryProposeBatch()

View File

@@ -51,9 +51,11 @@ type ChunkProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxTxGasPerChunk uint64
maxL2TxNumPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
minL1CommitCalldataSizePerChunk uint64
maxRowConsumptionPerChunk uint64
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
@@ -68,27 +70,22 @@ type ChunkProposer struct {
totalTxGasUsed prometheus.Gauge
maxTxConsumption prometheus.Gauge
chunkBlocksNum prometheus.Gauge
chunkFirstBlockTimeoutReached prometheus.Counter
chunkBlocksProposeNotEnoughTotal prometheus.Counter
chunkBlockTimeoutReached prometheus.Counter
chunkBlocksSuperposeNotEnoughTotal prometheus.Counter
}
// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
log.Debug("new chunk proposer",
"maxL2TxNumPerChunk", cfg.MaxL2TxNumPerChunk,
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
"chunkTimeoutSec", cfg.ChunkTimeoutSec)
return &ChunkProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxTxGasPerChunk: cfg.MaxTxGasPerChunk,
maxL2TxNumPerChunk: cfg.MaxL2TxNumPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
@@ -133,13 +130,13 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
Name: "bridge_propose_chunk_chunk_block_number",
Help: "The number of blocks in the chunk",
}),
chunkFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
chunkBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_first_block_timeout_reached_total",
Help: "Total times of chunk's first block timeout reached",
}),
chunkBlocksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_blocks_propose_not_enough_total",
Help: "Total number of chunk block propose not enough",
chunkBlocksSuperposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_blocks_superpose_not_enough_total",
Help: "Total number of chunk block superpose not enough",
}),
}
}
@@ -191,91 +188,97 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
return nil, nil
}
var chunk types.Chunk
var totalTxGasUsed uint64
var totalL2TxNum uint64
var totalL1CommitCalldataSize uint64
var totalL1CommitGas uint64
chunk := &types.Chunk{Blocks: blocks[:1]}
firstBlock := chunk.Blocks[0]
totalTxGasUsed := firstBlock.Header.GasUsed
totalL2TxNum := firstBlock.L2TxsNum()
totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
crc := chunkRowConsumption{}
totalL1CommitGas := chunk.EstimateL1CommitGas()
for i, block := range blocks {
if err := crc.add(firstBlock.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}
p.chunkL2TxNum.Set(float64(totalL2TxNum))
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
firstBlock.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
firstBlock.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
firstBlock.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
// Check if the first block breaks any soft limits.
if totalTxGasUsed > p.maxTxGasPerChunk {
log.Warn(
"The first block in chunk exceeds l2 tx gas limit",
"block number", firstBlock.Header.Number,
"gas used", totalTxGasUsed,
"max gas limit", p.maxTxGasPerChunk,
)
}
max := crc.max()
p.maxTxConsumption.Set(float64(max))
if max > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
firstBlock.Header.Number,
crc,
max,
p.maxRowConsumptionPerChunk,
)
}
for _, block := range blocks[1:] {
chunk.Blocks = append(chunk.Blocks, block)
totalTxGasUsed += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas = chunk.EstimateL1CommitGas()
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if err := crc.add(block.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}
crcMax := crc.max()
if totalL2TxNum > p.maxL2TxNumPerChunk ||
if totalTxGasUsed > p.maxTxGasPerChunk ||
totalL2TxNum > p.maxL2TxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
crcMax > p.maxRowConsumptionPerChunk {
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if i == 0 {
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
block.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
block.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
block.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
if crcMax > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
block.Header.Number,
crc,
crcMax,
p.maxRowConsumptionPerChunk,
)
}
}
log.Debug("breaking limit condition in chunking",
"totalL2TxNum", totalL2TxNum,
"maxL2TxNumPerChunk", p.maxL2TxNumPerChunk,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
"chunkRowConsumptionMax", crcMax,
"chunkRowConsumption", crc,
"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crcMax))
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) ||
crc.max() > p.maxRowConsumptionPerChunk {
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // remove the last block from chunk
break
}
chunk.Blocks = append(chunk.Blocks, block)
}
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
var hasBlockTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
@@ -283,17 +286,17 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"block timestamp", blocks[0].Header.Time,
"block outdated time threshold", currentTimeSec,
)
p.chunkFirstBlockTimeoutReached.Inc()
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crc.max()))
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
p.chunkBlockTimeoutReached.Inc()
hasBlockTimeout = true
}
log.Debug("pending blocks do not reach one of the constraints or contain a timeout block")
p.chunkBlocksProposeNotEnoughTotal.Inc()
return nil, nil
if !hasBlockTimeout && totalL1CommitCalldataSize < p.minL1CommitCalldataSizePerChunk {
log.Warn("The calldata size of the chunk is less than the minimum limit",
"totalL1CommitCalldataSize", totalL1CommitCalldataSize,
"minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
)
p.chunkBlocksSuperposeNotEnoughTotal.Inc()
return nil, nil
}
return chunk, nil
}
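A minimal, self-contained sketch (illustrative names and limits, not the proposer's own types) of the accumulate-then-trim pattern the rewritten loop above uses: blocks are appended until a hard limit would be broken, and without a timeout the chunk is only proposed once it reaches the new minimum calldata size.

```go
package main

import "fmt"

type block struct{ gasUsed, calldataSize uint64 }

// propose greedily packs blocks, stops before the block that would break a
// limit, and (absent a timeout) refuses to propose a chunk that is still
// below the minimum calldata size -- the new "not enough" early return.
func propose(blocks []block, maxGas, maxCalldata, minCalldata uint64, timedOut bool) []block {
	var gas, calldata uint64
	var picked []block
	for _, b := range blocks {
		if gas+b.gasUsed > maxGas || calldata+b.calldataSize > maxCalldata {
			break
		}
		gas += b.gasUsed
		calldata += b.calldataSize
		picked = append(picked, b)
	}
	if !timedOut && calldata < minCalldata {
		return nil // wait for more blocks
	}
	return picked
}

func main() {
	blocks := []block{{1_000_000, 300}, {2_000_000, 500}, {8_000_000, 900}}
	fmt.Println(len(propose(blocks, 5_000_000, 10_000, 600, false))) // 2
}
```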

View File

@@ -23,9 +23,11 @@ func testChunkProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
@@ -53,9 +55,11 @@ func testChunkProposerRowConsumption(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 0, // !
ChunkTimeoutSec: 300,
}, db, nil)

View File

@@ -311,7 +311,7 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}

View File

@@ -201,7 +201,7 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}

View File

@@ -58,9 +58,11 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NoError(t, err)
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
@@ -75,6 +77,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db, nil)
bp.TryProposeBatch()

View File

@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"ark-std",
"env_logger 0.10.0",
@@ -433,7 +433,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"eth-types",
"ethers-core",
@@ -1049,7 +1049,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"ethers-core",
"ethers-signers",
@@ -1226,7 +1226,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"eth-types",
"geth-utils",
@@ -1439,7 +1439,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"digest 0.7.6",
"eth-types",
@@ -1479,7 +1479,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -1595,6 +1595,21 @@ dependencies = [
"rustc-hash",
]
[[package]]
name = "halo2-base"
version = "0.2.2"
source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
dependencies = [
"ff",
"halo2_proofs",
"itertools",
"num-bigint",
"num-integer",
"num-traits",
"rand_chacha",
"rustc-hash",
]
[[package]]
name = "halo2-ecc"
version = "0.2.2"
@@ -1602,7 +1617,26 @@ source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74
dependencies = [
"ff",
"group",
"halo2-base",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"itertools",
"num-bigint",
"num-integer",
"num-traits",
"rand",
"rand_chacha",
"rand_core",
"serde",
"serde_json",
]
[[package]]
name = "halo2-ecc"
version = "0.2.2"
source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
dependencies = [
"ff",
"group",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?branch=develop)",
"itertools",
"num-bigint",
"num-integer",
@@ -1633,7 +1667,7 @@ dependencies = [
[[package]]
name = "halo2-mpt-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?tag=v0.5.1#2163a9c436ed85363c954ecf7e6e1044a1b991dc"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=v0.5#2163a9c436ed85363c954ecf7e6e1044a1b991dc"
dependencies = [
"ethers-core",
"halo2_proofs",
@@ -1655,7 +1689,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#19de67c07a9b9b567580466763f93ebfbc3bb799"
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#b612b1e2a9fa2ccd150a6cb99e67592c8d62cd99"
dependencies = [
"ark-std",
"blake2b_simd",
@@ -2077,7 +2111,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"env_logger 0.9.3",
"eth-types",
@@ -2264,7 +2298,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"eth-types",
"ethers-core",
@@ -2279,7 +2313,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"bus-mapping",
"eth-types",
@@ -2754,8 +2788,8 @@ dependencies = [
[[package]]
name = "prover"
version = "0.7.3"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.3#1b1fb34f8662dbb11af12c8dfd72f8da5ae80422"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.2#8c439b1dd62c429223221484fb8a5470242d1cbc"
dependencies = [
"aggregator",
"anyhow",
@@ -3624,12 +3658,12 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
[[package]]
name = "snark-verifier"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/snark-verifier?tag=v0.1.2#4466059ce9a6dfaf26455e4ffb61d72af775cf52"
source = "git+https://github.com/scroll-tech//snark-verifier?tag=v0.1.1#11a09d4a37c31c659b29e2dac0ceb544a776ad7b"
dependencies = [
"bytes",
"ethereum-types 0.14.1",
"halo2-base",
"halo2-ecc",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"halo2-ecc 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"hex",
"itertools",
"lazy_static",
@@ -3648,12 +3682,12 @@ dependencies = [
[[package]]
name = "snark-verifier-sdk"
version = "0.0.1"
source = "git+https://github.com/scroll-tech/snark-verifier?tag=v0.1.2#4466059ce9a6dfaf26455e4ffb61d72af775cf52"
source = "git+https://github.com/scroll-tech//snark-verifier?tag=v0.1.1#11a09d4a37c31c659b29e2dac0ceb544a776ad7b"
dependencies = [
"bincode",
"env_logger 0.10.0",
"ethereum-types 0.14.1",
"halo2-base",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"hex",
"itertools",
"lazy_static",
@@ -4039,8 +4073,8 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.7.3"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.3#1b1fb34f8662dbb11af12c8dfd72f8da5ae80422"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.2#8c439b1dd62c429223221484fb8a5470242d1cbc"
dependencies = [
"base64 0.13.1",
"blake2",
@@ -4491,7 +4525,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"array-init",
"bus-mapping",
@@ -4501,8 +4535,8 @@ dependencies = [
"ethers-core",
"ethers-signers",
"gadgets",
"halo2-base",
"halo2-ecc",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?branch=develop)",
"halo2-ecc 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?branch=develop)",
"halo2_proofs",
"hex",
"itertools",
@@ -4536,7 +4570,6 @@ dependencies = [
name = "zkp"
version = "0.1.0"
dependencies = [
"base64 0.13.1",
"env_logger 0.9.3",
"halo2_proofs",
"libc",

View File

@@ -19,19 +19,23 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
[patch."https://github.com/privacy-scaling-explorations/halo2curves.git"]
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
[patch."https://github.com/scroll-tech/snark-verifier"]
snark-verifier = { git = "https://github.com/scroll-tech//snark-verifier", tag = "v0.1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech//snark-verifier", tag = "v0.1.1" }
[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.3" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.3" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.2" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.2" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
base64 = "0.13.0"
env_logger = "0.9.0"
libc = "0.2"
log = "0.4"
once_cell = "1.8.0"
env_logger = "0.9.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"
libc = "0.2"
once_cell = "1.8.0"
[profile.test]
opt-level = 3

View File

@@ -1,11 +1,11 @@
use crate::utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR};
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char, OUTPUT_DIR};
use libc::c_char;
use prover::{
aggregator::{Prover, Verifier},
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, ChunkHash, ChunkProof,
};
use std::{cell::OnceCell, env, panic, ptr::null};
use std::{cell::OnceCell, panic, ptr::null};
use types::eth::BlockTrace;
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -13,15 +13,11 @@ static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char, assets_dir: *const c_char) {
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char) {
init_env_and_log("ffi_batch_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a settings in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let prover = Prover::from_dirs(params_dir, assets_dir);
let prover = Prover::from_params_dir(params_dir);
PROVER.set(prover).unwrap();
}
@@ -34,35 +30,11 @@ pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_d
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a settings in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> c_char {
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs).unwrap();
assert!(!chunk_proofs.is_empty());
let valid = panic::catch_unwind(|| PROVER.get().unwrap().check_chunk_proofs(&chunk_proofs));
valid.unwrap_or(false) as c_char
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_batch_proof(

View File

@@ -1,11 +1,11 @@
use crate::utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR};
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char, OUTPUT_DIR};
use libc::c_char;
use prover::{
utils::init_env_and_log,
zkevm::{Prover, Verifier},
ChunkProof,
};
use std::{cell::OnceCell, env, panic, ptr::null};
use std::{cell::OnceCell, panic, ptr::null};
use types::eth::BlockTrace;
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -13,14 +13,10 @@ static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char, assets_dir: *const c_char) {
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char) {
init_env_and_log("ffi_chunk_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a settings in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let prover = Prover::from_params_dir(params_dir);
PROVER.set(prover).unwrap();
@@ -34,24 +30,11 @@ pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_d
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a settings in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {

View File

@@ -19,10 +19,6 @@ pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
cstr.to_bytes().to_vec()
}
pub(crate) fn string_to_c_char(string: String) -> *const c_char {
CString::new(string).unwrap().into_raw()
}
pub(crate) fn vec_to_c_char(bytes: Vec<u8>) -> *const c_char {
CString::new(bytes).unwrap().into_raw()
}

View File

@@ -1,13 +1,10 @@
void init_batch_prover(char* params_dir, char* assets_dir);
void init_batch_prover(char* params_dir);
void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof);
void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_prover(char* params_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
char* get_chunk_vk();
char* gen_chunk_proof(char* block_traces);
char verify_chunk_proof(char* proof);

View File

@@ -3,31 +3,20 @@ package types
import (
"encoding/binary"
"errors"
"fmt"
"math"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
)
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
const CalldataNonZeroByteGas = 16
// GetKeccak256Gas calculates keccak256 hash gas.
func GetKeccak256Gas(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used for recover types.Transactions, the from of types.TransactionData field is missing.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
RowConsumption *types.RowConsumption `json:"row_consumption"`
txPayloadLengthCache map[string]uint64
Transactions []*types.TransactionData `json:"transactions"`
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
RowConsumption *types.RowConsumption `json:"row_consumption"`
}
// NumL1Messages returns the number of L1 messages in this block.
@@ -98,14 +87,17 @@ func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
if txData.Type == types.L1MessageTxType {
continue
}
size += 64 // 60 bytes BlockContext + 4 bytes payload length
size += w.getTxPayloadLength(txData)
size += uint64(len(txData.Data))
}
return size
}
// EstimateL1CommitGas calculates the total L1 commit gas for this block approximately.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
var total uint64
var numL1Messages uint64
for _, txData := range w.Transactions {
@@ -114,10 +106,23 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
continue
}
txPayloadLength := w.getTxPayloadLength(txData)
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
total += CalldataNonZeroByteGas * 64 // 60 bytes BlockContext + 4 bytes payload length
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
data, _ := hexutil.Decode(txData.Data)
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
txPayloadLength := uint64(len(rlpTxData))
total += 16 * txPayloadLength // an over-estimate: treat each byte as non-zero
total += 16 * 4 // size of a uint32 field
total += getKeccakGas(txPayloadLength) // l2 tx hash
}
// sload
@@ -140,48 +145,3 @@ func (w *WrappedBlock) L2TxsNum() uint64 {
}
return count
}
func (w *WrappedBlock) getTxPayloadLength(txData *types.TransactionData) uint64 {
if w.txPayloadLengthCache == nil {
w.txPayloadLengthCache = make(map[string]uint64)
}
if length, exists := w.txPayloadLengthCache[txData.TxHash]; exists {
return length
}
rlpTxData, err := convertTxDataToRLPEncoding(txData)
if err != nil {
log.Crit("convertTxDataToRLPEncoding failed, which should not happen", "hash", txData.TxHash, "err", err)
return 0
}
txPayloadLength := uint64(len(rlpTxData))
w.txPayloadLengthCache[txData.TxHash] = txPayloadLength
return txPayloadLength
}
func convertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
data, err := hexutil.Decode(txData.Data)
if err != nil {
return nil, fmt.Errorf("failed to decode txData.Data: %s, err: %w", txData.Data, err)
}
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, err := tx.MarshalBinary()
if err != nil {
return nil, fmt.Errorf("failed to marshal binary of the tx: %+v, err: %w", tx, err)
}
return rlpTxData, nil
}
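A worked example of the per-transaction terms on the right-hand side of the EstimateL1CommitGas hunk above, under its stated over-estimate that every RLP byte is non-zero calldata; the 100-byte payload is invented for the illustration.

```go
package main

import "fmt"

func keccakGas(size uint64) uint64 { return 30 + 6*((size+31)/32) }

// perTxCommitGas: calldata for the RLP payload (16 gas/byte, all treated as
// non-zero), 4 bytes for the payload-length field, and the tx-hash keccak.
func perTxCommitGas(rlpLen uint64) uint64 {
	return 16*rlpLen + 16*4 + keccakGas(rlpLen)
}

func main() {
	fmt.Println(perTxCommitGas(100)) // 1600 + 64 + 54 = 1718
}
```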

View File

@@ -8,6 +8,7 @@ import (
"strings"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
)
@@ -64,7 +65,23 @@ func (c *Chunk) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
if txData.Type == types.L1MessageTxType {
continue
}
rlpTxData, err := convertTxDataToRLPEncoding(txData)
data, err := hexutil.Decode(txData.Data)
if err != nil {
return nil, err
}
// right now we only support legacy tx
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, err := tx.MarshalBinary()
if err != nil {
return nil, err
}
@@ -129,10 +146,14 @@ func (c *Chunk) EstimateL1CommitGas() uint64 {
}
numBlocks := uint64(len(c.Blocks))
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += 16 // numBlocks field of chunk encoding in calldata
totalL1CommitGas += 16 * 60 * numBlocks // BlockContext in chunk
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
totalL1CommitGas += getKeccakGas(58*numBlocks + 32*totalTxNum) // chunk hash
return totalL1CommitGas
}
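The chunk-level constant terms shown in this hunk, worked for an illustrative chunk of one block with two L2 transactions (the per-transaction terms from the earlier part of the function are excluded).

```go
package main

import "fmt"

func keccakGas(size uint64) uint64 { return 30 + 6*((size+31)/32) }

// chunkOverheadGas: per-block warm sloads, the numBlocks calldata field,
// the 60-byte BlockContext per block, and the chunk-hash keccak over
// 58 bytes per block plus 32 bytes per transaction.
func chunkOverheadGas(numBlocks, totalTxNum uint64) uint64 {
	return 100*numBlocks + 16 + 16*60*numBlocks + keccakGas(58*numBlocks+32*totalTxNum)
}

func main() {
	fmt.Println(chunkOverheadGas(1, 2)) // 100 + 16 + 960 + 54 = 1130
}
```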

View File

@@ -38,15 +38,11 @@ func TestChunkEncode(t *testing.T) {
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
assert.Equal(t, uint64(0), wrappedBlock.NumL1Messages(0))
assert.Equal(t, uint64(358), wrappedBlock.EstimateL1CommitCalldataSize())
assert.Equal(t, uint64(2), wrappedBlock.L2TxsNum())
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
assert.Equal(t, uint64(0), chunk.NumL1Messages(0))
assert.Equal(t, uint64(6966), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString := hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -60,15 +56,11 @@ func TestChunkEncode(t *testing.T) {
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
assert.Equal(t, uint64(11), wrappedBlock2.NumL1Messages(0)) // 0..=9 skipped, 10 included
assert.Equal(t, uint64(96), wrappedBlock2.EstimateL1CommitCalldataSize())
assert.Equal(t, uint64(1), wrappedBlock2.L2TxsNum())
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
},
}
assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
assert.Equal(t, uint64(5002), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -83,8 +75,6 @@ func TestChunkEncode(t *testing.T) {
wrappedBlock2,
},
}
assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
assert.Equal(t, uint64(9958), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -146,81 +136,3 @@ func TestChunkHash(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, "0x2eb7dd63bf8fc29a0f8c10d16c2ae6f9da446907c79d50f5c164d30dc8526b60", hash.Hex())
}
func TestErrorPaths(t *testing.T) {
// test 1: Header.Number is not a uint64
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
wrappedBlock.Header.Number = wrappedBlock.Header.Number.Lsh(wrappedBlock.Header.Number, 64)
bytes, err := wrappedBlock.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "block number is not uint64")
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
for i := 0; i < 65537; i++ {
wrappedBlock.Transactions = append(wrappedBlock.Transactions, wrappedBlock.Transactions[0])
}
bytes, err = wrappedBlock.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")
chunk := &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
bytes, err = chunk.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")
wrappedBlock.Transactions = wrappedBlock.Transactions[:1]
wrappedBlock.Transactions[0].Data = "not-a-hex"
bytes, err = chunk.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "hex string without 0x prefix")
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
wrappedBlock.Transactions[0].TxHash = "not-a-hex"
_, err = chunk.Hash(0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid byte")
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
for i := 0; i < 65535; i++ {
tx := &wrappedBlock2.Transactions[i]
txCopy := *tx
txCopy.Nonce = uint64(i + 1)
wrappedBlock2.Transactions = append(wrappedBlock2.Transactions, txCopy)
}
bytes, err = wrappedBlock2.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
},
}
bytes, err = chunk.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")
}

View File

@@ -126,8 +126,8 @@ const (
ProvingTaskUnassigned
// ProvingTaskAssigned : proving_task is assigned to be proved
ProvingTaskAssigned
// ProvingTaskProvedDEPRECATED DEPRECATED: proof has been returned by prover
ProvingTaskProvedDEPRECATED
// ProvingTaskProved DEPRECATED: proof has been returned by prover
ProvingTaskProved
// ProvingTaskVerified : proof is valid
ProvingTaskVerified
// ProvingTaskFailed : fail to generate proof
@@ -140,7 +140,7 @@ func (ps ProvingStatus) String() string {
return "unassigned"
case ProvingTaskAssigned:
return "assigned"
case ProvingTaskProvedDEPRECATED:
case ProvingTaskProved:
return "proved"
case ProvingTaskVerified:
return "verified"

View File

@@ -58,8 +58,8 @@ func TestProvingStatus(t *testing.T) {
"assigned",
},
{
"ProvingTaskProvedDEPRECATED",
ProvingTaskProvedDEPRECATED,
"ProvingTaskProved",
ProvingTaskProved,
"proved",
},
{

View File

@@ -3,11 +3,10 @@ package version
import (
"fmt"
"runtime/debug"
"strconv"
"strings"
)
var tag = "v4.1.104"
var tag = "v4.1.72"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -47,39 +46,3 @@ func CheckScrollProverVersion(proverVersion string) bool {
// compare the `scroll_prover` version
return remote[2] == local[2]
}
// CheckScrollProverVersionTag check the "scroll-prover" version's tag, if it's too old, return false
func CheckScrollProverVersionTag(proverVersion string) bool {
// note the the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
remoteTagNums := strings.Split(strings.TrimPrefix(remote[0], "v"), ".")
if len(remoteTagNums) != 3 {
return false
}
remoteTagMajor, err := strconv.Atoi(remoteTagNums[0])
if err != nil {
return false
}
remoteTagMinor, err := strconv.Atoi(remoteTagNums[1])
if err != nil {
return false
}
remoteTagPatch, err := strconv.Atoi(remoteTagNums[2])
if err != nil {
return false
}
if remoteTagMajor != 4 {
return false
}
if remoteTagMinor != 1 {
return false
}
if remoteTagPatch < 98 {
return false
}
return true
}
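An illustrative sketch of the comparison CheckScrollProverVersion performs, assuming the four-field "tag-commit-scroll_prover-halo2" format described in the deleted helper's comment; the version strings in the example are made up.

```go
package main

import (
	"fmt"
	"strings"
)

// sameScrollProver compares only the third field of the four-field version
// string, i.e. the scroll_prover commit, mirroring `remote[2] == local[2]`.
func sameScrollProver(remote, local string) bool {
	r, l := strings.Split(remote, "-"), strings.Split(local, "-")
	if len(r) != 4 || len(l) != 4 {
		return false
	}
	return r[2] == l[2]
}

func main() {
	fmt.Println(sameScrollProver("v4.1.72-aaa1111-9fd0b6f-38bb1ee", "v4.1.98-bbb2222-9fd0b6f-38bb1ee")) // true
}
```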

View File

@@ -260,23 +260,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -371,22 +354,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -563,23 +530,6 @@ Emitted when some ERC1155 token is refunded.
| tokenId | uint256 | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -227,23 +227,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -316,22 +299,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -502,23 +469,6 @@ Emitted when some ERC721 token is refunded.
| recipient `indexed` | address | undefined |
| tokenId | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -239,23 +239,6 @@ Mapping from queue index to previous replay queue index.
|---|---|---|
| _0 | uint256 | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of ETH rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### relayMessageWithProof
```solidity
@@ -453,22 +436,6 @@ Update max replay times.
|---|---|---|
| _newMaxReplayTimes | uint256 | The new max replay times. |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### xDomainMessageSender
```solidity
@@ -642,22 +609,5 @@ Emitted when the maximum number of times each message can be replayed is updated
| oldMaxReplayTimes | uint256 | undefined |
| newMaxReplayTimes | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -225,23 +225,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -292,22 +275,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
## Events
@@ -405,22 +372,5 @@ Emitted when some ERC20 token is refunded.
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -223,23 +223,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -290,22 +273,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
## Events
@@ -403,22 +370,5 @@ Emitted when some ERC20 token is refunded.
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -205,23 +205,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -316,22 +299,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -488,23 +455,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -174,23 +174,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -263,22 +246,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -430,23 +397,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -194,23 +194,6 @@ function paused() external view returns (bool)
|---|---|---|
| _0 | bool | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of ETH rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### relayMessage
```solidity
@@ -345,22 +328,6 @@ Update max failed execution times.
|---|---|---|
| _newMaxFailedExecutionTimes | uint256 | The new max failed execution times. |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### xDomainMessageSender
```solidity
@@ -534,22 +501,5 @@ Emitted when the maximum number of times each message can fail in L2 is updated.
| oldMaxFailedExecutionTimes | uint256 | undefined |
| newMaxFailedExecutionTimes | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -139,23 +139,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -223,22 +206,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### withdrawERC20
```solidity
@@ -354,23 +321,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### WithdrawERC20
```solidity

View File

@@ -172,23 +172,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -239,22 +222,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### withdrawERC20
```solidity
@@ -370,23 +337,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### WithdrawERC20
```solidity

View File

@@ -149,7 +149,7 @@ contract L1ScrollMessenger is ScrollMessengerBase, IL1ScrollMessenger {
// @note check more `_to` address to avoid attack in the future when we add more gateways.
require(_to != messageQueue, "Forbid to call message queue");
_validateTargetAddress(_to);
require(_to != address(this), "Forbid to call self");
// @note This usually will never happen, just in case.
require(_from != xDomainMessageSender, "Invalid message sender");
@@ -312,8 +312,6 @@ contract L1ScrollMessenger is ScrollMessengerBase, IL1ScrollMessenger {
uint256 _gasLimit,
address _refundAddress
) internal nonReentrant {
_addUsedAmount(_value);
address _messageQueue = messageQueue; // gas saving
address _counterpart = counterpart; // gas saving

View File

@@ -161,9 +161,6 @@ abstract contract L1ERC20Gateway is IL1ERC20Gateway, IMessageDropCallback, Scrol
// ignore weird fee on transfer token
require(_amount > 0, "deposit zero amount");
// rate limit
_addUsedAmount(_token, _amount);
return (_from, _amount, _data);
}

View File

@@ -124,8 +124,6 @@ contract L1ETHGateway is ScrollGatewayBase, IL1ETHGateway, IMessageDropCallback
(_from, _data) = abi.decode(_data, (address, bytes));
}
// @note no rate limit here, since ETH is limited in messenger
// 2. Generate message passed to L1ScrollMessenger.
bytes memory _message = abi.encodeCall(IL2ETHGateway.finalizeDepositETH, (_from, _to, _amount, _data));

View File

@@ -137,7 +137,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
uint256 _gasLimit
) internal nonReentrant {
require(msg.value == _value, "msg.value mismatch");
_addUsedAmount(_value);
uint256 _nonce = L2MessageQueue(messageQueue).nextMessageIndex();
bytes32 _xDomainCalldataHash = keccak256(_encodeXDomainCalldata(msg.sender, _to, _value, _nonce, _message));
@@ -166,7 +165,7 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
) internal {
// @note check more `_to` address to avoid attack in the future when we add more gateways.
require(_to != messageQueue, "Forbid to call message queue");
_validateTargetAddress(_to);
require(_to != address(this), "Forbid to call self");
// @note This usually will never happen, just in case.
require(_from != xDomainMessageSender, "Invalid message sender");

View File

@@ -126,9 +126,6 @@ contract L2CustomERC20Gateway is L2ERC20Gateway {
(_from, _data) = abi.decode(_data, (address, bytes));
}
// rate limit
_addUsedAmount(_token, _amount);
// 2. Burn token.
IScrollERC20Upgradeable(_token).burn(_from, _amount);

View File

@@ -98,8 +98,6 @@ contract L2ETHGateway is ScrollGatewayBase, IL2ETHGateway {
(_from, _data) = abi.decode(_data, (address, bytes));
}
// @note no rate limit here, since ETH is limited in messenger
bytes memory _message = abi.encodeCall(IL1ETHGateway.finalizeWithdrawETH, (_from, _to, _amount, _data));
IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, _amount, _message, _gasLimit);

View File

@@ -140,9 +140,6 @@ contract L2StandardERC20Gateway is L2ERC20Gateway {
address _l1Token = tokenMapping[_token];
require(_l1Token != address(0), "no corresponding l1 token");
// rate limit
_addUsedAmount(_token, _amount);
// 2. Burn token.
IScrollERC20Upgradeable(_token).burn(_from, _amount);

View File

@@ -116,9 +116,6 @@ contract L2WETHGateway is L2ERC20Gateway {
(_from, _data) = abi.decode(_data, (address, bytes));
}
// rate limit
_addUsedAmount(_token, _amount);
// 2. Transfer token into this contract.
IERC20Upgradeable(_token).safeTransferFrom(_from, address(this), _amount);
IWETH(_token).withdraw(_amount);

View File

@@ -128,7 +128,7 @@ contract L2USDCGateway is L2ERC20Gateway {
require(_token == l2USDC, "only USDC is allowed");
require(!withdrawPaused, "withdraw paused");
// 1. Extract real sender if this call is from L2GatewayRouter.
// 1. Extract real sender if this call is from L1GatewayRouter.
address _from = msg.sender;
if (router == msg.sender) {
(_from, _data) = abi.decode(_data, (address, bytes));

View File

@@ -7,7 +7,6 @@ import {PausableUpgradeable} from "@openzeppelin/contracts-upgradeable/security/
import {ReentrancyGuardUpgradeable} from "@openzeppelin/contracts-upgradeable/security/ReentrancyGuardUpgradeable.sol";
import {ScrollConstants} from "./constants/ScrollConstants.sol";
import {IETHRateLimiter} from "../rate-limiter/IETHRateLimiter.sol";
import {IScrollMessenger} from "./IScrollMessenger.sol";
// solhint-disable var-name-mixedcase
@@ -27,11 +26,6 @@ abstract contract ScrollMessengerBase is
/// @param _newFeeVault The address of new fee vault contract.
event UpdateFeeVault(address _oldFeeVault, address _newFeeVault);
/// @notice Emitted when owner updates rate limiter contract.
/// @param _oldRateLimiter The address of old rate limiter contract.
/// @param _newRateLimiter The address of new rate limiter contract.
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter);
/*************
* Variables *
*************/
@@ -45,11 +39,8 @@ abstract contract ScrollMessengerBase is
/// @notice The address of fee vault, collecting cross domain messaging fee.
address public feeVault;
/// @notice The address of ETH rate limiter contract.
address public rateLimiter;
/// @dev The storage slots for future usage.
uint256[46] private __gap;
uint256[47] private __gap;
/**********************
* Function Modifiers *
@@ -98,16 +89,6 @@ abstract contract ScrollMessengerBase is
emit UpdateFeeVault(_oldFeeVault, _newFeeVault);
}
/// @notice Update rate limiter contract.
/// @dev This function can only be called by the contract owner.
/// @param _newRateLimiter The address of new rate limiter contract.
function updateRateLimiter(address _newRateLimiter) external onlyOwner {
address _oldRateLimiter = rateLimiter;
rateLimiter = _newRateLimiter;
emit UpdateRateLimiter(_oldRateLimiter, _newRateLimiter);
}
/// @notice Pause the contract
/// @dev This function can only be called by the contract owner.
/// @param _status The pause status to update.
@@ -147,27 +128,4 @@ abstract contract ScrollMessengerBase is
_message
);
}
/// @dev Internal function to increase ETH usage for the given `_sender`.
/// @param _amount The amount of ETH used.
function _addUsedAmount(uint256 _amount) internal {
if (_amount == 0) return;
address _rateLimiter = rateLimiter;
if (_rateLimiter != address(0)) {
IETHRateLimiter(_rateLimiter).addUsedAmount(_amount);
}
}
/// @dev Internal function to check whether the `_target` address is allowed to avoid attack.
/// @param _target The address of target address to check.
function _validateTargetAddress(address _target) internal view {
// @note check more `_target` address to avoid attack in the future when we add more external contracts.
address _rateLimiter = rateLimiter;
if (_rateLimiter != address(0)) {
require(_target != _rateLimiter, "Forbid to call rate limiter");
}
require(_target != address(this), "Forbid to call self");
}
}

View File

@@ -9,18 +9,8 @@ import {IScrollGateway} from "./IScrollGateway.sol";
import {IScrollMessenger} from "../IScrollMessenger.sol";
import {IScrollGatewayCallback} from "../callbacks/IScrollGatewayCallback.sol";
import {ScrollConstants} from "../constants/ScrollConstants.sol";
import {ITokenRateLimiter} from "../../rate-limiter/ITokenRateLimiter.sol";
abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgradeable, IScrollGateway {
/**********
* Events *
**********/
/// @notice Emitted when owner updates rate limiter contract.
/// @param _oldRateLimiter The address of old rate limiter contract.
/// @param _newRateLimiter The address of new rate limiter contract.
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter);
/*************
* Variables *
*************/
@@ -34,11 +24,8 @@ abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgrad
/// @inheritdoc IScrollGateway
address public override messenger;
/// @notice The address of token rate limiter contract.
address public rateLimiter;
/// @dev The storage slots for future usage.
uint256[46] private __gap;
uint256[47] private __gap;
/**********************
* Function Modifiers *
@@ -85,20 +72,6 @@ abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgrad
}
}
/************************
* Restricted Functions *
************************/
/// @notice Update rate limiter contract.
/// @dev This function can only be called by the contract owner.
/// @param _newRateLimiter The address of new rate limiter contract.
function updateRateLimiter(address _newRateLimiter) external onlyOwner {
address _oldRateLimiter = rateLimiter;
rateLimiter = _newRateLimiter;
emit UpdateRateLimiter(_oldRateLimiter, _newRateLimiter);
}
/**********************
* Internal Functions *
**********************/
@@ -111,16 +84,4 @@ abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgrad
IScrollGatewayCallback(_to).onScrollGatewayCallback(_data);
}
}
/// @dev Internal function to increase token usage for the given `_sender`.
/// @param _token The address of token.
/// @param _amount The amount of token used.
function _addUsedAmount(address _token, uint256 _amount) internal {
if (_amount == 0) return;
address _rateLimiter = rateLimiter;
if (_rateLimiter != address(0)) {
ITokenRateLimiter(_rateLimiter).addUsedAmount(_token, _amount);
}
}
}

View File

@@ -1,116 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
import {SafeCast} from "@openzeppelin/contracts/utils/math/SafeCast.sol";
import {IETHRateLimiter} from "./IETHRateLimiter.sol";
// solhint-disable func-name-mixedcase
// solhint-disable not-rely-on-time
contract ETHRateLimiter is Ownable, IETHRateLimiter {
/***********
* Structs *
***********/
struct TokenAmount {
// The timestamp when the amount is updated.
uint48 lastUpdateTs;
// The ETH limit in wei.
uint104 limit;
// The amount of ETH in current period.
uint104 amount;
}
/*************
* Constants *
*************/
/// @notice The period length in seconds.
/// @dev The time frame for the `k`-th period is `[periodDuration * k, periodDuration * (k + 1))`.
uint256 public immutable periodDuration;
/// @notice The address of ETH spender.
address public immutable spender;
/*************
* Variables *
*************/
/// @notice The token amount used in current period.
TokenAmount public currentPeriod;
/***************
* Constructor *
***************/
constructor(
uint256 _periodDuration,
address _spender,
uint104 _totalLimit
) {
if (_periodDuration == 0) {
revert PeriodIsZero();
}
if (_totalLimit == 0) {
revert TotalLimitIsZero();
}
periodDuration = _periodDuration;
spender = _spender;
currentPeriod.limit = _totalLimit;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IETHRateLimiter
function addUsedAmount(uint256 _amount) external override {
if (msg.sender != spender) {
revert CallerNotSpender();
}
if (_amount == 0) return;
uint256 _currentPeriodStart = (block.timestamp / periodDuration) * periodDuration;
// check total limit
uint256 _currentTotalAmount;
TokenAmount memory _currentPeriod = currentPeriod;
if (_currentPeriod.lastUpdateTs < _currentPeriodStart) {
_currentTotalAmount = _amount;
} else {
_currentTotalAmount = _currentPeriod.amount + _amount;
}
if (_currentTotalAmount > _currentPeriod.limit) {
revert ExceedTotalLimit();
}
_currentPeriod.lastUpdateTs = uint48(block.timestamp);
_currentPeriod.amount = SafeCast.toUint104(_currentTotalAmount);
currentPeriod = _currentPeriod;
}
/************************
* Restricted Functions *
************************/
/// @notice Update the total token amount limit.
/// @param _newTotalLimit The new total limit.
function updateTotalLimit(uint104 _newTotalLimit) external onlyOwner {
if (_newTotalLimit == 0) {
revert TotalLimitIsZero();
}
uint256 _oldTotalLimit = currentPeriod.limit;
currentPeriod.limit = _newTotalLimit;
emit UpdateTotalLimit(_oldTotalLimit, _newTotalLimit);
}
}
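
ETHRateLimiter is a fixed-window limiter: the window for period k is [periodDuration * k, periodDuration * (k + 1)), and accumulated usage is discarded once lastUpdateTs falls before the current window start. A small Go sketch of the same accounting (names and demo values are illustrative, not the contract's API):

```go
package main

import (
	"errors"
	"fmt"
)

// fixedWindow mirrors ETHRateLimiter's bookkeeping: usage accumulates inside
// the current window and resets when a new window starts.
type fixedWindow struct {
	periodDuration uint64
	limit          uint64
	lastUpdateTs   uint64
	amount         uint64
}

func (w *fixedWindow) addUsedAmount(now, amount uint64) error {
	if amount == 0 {
		return nil
	}
	windowStart := (now / w.periodDuration) * w.periodDuration
	total := amount
	if w.lastUpdateTs >= windowStart { // still in the same window: accumulate
		total += w.amount
	}
	if total > w.limit {
		return errors.New("ExceedTotalLimit")
	}
	w.lastUpdateTs, w.amount = now, total
	return nil
}

func main() {
	w := &fixedWindow{periodDuration: 86400, limit: 100}
	fmt.Println(w.addUsedAmount(86400, 60))     // nil
	fmt.Println(w.addUsedAmount(86500, 50))     // ExceedTotalLimit: 60+50 > 100
	fmt.Println(w.addUsedAmount(2*86400+1, 50)) // nil: new window, usage reset
}
```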

View File

@@ -1,38 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
interface IETHRateLimiter {
/**********
* Events *
**********/
/// @notice Emitted when the total limit is updated.
/// @param oldTotalLimit The previous value of total limit before updating.
/// @param newTotalLimit The current value of total limit after updating.
event UpdateTotalLimit(uint256 oldTotalLimit, uint256 newTotalLimit);
/**********
* Errors *
**********/
/// @dev Thrown when the `periodDuration` is initialized to zero.
error PeriodIsZero();
/// @dev Thrown when the `totalAmount` is initialized to zero.
error TotalLimitIsZero();
/// @dev Thrown when an amount breaches the total limit in the period.
error ExceedTotalLimit();
/// @dev Thrown when the caller is not the spender.
error CallerNotSpender();
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Request some ETH usage for `sender`.
/// @param _amount The amount of ETH to use.
function addUsedAmount(uint256 _amount) external;
}

View File

@@ -1,38 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
interface ITokenRateLimiter {
/**********
* Events *
**********/
/// @notice Emitted when the total limit is updated.
/// @param oldTotalLimit The previous value of total limit before updating.
/// @param newTotalLimit The current value of total limit after updating.
event UpdateTotalLimit(address indexed token, uint256 oldTotalLimit, uint256 newTotalLimit);
/**********
* Errors *
**********/
/// @dev Thrown when the `periodDuration` is initialized to zero.
error PeriodIsZero();
/// @dev Thrown when the `totalAmount` is initialized to zero.
/// @param token The address of the token.
error TotalLimitIsZero(address token);
/// @dev Thrown when an amount breaches the total limit in the period.
/// @param token The address of the token.
error ExceedTotalLimit(address token);
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Request some token usage for `sender`.
/// @param token The address of the token.
/// @param amount The amount of token to use.
function addUsedAmount(address token, uint256 amount) external;
}

View File

@@ -1,106 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {AccessControlEnumerable} from "@openzeppelin/contracts/access/AccessControlEnumerable.sol";
import {SafeCast} from "@openzeppelin/contracts/utils/math/SafeCast.sol";
import {ITokenRateLimiter} from "./ITokenRateLimiter.sol";
// solhint-disable func-name-mixedcase
// solhint-disable not-rely-on-time
contract TokenRateLimiter is AccessControlEnumerable, ITokenRateLimiter {
/***********
* Structs *
***********/
struct TokenAmount {
// The timestamp when the amount is updated.
uint48 lastUpdateTs;
// The token limit.
uint104 limit;
// The amount of token in current period.
uint104 amount;
}
/*************
* Constants *
*************/
/// @notice The role for token spender.
bytes32 public constant TOKEN_SPENDER_ROLE = keccak256("TOKEN_SPENDER_ROLE");
/// @notice The period length in seconds.
/// @dev The time frame for the `k`-th period is `[periodDuration * k, periodDuration * (k + 1))`.
uint256 public immutable periodDuration;
/*************
* Variables *
*************/
/// @notice Mapping from token address to the total amounts used in current period and total token amount limit.
mapping(address => TokenAmount) public currentPeriod;
/// @dev The storage slots for future usage.
uint256[49] private __gap;
/***************
* Constructor *
***************/
constructor(uint256 _periodDuration) {
if (_periodDuration == 0) {
revert PeriodIsZero();
}
_setupRole(DEFAULT_ADMIN_ROLE, msg.sender);
periodDuration = _periodDuration;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc ITokenRateLimiter
function addUsedAmount(address _token, uint256 _amount) external override onlyRole(TOKEN_SPENDER_ROLE) {
if (_amount == 0) return;
uint256 _currentPeriodStart = (block.timestamp / periodDuration) * periodDuration;
// check total limit, `0` means no limit at all.
uint256 _currentTotalAmount;
TokenAmount memory _currentPeriod = currentPeriod[_token];
if (_currentPeriod.lastUpdateTs < _currentPeriodStart) {
_currentTotalAmount = _amount;
} else {
_currentTotalAmount = _currentPeriod.amount + _amount;
}
if (_currentPeriod.limit != 0 && _currentTotalAmount > _currentPeriod.limit) {
revert ExceedTotalLimit(_token);
}
_currentPeriod.lastUpdateTs = uint48(block.timestamp);
_currentPeriod.amount = SafeCast.toUint104(_currentTotalAmount);
currentPeriod[_token] = _currentPeriod;
}
/************************
* Restricted Functions *
************************/
/// @notice Update the total token amount limit.
/// @param _newTotalLimit The new total limit.
function updateTotalLimit(address _token, uint104 _newTotalLimit) external onlyRole(DEFAULT_ADMIN_ROLE) {
if (_newTotalLimit == 0) {
revert TotalLimitIsZero(_token);
}
uint256 _oldTotalLimit = currentPeriod[_token].limit;
currentPeriod[_token].limit = _newTotalLimit;
emit UpdateTotalLimit(_token, _oldTotalLimit, _newTotalLimit);
}
}
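
TokenRateLimiter applies the same window math per token, with two differences worth noting: a zero limit means no limit at all, and callers are gated by TOKEN_SPENDER_ROLE rather than a single spender address. A Go sketch of the per-token bookkeeping (role checks omitted; names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

type tokenWindow struct {
	limit        uint64 // 0 means no limit for this token
	lastUpdateTs uint64
	amount       uint64
}

// tokenLimiter keeps one fixed window per token, mirroring the contract's
// mapping from token address to (lastUpdateTs, limit, amount).
type tokenLimiter struct {
	periodDuration uint64
	periods        map[string]*tokenWindow
}

func (l *tokenLimiter) addUsedAmount(token string, now, amount uint64) error {
	if amount == 0 {
		return nil
	}
	w, ok := l.periods[token]
	if !ok {
		w = &tokenWindow{}
		l.periods[token] = w
	}
	windowStart := (now / l.periodDuration) * l.periodDuration
	total := amount
	if w.lastUpdateTs >= windowStart { // same window: accumulate
		total += w.amount
	}
	if w.limit != 0 && total > w.limit { // a zero limit disables the check
		return errors.New("ExceedTotalLimit")
	}
	w.lastUpdateTs, w.amount = now, total
	return nil
}

func main() {
	l := &tokenLimiter{periodDuration: 86400, periods: map[string]*tokenWindow{
		"0xToken": {limit: 100},
	}}
	fmt.Println(l.addUsedAmount("0xToken", 86400, 100))     // nil: exactly at the limit
	fmt.Println(l.addUsedAmount("0xToken", 86500, 1))       // ExceedTotalLimit
	fmt.Println(l.addUsedAmount("0xOther", 86500, 1000000)) // nil: no limit configured
}
```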

View File

@@ -1,75 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
import {ETHRateLimiter} from "../rate-limiter/ETHRateLimiter.sol";
import {IETHRateLimiter} from "../rate-limiter/IETHRateLimiter.sol";
contract ETHRateLimiterTest is DSTestPlus {
event UpdateTotalLimit(uint256 oldTotalLimit, uint256 newTotalLimit);
ETHRateLimiter private limiter;
function setUp() public {
hevm.warp(86400);
limiter = new ETHRateLimiter(86400, address(this), 100 ether);
}
function testUpdateTotalLimit(uint104 _newTotalLimit) external {
hevm.assume(_newTotalLimit > 0);
// not owner, revert
hevm.startPrank(address(1));
hevm.expectRevert("Ownable: caller is not the owner");
limiter.updateTotalLimit(_newTotalLimit);
hevm.stopPrank();
// zero revert
hevm.expectRevert(IETHRateLimiter.TotalLimitIsZero.selector);
limiter.updateTotalLimit(0);
// success
hevm.expectEmit(false, false, false, true);
emit UpdateTotalLimit(100 ether, _newTotalLimit);
limiter.updateTotalLimit(_newTotalLimit);
(, uint104 _totalLimit, ) = limiter.currentPeriod();
assertEq(_totalLimit, _newTotalLimit);
}
function testAddUsedAmount() external {
// non-spender, revert
hevm.startPrank(address(1));
hevm.expectRevert(IETHRateLimiter.CallerNotSpender.selector);
limiter.addUsedAmount(0);
hevm.stopPrank();
// exceed total limit on first call
hevm.expectRevert(IETHRateLimiter.ExceedTotalLimit.selector);
limiter.addUsedAmount(100 ether + 1);
_checkTotalCurrentPeriodAmountAmount(0);
// exceed total limit on second call
limiter.addUsedAmount(50 ether);
_checkTotalCurrentPeriodAmountAmount(50 ether);
hevm.expectRevert(IETHRateLimiter.ExceedTotalLimit.selector);
limiter.addUsedAmount(50 ether + 1);
_checkTotalCurrentPeriodAmountAmount(50 ether);
// one period passed
hevm.warp(86400 * 2);
limiter.addUsedAmount(1 ether);
_checkTotalCurrentPeriodAmountAmount(1 ether);
// exceed
hevm.expectRevert(IETHRateLimiter.ExceedTotalLimit.selector);
limiter.addUsedAmount(99 ether + 1);
_checkTotalCurrentPeriodAmountAmount(1 ether);
}
function _checkTotalCurrentPeriodAmountAmount(uint256 expected) internal {
(, , uint256 totalAmount) = limiter.currentPeriod();
assertEq(totalAmount, expected);
}
}

View File

@@ -42,26 +42,6 @@ contract L1ScrollMessengerTest is L1GatewayTestBase {
l1Messenger.relayMessageWithProof(address(this), address(messageQueue), 0, 0, new bytes(0), proof);
}
function testForbidCallRateLimiterFromL2() external {
l1Messenger.updateRateLimiter(address(1));
bytes32 _xDomainCalldataHash = keccak256(
abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(this),
address(1),
0,
0,
new bytes(0)
)
);
prepareL2MessageRoot(_xDomainCalldataHash);
IL1ScrollMessenger.L2MessageProof memory proof;
proof.batchIndex = rollup.lastFinalizedBatchIndex();
hevm.expectRevert("Forbid to call rate limiter");
l1Messenger.relayMessageWithProof(address(this), address(1), 0, 0, new bytes(0), proof);
}
function testForbidCallSelfFromL2() external {
bytes32 _xDomainCalldataHash = keccak256(
abi.encodeWithSignature(

View File

@@ -51,15 +51,10 @@ contract L2ScrollMessengerTest is DSTestPlus {
}
function testForbidCallFromL1() external {
l2Messenger.updateRateLimiter(address(1));
hevm.startPrank(AddressAliasHelper.applyL1ToL2Alias(address(l1Messenger)));
hevm.expectRevert("Forbid to call message queue");
l2Messenger.relayMessage(address(this), address(l2MessageQueue), 0, 0, new bytes(0));
hevm.expectRevert("Forbid to call rate limiter");
l2Messenger.relayMessage(address(this), address(1), 0, 0, new bytes(0));
hevm.expectRevert("Forbid to call self");
l2Messenger.relayMessage(address(this), address(l2Messenger), 0, 0, new bytes(0));
hevm.stopPrank();

View File

@@ -1,82 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
import {TokenRateLimiter} from "../rate-limiter/TokenRateLimiter.sol";
import {ITokenRateLimiter} from "../rate-limiter/ITokenRateLimiter.sol";
contract TokenRateLimiterTest is DSTestPlus {
event UpdateTotalLimit(address indexed token, uint256 oldTotalLimit, uint256 newTotalLimit);
TokenRateLimiter private limiter;
function setUp() public {
hevm.warp(86400);
limiter = new TokenRateLimiter(86400);
}
function testUpdateTotalLimit(address _token, uint104 _newTotalLimit) external {
hevm.assume(_newTotalLimit > 0);
// not admin, revert
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
);
limiter.updateTotalLimit(_token, _newTotalLimit);
hevm.stopPrank();
// zero revert
hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.TotalLimitIsZero.selector, _token));
limiter.updateTotalLimit(_token, 0);
// success
hevm.expectEmit(true, false, false, true);
emit UpdateTotalLimit(_token, 0 ether, _newTotalLimit);
limiter.updateTotalLimit(_token, _newTotalLimit);
(, uint104 _totalLimit, ) = limiter.currentPeriod(_token);
assertEq(_totalLimit, _newTotalLimit);
}
function testAddUsedAmount(address _token) external {
// non-spender, revert
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x267f05081a073059ae452e6ac77ec189636e43e41051d4c3ec760734b3d173cb"
);
limiter.addUsedAmount(_token, 0);
hevm.stopPrank();
limiter.grantRole(bytes32(0x267f05081a073059ae452e6ac77ec189636e43e41051d4c3ec760734b3d173cb), address(this));
limiter.updateTotalLimit(_token, 100 ether);
// exceed total limit on first call
hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.ExceedTotalLimit.selector, _token));
limiter.addUsedAmount(_token, 100 ether + 1);
_checkTotalCurrentPeriodAmountAmount(_token, 0);
// exceed total limit on second call
limiter.addUsedAmount(_token, 50 ether);
_checkTotalCurrentPeriodAmountAmount(_token, 50 ether);
hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.ExceedTotalLimit.selector, _token));
limiter.addUsedAmount(_token, 50 ether + 1);
_checkTotalCurrentPeriodAmountAmount(_token, 50 ether);
// one period passed
hevm.warp(86400 * 2);
limiter.addUsedAmount(_token, 1 ether);
_checkTotalCurrentPeriodAmountAmount(_token, 1 ether);
// exceed
hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.ExceedTotalLimit.selector, _token));
limiter.addUsedAmount(_token, 99 ether + 1);
_checkTotalCurrentPeriodAmountAmount(_token, 1 ether);
}
function _checkTotalCurrentPeriodAmountAmount(address token, uint256 expected) internal {
(, , uint256 totalAmount) = limiter.currentPeriod(token);
assertEq(totalAmount, expected);
}
}

View File

@@ -7,7 +7,6 @@ import (
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/verifier"
)
var (
@@ -26,14 +25,9 @@ var (
// InitController inits Controller with database
func InitController(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) {
initControllerOnce.Do(func() {
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
if err != nil {
panic("proof receiver new verifier failure")
}
Auth = NewAuthController(db)
HealthCheck = NewHealthCheckController()
GetTask = NewGetTaskController(cfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)
GetTask = NewGetTaskController(cfg, db, reg)
SubmitProof = NewSubmitProofController(cfg, db, reg)
})
}

View File

@@ -13,7 +13,6 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -23,9 +22,9 @@ type GetTaskController struct {
}
// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, db, vf.ChunkVK, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, db, vf.BatchVK, reg)
func NewGetTaskController(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, db, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, db, reg)
ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
@@ -41,7 +40,7 @@ func NewGetTaskController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
var getTaskParameter coordinatorType.GetTaskParameter
if err := ctx.ShouldBind(&getTaskParameter); err != nil {
nerr := fmt.Errorf("prover task parameter invalid, err:%w", err)
nerr := fmt.Errorf("prover tasks parameter invalid, err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
@@ -49,7 +48,7 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
proofType := ptc.proofType(&getTaskParameter)
proverTask, isExist := ptc.proverTasks[proofType]
if !isExist {
nerr := fmt.Errorf("parameter wrong proof type:%v", proofType)
nerr := fmt.Errorf("parameter wrong proof type")
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}

View File

@@ -13,8 +13,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/submitproof"
"scroll-tech/coordinator/internal/logic/verifier"
coordinatorType "scroll-tech/coordinator/internal/types"
coodinatorType "scroll-tech/coordinator/internal/types"
)
// SubmitProofController the submit proof api controller
@@ -23,18 +22,18 @@ type SubmitProofController struct {
}
// NewSubmitProofController create the submit proof api controller instance
func NewSubmitProofController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *SubmitProofController {
func NewSubmitProofController(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *SubmitProofController {
return &SubmitProofController{
submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, db, vf, reg),
submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, db, reg),
}
}
// SubmitProof prover submit the proof to coordinator
func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
var spp coordinatorType.SubmitProofParameter
var spp coodinatorType.SubmitProofParameter
if err := ctx.ShouldBind(&spp); err != nil {
nerr := fmt.Errorf("parameter invalid, err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
@@ -52,7 +51,7 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
var tmpChunkProof message.ChunkProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.ChunkProof = &tmpChunkProof
@@ -60,7 +59,7 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
var tmpBatchProof message.BatchProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.BatchProof = &tmpBatchProof
@@ -69,8 +68,8 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil {
nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
return
}
coordinatorType.RenderJSON(ctx, types.Success, nil, nil)
coodinatorType.RenderJSON(ctx, types.Success, nil, nil)
}

View File

@@ -29,11 +29,10 @@ type Collector struct {
chunkOrm *orm.Chunk
batchOrm *orm.Batch
timeoutBatchCheckerRunTotal prometheus.Counter
batchProverTaskTimeoutTotal prometheus.Counter
timeoutChunkCheckerRunTotal prometheus.Counter
chunkProverTaskTimeoutTotal prometheus.Counter
checkBatchAllChunkReadyRunTotal prometheus.Counter
timeoutBatchCheckerRunTotal prometheus.Counter
batchProverTaskTimeoutTotal prometheus.Counter
timeoutChunkCheckerRunTotal prometheus.Counter
chunkProverTaskTimeoutTotal prometheus.Counter
}
// NewCollector create a collector to cron collect the data to send to prover
@@ -63,15 +62,10 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
Name: "coordinator_chunk_prover_task_timeout_total",
Help: "Total number of chunk timeout prover task.",
}),
checkBatchAllChunkReadyRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_check_batch_all_chunk_ready_run_total",
Help: "Total number of check batch all chunks ready total",
}),
}
go c.timeoutBatchProofTask()
go c.timeoutChunkProofTask()
go c.checkBatchAllChunkReady()
log.Info("Start coordinator successfully.")
@@ -85,6 +79,7 @@ func (c *Collector) Stop() {
// timeoutTask is a cron-style check for timed-out assigned tasks. If the timeout is reached, it restores the
// chunk/batch task to unassigned so that the batch/chunk collector can retry it.
func (c *Collector) timeoutBatchProofTask() {
defer func() {
if err := recover(); err != nil {
@@ -154,7 +149,7 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
// here not update the block batch proving status failed, because the collector loop will check
// the attempt times. if reach the times, the collector will set the block batch proving status.
for _, assignedProverTask := range assignedProverTasks {
if c.proverTaskOrm.TaskTimeoutMoreThanOnce(c.ctx, message.ProofType(assignedProverTask.TaskType), assignedProverTask.TaskID) {
if c.proverTaskOrm.TaskTimeoutMoreThanOnce(c.ctx, assignedProverTask.TaskID) {
log.Warn("Task timeout more than once", "taskType", message.ProofType(assignedProverTask.TaskType).String(), "hash", assignedProverTask.TaskID)
}
@@ -194,60 +189,3 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
}
}
}
func (c *Collector) checkBatchAllChunkReady() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("check batch all chunk ready panic error:%v", err)
log.Warn(nerr.Error())
}
}()
ticker := time.NewTicker(time.Second * 10)
for {
select {
case <-ticker.C:
c.checkBatchAllChunkReadyRunTotal.Inc()
page := 1
pageSize := 50
for {
offset := (page - 1) * pageSize
batches, err := c.batchOrm.GetUnassignedAndChunksUnreadyBatches(c.ctx, offset, pageSize)
if err != nil {
log.Warn("checkBatchAllChunkReady GetUnassignedAndChunksUnreadyBatches", "error", err)
break
}
for _, batch := range batches {
allReady, checkErr := c.chunkOrm.CheckIfBatchChunkProofsAreReady(c.ctx, batch.Hash)
if checkErr != nil {
log.Warn("checkBatchAllChunkReady CheckIfBatchChunkProofsAreReady failure", "error", checkErr, "hash", batch.Hash)
continue
}
if !allReady {
continue
}
if updateErr := c.batchOrm.UpdateChunkProofsStatusByBatchHash(c.ctx, batch.Hash, types.ChunkProofsStatusReady); updateErr != nil {
log.Warn("checkBatchAllChunkReady UpdateChunkProofsStatusByBatchHash failure", "error", checkErr, "hash", batch.Hash)
}
}
if len(batches) < pageSize {
break
}
page++
}
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
return
}
}
}
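
The loop above walks unassigned batches with offset pagination and stops as soon as a short page comes back. A compact standalone sketch of that loop shape, with a stand-in fetch function instead of the ORM call:

```go
package main

import "fmt"

// fetchPage stands in for GetUnassignedAndChunksUnreadyBatches: it returns at
// most pageSize items starting at offset (backed by a slice for this demo).
func fetchPage(all []string, offset, pageSize int) []string {
	if offset >= len(all) {
		return nil
	}
	end := offset + pageSize
	if end > len(all) {
		end = len(all)
	}
	return all[offset:end]
}

func main() {
	all := []string{"b1", "b2", "b3", "b4", "b5"}
	page, pageSize := 1, 2
	for {
		offset := (page - 1) * pageSize
		batches := fetchPage(all, offset, pageSize)
		for _, b := range batches {
			fmt.Println("process", b)
		}
		if len(batches) < pageSize { // short page: nothing more to fetch
			break
		}
		page++
	}
}
```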

View File

@@ -25,14 +25,13 @@ import (
// BatchProverTask is the prover task implementation for batch proofs
type BatchProverTask struct {
BaseProverTask
vk string
batchAttemptsExceedTotal prometheus.Counter
batchTaskGetTaskTotal prometheus.Counter
}
// NewBatchProverTask new a batch collector
func NewBatchProverTask(cfg *config.Config, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *BatchProverTask {
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
db: db,
@@ -41,7 +40,6 @@ func NewBatchProverTask(cfg *config.Config, db *gorm.DB, vk string, reg promethe
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
},
vk: vk,
batchAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_attempts_exceed_total",
Help: "Total number of batch attempts exceed.",
@@ -70,16 +68,8 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from context failed")
}
if getTaskParameter.VK == "" { // allow vk being empty, because for the first time the prover may not know its vk
if !version.CheckScrollProverVersionTag(proverVersion.(string)) { // but reject too-old provers
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
} else if getTaskParameter.VK != bp.vk { // non-empty vk but different
if version.CheckScrollProverVersion(proverVersion.(string)) { // same prover version but different vks
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
// different prover versions and different vks
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", proverVersion.(string), version.Version)
}
isAssigned, err := bp.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
@@ -101,8 +91,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
if len(batchTasks) != 1 {
log.Error("get unassigned batch proving task len not 1", "length", len(batchTasks), "batch tasks", batchTasks)
return nil, ErrCoordinatorInternalFailure
return nil, fmt.Errorf("get unassigned batch proving task len not 1, batch tasks:%v", batchTasks)
}
batchTask := batchTasks[0]
@@ -110,9 +99,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
bp.batchAttemptsExceedTotal.Inc()
// TODO: retry fetching unassigned batch proving task
log.Error("batch task proving attempts reach the maximum", "hash", batchTask.Hash)
return nil, nil
return nil, fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
}
proverTask := orm.ProverTask{
@@ -130,15 +117,13 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Store session info.
if err = bp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
bp.recoverProvingStatus(ctx, batchTask)
log.Error("db set session info fail", "task hash", batchTask.Hash, "prover name", proverName.(string), "prover pubKey", publicKey.(string), "err", err)
return nil, ErrCoordinatorInternalFailure
return nil, fmt.Errorf("db set session info fail, session id:%s, error:%w", proverTask.TaskID, err)
}
taskMsg, err := bp.formatProverTask(ctx, batchTask.Hash)
if err != nil {
bp.recoverProvingStatus(ctx, batchTask)
log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
return nil, fmt.Errorf("format prover failure, id:%s error:%w", batchTask.Hash, err)
}
bp.batchTaskGetTaskTotal.Inc()

View File

@@ -22,20 +22,16 @@ import (
coordinatorType "scroll-tech/coordinator/internal/types"
)
// ErrCoordinatorInternalFailure coordinator internal db failure
var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ChunkProverTask the chunk prover task
type ChunkProverTask struct {
BaseProverTask
vk string
chunkAttemptsExceedTotal prometheus.Counter
chunkTaskGetTaskTotal prometheus.Counter
}
// NewChunkProverTask new a chunk prover task
func NewChunkProverTask(cfg *config.Config, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
func NewChunkProverTask(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *ChunkProverTask {
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
db: db,
@@ -44,7 +40,7 @@ func NewChunkProverTask(cfg *config.Config, db *gorm.DB, vk string, reg promethe
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
},
vk: vk,
chunkAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_chunk_attempts_exceed_total",
Help: "Total number of chunk attempts exceed.",
@@ -73,15 +69,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from context failed")
}
if getTaskParameter.VK == "" { // allow vk being empty, because for the first time the prover may not know its vk
if !version.CheckScrollProverVersionTag(proverVersion.(string)) { // but reject too-old provers
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
} else if getTaskParameter.VK != cp.vk { // non-empty vk but different
if version.CheckScrollProverVersion(proverVersion.(string)) { // same prover version but different vks
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
// different prover versions and different vks
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
@@ -97,8 +85,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// load and send chunk tasks
chunkTasks, err := cp.chunkOrm.UpdateUnassignedChunkReturning(ctx, getTaskParameter.ProverHeight, 1)
if err != nil {
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", err)
return nil, ErrCoordinatorInternalFailure
return nil, fmt.Errorf("failed to get unassigned chunk proving tasks, error:%w", err)
}
if len(chunkTasks) == 0 {
@@ -106,8 +93,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
if len(chunkTasks) != 1 {
log.Error("get unassigned chunk proving task len not 1", "length", len(chunkTasks), "chunk tasks", chunkTasks)
return nil, ErrCoordinatorInternalFailure
return nil, fmt.Errorf("get unassigned chunk proving task len not 1, chunk tasks:%v", chunkTasks)
}
chunkTask := chunkTasks[0]
@@ -116,9 +102,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
cp.chunkAttemptsExceedTotal.Inc()
// TODO: retry fetching unassigned chunk proving task
log.Error("chunk task proving attempts reach the maximum", "hash", chunkTask.Hash)
return nil, nil
return nil, fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
}
proverTask := orm.ProverTask{
@@ -134,15 +118,13 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
cp.recoverProvingStatus(ctx, chunkTask)
log.Error("db set session info fail", "task hash", chunkTask.Hash, "prover name", proverName.(string), "prover pubKey", publicKey.(string), "err", err)
return nil, ErrCoordinatorInternalFailure
return nil, fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, publicKey, err)
}
taskMsg, err := cp.formatProverTask(ctx, chunkTask.Hash)
if err != nil {
cp.recoverProvingStatus(ctx, chunkTask)
log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
return nil, fmt.Errorf("format prover task failure, id:%s error:%w", chunkTask.Hash, err)
}
cp.chunkTaskGetTaskTotal.Inc()

View File

@@ -5,7 +5,6 @@ import (
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/gin-gonic/gin"
@@ -34,12 +33,6 @@ var (
ErrValidatorFailureProofTimeout = errors.New("validator failure submit proof timeout")
// ErrValidatorFailureTaskHaveVerifiedSuccess have proved success and verified success
ErrValidatorFailureTaskHaveVerifiedSuccess = errors.New("validator failure chunk/batch have proved and verified success")
// ErrValidatorFailureVerifiedFailed failed to verify and the verifier returns error
ErrValidatorFailureVerifiedFailed = fmt.Errorf("verification failed, verifier returns error")
// ErrValidatorSuccessInvalidProof successful verified and the proof is invalid
ErrValidatorSuccessInvalidProof = fmt.Errorf("verification succeeded, it's an invalid proof")
// ErrCoordinatorInternalFailure coordinator internal db failure
ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
)
// ProofReceiverLogic the proof receiver logic
@@ -66,7 +59,11 @@ type ProofReceiverLogic struct {
}
// NewSubmitProofReceiverLogic create a proof receiver logic
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic {
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, reg prometheus.Registerer) *ProofReceiverLogic {
vf, err := verifier.NewVerifier(cfg.Verifier)
if err != nil {
panic("proof receiver new verifier failure")
}
return &ProofReceiverLogic{
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
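
The constructor hunk above moves verifier creation inside NewSubmitProofReceiverLogic and panics if it fails. A small sketch of that fail-fast start-up pattern; the names below are placeholders, not the coordinator's real types:

package main

import "fmt"

// verifier stands in for the coordinator's halo2 verifier.
type verifier struct{}

// newVerifier models a dependency whose construction can fail.
func newVerifier(mock bool) (*verifier, error) {
	if !mock {
		return nil, fmt.Errorf("native verifier unavailable in this sketch")
	}
	return &verifier{}, nil
}

// receiverLogic owns its verifier instead of taking one from the caller.
type receiverLogic struct{ vf *verifier }

// newReceiverLogic panics on a bad verifier config so the service stops
// at start-up rather than failing on the first submitted proof.
func newReceiverLogic(mock bool) *receiverLogic {
	vf, err := newVerifier(mock)
	if err != nil {
		panic(fmt.Sprintf("proof receiver new verifier failure: %v", err))
	}
	return &receiverLogic{vf: vf}
}

func main() {
	fmt.Printf("%+v\n", newReceiverLogic(true))
}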
@@ -131,11 +128,11 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
return fmt.Errorf("get public key from context failed")
}
pv := ctx.GetString(coordinatorType.ProverVersion)
if len(pv) == 0 {
if len(pk) == 0 {
return fmt.Errorf("get ProverVersion from context failed")
}
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndProver(ctx, proofMsg.Type, proofMsg.ID, pk, pv)
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndProver(ctx, proofMsg.ID, pk, pv)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
@@ -151,7 +148,8 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
return err
}
m.verifierTotal.WithLabelValues(pv).Inc()
proverVersion := ctx.GetString(coordinatorType.ProverVersion)
m.verifierTotal.WithLabelValues(proverVersion).Inc()
var success bool
var verifyErr error
@@ -162,16 +160,16 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
}
if verifyErr != nil || !success {
m.verifierFailureTotal.WithLabelValues(pv).Inc()
m.verifierFailureTotal.WithLabelValues(proverVersion).Inc()
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
if verifyErr != nil {
return ErrValidatorFailureVerifiedFailed
if verifyErr == nil {
verifyErr = fmt.Errorf("verification succeeded and it's an invalid proof")
}
return ErrValidatorSuccessInvalidProof
return verifyErr
}
m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())
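
In the verification hunk above, a failed check now returns the verifier's own error and only synthesizes one when verification ran cleanly but judged the proof invalid. A compact sketch of that control flow, using assumed helper names:

package main

import (
	"errors"
	"fmt"
)

// verify is a stand-in for the coordinator's VerifyChunkProof/VerifyBatchProof calls.
func verify(proof string) (bool, error) {
	if proof == "" {
		return false, errors.New("empty proof")
	}
	return proof != "this is a invalid proof", nil
}

// handleProof mirrors the diff: keep the verifier error when there is one,
// otherwise report a clean run that still produced an invalid proof.
func handleProof(proof string) error {
	success, verifyErr := verify(proof)
	if verifyErr != nil || !success {
		if verifyErr == nil {
			verifyErr = fmt.Errorf("verification succeeded and it's an invalid proof")
		}
		return verifyErr
	}
	return nil
}

func main() {
	fmt.Println(handleProof(""))                        // verifier error propagated
	fmt.Println(handleProof("this is a invalid proof")) // synthesized invalid-proof error
	fmt.Println(handleProof("ok"))                      // <nil>
}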
@@ -182,7 +180,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg, proofTimeSec); err != nil {
m.proofSubmitFailure.Inc()
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
return ErrCoordinatorInternalFailure
return err
}
return nil
@@ -234,19 +232,12 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
proofTimeSec := uint64(proofTime.Seconds())
if proofMsg.Status != message.StatusOk {
// Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)
// Verify if the proving task has already been assigned to another prover.
// Upon receiving an error message, it's possible the proving status has been reset by another prover
// and the task has been reassigned. In this case, the coordinator should avoid resetting the proving status.
m.processProverErr(ctx, proofMsg.ID, pk, proofMsg.Type)
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
m.validateFailureProverTaskStatusNotOk.Inc()
log.Info("proof generated by prover failed",
"taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
"proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
"failureMessage", "failureMessage", failureMsg)
"taskType", proofMsg.Type, "hash", proofMsg.ID,
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
"proverPublicKey", pk, "failureType", proofParameter.FailureType, "failureMessage", proofParameter.FailureMsg)
return ErrValidatorFailureProofMsgStatusNotOk
}
@@ -274,6 +265,15 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
return nil
}
//func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
// log.Info("proof failure update proof status", "hash", hash, "public key", pubKey,
// "proof type", proofMsg.Type.String(), "status", types.ProvingTaskFailed.String())
//
// if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskFailed, 0); err != nil {
// log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
// }
//}
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", hash, "proverPublicKey", pubKey,
"taskType", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())
@@ -310,8 +310,8 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
}
// if the block batch has proof verified, so the failed status not update block batch proving status
if m.checkIsTaskSuccess(ctx, hash, proofMsg.Type) {
log.Info("update proof status skip because this chunk / batch has been verified", "hash", hash, "public key", proverPublicKey)
if status == types.ProvingTaskFailed && m.checkIsTaskSuccess(ctx, hash, proofMsg.Type) {
log.Info("update proof status ProvingTaskFailed skip because other prover have prove success", "hash", hash, "public key", proverPublicKey)
return nil
}
@@ -378,33 +378,6 @@ func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string
return provingStatus == types.ProvingTaskVerified
}
func (m *ProofReceiverLogic) processProverErr(ctx context.Context, taskID, pk string, taskType message.ProofType) {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, taskType, taskID, pk, types.ProverProofInvalid); updateErr != nil {
log.Error("update prover task proving status failure", "taskID", taskID, "proverPublicKey", pk, "taskType", taskType, "error", updateErr)
}
proverTasks, err := m.proverTaskOrm.GetAssignedTaskOfOtherProvers(ctx, taskType, taskID, pk)
if err != nil {
log.Warn("checkIsAssignedToOtherProver failure", "taskID", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
return
}
if len(proverTasks) > 0 {
return
}
switch taskType {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatusFromProverError(ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update chunk proving_status as failed", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
}
case message.ProofTypeBatch:
if err := m.batchOrm.UpdateProvingStatusFromProverError(ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update batch proving_status as failed", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
}
}
}
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, pk string, proofMsg *message.ProofMsg) error {
// store the proof to prover task
var proofBytes []byte

View File

@@ -8,6 +8,11 @@ import (
"scroll-tech/coordinator/internal/config"
)
const InvalidTestProof = "this is a invalid proof"
// Verifier represents a mock halo2 verifier.
type Verifier struct{}
// NewVerifier Sets up a mock verifier.
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
return &Verifier{}, nil

View File

@@ -1,15 +0,0 @@
package verifier
import (
"scroll-tech/coordinator/internal/config"
)
// InvalidTestProof invalid proof used in tests
const InvalidTestProof = "this is a invalid proof"
// Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct {
cfg *config.VerifierConfig
BatchVK string
ChunkVK string
}

View File

@@ -11,11 +11,7 @@ package verifier
import "C" //nolint:typecheck
import (
"encoding/base64"
"encoding/json"
"io"
"os"
"path"
"unsafe"
"github.com/scroll-tech/go-ethereum/log"
@@ -25,6 +21,14 @@ import (
"scroll-tech/common/types/message"
)
// InvalidTestProof invalid proof used in tests
const InvalidTestProof = "this is a invalid proof"
// Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct {
cfg *config.VerifierConfig
}
// NewVerifier Sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
if cfg.MockMode {
@@ -40,21 +44,7 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
C.init_batch_verifier(paramsPathStr, assetsPathStr)
C.init_chunk_verifier(paramsPathStr, assetsPathStr)
batchVK, err := readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
if err != nil {
return nil, err
}
chunkVK, err := readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
if err != nil {
return nil, err
}
return &Verifier{
cfg: cfg,
BatchVK: batchVK,
ChunkVK: chunkVK,
}, nil
return &Verifier{cfg: cfg}, nil
}
// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
@@ -106,15 +96,3 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
verified := C.verify_chunk_proof(proofStr)
return verified != 0, nil
}
func readVK(filePat string) (string, error) {
f, err := os.Open(filePat)
if err != nil {
return "", err
}
byt, err := io.ReadAll(f)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(byt), nil
}

View File

@@ -91,31 +91,11 @@ func (o *Batch) GetUnassignedBatches(ctx context.Context, limit int) ([]*Batch,
return batches, nil
}
// GetUnassignedAndChunksUnreadyBatches get the batches which is unassigned and chunks is not ready
func (o *Batch) GetUnassignedAndChunksUnreadyBatches(ctx context.Context, offset, limit int) ([]*Batch, error) {
if offset < 0 || limit < 0 {
return nil, errors.New("limit and offset must not be smaller than 0")
}
db := o.db.WithContext(ctx)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
db = db.Where("chunk_proofs_status = ?", types.ChunkProofsStatusPending)
db = db.Order("index ASC")
db = db.Offset(offset)
db = db.Limit(limit)
var batches []*Batch
if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetUnassignedAndChunksUnreadyBatches error: %w", err)
}
return batches, nil
}
// GetAssignedBatches retrieves all batches whose proving_status is either types.ProvingTaskAssigned.
// GetAssignedBatches retrieves all batches whose proving_status is either types.ProvingTaskAssigned or types.ProvingTaskProved.
func (o *Batch) GetAssignedBatches(ctx context.Context) ([]*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
db = db.Where("proving_status IN (?)", []int{int(types.ProvingTaskAssigned), int(types.ProvingTaskProved)})
var assignedBatches []*Batch
if err := db.Find(&assignedBatches).Error; err != nil {
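
The query above widens from a single proving_status equality to an IN list so that Proved tasks are still treated as assigned. A minimal GORM sketch of that shape; the struct and status constants here are illustrative, not the repo's schema:

package orm

import "gorm.io/gorm"

// batch is a cut-down stand-in for the real Batch model.
type batch struct {
	Hash          string
	ProvingStatus int
}

// Hypothetical status codes; the repo defines these in common/types.
const (
	provingTaskAssigned = 2
	provingTaskProved   = 4
)

// getAssignedBatches selects rows whose status is Assigned or Proved,
// mirroring the IN (?) condition in the hunk above.
func getAssignedBatches(db *gorm.DB) ([]*batch, error) {
	var rows []*batch
	err := db.Model(&batch{}).
		Where("proving_status IN (?)", []int{provingTaskAssigned, provingTaskProved}).
		Find(&rows).Error
	return rows, err
}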
@@ -256,7 +236,7 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
@@ -270,30 +250,6 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
return nil
}
// UpdateProvingStatusFromProverError updates batch proving status when prover prove failed
func (o *Batch) UpdateProvingStatusFromProverError(ctx context.Context, hash string, status types.ProvingStatus) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash).Where("proving_status", types.ProvingTaskAssigned)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatusOptimistic error: %w, batch hash: %v, status: %v", err, hash, status.String())
}
return nil
}
// UpdateProofByHash updates the batch proof by hash.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.BatchProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db

View File

@@ -155,11 +155,11 @@ func (o *Chunk) GetProvingStatusByHash(ctx context.Context, hash string) (types.
return types.ProvingStatus(chunk.ProvingStatus), nil
}
// GetAssignedChunks retrieves all chunks whose proving_status is either types.ProvingTaskAssigned.
// GetAssignedChunks retrieves all chunks whose proving_status is either types.ProvingTaskAssigned or types.ProvingTaskProved.
func (o *Chunk) GetAssignedChunks(ctx context.Context) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
db = db.Where("proving_status IN (?)", []int{int(types.ProvingTaskAssigned), int(types.ProvingTaskProved)})
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
@@ -285,7 +285,7 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db
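
The switch above now stamps proved_at for both the Proved and Verified states. A short sketch of how such an update map could be assembled, with hypothetical status constants standing in for common/types:

package main

import (
	"fmt"
	"time"
)

// Hypothetical proving-status codes.
const (
	taskUnassigned = 1
	taskAssigned   = 2
	taskProved     = 4
	taskVerified   = 5
)

// provingUpdateFields builds the column map later passed to GORM's Updates,
// mirroring the case grouping in the hunk above.
func provingUpdateFields(status int, now time.Time) map[string]interface{} {
	fields := map[string]interface{}{"proving_status": status}
	switch status {
	case taskAssigned:
		fields["prover_assigned_at"] = now
	case taskUnassigned:
		fields["prover_assigned_at"] = nil
	case taskProved, taskVerified:
		fields["proved_at"] = now
	}
	return fields
}

func main() {
	fmt.Println(provingUpdateFields(taskProved, time.Now()))
}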
@@ -302,29 +302,6 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
return nil
}
// UpdateProvingStatusFromProverError updates chunk proving status when prover prove failed
func (o *Chunk) UpdateProvingStatusFromProverError(ctx context.Context, hash string, status types.ProvingStatus) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash).Where("proving_status", types.ProvingTaskAssigned)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatusOptimistic error: %w, chunk hash: %v, status: %v", err, hash, status.String())
}
return nil
}
// UpdateProofByHash updates the chunk proof by hash.
func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *message.ChunkProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db

View File

@@ -12,7 +12,6 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
@@ -68,7 +67,6 @@ func TestProverTaskOrm(t *testing.T) {
reward.SetString("18446744073709551616", 10) // 1 << 64, uint64 maximum 1<<64 -1
proverTask := ProverTask{
TaskType: int16(message.ProofTypeChunk),
TaskID: "test-hash",
ProverName: "prover-0",
ProverPublicKey: "0",
@@ -79,7 +77,7 @@ func TestProverTaskOrm(t *testing.T) {
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasks, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
proverTasks, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasks))
assert.Equal(t, proverTask.ProverName, proverTasks[0].ProverName)
@@ -93,7 +91,7 @@ func TestProverTaskOrm(t *testing.T) {
proverTask.AssignedAt = utils.NowUTC()
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasks, err = proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
proverTasks, err = proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasks))
assert.Equal(t, proverTask.ProvingStatus, proverTasks[0].ProvingStatus)
@@ -108,7 +106,6 @@ func TestProverTaskOrmUint256(t *testing.T) {
rewardUint256 := big.NewInt(0)
rewardUint256.SetString("115792089237316195423570985008687907853269984665640564039457584007913129639935", 10)
proverTask := ProverTask{
TaskType: int16(message.ProofTypeChunk),
TaskID: "test-hash",
ProverName: "prover-0",
ProverPublicKey: "0",
@@ -119,7 +116,7 @@ func TestProverTaskOrmUint256(t *testing.T) {
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasksUint256, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
proverTasksUint256, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasksUint256))
resultRewardUint256 := proverTasksUint256[0].Reward.BigInt()

View File

@@ -96,14 +96,13 @@ func (o *ProverTask) GetProverTasks(ctx context.Context, fields map[string]inter
// GetProverTasksByHashes retrieves the ProverTask records associated with the specified hashes.
// The returned prover task objects are sorted in ascending order by their ids.
func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, taskType message.ProofType, hashes []string) ([]*ProverTask, error) {
func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string) ([]*ProverTask, error) {
if len(hashes) == 0 {
return nil, nil
}
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id IN ?", hashes)
db = db.Order("id asc")
@@ -115,10 +114,9 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, taskType messag
}
// GetProverTaskByTaskIDAndProver get prover task taskID and public key
func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", taskID)
db = db.Where("prover_public_key", proverPublicKey)
db = db.Where("prover_version", proverVersion)
@@ -131,28 +129,11 @@ func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskTyp
return &proverTask, nil
}
// GetAssignedTaskOfOtherProvers get the chunk/batch task assigned other provers
func (o *ProverTask) GetAssignedTaskOfOtherProvers(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey string) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", taskID)
db = db.Where("prover_public_key != ?", proverPublicKey)
db = db.Where("proving_status = ?", int(types.ProverAssigned))
var proverTasks []ProverTask
if err := db.Find(&proverTasks).Error; err != nil {
return nil, fmt.Errorf("ProverTask.GetAssignedProverTask error: %w, taskID: %v", err, taskID)
}
return proverTasks, nil
}
// GetProvingStatusByTaskID retrieves the proving status of a prover task
func (o *ProverTask) GetProvingStatusByTaskID(ctx context.Context, taskType message.ProofType, taskID string) (types.ProverProveStatus, error) {
func (o *ProverTask) GetProvingStatusByTaskID(ctx context.Context, taskID string) (types.ProverProveStatus, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Select("proving_status")
db = db.Where("task_type", int(taskType))
db = db.Where("task_id = ?", taskID)
var proverTask ProverTask
@@ -180,10 +161,9 @@ func (o *ProverTask) GetTimeoutAssignedProverTasks(ctx context.Context, limit in
}
// TaskTimeoutMoreThanOnce get the timeout twice task. a temp design
func (o *ProverTask) TaskTimeoutMoreThanOnce(ctx context.Context, taskType message.ProofType, taskID string) bool {
func (o *ProverTask) TaskTimeoutMoreThanOnce(ctx context.Context, taskID string) bool {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", taskID)
db = db.Where("proving_status", int(types.ProverProofInvalid))

View File

@@ -2,9 +2,8 @@ package types
// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
ProverHeight int `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
VK string `form:"vk" json:"vk"`
ProverHeight int `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
}
// GetTaskSchema the schema data return to prover for get prover task
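
With the vk field dropped, the get-task request above carries only prover_height and task_type, bound from the form or JSON tags. A minimal gin sketch of binding that shape; the route and listen address are hypothetical:

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// getTaskParameter mirrors the trimmed request struct above.
type getTaskParameter struct {
	ProverHeight int `form:"prover_height" json:"prover_height"`
	TaskType     int `form:"task_type" json:"task_type"`
}

func main() {
	r := gin.Default()
	r.POST("/coordinator/v1/get_task", func(c *gin.Context) {
		var p getTaskParameter
		// ShouldBind picks the binder from the request's Content-Type.
		if err := c.ShouldBind(&p); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, p)
	})
	_ = r.Run(":8390") // hypothetical listen address
}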

View File

@@ -110,7 +110,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
}
func setEnv(t *testing.T) {
version.Version = "v4.1.98-aaa-bbb-ccc"
version.Version = "v1.2.3-aaa-bbb-ccc"
base = docker.NewDockerApp()
base.RunDBImage(t)
@@ -386,9 +386,9 @@ func testProofGeneratedFailed(t *testing.T) {
return
}
chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeChunk, dbChunk.Hash)
chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeBatch, batch.Hash)
batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProverTaskProvingStatus == types.ProverProofInvalid && batchProverTaskProvingStatus == types.ProverProofInvalid {
return

View File

@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, 8, int(cur))
assert.Equal(t, 7, int(cur))
}
func testMigrate(t *testing.T) {

View File

@@ -39,7 +39,7 @@ create table chunk
);
comment
on column chunk.proving_status is 'undefined, unassigned, assigned, proved (deprecated), verified, failed';
on column chunk.proving_status is 'undefined, unassigned, assigned, proved, verified, failed';
create unique index chunk_index_uindex
on chunk (index) where deleted_at IS NULL;

View File

@@ -50,7 +50,7 @@ comment
on column batch.chunk_proofs_status is 'undefined, pending, ready';
comment
on column batch.proving_status is 'undefined, unassigned, assigned, proved (deprecated), verified, failed';
on column batch.proving_status is 'undefined, unassigned, assigned, proved, verified, failed';
comment
on column batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, commit_failed, finalize_failed';

View File

@@ -1,15 +1,15 @@
-- +goose Up
-- +goose StatementBegin
drop index if exists l1_message_hash_uindex;
drop index l1_message_hash_uindex;
create index if not exists l1_message_hash_index
create index l1_message_hash_index
on l1_message (msg_hash) where deleted_at IS NULL;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop index if exists l1_message_hash_index;
drop index l1_message_hash_index;
create unique index if not exists l1_message_hash_uindex
create unique index l1_message_hash_uindex
on l1_message (msg_hash) where deleted_at IS NULL;
-- +goose StatementEnd

View File

@@ -37,11 +37,8 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
SetBaseURL(cfg.BaseURL).
AddRetryCondition(func(r *resty.Response, _ error) bool {
// Check for HTTP 5xx errors, e.g., coordinator is restarting.
if r.StatusCode() >= http.StatusInternalServerError {
log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
return true
}
return false
log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
return r.StatusCode() >= http.StatusInternalServerError
})
log.Info("successfully initialized prover client",
@@ -179,24 +176,23 @@ func (c *CoordinatorClient) SubmitProof(ctx context.Context, req *SubmitProofReq
if err != nil {
log.Error("submit proof request failed: %v", err)
return fmt.Errorf("submit proof request failed: %w", ErrCoordinatorConnect)
return fmt.Errorf("submit proof request failed: %w", ConnectErr)
}
if resp.StatusCode() != 200 {
log.Error("failed to submit proof, status code: %v", resp.StatusCode())
return fmt.Errorf("failed to submit proof, status code not 200: %w", ErrCoordinatorConnect)
return fmt.Errorf("failed to submit proof, status code not 200: %w", ConnectErr)
}
if result.ErrCode == types.ErrJWTTokenExpired {
log.Info("JWT expired, attempting to re-login")
if err := c.Login(ctx); err != nil {
log.Error("JWT expired, re-login failed: %v", err)
return fmt.Errorf("JWT expired, re-login failed: %w", ErrCoordinatorConnect)
return fmt.Errorf("JWT expired, re-login failed: %w", ConnectErr)
}
log.Info("re-login success")
return c.SubmitProof(ctx, req)
}
if result.ErrCode != types.Success {
return fmt.Errorf("error code: %v, error message: %v", result.ErrCode, result.ErrMsg)
}

View File

@@ -6,8 +6,7 @@ import (
"scroll-tech/common/types/message"
)
// ErrCoordinatorConnect connect to coordinator error
var ErrCoordinatorConnect = errors.New("connect coordinator error")
var ConnectErr = errors.New("connect coordinator error")
// ChallengeResponse defines the response structure for random API
type ChallengeResponse struct {
@@ -43,7 +42,6 @@ type LoginResponse struct {
type GetTaskRequest struct {
TaskType message.ProofType `json:"task_type"`
ProverHeight uint64 `json:"prover_height,omitempty"`
VK string `json:"vk"`
}
// GetTaskResponse defines the response structure for GetTask API

View File

@@ -5,7 +5,6 @@
"db_path": "unique-db-path-for-prover-1",
"core": {
"params_path": "params",
"assets_path": "assets",
"proof_type": 2
},
"coordinator": {

View File

@@ -25,7 +25,6 @@ type Config struct {
// ProverCoreConfig load zk prover config.
type ProverCoreConfig struct {
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
ProofType message.ProofType `json:"proof_type,omitempty"` // 1: chunk prover (default type), 2: batch prover
DumpDir string `json:"dump_dir,omitempty"`
}
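
After the change above, the prover core config no longer carries assets_path. A tiny sketch of decoding the trimmed shape from JSON, with a made-up payload and an int stand-in for message.ProofType:

package main

import (
	"encoding/json"
	"fmt"
)

// proverCoreConfig mirrors the struct above, minus the removed assets_path.
type proverCoreConfig struct {
	ParamsPath string `json:"params_path"`
	ProofType  int    `json:"proof_type,omitempty"` // 1: chunk prover, 2: batch prover
	DumpDir    string `json:"dump_dir,omitempty"`
}

func main() {
	raw := []byte(`{"params_path":"params","proof_type":2}`)
	var cfg proverCoreConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {ParamsPath:params ProofType:2 DumpDir:}
}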

View File

@@ -42,7 +42,3 @@ func (p *ProverCore) ProveBatch(taskID string, chunkInfos []*message.ChunkInfo,
Vk: _empty[:],
}, nil
}
func (p *ProverCore) GetVk() string {
return ""
}

View File

@@ -28,22 +28,19 @@ import (
// ProverCore sends block-traces to rust-prover through ffi and get back the zk-proof.
type ProverCore struct {
cfg *config.ProverCoreConfig
vk string
}
// NewProverCore inits a ProverCore object.
func NewProverCore(cfg *config.ProverCoreConfig) (*ProverCore, error) {
paramsPathStr := C.CString(cfg.ParamsPath)
assetsPathStr := C.CString(cfg.AssetsPath)
defer func() {
C.free(unsafe.Pointer(paramsPathStr))
C.free(unsafe.Pointer(assetsPathStr))
}()
if cfg.ProofType == message.ProofTypeBatch {
C.init_batch_prover(paramsPathStr, assetsPathStr)
C.init_batch_prover(paramsPathStr)
} else if cfg.ProofType == message.ProofTypeChunk {
C.init_chunk_prover(paramsPathStr, assetsPathStr)
C.init_chunk_prover(paramsPathStr)
}
if cfg.DumpDir != "" {
@@ -57,26 +54,6 @@ func NewProverCore(cfg *config.ProverCoreConfig) (*ProverCore, error) {
return &ProverCore{cfg: cfg}, nil
}
// GetVk get Base64 format of vk.
func (p *ProverCore) GetVk() string {
if p.vk != "" { // cached
return p.vk
}
var raw *C.char
if p.cfg.ProofType == message.ProofTypeBatch {
raw = C.get_batch_vk()
} else if p.cfg.ProofType == message.ProofTypeChunk {
raw = C.get_chunk_vk()
}
if raw != nil {
p.vk = C.GoString(raw) // cache it
}
return p.vk
}
// ProveBatch call rust ffi to generate batch proof.
func (p *ProverCore) ProveBatch(taskID string, chunkInfos []*message.ChunkInfo, chunkProofs []*message.ChunkProof) (*message.BatchProof, error) {
if p.cfg.ProofType != message.ProofTypeBatch {
@@ -91,11 +68,6 @@ func (p *ProverCore) ProveBatch(taskID string, chunkInfos []*message.ChunkInfo,
if err != nil {
return nil, err
}
if !p.checkChunkProofs(chunkProofsByt) {
return nil, fmt.Errorf("Non-match chunk protocol: task-id = %s", taskID)
}
proofByt := p.proveBatch(chunkInfosByt, chunkProofsByt)
err = p.mayDumpProof(taskID, proofByt)
@@ -140,20 +112,6 @@ func (p *ProverCore) TracesToChunkInfo(traces []*types.BlockTrace) (*message.Chu
return chunkInfo, json.Unmarshal(chunkInfoByt, chunkInfo)
}
func (p *ProverCore) checkChunkProofs(chunkProofsByt []byte) bool {
chunkProofsStr := C.CString(string(chunkProofsByt))
defer func() {
C.free(unsafe.Pointer(chunkProofsStr))
}()
log.Info("Start to check chunk proofs ...")
valid := C.check_chunk_proofs(chunkProofsStr)
log.Info("Finish checking chunk proofs!")
return valid != 0
}
func (p *ProverCore) proveBatch(chunkInfosByt []byte, chunkProofsByt []byte) []byte {
chunkInfosStr := C.CString(string(chunkInfosByt))
chunkProofsStr := C.CString(string(chunkProofsByt))
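
The FFI calls above follow one pattern throughout: copy the Go string into C memory with C.CString, hand the pointer to the prover library, and free it afterwards. A self-contained cgo sketch of that pattern, with a stub C function in place of the real libzkp entry points:

package main

/*
#include <stdlib.h>

// init_chunk_prover is a stub standing in for the real libzkp FFI symbol.
static void init_chunk_prover(char* params_path) { (void)params_path; }
*/
import "C"

import "unsafe"

func main() {
	// C.CString allocates with malloc, so the caller must free it.
	paramsPathStr := C.CString("params")
	defer C.free(unsafe.Pointer(paramsPathStr))

	C.init_chunk_prover(paramsPathStr)
}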

View File

@@ -4,7 +4,6 @@
package core_test
import (
"encoding/base64"
"encoding/json"
"flag"
"io"
@@ -23,12 +22,9 @@ import (
var (
paramsPath = flag.String("params", "/assets/test_params", "params dir")
assetsPath = flag.String("assets", "/assets/test_assets", "assets dir")
proofDumpPath = flag.String("dump", "/assets/proof_data", "the path proofs dump to")
tracePath1 = flag.String("trace1", "/assets/traces/1_transfer.json", "chunk trace 1")
tracePath2 = flag.String("trace2", "/assets/traces/10_transfer.json", "chunk trace 2")
batchVkPath = flag.String("batch-vk", "/assets/test_assets/agg_vk.vkey", "batch vk")
chunkVkPath = flag.String("chunk-vk", "/assets/test_assets/chunk_vk.vkey", "chunk vk")
)
func TestFFI(t *testing.T) {
@@ -37,7 +33,6 @@ func TestFFI(t *testing.T) {
chunkProverConfig := &config.ProverCoreConfig{
DumpDir: *proofDumpPath,
ParamsPath: *paramsPath,
AssetsPath: *assetsPath,
ProofType: message.ProofTypeChunk,
}
chunkProverCore, err := core.NewProverCore(chunkProverConfig)
@@ -88,13 +83,9 @@ func TestFFI(t *testing.T) {
as.NoError(err)
t.Log("Generated and dumped chunk proof 2")
as.Equal(chunkProverCore.GetVk(), readVk(*chunkVkPath, as))
t.Log("Chunk VKs are equal")
batchProverConfig := &config.ProverCoreConfig{
DumpDir: *proofDumpPath,
ParamsPath: *paramsPath,
AssetsPath: *assetsPath,
ProofType: message.ProofTypeBatch,
}
batchProverCore, err := core.NewProverCore(batchProverConfig)
@@ -105,9 +96,6 @@ func TestFFI(t *testing.T) {
_, err = batchProverCore.ProveBatch("batch_proof", chunkInfos, chunkProofs)
as.NoError(err)
t.Log("Generated and dumped batch proof")
as.Equal(batchProverCore.GetVk(), readVk(*batchVkPath, as))
t.Log("Batch VKs are equal")
}
func readChunkTrace(filePat string, as *assert.Assertions) []*types.BlockTrace {
@@ -121,12 +109,3 @@ func readChunkTrace(filePat string, as *assert.Assertions) []*types.BlockTrace {
return []*types.BlockTrace{trace}
}
func readVk(filePat string, as *assert.Assertions) string {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
return base64.StdEncoding.EncodeToString(byt)
}

View File

@@ -162,7 +162,7 @@ func (r *Prover) proveAndSubmit() error {
proofMsg, err = r.prove(task)
if err != nil { // handling error from prove
log.Error("failed to prove task", "task_type", task.Task.Type, "task-id", task.Task.ID, "err", err)
return r.submitErr(task, message.ProofFailureNoPanic, err)
return r.submitErr(task, true, message.ProofFailureNoPanic, err)
}
return r.submitProof(proofMsg)
}
@@ -177,9 +177,6 @@ func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
// prepare the request
req := &client.GetTaskRequest{
TaskType: r.Type(),
// we may not be able to get the vk at the first time, so we should pass vk to the coordinator every time we getTask
// instead of passing vk when we login
VK: r.proverCore.GetVk(),
}
if req.TaskType == message.ProofTypeChunk {
@@ -325,7 +322,7 @@ func (r *Prover) submitProof(msg *message.ProofDetail) error {
// send the submit request
if err := r.coordinatorClient.SubmitProof(r.ctx, req); err != nil {
if !errors.Is(errors.Unwrap(err), client.ErrCoordinatorConnect) {
if !errors.Is(errors.Unwrap(err), client.ConnectErr) {
if deleteErr := r.stack.Delete(msg.ID); deleteErr != nil {
log.Error("prover stack pop failed", "task_type", msg.Type, "task_id", msg.ID, "err", deleteErr)
}
@@ -333,15 +330,12 @@ func (r *Prover) submitProof(msg *message.ProofDetail) error {
return fmt.Errorf("error submitting proof: %v", err)
}
if deleteErr := r.stack.Delete(msg.ID); deleteErr != nil {
log.Error("prover stack pop failed", "task_type", msg.Type, "task_id", msg.ID, "err", deleteErr)
}
log.Info("proof submitted successfully", "task-id", msg.ID, "task-type", msg.Type, "task-status", msg.Status, "err", msg.Error)
return nil
}
func (r *Prover) submitErr(task *store.ProvingTask, proofFailureType message.ProofFailureType, err error) error {
func (r *Prover) submitErr(task *store.ProvingTask, isRetry bool, proofFailureType message.ProofFailureType, err error) error {
// prepare the submit request
req := &client.SubmitProofRequest{
TaskID: task.Task.ID,
@@ -354,16 +348,13 @@ func (r *Prover) submitErr(task *store.ProvingTask, proofFailureType message.Pro
// send the submit request
if submitErr := r.coordinatorClient.SubmitProof(r.ctx, req); submitErr != nil {
if !errors.Is(errors.Unwrap(err), client.ErrCoordinatorConnect) {
if !errors.Is(errors.Unwrap(err), client.ConnectErr) {
if deleteErr := r.stack.Delete(task.Task.ID); deleteErr != nil {
log.Error("prover stack pop failed", "task_type", task.Task.Type, "task_id", task.Task.ID, "err", deleteErr)
}
}
return fmt.Errorf("error submitting proof: %v", submitErr)
}
if deleteErr := r.stack.Delete(task.Task.ID); deleteErr != nil {
log.Error("prover stack pop failed", "task_type", task.Task.Type, "task_id", task.Task.ID, "err", deleteErr)
}
log.Info("proof submitted report failure successfully",
"task-id", task.Task.ID, "task-type", task.Task.Type,

View File

@@ -51,7 +51,7 @@ func TestMain(m *testing.M) {
}
func TestCoordinatorProverInteraction(t *testing.T) {
// Start postgres docker containers
// Start postgres docker containers.
base.RunL2Geth(t)
base.RunDBImage(t)