Compare commits


29 Commits

Author SHA1 Message Date
georgehao
1dfca3b7c0 feat(coordinator): prover task record unique (#845)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-28 10:51:01 +08:00
colin
826e847b5a fix(rollup-relayer): determine first block height and first chunk index (#861)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-26 15:04:52 +08:00
colin
8c71a6d22a fix(chunk-proposer): count l1+l2 txs into chunk (#879)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-08-26 14:53:59 +08:00
Péter Garamvölgyi
87f18efba8 fix: limit DB query result count in chunk and batch proposer (#878)
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-25 12:04:41 +02:00
Xi Lin
fecd129a39 fix(contracts): OZ-L08 Redundancy of Replaying Messages in L2ScrollMessenger (#850)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-25 16:25:52 +08:00
Xi Lin
663156984f fix(contracts): OZ-N08 Inconsistent Order of Event Emissions (#876)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-25 15:51:52 +08:00
Xi Lin
3499c595e7 fix(contracts): OZ-N07 Incorrect Function Visibility (#875)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-25 15:03:15 +08:00
Xi Lin
95d2df46e3 fix(contracts): OZ-N14 Duplicate Imports (#877)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-25 14:21:59 +08:00
Xi Lin
102d29c54d docs(contracts): OZ-N06 Inconsistent Coding Style (#874)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
2023-08-25 11:59:20 +08:00
Xi Lin
7d50699344 fix(contracts): OZ-L07 Potentially Misleading Verifier Event (#849)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-25 10:45:35 +08:00
Xi Lin
e08b800d1d fix(contracts): OZ-L06 Possible Misleading revert Message When Swapping Non-ERC20Permit Tokens (#847)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-25 10:23:02 +08:00
Xi Lin
6139ca0df0 fix(contracts): OZ-L10 maxReplayTimes is Not Initialized in L1ScrollMessenger (#852)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-25 10:10:31 +08:00
colin
24a0fd08ac fix(rollup-relayer): chunk and batch proposer metrics (#873)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-08-24 19:53:15 +02:00
Steven
2840485f38 fix: update scroll-prover to v0.7.5 (#872)
Co-authored-by: silathdiir <silathdiir@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-24 23:21:03 +08:00
colin
dab21fc712 refactor(coordinator): omit coordinator's error details to prover & add logs (#863)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-08-24 22:59:33 +08:00
HAOYUatHZ
c44b7f7bf4 fix(coordinator): fix TestApis (#870)
2023-08-24 16:05:53 +08:00
HAOYUatHZ
a8c71b5e36 feat(coordinator): bump hardcoded tag check (#869)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-24 15:55:27 +08:00
georgehao
ae2f62df00 fix(coordinator): fix GetAssignedTaskOfOtherProvers (#868)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2023-08-24 15:25:16 +08:00
georgehao
ce5c6e0aa3 feat(coordinator): fix all chunks ready bug (#862)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-24 15:20:03 +08:00
georgehao
e8ddf99184 feat(coordinator):fix recover status bug (#866)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2023-08-24 12:21:29 +08:00
Steven
ebf2b429a3 fix: update scroll-prover to v0.7.3 (#865)
Co-authored-by: silathdiir <silathdiir@users.noreply.github.com>
2023-08-24 07:03:42 +08:00
HAOYUatHZ
db46ce408d feat(coordinator): hardcode tag validation (#864)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-23 21:37:29 +08:00
HAOYUatHZ
3d1a8374d0 feat(coordinator): check vk (#859)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-23 17:12:39 +08:00
yqrashawn
574aa68491 ci: failed to create commit in bump version action (#860)
Co-authored-by: yqrashawn <yqrashawn@users.noreply.github.com>
2023-08-23 14:49:41 +08:00
Haichen Shen
589388b288 fix(rollup): increase the batch proposer frequency (#858)
Co-authored-by: icemelon <icemelon@users.noreply.github.com>
2023-08-23 12:03:08 +08:00
Steven
af8175800f fix: update libzkp to use scroll-prover v0.7.2 (#807)
Co-authored-by: silathdiir <silathdiir@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-08-23 11:48:27 +08:00
vyzo
e6793a85f5 test: Increase test coverage in block/chunk encoding (#830)
Co-authored-by: vyzo <vyzo@users.noreply.github.com>
2023-08-22 08:50:17 -07:00
colin
7e9f3b7376 fix(rollup-relayer): estimate l1 commit calldata size (#848)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2023-08-22 13:09:28 +02:00
Péter Garamvölgyi
8b611b443a fix: only create/drop index if exists in migration (#856)
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2023-08-22 11:27:49 +02:00
73 changed files with 964 additions and 433 deletions

View File

@@ -15,6 +15,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
ref: ${{ github.head_ref }}
- name: check diff
id: check_diff
run: |
@@ -49,7 +51,11 @@ jobs:
- name: bump version in common/version/version.go
if: steps.check_diff.outputs.result == 'bump'
run: node .github/scripts/bump_version_dot_go.mjs
# Commits made by this Action do not trigger new Workflow runs
- uses: stefanzweifel/git-auto-commit-action@3ea6ae190baf489ba007f7c92608f33ce20ef04a
if: steps.check_diff.outputs.result == 'bump'
with:
skip_fetch: true # already did fetch in check diff
file_pattern: "common/version/version.go"
commit_message: "chore: auto version bump[bot]"

View File

@@ -107,7 +107,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk)
go utils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch)
go utils.Loop(subCtx, 10*time.Second, batchProposer.TryProposeBatch)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)

View File

@@ -64,7 +64,7 @@
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
},
"chunk_proposer_config": {
"max_l2_tx_num_per_chunk": 1123,
"max_tx_num_per_chunk": 1123,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"chunk_timeout_sec": 300,

View File

@@ -2,6 +2,7 @@ package config
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
@@ -15,6 +16,13 @@ type Config struct {
DBConfig *database.Config `json:"db_config"`
}
func (c *Config) validate() error {
if maxChunkPerBatch := c.L2Config.BatchProposerConfig.MaxChunkNumPerBatch; maxChunkPerBatch <= 0 {
return fmt.Errorf("Invalid max_chunk_num_per_batch configuration: %v", maxChunkPerBatch)
}
return nil
}
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
buf, err := os.ReadFile(filepath.Clean(file))
@@ -28,5 +36,8 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}
if err := cfg.validate(); err != nil {
return nil, err
}
return cfg, nil
}
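The new validate() hook means a malformed configuration now fails fast at startup instead of surfacing later as a proposer that can never close a batch. A minimal usage sketch, assuming the rollup-relayer's config package and go-ethereum's log package are imported (the path below is hypothetical):

// mustLoadConfig fails at load time if max_chunk_num_per_batch is <= 0.
func mustLoadConfig(path string) *config.Config {
	cfg, err := config.NewConfig(path)
	if err != nil {
		// e.g. "Invalid max_chunk_num_per_batch configuration: 0"
		log.Crit("invalid rollup-relayer config", "err", err)
	}
	return cfg
}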

View File

@@ -28,7 +28,7 @@ type L2Config struct {
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
MaxTxNumPerChunk uint64 `json:"max_tx_num_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`

View File

@@ -226,7 +226,7 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
case cfm := <-r.gasOracleSender.ConfirmChan():
r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
r.metrics.bridgeL1GasOraclerConfirmedTotal.Inc()
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())

View File

@@ -148,7 +148,12 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
}
func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
dbChunks, err := p.chunkOrm.GetUnbatchedChunks(p.ctx)
unbatchedChunkIndex, err := p.batchOrm.GetFirstUnbatchedChunkIndex(p.ctx)
if err != nil {
return nil, err
}
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, unbatchedChunkIndex, int(p.maxChunkNumPerBatch)+1)
if err != nil {
return nil, err
}
@@ -183,6 +188,10 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
}
for i, chunk := range dbChunks {
// metric values
lastTotalL1CommitCalldataSize := totalL1CommitCalldataSize
lastTotalL1CommitGas := totalL1CommitGas
totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
totalL1CommitGas += chunk.TotalL1CommitGas
// adjust batch data hash gas cost
@@ -230,9 +239,9 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
p.totalL1CommitGas.Set(float64(lastTotalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(lastTotalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(i))
return dbChunks[:i], nil
}
}
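The gauge changes in this hunk follow one pattern: the running totals include the chunk that broke a limit, but the proposed batch is dbChunks[:i], so the metrics must report the totals as they stood before that chunk was added (and the chunk count is i, not len(dbChunks)). A self-contained sketch of the pattern, with illustrative names:

// acceptUnderLimit keeps the longest prefix of costs whose running sum
// stays within limit, and returns the sum of the accepted prefix,
// which is the value the gauges above now report.
func acceptUnderLimit(costs []uint64, limit uint64) (accepted []uint64, acceptedTotal uint64) {
	var total, lastTotal uint64
	for i, c := range costs {
		lastTotal = total // totals before adding element i
		total += c
		if total > limit {
			// element i breaks the limit: keep the prefix and
			// report the totals of what was actually accepted.
			return costs[:i], lastTotal
		}
	}
	return costs, total
}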

View File

@@ -23,7 +23,7 @@ func testBatchProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxL2TxNumPerChunk: 10000,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
@@ -39,11 +39,6 @@ func testBatchProposer(t *testing.T) {
}, db, nil)
bp.TryProposeBatch()
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
assert.NoError(t, err)
assert.Empty(t, chunks)
batchOrm := orm.NewBatch(db)
// get all batches.
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
@@ -54,6 +49,7 @@ func testBatchProposer(t *testing.T) {
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
chunkOrm := orm.NewChunk(db)
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, batches, 1)

View File

@@ -18,6 +18,10 @@ import (
"scroll-tech/bridge/internal/orm"
)
// maxNumBlockPerChunk is the maximum number of blocks we allow per chunk.
// Normally we will pack much fewer blocks because of other limits.
const maxNumBlockPerChunk int = 100
// chunkRowConsumption is map(sub-circuit name => sub-circuit row count)
type chunkRowConsumption map[string]uint64
@@ -51,7 +55,7 @@ type ChunkProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxL2TxNumPerChunk uint64
maxTxNumPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
maxRowConsumptionPerChunk uint64
@@ -62,7 +66,7 @@ type ChunkProposer struct {
proposeChunkFailureTotal prometheus.Counter
proposeChunkUpdateInfoTotal prometheus.Counter
proposeChunkUpdateInfoFailureTotal prometheus.Counter
chunkL2TxNum prometheus.Gauge
chunkTxNum prometheus.Gauge
chunkEstimateL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalTxGasUsed prometheus.Gauge
@@ -75,7 +79,7 @@ type ChunkProposer struct {
// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
log.Debug("new chunk proposer",
"maxL2TxNumPerChunk", cfg.MaxL2TxNumPerChunk,
"maxTxNumPerChunk", cfg.MaxTxNumPerChunk,
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
@@ -86,7 +90,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxL2TxNumPerChunk: cfg.MaxL2TxNumPerChunk,
maxTxNumPerChunk: cfg.MaxTxNumPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
@@ -109,9 +113,9 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
Name: "bridge_propose_chunk_update_info_failure_total",
Help: "Total number of propose chunk update info failure total.",
}),
chunkL2TxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_l2_tx_num",
Help: "The chunk l2 tx num",
chunkTxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_tx_num",
Help: "The chunk tx num",
}),
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_estimate_l1_commit_gas",
@@ -182,7 +186,12 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
}
func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx)
unchunkedBlockHeight, err := p.chunkOrm.GetUnchunkedBlockHeight(p.ctx)
if err != nil {
return nil, err
}
blocks, err := p.l2BlockOrm.GetL2WrappedBlocksGEHeight(p.ctx, unchunkedBlockHeight, maxNumBlockPerChunk)
if err != nil {
return nil, err
}
@@ -193,14 +202,21 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
var chunk types.Chunk
var totalTxGasUsed uint64
var totalL2TxNum uint64
var totalTxNum uint64
var totalL1CommitCalldataSize uint64
var totalL1CommitGas uint64
crc := chunkRowConsumption{}
for i, block := range blocks {
// metric values
lastTotalTxNum := totalTxNum
lastTotalL1CommitGas := totalL1CommitGas
lastCrcMax := crc.max()
lastTotalL1CommitCalldataSize := totalL1CommitCalldataSize
lastTotalTxGasUsed := totalTxGasUsed
totalTxGasUsed += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalTxNum += uint64(len(block.Transactions))
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas = chunk.EstimateL1CommitGas()
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
@@ -209,19 +225,19 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
}
crcMax := crc.max()
if totalL2TxNum > p.maxL2TxNumPerChunk ||
if totalTxNum > p.maxTxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
crcMax > p.maxRowConsumptionPerChunk {
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if i == 0 {
if totalL2TxNum > p.maxL2TxNumPerChunk {
if totalTxNum > p.maxTxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
block.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
totalTxNum,
p.maxTxNumPerChunk,
)
}
@@ -255,21 +271,21 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
}
log.Debug("breaking limit condition in chunking",
"totalL2TxNum", totalL2TxNum,
"maxL2TxNumPerChunk", p.maxL2TxNumPerChunk,
"totalTxNum", totalTxNum,
"maxTxNumPerChunk", p.maxTxNumPerChunk,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
"chunkRowConsumptionMax", crcMax,
"chunkRowConsumption", crc,
"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crcMax))
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
p.chunkTxNum.Set(float64(lastTotalTxNum))
p.chunkEstimateL1CommitGas.Set(float64(lastTotalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(lastTotalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(lastCrcMax))
p.totalTxGasUsed.Set(float64(lastTotalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
}
@@ -284,7 +300,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"block outdated time threshold", currentTimeSec,
)
p.chunkFirstBlockTimeoutReached.Inc()
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkTxNum.Set(float64(totalTxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crc.max()))
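For reference, the switch from totalL2TxNum to totalTxNum changes what counts against the per-chunk limit: a block with, say, 3 L1 message transactions and 5 L2 transactions previously contributed 5 toward max_l2_tx_num_per_chunk, but now contributes all 8 toward max_tx_num_per_chunk (the counts are illustrative).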

View File

@@ -23,7 +23,7 @@ func testChunkProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxL2TxNumPerChunk: 10000,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
@@ -38,7 +38,7 @@ func testChunkProposer(t *testing.T) {
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 1)
assert.Equal(t, expectedHash.Hex(), chunks[0].Hash)
@@ -53,7 +53,7 @@ func testChunkProposerRowConsumption(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxL2TxNumPerChunk: 10000,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 0, // !
@@ -62,7 +62,7 @@ func testChunkProposerRowConsumption(t *testing.T) {
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 0)
}

View File

@@ -142,6 +142,21 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
return &latestBatch, nil
}
// GetFirstUnbatchedChunkIndex retrieves the first unbatched chunk index.
func (o *Batch) GetFirstUnbatchedChunkIndex(ctx context.Context) (uint64, error) {
// Get the latest batch
latestBatch, err := o.GetLatestBatch(ctx)
if err != nil {
return 0, fmt.Errorf("Chunk.GetChunkedBlockHeight error: %w", err)
}
// if parentBatch==nil then err==gorm.ErrRecordNotFound,
// which means there is not batched chunk yet, thus returns 0
if latestBatch == nil {
return 0, nil
}
return latestBatch.EndChunkIndex + 1, nil
}
// GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes.
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) {
if len(hashes) == 0 {
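As a concrete example of the derivation above: if the latest batch ends at EndChunkIndex 41, the first unbatched chunk index is 42; if no batch exists yet, the function returns 0 so batching starts from the very first chunk (the index values are illustrative).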

View File

@@ -87,20 +87,6 @@ func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endInde
return chunks, nil
}
// GetUnbatchedChunks retrieves unbatched chunks from the database.
func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash IS NULL")
db = db.Order("index asc")
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetUnbatchedChunks error: %w", err)
}
return chunks, nil
}
// GetLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
db := o.db.WithContext(ctx)
@@ -114,6 +100,40 @@ func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
return &latestChunk, nil
}
// GetUnchunkedBlockHeight retrieves the first unchunked block number.
func (o *Chunk) GetUnchunkedBlockHeight(ctx context.Context) (uint64, error) {
// Get the latest chunk
latestChunk, err := o.GetLatestChunk(ctx)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
// if there is no chunk yet, return block number 1,
// because the genesis block does not need to be chunked
return 1, nil
}
return 0, fmt.Errorf("Chunk.GetChunkedBlockHeight error: %w", err)
}
return latestChunk.EndBlockNumber + 1, nil
}
// GetChunksGEIndex retrieves chunks that have a chunk index greater than or equal to the given index.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetChunksGEIndex(ctx context.Context, index uint64, limit int) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("index >= ?", index)
db = db.Order("index ASC")
if limit > 0 {
db = db.Limit(limit)
}
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetChunksGEIndex error: %w", err)
}
return chunks, nil
}
// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
@@ -151,7 +171,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
var totalL1CommitCalldataSize uint64
for _, block := range chunk.Blocks {
totalL2TxGas += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL2TxNum += block.NumL2Transactions()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
}
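A usage sketch for the new query, assuming a *orm.Chunk handle and a context are in scope. A non-positive limit means "no limit", which is how tests elsewhere in this diff fetch everything via GetChunksGEIndex(ctx, 0, 0); the max+1 below mirrors proposeBatchChunks, presumably so the caller can tell "exactly max chunks exist" apart from "more are still pending":

// fetchCandidateChunks is a hypothetical helper mirroring the proposer's call.
func fetchCandidateChunks(ctx context.Context, chunkOrm *orm.Chunk, firstUnbatched uint64, max int) ([]*orm.Chunk, error) {
	// Fetch one row beyond the batch-size cap, starting at the first
	// unbatched chunk index.
	return chunkOrm.GetChunksGEIndex(ctx, firstUnbatched, max+1)
}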

View File

@@ -64,18 +64,23 @@ func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) {
return maxNumber, nil
}
// GetUnchunkedBlocks get the l2 blocks that have not been put into a chunk.
// GetL2WrappedBlocksGEHeight retrieves L2 blocks that have a block number greater than or equal to the given height.
// The blocks are converted into WrappedBlock format for output.
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
func (o *L2Block) GetL2WrappedBlocksGEHeight(ctx context.Context, height uint64, limit int) ([]*types.WrappedBlock, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_root, row_consumption")
db = db.Where("chunk_hash IS NULL")
db = db.Where("number >= ?", height)
db = db.Order("number ASC")
if limit > 0 {
db = db.Limit(limit)
}
var l2Blocks []L2Block
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
return nil, fmt.Errorf("L2Block.GetL2WrappedBlocksGEHeight error: %w", err)
}
var wrappedBlocks []*types.WrappedBlock
@@ -83,18 +88,18 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
var wrappedBlock types.WrappedBlock
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
return nil, fmt.Errorf("L2Block.GetL2WrappedBlocksGEHeight error: %w", err)
}
wrappedBlock.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
return nil, fmt.Errorf("L2Block.GetL2WrappedBlocksGEHeight error: %w", err)
}
wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
if err := json.Unmarshal([]byte(v.RowConsumption), &wrappedBlock.RowConsumption); err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
return nil, fmt.Errorf("L2Block.GetL2WrappedBlocksGEHeight error: %w", err)
}
wrappedBlocks = append(wrappedBlocks, &wrappedBlock)

View File

@@ -101,25 +101,26 @@ func TestL2BlockOrm(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, uint64(3), height)
blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background())
blocks, err := l2BlockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, wrappedBlock1, blocks[0])
assert.Equal(t, wrappedBlock2, blocks[1])
assert.Equal(t, "", blocks[0].ChunkHash)
assert.Equal(t, "", blocks[1].ChunkHash)
blocks, err = l2BlockOrm.GetL2BlocksInRange(context.Background(), 2, 3)
wrappedBlocks, err := l2BlockOrm.GetL2BlocksInRange(context.Background(), 2, 3)
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, wrappedBlock1, blocks[0])
assert.Equal(t, wrappedBlock2, blocks[1])
assert.Equal(t, wrappedBlock1, wrappedBlocks[0])
assert.Equal(t, wrappedBlock2, wrappedBlocks[1])
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 2, 2, "test hash")
assert.NoError(t, err)
blocks, err = l2BlockOrm.GetUnchunkedBlocks(context.Background())
blocks, err = l2BlockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, blocks, 1)
assert.Equal(t, wrappedBlock2, blocks[0])
assert.Len(t, blocks, 2)
assert.Equal(t, "test hash", blocks[0].ChunkHash)
assert.Equal(t, "", blocks[1].ChunkHash)
}
func TestChunkOrm(t *testing.T) {
@@ -135,11 +136,13 @@ func TestChunkOrm(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, "", chunks[0].BatchHash)
assert.Equal(t, "", chunks[1].BatchHash)
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash1.Hex(), types.ProvingTaskVerified)
assert.NoError(t, err)
@@ -156,9 +159,13 @@ func TestChunkOrm(t *testing.T) {
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash")
assert.NoError(t, err)
chunks, err = chunkOrm.GetUnbatchedChunks(context.Background())
chunks, err = chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 1)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, "test hash", chunks[0].BatchHash)
assert.Equal(t, "", chunks[1].BatchHash)
}
func TestBatchOrm(t *testing.T) {

View File

@@ -58,7 +58,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NoError(t, err)
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxL2TxNumPerChunk: 10000,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
@@ -66,8 +66,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
}, db, nil)
cp.TryProposeChunk()
batchOrm := orm.NewBatch(db)
unbatchedChunkIndex, err := batchOrm.GetFirstUnbatchedChunkIndex(context.Background())
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), unbatchedChunkIndex, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 1)
@@ -81,7 +85,6 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
l2Relayer.ProcessPendingBatches()
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)

View File

@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
dependencies = [
"ark-std",
"env_logger 0.10.0",
@@ -433,7 +433,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
dependencies = [
"eth-types",
"ethers-core",
@@ -1049,7 +1049,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
dependencies = [
"ethers-core",
"ethers-signers",
@@ -1226,7 +1226,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
dependencies = [
"eth-types",
"geth-utils",
@@ -1439,7 +1439,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
dependencies = [
"digest 0.7.6",
"eth-types",
@@ -1479,7 +1479,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -2077,7 +2077,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
dependencies = [
"env_logger 0.9.3",
"eth-types",
@@ -2264,7 +2264,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
dependencies = [
"eth-types",
"ethers-core",
@@ -2279,7 +2279,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
dependencies = [
"bus-mapping",
"eth-types",
@@ -2754,8 +2754,8 @@ dependencies = [
[[package]]
name = "prover"
version = "0.6.6"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.6#282ea596fd6c7a113cfb3a3ef3bf3f6d5f1b0768"
version = "0.7.5"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.5#9699d40940aed2f14d8e1958167d714bca2c9984"
dependencies = [
"aggregator",
"anyhow",
@@ -3624,7 +3624,7 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
[[package]]
name = "snark-verifier"
version = "0.1.0"
source = "git+https://github.com/scroll-tech//snark-verifier?tag=v0.1.1#11a09d4a37c31c659b29e2dac0ceb544a776ad7b"
source = "git+https://github.com/scroll-tech/snark-verifier?tag=v0.1.2#4466059ce9a6dfaf26455e4ffb61d72af775cf52"
dependencies = [
"bytes",
"ethereum-types 0.14.1",
@@ -3648,7 +3648,7 @@ dependencies = [
[[package]]
name = "snark-verifier-sdk"
version = "0.0.1"
source = "git+https://github.com/scroll-tech//snark-verifier?tag=v0.1.1#11a09d4a37c31c659b29e2dac0ceb544a776ad7b"
source = "git+https://github.com/scroll-tech/snark-verifier?tag=v0.1.2#4466059ce9a6dfaf26455e4ffb61d72af775cf52"
dependencies = [
"bincode",
"env_logger 0.10.0",
@@ -4039,8 +4039,8 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.6.6"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.6#282ea596fd6c7a113cfb3a3ef3bf3f6d5f1b0768"
version = "0.7.5"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.5#9699d40940aed2f14d8e1958167d714bca2c9984"
dependencies = [
"base64 0.13.1",
"blake2",
@@ -4491,7 +4491,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.6#24e2cf4efb1c420a46a579d47f38a42b3487c092"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
dependencies = [
"array-init",
"bus-mapping",
@@ -4536,6 +4536,7 @@ dependencies = [
name = "zkp"
version = "0.1.0"
dependencies = [
"base64 0.13.1",
"env_logger 0.9.3",
"halo2_proofs",
"libc",

View File

@@ -19,23 +19,19 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
[patch."https://github.com/privacy-scaling-explorations/halo2curves.git"]
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
[patch."https://github.com/scroll-tech/snark-verifier"]
snark-verifier = { git = "https://github.com/scroll-tech//snark-verifier", tag = "v0.1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech//snark-verifier", tag = "v0.1.1" }
[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.6" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.6" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.5" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.5" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
log = "0.4"
base64 = "0.13.0"
env_logger = "0.9.0"
libc = "0.2"
log = "0.4"
once_cell = "1.8.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"
libc = "0.2"
once_cell = "1.8.0"
[profile.test]
opt-level = 3

View File

@@ -1,11 +1,11 @@
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char, OUTPUT_DIR};
use crate::utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR};
use libc::c_char;
use prover::{
aggregator::{Prover, Verifier},
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, ChunkHash, ChunkProof,
};
use std::{cell::OnceCell, panic, ptr::null};
use std::{cell::OnceCell, env, panic, ptr::null};
use types::eth::BlockTrace;
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -13,11 +13,15 @@ static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char) {
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_batch_prove");
let params_dir = c_char_to_str(params_dir);
let prover = Prover::from_params_dir(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let prover = Prover::from_dirs(params_dir, assets_dir);
PROVER.set(prover).unwrap();
}
@@ -30,11 +34,35 @@ pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_d
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> c_char {
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs).unwrap();
assert!(!chunk_proofs.is_empty());
let valid = panic::catch_unwind(|| PROVER.get().unwrap().check_chunk_proofs(&chunk_proofs));
valid.unwrap_or(false) as c_char
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_batch_proof(

View File

@@ -1,11 +1,11 @@
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char, OUTPUT_DIR};
use crate::utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR};
use libc::c_char;
use prover::{
utils::init_env_and_log,
zkevm::{Prover, Verifier},
ChunkProof,
};
use std::{cell::OnceCell, panic, ptr::null};
use std::{cell::OnceCell, env, panic, ptr::null};
use types::eth::BlockTrace;
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -13,10 +13,14 @@ static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char) {
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_chunk_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let prover = Prover::from_params_dir(params_dir);
PROVER.set(prover).unwrap();
@@ -30,11 +34,24 @@ pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_d
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {

View File

@@ -19,6 +19,10 @@ pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
cstr.to_bytes().to_vec()
}
pub(crate) fn string_to_c_char(string: String) -> *const c_char {
CString::new(string).unwrap().into_raw()
}
pub(crate) fn vec_to_c_char(bytes: Vec<u8>) -> *const c_char {
CString::new(bytes).unwrap().into_raw()
}

View File

@@ -1,10 +1,13 @@
void init_batch_prover(char* params_dir);
void init_batch_prover(char* params_dir, char* assets_dir);
void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof);
void init_chunk_prover(char* params_dir);
void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
char* get_chunk_vk();
char* gen_chunk_proof(char* block_traces);
char verify_chunk_proof(char* proof);
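On the Go side of this FFI boundary, the widened signatures mean every caller must now pass the assets directory too. A minimal cgo sketch of what a caller might look like; the header name, link flags, and wrapper are assumptions, not taken from this diff:

package main

/*
#cgo LDFLAGS: -lzkp
#include <stdlib.h>
#include "libzkp.h"  // hypothetical header exposing the declarations above
*/
import "C"

import "unsafe"

// initChunkProver wraps the updated two-argument FFI entry point.
func initChunkProver(paramsDir, assetsDir string) {
	cParams := C.CString(paramsDir)
	cAssets := C.CString(assetsDir)
	defer C.free(unsafe.Pointer(cParams))
	defer C.free(unsafe.Pointer(cAssets))
	C.init_chunk_prover(cParams, cAssets)
}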

View File

@@ -3,11 +3,13 @@ package types
import (
"encoding/binary"
"errors"
"fmt"
"math"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
)
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
@@ -22,9 +24,10 @@ func GetKeccak256Gas(size uint64) uint64 {
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used to recover types.Transactions; the "from" field of types.TransactionData is missing.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
RowConsumption *types.RowConsumption `json:"row_consumption"`
Transactions []*types.TransactionData `json:"transactions"`
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
RowConsumption *types.RowConsumption `json:"row_consumption"`
txPayloadLengthCache map[string]uint64
}
// NumL1Messages returns the number of L1 messages in this block.
@@ -95,7 +98,8 @@ func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
if txData.Type == types.L1MessageTxType {
continue
}
size += uint64(len(txData.Data))
size += 64 // 60 bytes BlockContext + 4 bytes payload length
size += w.getTxPayloadLength(txData)
}
return size
}
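Taking the estimate above at face value: a block with two L2 transactions whose RLP payloads are 100 and 150 bytes is estimated at (64 + 100) + (64 + 150) = 378 bytes of commit calldata, and L1 message transactions are skipped entirely (the payload sizes are illustrative).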
@@ -110,22 +114,9 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
continue
}
data, _ := hexutil.Decode(txData.Data)
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
txPayloadLength := uint64(len(rlpTxData))
txPayloadLength := w.getTxPayloadLength(txData)
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
total += CalldataNonZeroByteGas * 4 // size of a uint32 field
total += CalldataNonZeroByteGas * 64 // 60 bytes BlockContext + 4 bytes payload length
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
}
@@ -139,13 +130,47 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
return total
}
// L2TxsNum calculates the number of l2 txs.
func (w *WrappedBlock) L2TxsNum() uint64 {
var count uint64
for _, txData := range w.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
func (w *WrappedBlock) getTxPayloadLength(txData *types.TransactionData) uint64 {
if w.txPayloadLengthCache == nil {
w.txPayloadLengthCache = make(map[string]uint64)
}
return count
if length, exists := w.txPayloadLengthCache[txData.TxHash]; exists {
return length
}
rlpTxData, err := convertTxDataToRLPEncoding(txData)
if err != nil {
log.Crit("convertTxDataToRLPEncoding failed, which should not happen", "hash", txData.TxHash, "err", err)
return 0
}
txPayloadLength := uint64(len(rlpTxData))
w.txPayloadLengthCache[txData.TxHash] = txPayloadLength
return txPayloadLength
}
func convertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
data, err := hexutil.Decode(txData.Data)
if err != nil {
return nil, fmt.Errorf("failed to decode txData.Data: %s, err: %w", txData.Data, err)
}
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, err := tx.MarshalBinary()
if err != nil {
return nil, fmt.Errorf("failed to marshal binary of the tx: %+v, err: %w", tx, err)
}
return rlpTxData, nil
}
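The cache earns its keep because the payload length is consulted repeatedly: EstimateL1CommitCalldataSize and EstimateL1CommitGas both need it, and proposeChunk calls chunk.EstimateL1CommitGas() once per appended block over all blocks accumulated so far, so without memoization the same transactions would be RLP-encoded again and again within a single proposal pass.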

View File

@@ -8,7 +8,6 @@ import (
"strings"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
)
@@ -65,23 +64,7 @@ func (c *Chunk) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
if txData.Type == types.L1MessageTxType {
continue
}
data, err := hexutil.Decode(txData.Data)
if err != nil {
return nil, err
}
// right now we only support legacy tx
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, err := tx.MarshalBinary()
rlpTxData, err := convertTxDataToRLPEncoding(txData)
if err != nil {
return nil, err
}

View File

@@ -38,11 +38,15 @@ func TestChunkEncode(t *testing.T) {
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
assert.Equal(t, uint64(0), wrappedBlock.NumL1Messages(0))
assert.Equal(t, uint64(358), wrappedBlock.EstimateL1CommitCalldataSize())
assert.Equal(t, uint64(2), wrappedBlock.NumL2Transactions())
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
assert.Equal(t, uint64(0), chunk.NumL1Messages(0))
assert.Equal(t, uint64(6966), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString := hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -56,11 +60,15 @@ func TestChunkEncode(t *testing.T) {
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
assert.Equal(t, uint64(11), wrappedBlock2.NumL1Messages(0)) // 0..=9 skipped, 10 included
assert.Equal(t, uint64(96), wrappedBlock2.EstimateL1CommitCalldataSize())
assert.Equal(t, uint64(1), wrappedBlock2.NumL2Transactions())
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
},
}
assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
assert.Equal(t, uint64(5002), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -75,6 +83,8 @@ func TestChunkEncode(t *testing.T) {
wrappedBlock2,
},
}
assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
assert.Equal(t, uint64(9958), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -136,3 +146,81 @@ func TestChunkHash(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, "0x2eb7dd63bf8fc29a0f8c10d16c2ae6f9da446907c79d50f5c164d30dc8526b60", hash.Hex())
}
func TestErrorPaths(t *testing.T) {
// test 1: Header.Number is not a uint64
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
wrappedBlock.Header.Number = wrappedBlock.Header.Number.Lsh(wrappedBlock.Header.Number, 64)
bytes, err := wrappedBlock.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "block number is not uint64")
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
for i := 0; i < 65537; i++ {
wrappedBlock.Transactions = append(wrappedBlock.Transactions, wrappedBlock.Transactions[0])
}
bytes, err = wrappedBlock.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")
chunk := &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
bytes, err = chunk.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")
wrappedBlock.Transactions = wrappedBlock.Transactions[:1]
wrappedBlock.Transactions[0].Data = "not-a-hex"
bytes, err = chunk.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "hex string without 0x prefix")
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
wrappedBlock.Transactions[0].TxHash = "not-a-hex"
_, err = chunk.Hash(0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid byte")
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
for i := 0; i < 65535; i++ {
tx := &wrappedBlock2.Transactions[i]
txCopy := *tx
txCopy.Nonce = uint64(i + 1)
wrappedBlock2.Transactions = append(wrappedBlock2.Transactions, txCopy)
}
bytes, err = wrappedBlock2.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
},
}
bytes, err = chunk.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")
}

View File

@@ -211,6 +211,7 @@ func (a *ProofMsg) PublicKey() (string, error) {
// TaskMsg is a wrapper type around db ProveTask type.
type TaskMsg struct {
UUID string `json:"uuid"`
ID string `json:"id"`
Type ProofType `json:"type,omitempty"`
BatchTaskDetail *BatchTaskDetail `json:"batch_task_detail,omitempty"`

View File

@@ -3,10 +3,11 @@ package version
import (
"fmt"
"runtime/debug"
"strconv"
"strings"
)
var tag = "v4.1.89"
var tag = "v4.2.0"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -44,7 +45,41 @@ func CheckScrollProverVersion(proverVersion string) bool {
return false
}
// compare the `scroll_prover` version
return remote[2] == local[2] || // libzkp v0.6.6
remote[2] == "ccb3cd4" || // libzkp v0.6.5
remote[2] == "8c439b1" // libzkp v0.6.2
return remote[2] == local[2]
}
// CheckScrollProverVersionTag checks the "scroll-prover" version's tag; if it is too old, it returns false
func CheckScrollProverVersionTag(proverVersion string) bool {
// note that the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so the split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
remoteTagNums := strings.Split(strings.TrimPrefix(remote[0], "v"), ".")
if len(remoteTagNums) != 3 {
return false
}
remoteTagMajor, err := strconv.Atoi(remoteTagNums[0])
if err != nil {
return false
}
remoteTagMinor, err := strconv.Atoi(remoteTagNums[1])
if err != nil {
return false
}
remoteTagPatch, err := strconv.Atoi(remoteTagNums[2])
if err != nil {
return false
}
if remoteTagMajor != 4 {
return false
}
if remoteTagMinor != 1 {
return false
}
if remoteTagPatch < 98 {
return false
}
return true
}
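A quick sanity check of the tag gate, assuming the four-part "tag-commit-scroll_prover-halo2" format noted in the comment and the common/version import (the version strings are made up):

fmt.Println(version.CheckScrollProverVersionTag("v4.1.98-aaaaaaa-bbbbbbb-ccccccc")) // true: 4.1.x with patch >= 98
fmt.Println(version.CheckScrollProverVersionTag("v4.1.89-aaaaaaa-bbbbbbb-ccccccc")) // false: patch below 98
fmt.Println(version.CheckScrollProverVersionTag("v4.1.98"))                         // false: not four '-'-separated parts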

View File

@@ -12,6 +12,7 @@ import {L1ScrollMessenger} from "../../src/L1/L1ScrollMessenger.sol";
import {L1StandardERC20Gateway} from "../../src/L1/gateways/L1StandardERC20Gateway.sol";
import {L1WETHGateway} from "../../src/L1/gateways/L1WETHGateway.sol";
import {L1DAIGateway} from "../../src/L1/gateways/L1DAIGateway.sol";
import {MultipleVersionRollupVerifier} from "../../src/L1/rollup/MultipleVersionRollupVerifier.sol";
import {ScrollChain} from "../../src/L1/rollup/ScrollChain.sol";
import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
import {L2GasPriceOracle} from "../../src/L1/rollup/L2GasPriceOracle.sol";
@@ -71,6 +72,9 @@ contract InitializeL1BridgeContracts is Script {
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addSequencer(L1_COMMIT_SENDER_ADDRESS);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addProver(L1_FINALIZE_SENDER_ADDRESS);
// initialize MultipleVersionRollupVerifier
MultipleVersionRollupVerifier(L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR).initialize(L1_SCROLL_CHAIN_PROXY_ADDR);
// initialize L2GasPriceOracle
L2GasPriceOracle(L2_GAS_PRICE_ORACLE_PROXY_ADDR).initialize(
21000, // _txGas

View File

@@ -98,6 +98,9 @@ contract L1ScrollMessenger is ScrollMessengerBase, IL1ScrollMessenger {
rollup = _rollup;
messageQueue = _messageQueue;
maxReplayTimes = 3;
emit UpdateMaxReplayTimes(0, 3);
}
/*****************************

View File

@@ -14,8 +14,6 @@ import {ScrollConstants} from "../../libraries/constants/ScrollConstants.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallback.sol";
// solhint-disable no-empty-blocks
abstract contract L1ERC20Gateway is IL1ERC20Gateway, IMessageDropCallback, ScrollGatewayBase {
using SafeERC20Upgradeable for IERC20Upgradeable;

View File

@@ -52,7 +52,7 @@ contract L1ETHGateway is ScrollGatewayBase, IL1ETHGateway, IMessageDropCallback
address _to,
uint256 _amount,
uint256 _gasLimit
) public payable override {
) external payable override {
_deposit(_to, _amount, new bytes(0), _gasLimit);
}

View File

@@ -28,6 +28,9 @@ interface IScrollChain {
* Public View Functions *
*************************/
/// @notice The latest finalized batch index.
function lastFinalizedBatchIndex() external view returns (uint256);
/// @notice Return the batch hash of a committed batch.
/// @param batchIndex The index of the batch.
function committedBatches(uint256 batchIndex) external view returns (bytes32);

View File

@@ -4,6 +4,7 @@ pragma solidity =0.8.16;
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
import {IScrollChain} from "./IScrollChain.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";
@@ -38,6 +39,9 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/// @notice The latest used zkevm verifier.
Verifier public latestVerifier;
/// @notice The address of ScrollChain contract.
address public scrollChain;
/***************
* Constructor *
***************/
@@ -48,6 +52,12 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
latestVerifier.verifier = _verifier;
}
function initialize(address _scrollChain) external onlyOwner {
require(scrollChain == address(0), "initialized");
scrollChain = _scrollChain;
}
/*************************
* Public View Functions *
*************************/
@@ -101,6 +111,8 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/// @param _startBatchIndex The start batch index when the verifier will be used.
/// @param _verifier The address of new verifier.
function updateVerifier(uint64 _startBatchIndex, address _verifier) external onlyOwner {
require(_startBatchIndex > IScrollChain(scrollChain).lastFinalizedBatchIndex(), "start batch index finalized");
Verifier memory _latestVerifier = latestVerifier;
require(_startBatchIndex >= _latestVerifier.startBatchIndex, "start batch index too small");
require(_verifier != address(0), "zero verifier address");

View File

@@ -67,8 +67,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
/// @notice Whether an account is a prover.
mapping(address => bool) public isProver;
/// @notice The latest finalized batch index.
uint256 public lastFinalizedBatchIndex;
/// @inheritdoc IScrollChain
uint256 public override lastFinalizedBatchIndex;
/// @inheritdoc IScrollChain
mapping(uint256 => bytes32) public override committedBatches;
@@ -272,9 +272,10 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
require(_batchIndex > lastFinalizedBatchIndex, "can only revert unfinalized batch");
while (_count > 0) {
committedBatches[_batchIndex] = bytes32(0);
emit RevertBatch(_batchIndex, _batchHash);
committedBatches[_batchIndex] = bytes32(0);
unchecked {
_batchIndex += 1;
_count -= 1;

View File

@@ -40,12 +40,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
/// @notice Mapping from L1 message hash to a boolean value indicating if the message has been successfully executed.
mapping(bytes32 => bool) public isL1MessageExecuted;
/// @notice Mapping from L1 message hash to the number of failure times.
mapping(bytes32 => uint256) public l1MessageFailedTimes;
/// @notice The maximum number of times each L1 message can fail on L2.
uint256 public maxFailedExecutionTimes;
/***************
* Constructor *
***************/
@@ -58,8 +52,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
function initialize(address _counterpart) external initializer {
ScrollMessengerBase.__ScrollMessengerBase_init(_counterpart, address(0));
maxFailedExecutionTimes = 3;
}
/*****************************
@@ -105,22 +97,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
_executeMessage(_from, _to, _value, _message, _xDomainCalldataHash);
}
/************************
* Restricted Functions *
************************/
/// @notice Update max failed execution times.
/// @dev This function can only be called by the contract owner.
/// @param _newMaxFailedExecutionTimes The new max failed execution times.
function updateMaxFailedExecutionTimes(uint256 _newMaxFailedExecutionTimes) external onlyOwner {
require(_newMaxFailedExecutionTimes > 0, "maxFailedExecutionTimes cannot be zero");
uint256 _oldMaxFailedExecutionTimes = maxFailedExecutionTimes;
maxFailedExecutionTimes = _newMaxFailedExecutionTimes;
emit UpdateMaxFailedExecutionTimes(_oldMaxFailedExecutionTimes, _newMaxFailedExecutionTimes);
}
/**********************
* Internal Functions *
**********************/
@@ -181,11 +157,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
isL1MessageExecuted[_xDomainCalldataHash] = true;
emit RelayedMessage(_xDomainCalldataHash);
} else {
unchecked {
uint256 _failedTimes = l1MessageFailedTimes[_xDomainCalldataHash] + 1;
require(_failedTimes <= maxFailedExecutionTimes, "Exceed maximum failure times");
l1MessageFailedTimes[_xDomainCalldataHash] = _failedTimes;
}
emit FailedRelayedMessage(_xDomainCalldataHash);
}
}

View File

@@ -6,8 +6,6 @@ import {IL2ERC20Gateway} from "./IL2ERC20Gateway.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
// solhint-disable no-empty-blocks
abstract contract L2ERC20Gateway is ScrollGatewayBase, IL2ERC20Gateway {
/*************
* Variables *

View File

@@ -6,7 +6,6 @@ import {ERC2771Context} from "@openzeppelin/contracts/metatx/ERC2771Context.sol"
import {ReentrancyGuard} from "@openzeppelin/contracts/security/ReentrancyGuard.sol";
import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol";
import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";
import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol";
import {IERC20Permit} from "@openzeppelin/contracts/token/ERC20/extensions/draft-IERC20Permit.sol";
import {OwnableBase} from "../libraries/common/OwnableBase.sol";
@@ -15,6 +14,7 @@ import {OwnableBase} from "../libraries/common/OwnableBase.sol";
contract GasSwap is ERC2771Context, ReentrancyGuard, OwnableBase {
using SafeERC20 for IERC20;
using SafeERC20 for IERC20Permit;
/**********
* Events *
@@ -94,7 +94,7 @@ contract GasSwap is ERC2771Context, ReentrancyGuard, OwnableBase {
address _sender = _msgSender();
// do permit
IERC20Permit(_permit.token).permit(
IERC20Permit(_permit.token).safePermit(
_sender,
address(this),
_permit.value,

View File

@@ -106,6 +106,8 @@ contract L1ScrollMessengerTest is L1GatewayTestBase {
exceedValue = bound(exceedValue, 1, address(this).balance / 2);
l1Messenger.updateMaxReplayTimes(0);
// append a message
l1Messenger.sendMessage{value: 100}(address(0), 100, new bytes(0), 0, refundAddress);
@@ -179,9 +181,9 @@ contract L1ScrollMessengerTest is L1GatewayTestBase {
hevm.stopPrank();
hevm.expectEmit(false, false, false, true);
emit UpdateMaxReplayTimes(0, _maxReplayTimes);
emit UpdateMaxReplayTimes(3, _maxReplayTimes);
assertEq(l1Messenger.maxReplayTimes(), 0);
assertEq(l1Messenger.maxReplayTimes(), 3);
l1Messenger.updateMaxReplayTimes(_maxReplayTimes);
assertEq(l1Messenger.maxReplayTimes(), _maxReplayTimes);
}

View File

@@ -7,6 +7,7 @@ import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
import {L1MessageQueue} from "../L1/rollup/L1MessageQueue.sol";
import {MultipleVersionRollupVerifier} from "../L1/rollup/MultipleVersionRollupVerifier.sol";
import {MockScrollChain} from "./mocks/MockScrollChain.sol";
import {MockZkEvmVerifier} from "./mocks/MockZkEvmVerifier.sol";
contract MultipleVersionRollupVerifierTest is DSTestPlus {
@@ -17,27 +18,54 @@ contract MultipleVersionRollupVerifierTest is DSTestPlus {
MockZkEvmVerifier private v0;
MockZkEvmVerifier private v1;
MockZkEvmVerifier private v2;
MockScrollChain private chain;
function setUp() external {
v0 = new MockZkEvmVerifier();
v1 = new MockZkEvmVerifier();
v2 = new MockZkEvmVerifier();
chain = new MockScrollChain();
verifier = new MultipleVersionRollupVerifier(address(v0));
}
function testInitialize(address _chain) external {
hevm.assume(_chain != address(0));
// set by non-owner, should revert
hevm.startPrank(address(1));
hevm.expectRevert("Ownable: caller is not the owner");
verifier.initialize(_chain);
hevm.stopPrank();
// succeed
assertEq(verifier.scrollChain(), address(0));
verifier.initialize(_chain);
assertEq(verifier.scrollChain(), _chain);
// initialized, revert
hevm.expectRevert("initialized");
verifier.initialize(_chain);
}
function testUpdateVerifier(address _newVerifier) external {
hevm.assume(_newVerifier != address(0));
verifier.initialize(address(chain));
// set by non-owner, should revert
hevm.startPrank(address(1));
hevm.expectRevert("Ownable: caller is not the owner");
verifier.updateVerifier(0, address(0));
hevm.stopPrank();
// start batch index finalized, revert
hevm.expectRevert("start batch index finalized");
verifier.updateVerifier(0, address(1));
// zero verifier address, revert
hevm.expectRevert("zero verifier address");
verifier.updateVerifier(0, address(0));
verifier.updateVerifier(1, address(0));
// change to random operator
assertEq(verifier.legacyVerifiersLength(), 0);
@@ -65,6 +93,8 @@ contract MultipleVersionRollupVerifierTest is DSTestPlus {
}
function testGetVerifier() external {
verifier.initialize(address(chain));
verifier.updateVerifier(100, address(v1));
verifier.updateVerifier(300, address(v2));
@@ -80,6 +110,8 @@ contract MultipleVersionRollupVerifierTest is DSTestPlus {
}
function testVerifyAggregateProof() external {
verifier.initialize(address(chain));
verifier.updateVerifier(100, address(v1));
verifier.updateVerifier(300, address(v2));

View File

@@ -7,18 +7,7 @@ import {ScrollChain} from "../../L1/rollup/ScrollChain.sol";
contract MockScrollChain is ScrollChain {
constructor() ScrollChain(0) {}
/*
function computePublicInputHash(uint64 accTotalL1Messages, Batch memory batch)
external
view
returns (
bytes32,
uint64,
uint64,
uint64
)
{
return _computePublicInputHash(accTotalL1Messages, batch);
function setLastFinalizedBatchIndex(uint256 _lastFinalizedBatchIndex) external {
lastFinalizedBatchIndex = _lastFinalizedBatchIndex;
}
*/
}

View File

@@ -47,7 +47,10 @@ require (
google.golang.org/protobuf v1.31.0 // indirect
)
require github.com/prometheus/client_golang v1.14.0
require (
github.com/google/uuid v1.3.0
github.com/prometheus/client_golang v1.14.0
)
require (
github.com/beorn7/perks v1.0.1 // indirect

View File

@@ -73,6 +73,8 @@ github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=

View File

@@ -7,6 +7,7 @@ import (
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/verifier"
)
var (
@@ -25,9 +26,14 @@ var (
// InitController inits Controller with database
func InitController(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) {
initControllerOnce.Do(func() {
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
if err != nil {
panic("proof receiver new verifier failure")
}
Auth = NewAuthController(db)
HealthCheck = NewHealthCheckController()
GetTask = NewGetTaskController(cfg, db, reg)
SubmitProof = NewSubmitProofController(cfg, db, reg)
GetTask = NewGetTaskController(cfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)
})
}

View File
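The hunk above moves verifier construction out of the submit-proof logic and into InitController, so one Verifier instance is built once and shared by the GetTask and SubmitProof controllers. A minimal, self-contained Go sketch of that sync.Once wiring (the types and names here are stand-ins, not the real coordinator types):

package main

import (
    "fmt"
    "sync"
)

// Verifier is a stand-in for the coordinator's verifier.Verifier.
type Verifier struct{ BatchVK, ChunkVK string }

var (
    initControllerOnce sync.Once
    getTaskVerifier    *Verifier
    submitVerifier     *Verifier
)

// InitController builds the verifier once and hands the same instance
// to both consumers, mirroring the refactor above.
func InitController() {
    initControllerOnce.Do(func() {
        vf := &Verifier{BatchVK: "agg-vk", ChunkVK: "chunk-vk"}
        getTaskVerifier, submitVerifier = vf, vf
    })
}

func main() {
    InitController()
    InitController()                               // second call is a no-op thanks to sync.Once
    fmt.Println(getTaskVerifier == submitVerifier) // true: shared instance
}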

@@ -13,6 +13,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -22,9 +23,9 @@ type GetTaskController struct {
}
// NewGetTaskController creates a get prover task controller
func NewGetTaskController(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, db, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, db, reg)
func NewGetTaskController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, db, vf.ChunkVK, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, db, vf.BatchVK, reg)
ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
@@ -40,7 +41,7 @@ func NewGetTaskController(cfg *config.Config, db *gorm.DB, reg prometheus.Regist
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
var getTaskParameter coordinatorType.GetTaskParameter
if err := ctx.ShouldBind(&getTaskParameter); err != nil {
nerr := fmt.Errorf("prover tasks parameter invalid, err:%w", err)
nerr := fmt.Errorf("prover task parameter invalid, err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
@@ -48,7 +49,7 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
proofType := ptc.proofType(&getTaskParameter)
proverTask, isExist := ptc.proverTasks[proofType]
if !isExist {
nerr := fmt.Errorf("parameter wrong proof type")
nerr := fmt.Errorf("parameter wrong proof type:%v", proofType)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}

View File

@@ -13,7 +13,8 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/submitproof"
coodinatorType "scroll-tech/coordinator/internal/types"
"scroll-tech/coordinator/internal/logic/verifier"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// SubmitProofController the submit proof api controller
@@ -22,18 +23,18 @@ type SubmitProofController struct {
}
// NewSubmitProofController creates the submit proof api controller instance
func NewSubmitProofController(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *SubmitProofController {
func NewSubmitProofController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *SubmitProofController {
return &SubmitProofController{
submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, db, reg),
submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, db, vf, reg),
}
}
// SubmitProof handles the proof a prover submits to the coordinator
func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
var spp coodinatorType.SubmitProofParameter
var spp coordinatorType.SubmitProofParameter
if err := ctx.ShouldBind(&spp); err != nil {
nerr := fmt.Errorf("parameter invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
@@ -51,7 +52,7 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
var tmpChunkProof message.ChunkProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.ChunkProof = &tmpChunkProof
@@ -59,7 +60,7 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
var tmpBatchProof message.BatchProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.BatchProof = &tmpBatchProof
@@ -68,8 +69,8 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil {
nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
return
}
coodinatorType.RenderJSON(ctx, types.Success, nil, nil)
coordinatorType.RenderJSON(ctx, types.Success, nil, nil)
}

View File

@@ -29,10 +29,11 @@ type Collector struct {
chunkOrm *orm.Chunk
batchOrm *orm.Batch
timeoutBatchCheckerRunTotal prometheus.Counter
batchProverTaskTimeoutTotal prometheus.Counter
timeoutChunkCheckerRunTotal prometheus.Counter
chunkProverTaskTimeoutTotal prometheus.Counter
timeoutBatchCheckerRunTotal prometheus.Counter
batchProverTaskTimeoutTotal prometheus.Counter
timeoutChunkCheckerRunTotal prometheus.Counter
chunkProverTaskTimeoutTotal prometheus.Counter
checkBatchAllChunkReadyRunTotal prometheus.Counter
}
// NewCollector creates a collector that periodically collects data to send to provers
@@ -62,10 +63,15 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
Name: "coordinator_chunk_prover_task_timeout_total",
Help: "Total number of chunk timeout prover task.",
}),
checkBatchAllChunkReadyRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_check_batch_all_chunk_ready_run_total",
Help: "Total number of check batch all chunks ready total",
}),
}
go c.timeoutBatchProofTask()
go c.timeoutChunkProofTask()
go c.checkBatchAllChunkReady()
log.Info("Start coordinator successfully.")
@@ -79,7 +85,6 @@ func (c *Collector) Stop() {
// timeoutTask periodically checks whether an assigned task has timed out; if so, it restores the
// chunk/batch task to unassigned so the batch/chunk collector can retry it.
func (c *Collector) timeoutBatchProofTask() {
defer func() {
if err := recover(); err != nil {
@@ -158,16 +163,14 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
err := c.db.Transaction(func(tx *gorm.DB) error {
// update prover task proving status as ProverProofInvalid
if err := c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
if err := c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, assignedProverTask.UUID, types.ProverProofInvalid, tx); err != nil {
log.Error("update prover task proving status failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
// update prover task failure type
if err := c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
if err := c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, assignedProverTask.UUID, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task failure type failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
@@ -189,3 +192,60 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
}
}
}
func (c *Collector) checkBatchAllChunkReady() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("check batch all chunk ready panic error:%v", err)
log.Warn(nerr.Error())
}
}()
ticker := time.NewTicker(time.Second * 10)
for {
select {
case <-ticker.C:
c.checkBatchAllChunkReadyRunTotal.Inc()
page := 1
pageSize := 50
for {
offset := (page - 1) * pageSize
batches, err := c.batchOrm.GetUnassignedAndChunksUnreadyBatches(c.ctx, offset, pageSize)
if err != nil {
log.Warn("checkBatchAllChunkReady GetUnassignedAndChunksUnreadyBatches", "error", err)
break
}
for _, batch := range batches {
allReady, checkErr := c.chunkOrm.CheckIfBatchChunkProofsAreReady(c.ctx, batch.Hash)
if checkErr != nil {
log.Warn("checkBatchAllChunkReady CheckIfBatchChunkProofsAreReady failure", "error", checkErr, "hash", batch.Hash)
continue
}
if !allReady {
continue
}
if updateErr := c.batchOrm.UpdateChunkProofsStatusByBatchHash(c.ctx, batch.Hash, types.ChunkProofsStatusReady); updateErr != nil {
log.Warn("checkBatchAllChunkReady UpdateChunkProofsStatusByBatchHash failure", "error", checkErr, "hash", batch.Hash)
}
}
if len(batches) < pageSize {
break
}
page++
}
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
return
}
}
}

View File

@@ -25,13 +25,14 @@ import (
// BatchProverTask is prover task implement for batch proof
type BatchProverTask struct {
BaseProverTask
vk string
batchAttemptsExceedTotal prometheus.Counter
batchTaskGetTaskTotal prometheus.Counter
}
// NewBatchProverTask creates a batch prover task
func NewBatchProverTask(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
db: db,
@@ -40,6 +41,7 @@ func NewBatchProverTask(cfg *config.Config, db *gorm.DB, reg prometheus.Register
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
},
vk: vk,
batchAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_attempts_exceed_total",
Help: "Total number of batch attempts exceed.",
@@ -68,7 +70,15 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from context failed")
}
if !version.CheckScrollProverVersion(proverVersion.(string)) {
if getTaskParameter.VK == "" { // allow an empty vk, because a first-time prover may not yet know its vk
if !version.CheckScrollProverVersionTag(proverVersion.(string)) { // but reject too-old provers
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
} else if getTaskParameter.VK != bp.vk { // non-empty vk but different
if version.CheckScrollProverVersion(proverVersion.(string)) { // same prover version but different vks
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
// different prover versions and different vks
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
@@ -91,7 +101,8 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
if len(batchTasks) != 1 {
return nil, fmt.Errorf("get unassigned batch proving task len not 1, batch tasks:%v", batchTasks)
log.Error("get unassigned batch proving task len not 1", "length", len(batchTasks), "batch tasks", batchTasks)
return nil, ErrCoordinatorInternalFailure
}
batchTask := batchTasks[0]
@@ -99,7 +110,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
bp.batchAttemptsExceedTotal.Inc()
return nil, fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
// TODO: retry fetching unassigned batch proving task
log.Error("batch task proving attempts reach the maximum", "hash", batchTask.Hash)
return nil, nil
}
proverTask := orm.ProverTask{
@@ -115,15 +128,17 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
// Store session info.
if err = bp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
if err = bp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
bp.recoverProvingStatus(ctx, batchTask)
return nil, fmt.Errorf("db set session info fail, session id:%s, error:%w", proverTask.TaskID, err)
log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", publicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx, batchTask.Hash)
taskMsg, err := bp.formatProverTask(ctx, &proverTask)
if err != nil {
bp.recoverProvingStatus(ctx, batchTask)
return nil, fmt.Errorf("format prover failure, id:%s error:%w", batchTask.Hash, err)
log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
bp.batchTaskGetTaskTotal.Inc()
@@ -131,11 +146,11 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil
}
func (bp *BatchProverTask) formatProverTask(ctx context.Context, taskID string) (*coordinatorType.GetTaskSchema, error) {
func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
// get chunk from db
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, taskID)
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID)
if err != nil {
err = fmt.Errorf("failed to get chunk proofs for batch task id:%s err:%w ", taskID, err)
err = fmt.Errorf("failed to get chunk proofs for batch task id:%s err:%w ", task.TaskID, err)
return nil, err
}
@@ -144,7 +159,7 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, taskID string)
for _, chunk := range chunks {
var proof message.ChunkProof
if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, taskID, chunk.Hash)
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
}
chunkProofs = append(chunkProofs, &proof)
@@ -166,11 +181,12 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, taskID string)
chunkProofsBytes, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", taskID, err)
return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", task.TaskID, err)
}
taskMsg := &coordinatorType.GetTaskSchema{
TaskID: taskID,
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes),
}

View File
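The new version/VK gate in Assign above (the chunk prover task below applies the identical check) boils down to a small decision table: empty VK is tolerated for first-time provers, mismatched VK means either bad params/config (same version) or an outdated prover (different version). A hedged sketch of that logic, with version.CheckScrollProverVersion and version.CheckScrollProverVersionTag replaced by injected stand-in predicates:

package main

import (
    "errors"
    "fmt"
)

// compatible mirrors the branch structure of the Assign check above.
func compatible(reqVK, expectedVK, proverVersion string,
    sameVersion, versionTagOK func(string) bool) error {
    if reqVK == "" {
        // First-time prover may not know its VK, but too-old version
        // tags are still rejected.
        if !versionTagOK(proverVersion) {
            return fmt.Errorf("incompatible prover version: %s", proverVersion)
        }
        return nil
    }
    if reqVK != expectedVK {
        if sameVersion(proverVersion) {
            // Same prover version but different VKs: params/config problem.
            return errors.New("incompatible vk: check params/config files")
        }
        // Different prover version and different VK: upgrade needed.
        return fmt.Errorf("incompatible prover version: %s", proverVersion)
    }
    return nil
}

func main() {
    yes := func(string) bool { return true }
    no := func(string) bool { return false }
    fmt.Println(compatible("", "vk-a", "v4.0.0", yes, yes))     // <nil>
    fmt.Println(compatible("vk-b", "vk-a", "v4.0.0", yes, yes)) // incompatible vk
    fmt.Println(compatible("vk-b", "vk-a", "v3.0.0", no, yes))  // incompatible prover version
}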

@@ -22,16 +22,20 @@ import (
coordinatorType "scroll-tech/coordinator/internal/types"
)
// ErrCoordinatorInternalFailure coordinator internal db failure
var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ChunkProverTask the chunk prover task
type ChunkProverTask struct {
BaseProverTask
vk string
chunkAttemptsExceedTotal prometheus.Counter
chunkTaskGetTaskTotal prometheus.Counter
}
// NewChunkProverTask creates a chunk prover task
func NewChunkProverTask(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *ChunkProverTask {
func NewChunkProverTask(cfg *config.Config, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
db: db,
@@ -40,7 +44,7 @@ func NewChunkProverTask(cfg *config.Config, db *gorm.DB, reg prometheus.Register
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
},
vk: vk,
chunkAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_chunk_attempts_exceed_total",
Help: "Total number of chunk attempts exceed.",
@@ -69,7 +73,15 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from context failed")
}
if !version.CheckScrollProverVersion(proverVersion.(string)) {
if getTaskParameter.VK == "" { // allow an empty vk, because a first-time prover may not yet know its vk
if !version.CheckScrollProverVersionTag(proverVersion.(string)) { // but reject too-old provers
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
} else if getTaskParameter.VK != cp.vk { // non-empty vk but different
if version.CheckScrollProverVersion(proverVersion.(string)) { // same prover version but different vks
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
// different prover versions and different vks
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
@@ -85,7 +97,8 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// load and send chunk tasks
chunkTasks, err := cp.chunkOrm.UpdateUnassignedChunkReturning(ctx, getTaskParameter.ProverHeight, 1)
if err != nil {
return nil, fmt.Errorf("failed to get unassigned chunk proving tasks, error:%w", err)
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", err)
return nil, ErrCoordinatorInternalFailure
}
if len(chunkTasks) == 0 {
@@ -93,7 +106,8 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
if len(chunkTasks) != 1 {
return nil, fmt.Errorf("get unassigned chunk proving task len not 1, chunk tasks:%v", chunkTasks)
log.Error("get unassigned chunk proving task len not 1", "length", len(chunkTasks), "chunk tasks", chunkTasks)
return nil, ErrCoordinatorInternalFailure
}
chunkTask := chunkTasks[0]
@@ -102,7 +116,9 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
cp.chunkAttemptsExceedTotal.Inc()
return nil, fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
// TODO: retry fetching unassigned chunk proving task
log.Error("chunk task proving attempts reach the maximum", "hash", chunkTask.Hash)
return nil, nil
}
proverTask := orm.ProverTask{
@@ -116,15 +132,18 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// why we need to use UTC time here: see scroll/common/databased/db.go
AssignedAt: utils.NowUTC(),
}
if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
if err = cp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
cp.recoverProvingStatus(ctx, chunkTask)
return nil, fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, publicKey, err)
log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", publicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx, chunkTask.Hash)
taskMsg, err := cp.formatProverTask(ctx, &proverTask)
if err != nil {
cp.recoverProvingStatus(ctx, chunkTask)
return nil, fmt.Errorf("format prover task failure, id:%s error:%w", chunkTask.Hash, err)
log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
cp.chunkTaskGetTaskTotal.Inc()
@@ -132,11 +151,11 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil
}
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, hash string) (*coordinatorType.GetTaskSchema, error) {
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
// Get block hashes.
wrappedBlocks, wrappedErr := cp.blockOrm.GetL2BlocksByChunkHash(ctx, hash)
wrappedBlocks, wrappedErr := cp.blockOrm.GetL2BlocksByChunkHash(ctx, task.TaskID)
if wrappedErr != nil || len(wrappedBlocks) == 0 {
return nil, fmt.Errorf("failed to fetch wrapped blocks, batch hash:%s err:%w", hash, wrappedErr)
return nil, fmt.Errorf("failed to fetch wrapped blocks, chunk hash:%s err:%w", task.TaskID, wrappedErr)
}
blockHashes := make([]common.Hash, len(wrappedBlocks))
@@ -149,11 +168,12 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, hash string) (*
}
blockHashesBytes, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", hash, err)
return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", task.TaskID, err)
}
proverTaskSchema := &coordinatorType.GetTaskSchema{
TaskID: hash,
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeChunk),
TaskData: string(blockHashesBytes),
}

View File

@@ -57,10 +57,6 @@ func (b *BaseProverTask) checkAttemptsExceeded(hash string, taskType message.Pro
log.Error("failed to update batch proving_status as failed", "msg.ID", hash, "error", err)
}
}
// update the prover task status to let timeout checker don't check it.
if err := b.proverTaskOrm.UpdateAllProverTaskProvingStatusOfTaskID(b.ctx, message.ProofType(proverTasks[0].TaskType), hash, types.ProverProofInvalid, tx); err != nil {
log.Error("failed to update prover task proving_status as failed", "msg.ID", hash, "error", err)
}
return nil
})
if transErr == nil {

View File

@@ -34,6 +34,12 @@ var (
ErrValidatorFailureProofTimeout = errors.New("validator failure submit proof timeout")
// ErrValidatorFailureTaskHaveVerifiedSuccess have proved success and verified success
ErrValidatorFailureTaskHaveVerifiedSuccess = errors.New("validator failure chunk/batch have proved and verified success")
// ErrValidatorFailureVerifiedFailed verification failed because the verifier returned an error
ErrValidatorFailureVerifiedFailed = fmt.Errorf("verification failed, verifier returns error")
// ErrValidatorSuccessInvalidProof verification succeeded but the proof is invalid
ErrValidatorSuccessInvalidProof = fmt.Errorf("verification succeeded, it's an invalid proof")
// ErrCoordinatorInternalFailure coordinator internal db failure
ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
)
// ProofReceiverLogic the proof receiver logic
@@ -60,11 +66,7 @@ type ProofReceiverLogic struct {
}
// NewSubmitProofReceiverLogic creates a proof receiver logic
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, reg prometheus.Registerer) *ProofReceiverLogic {
vf, err := verifier.NewVerifier(cfg.Verifier)
if err != nil {
panic("proof receiver new verifier failure")
}
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic {
return &ProofReceiverLogic{
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
@@ -133,10 +135,21 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
return fmt.Errorf("get ProverVersion from context failed")
}
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndProver(ctx, proofMsg.Type, proofMsg.ID, pk, pv)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
var proverTask *orm.ProverTask
var err error
if proofParameter.UUID != "" {
proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx, proofParameter.UUID, pk)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
} else {
// TODO: delete this legacy fallback once all provers have upgraded
proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx, proofMsg.Type, proofMsg.ID, pk, pv)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
}
proofTime := time.Since(proverTask.CreatedAt)
@@ -161,15 +174,15 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if verifyErr != nil || !success {
m.verifierFailureTotal.WithLabelValues(pv).Inc()
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
m.proofRecover(ctx, proverTask, proofMsg)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
if verifyErr == nil {
verifyErr = fmt.Errorf("verification succeeded and it's an invalid proof")
if verifyErr != nil {
return ErrValidatorFailureVerifiedFailed
}
return verifyErr
return ErrValidatorSuccessInvalidProof
}
m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())
@@ -177,10 +190,10 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)
if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg, proofTimeSec); err != nil {
if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
m.proofSubmitFailure.Inc()
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
return err
m.proofRecover(ctx, proverTask, proofMsg)
return ErrCoordinatorInternalFailure
}
return nil
@@ -237,14 +250,14 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
// Verify if the proving task has already been assigned to another prover.
// Upon receiving an error message, it's possible the proving status has been reset by another prover
// and the task has been reassigned. In this case, the coordinator should avoid resetting the proving status.
m.processProverErr(ctx, proofMsg.ID, pk, proofMsg.Type)
m.processProverErr(ctx, proverTask)
m.validateFailureProverTaskStatusNotOk.Inc()
log.Info("proof generated by prover failed",
"taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
"proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
"failureMessage", "failureMessage", failureMsg)
"failureMessage", failureMsg)
return ErrValidatorFailureProofMsgStatusNotOk
}
@@ -257,7 +270,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
}
// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, pk, proofMsg); updateTaskProofErr != nil {
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
}
@@ -272,28 +285,28 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
return nil
}
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", hash, "proverPublicKey", pubKey,
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskUnassigned, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProvingTaskUnassigned, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", proverTask.TaskID, "pubKey", proverTask.ProverPublicKey, "error", err)
}
}
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
log.Info("proof close task update proof status", "hash", hash, "proverPublicKey", pubKey,
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", proofMsg.Type.String(), "status", types.ProvingTaskVerified.String())
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskVerified, proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "proverPublicKey", pubKey, "error", err)
if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProvingTaskVerified, proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err)
return err
}
return nil
}
// UpdateProofStatus update the chunk/batch task and session info status
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsg *message.ProofMsg, status types.ProvingStatus, proofTimeSec uint64) error {
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg, status types.ProvingStatus, proofTimeSec uint64) error {
var proverTaskStatus types.ProverProveStatus
switch status {
case types.ProvingTaskFailed, types.ProvingTaskUnassigned:
@@ -303,13 +316,13 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
}
err := m.db.Transaction(func(tx *gorm.DB) error {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsg.Type, hash, proverPublicKey, proverTaskStatus, tx); updateErr != nil {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proverTask.UUID, proverTaskStatus, tx); updateErr != nil {
return updateErr
}
// if the chunk/batch proof is already verified, a failed status must not overwrite its proving status
if m.checkIsTaskSuccess(ctx, hash, proofMsg.Type) {
log.Info("update proof status skip because this chunk / batch has been verified", "hash", hash, "public key", proverPublicKey)
if m.checkIsTaskSuccess(ctx, proverTask.TaskID, proofMsg.Type) {
log.Info("update proof status skip because this chunk / batch has been verified", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey)
return nil
}
@@ -322,20 +335,20 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
storeProofErr = m.batchOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.BatchProof, proofTimeSec, tx)
}
if storeProofErr != nil {
log.Error("failed to store chunk/batch proof into db", "hash", hash, "public key", proverPublicKey, "error", storeProofErr)
log.Error("failed to store chunk/batch proof into db", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey, "error", storeProofErr)
return storeProofErr
}
}
switch proofMsg.Type {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)
if err := m.chunkOrm.UpdateProvingStatus(ctx, proverTask.TaskID, status, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "hash", proverTask.TaskID, "error", err)
return err
}
case message.ProofTypeBatch:
if err := m.batchOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", hash, "error", err)
if err := m.batchOrm.UpdateProvingStatus(ctx, proverTask.TaskID, status, tx); err != nil {
log.Error("failed to update batch proving_status as failed", "hash", proverTask.TaskID, "error", err)
return err
}
}
@@ -347,7 +360,7 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
}
if status == types.ProvingTaskVerified && proofMsg.Type == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, hash); checkReadyErr != nil {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil {
log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr)
return checkReadyErr
}
@@ -376,14 +389,15 @@ func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string
return provingStatus == types.ProvingTaskVerified
}
func (m *ProofReceiverLogic) processProverErr(ctx context.Context, taskID, pk string, taskType message.ProofType) {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, taskType, taskID, pk, types.ProverProofInvalid); updateErr != nil {
log.Error("update prover task proving status failure", "taskID", taskID, "proverPublicKey", pk, "taskType", taskType, "error", updateErr)
func (m *ProofReceiverLogic) processProverErr(ctx context.Context, proverTask *orm.ProverTask) {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proverTask.UUID, types.ProverProofInvalid); updateErr != nil {
log.Error("update prover task proving status failure", "uuid", proverTask.UUID, "taskID", proverTask.TaskID, "proverPublicKey",
proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "error", updateErr)
}
proverTasks, err := m.proverTaskOrm.GetValidOrAssignedTaskOfOtherProvers(ctx, taskType, taskID, pk)
proverTasks, err := m.proverTaskOrm.GetAssignedTaskOfOtherProvers(ctx, message.ProofType(proverTask.TaskType), proverTask.TaskID, proverTask.ProverPublicKey)
if err != nil {
log.Warn("checkIsAssignedToOtherProver failure", "taskID", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
log.Warn("checkIsAssignedToOtherProver failure", "taskID", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "error", err)
return
}
@@ -391,19 +405,19 @@ func (m *ProofReceiverLogic) processProverErr(ctx context.Context, taskID, pk st
return
}
switch taskType {
switch message.ProofType(proverTask.TaskType) {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatusFromProverError(ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update chunk proving_status as failed", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
if err := m.chunkOrm.UpdateProvingStatusFromProverError(ctx, proverTask.TaskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update chunk proving_status as failed", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "error", err)
}
case message.ProofTypeBatch:
if err := m.batchOrm.UpdateProvingStatusFromProverError(ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update batch proving_status as failed", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
if err := m.batchOrm.UpdateProvingStatusFromProverError(ctx, proverTask.TaskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update batch proving_status as failed", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "error", err)
}
}
}
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, pk string, proofMsg *message.ProofMsg) error {
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg) error {
// store the proof to prover task
var proofBytes []byte
var marshalErr error
@@ -417,5 +431,5 @@ func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, pk strin
if len(proofBytes) == 0 || marshalErr != nil {
return fmt.Errorf("updateProverTaskProof marshal proof error:%w", marshalErr)
}
return m.proverTaskOrm.UpdateProverTaskProof(ctx, proofMsg.Type, proofMsg.ID, pk, proofBytes)
return m.proverTaskOrm.UpdateProverTaskProof(ctx, proverTask.UUID, proofBytes)
}

View File
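HandleZkProof above now prefers a UUID-keyed lookup of the prover task and only falls back to the legacy (task type, task ID, public key) query for provers that predate the upgrade. A self-contained sketch of that branch, with the two ORM queries injected as stand-ins:

package main

import (
    "errors"
    "fmt"
)

// ProverTask is a stand-in for orm.ProverTask.
type ProverTask struct{ UUID, TaskID string }

// findProverTask mirrors the lookup branch in HandleZkProof: a non-empty
// UUID selects the new unique-record query; an empty UUID falls back to
// the legacy query until all provers have upgraded.
func findProverTask(uuid, taskID, pk string,
    byUUID func(uuid, pk string) (*ProverTask, error),
    legacy func(taskID, pk string) (*ProverTask, error)) (*ProverTask, error) {
    if uuid != "" {
        return byUUID(uuid, pk)
    }
    return legacy(taskID, pk)
}

func main() {
    byUUID := func(u, _ string) (*ProverTask, error) { return &ProverTask{UUID: u}, nil }
    legacy := func(_, _ string) (*ProverTask, error) { return nil, errors.New("no assigned task") }

    task, err := findProverTask("3f2c-example-uuid", "chunk-hash", "pk", byUUID, legacy)
    fmt.Println(task, err) // uses the new UUID lookup

    task, err = findProverTask("", "chunk-hash", "pk", byUUID, legacy)
    fmt.Println(task, err) // falls back to the legacy lookup
}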

@@ -8,11 +8,6 @@ import (
"scroll-tech/coordinator/internal/config"
)
const InvalidTestProof = "this is an invalid proof"
// Verifier represents a mock halo2 verifier.
type Verifier struct{}
// NewVerifier Sets up a mock verifier.
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
return &Verifier{}, nil

View File

@@ -0,0 +1,15 @@
package verifier
import (
"scroll-tech/coordinator/internal/config"
)
// InvalidTestProof invalid proof used in tests
const InvalidTestProof = "this is an invalid proof"
// Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct {
cfg *config.VerifierConfig
BatchVK string
ChunkVK string
}

View File

@@ -11,7 +11,11 @@ package verifier
import "C" //nolint:typecheck
import (
"encoding/base64"
"encoding/json"
"io"
"os"
"path"
"unsafe"
"github.com/scroll-tech/go-ethereum/log"
@@ -21,14 +25,6 @@ import (
"scroll-tech/common/types/message"
)
// InvalidTestProof invalid proof used in tests
const InvalidTestProof = "this is a invalid proof"
// Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct {
cfg *config.VerifierConfig
}
// NewVerifier Sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
if cfg.MockMode {
@@ -44,7 +40,21 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
C.init_batch_verifier(paramsPathStr, assetsPathStr)
C.init_chunk_verifier(paramsPathStr, assetsPathStr)
return &Verifier{cfg: cfg}, nil
batchVK, err := readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
if err != nil {
return nil, err
}
chunkVK, err := readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
if err != nil {
return nil, err
}
return &Verifier{
cfg: cfg,
BatchVK: batchVK,
ChunkVK: chunkVK,
}, nil
}
// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
@@ -96,3 +106,15 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
verified := C.verify_chunk_proof(proofStr)
return verified != 0, nil
}
func readVK(filePath string) (string, error) {
f, err := os.Open(filePath)
if err != nil {
return "", err
}
defer f.Close()
byt, err := io.ReadAll(f)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(byt), nil
}

View File
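For reference, a runnable version of the readVK helper added above (with the file-close fix applied) plus an illustrative call; the coordinator itself joins cfg.AssetsPath with the agg_vk.vkey / chunk_vk.vkey file names:

package main

import (
    "encoding/base64"
    "fmt"
    "io"
    "os"
)

// readVK reads a verifying-key file and returns its contents
// base64-encoded, as in the diff above.
func readVK(filePath string) (string, error) {
    f, err := os.Open(filePath)
    if err != nil {
        return "", err
    }
    defer f.Close()
    byt, err := io.ReadAll(f)
    if err != nil {
        return "", err
    }
    return base64.StdEncoding.EncodeToString(byt), nil
}

func main() {
    // Path is illustrative only.
    vk, err := readVK("agg_vk.vkey")
    if err != nil {
        fmt.Println("read vk:", err)
        return
    }
    fmt.Println("batch VK (base64) length:", len(vk))
}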

@@ -91,6 +91,26 @@ func (o *Batch) GetUnassignedBatches(ctx context.Context, limit int) ([]*Batch,
return batches, nil
}
// GetUnassignedAndChunksUnreadyBatches gets the batches that are unassigned and whose chunk proofs are not yet ready
func (o *Batch) GetUnassignedAndChunksUnreadyBatches(ctx context.Context, offset, limit int) ([]*Batch, error) {
if offset < 0 || limit < 0 {
return nil, errors.New("limit and offset must not be smaller than 0")
}
db := o.db.WithContext(ctx)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
db = db.Where("chunk_proofs_status = ?", types.ChunkProofsStatusPending)
db = db.Order("index ASC")
db = db.Offset(offset)
db = db.Limit(limit)
var batches []*Batch
if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetUnassignedAndChunksUnreadyBatches error: %w", err)
}
return batches, nil
}
// GetAssignedBatches retrieves all batches whose proving_status is either types.ProvingTaskAssigned.
func (o *Batch) GetAssignedBatches(ctx context.Context) ([]*Batch, error) {
db := o.db.WithContext(ctx)

View File

@@ -234,7 +234,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
var totalL1CommitGas uint64
for _, block := range chunk.Blocks {
totalL2TxGas += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL2TxNum += block.NumL2Transactions()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas += block.EstimateL1CommitGas()
}
@@ -306,15 +306,8 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
func (o *Chunk) UpdateProvingStatusFromProverError(ctx context.Context, hash string, status types.ProvingStatus) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
updateFields["prover_assigned_at"] = nil
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash).Where("proving_status", types.ProvingTaskAssigned)

View File

@@ -77,12 +77,13 @@ func TestProverTaskOrm(t *testing.T) {
AssignedAt: utils.NowUTC(),
}
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
err = proverTaskOrm.InsertProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasks, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasks))
assert.Equal(t, proverTask.ProverName, proverTasks[0].ProverName)
assert.NotEqual(t, proverTask.UUID.String(), "00000000-0000-0000-0000-000000000000")
// test decimal reward, get reward
resultReward := proverTasks[0].Reward.BigInt()
@@ -91,12 +92,8 @@ func TestProverTaskOrm(t *testing.T) {
proverTask.ProvingStatus = int16(types.ProverProofValid)
proverTask.AssignedAt = utils.NowUTC()
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasks, err = proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasks))
assert.Equal(t, proverTask.ProvingStatus, proverTasks[0].ProvingStatus)
err = proverTaskOrm.InsertProverTask(context.Background(), &proverTask)
assert.Error(t, err)
}
func TestProverTaskOrmUint256(t *testing.T) {
@@ -117,8 +114,9 @@ func TestProverTaskOrmUint256(t *testing.T) {
AssignedAt: utils.NowUTC(),
}
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
err = proverTaskOrm.InsertProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
assert.NotEqual(t, proverTask.UUID.String(), "00000000-0000-0000-0000-000000000000")
proverTasksUint256, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasksUint256))

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"time"
"github.com/google/uuid"
"github.com/shopspring/decimal"
"gorm.io/gorm"
"gorm.io/gorm/clause"
@@ -18,7 +19,8 @@ import (
type ProverTask struct {
db *gorm.DB `gorm:"column:-"`
ID int64 `json:"id" gorm:"column:id"`
ID int64 `json:"id" gorm:"column:id"`
UUID uuid.UUID `json:"uuid" gorm:"column:uuid;type:uuid;default:gen_random_uuid()"`
// prover
ProverPublicKey string `json:"prover_public_key" gorm:"column:prover_public_key"`
@@ -114,14 +116,16 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, taskType messag
return proverTasks, nil
}
// GetProverTaskByTaskIDAndProver get prover task taskID and public key
func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
// GetAssignedProverTaskByTaskIDAndProver gets an assigned prover task by task type, task ID, prover public key and prover version
// TODO: deprecate this function once all provers have upgraded
func (o *ProverTask) GetAssignedProverTaskByTaskIDAndProver(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", taskID)
db = db.Where("prover_public_key", proverPublicKey)
db = db.Where("prover_version", proverVersion)
db = db.Where("proving_status", types.ProverAssigned)
var proverTask ProverTask
err := db.First(&proverTask).Error
@@ -131,14 +135,29 @@ func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskTyp
return &proverTask, nil
}
// GetValidOrAssignedTaskOfOtherProvers get the chunk/batch task assigned other provers
func (o *ProverTask) GetValidOrAssignedTaskOfOtherProvers(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey string) ([]ProverTask, error) {
// GetProverTaskByUUIDAndPublicKey gets a prover task by uuid and public key
func (o *ProverTask) GetProverTaskByUUIDAndPublicKey(ctx context.Context, uuid, publicKey string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("uuid", uuid)
db = db.Where("prover_public_key", publicKey)
var proverTask ProverTask
err := db.First(&proverTask).Error
if err != nil {
return nil, fmt.Errorf("ProverTask.GetProverTaskByUUID err:%w, uuid:%s publicKey:%s", err, uuid, publicKey)
}
return &proverTask, nil
}
// GetAssignedTaskOfOtherProvers get the chunk/batch task assigned other provers
func (o *ProverTask) GetAssignedTaskOfOtherProvers(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey string) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", taskID)
db = db.Where("prover_public_key != ?", proverPublicKey)
db = db.Where("proving_status in (?)", []int{int(types.ProverAssigned), int(types.ProverProofValid)})
db = db.Where("proving_status = ?", int(types.ProverAssigned))
var proverTasks []ProverTask
if err := db.Find(&proverTasks).Error; err != nil {
@@ -199,96 +218,59 @@ func (o *ProverTask) TaskTimeoutMoreThanOnce(ctx context.Context, taskType messa
return false
}
// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
// InsertProverTask inserts a prover task record
func (o *ProverTask) InsertProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
db := o.db.WithContext(ctx)
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.Clauses(clause.Returning{})
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}, {Name: "prover_version"}},
DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
})
if err := db.Create(&proverTask).Error; err != nil {
return fmt.Errorf("ProverTask.SetProverTask error: %w, prover task: %v", err, proverTask)
if err := db.Create(proverTask).Error; err != nil {
return fmt.Errorf("ProverTask.InsertProverTask error: %w, prover task: %v", err, proverTask)
}
return nil
}
// UpdateProverTaskProof update the prover task's proof
func (o *ProverTask) UpdateProverTaskProof(ctx context.Context, proofType message.ProofType, taskID string, pk string, proof []byte) error {
func (o *ProverTask) UpdateProverTaskProof(ctx context.Context, uuid uuid.UUID, proof []byte) error {
db := o.db
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", int(proofType), taskID, pk)
db = db.Where("uuid = ?", uuid)
if err := db.Update("proof", proof).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskProof error: %w, proof type: %v, taskID: %v, prover public key: %v", err, proofType.String(), taskID, pk)
return fmt.Errorf("ProverTask.UpdateProverTaskProof error: %w, uuid: %v", err, uuid)
}
return nil
}
// UpdateProverTaskProvingStatus updates the proving_status of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, uuid uuid.UUID, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", int(proofType), taskID, pk)
db = db.Where("uuid = ?", uuid)
if err := db.Update("proving_status", status).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskProvingStatus error: %w, proof type: %v, taskID: %v, prover public key: %v, status: %v", err, proofType.String(), taskID, pk, status.String())
}
return nil
}
// UpdateAllProverTaskProvingStatusOfTaskID updates all the proving_status of a specific task id.
func (o *ProverTask) UpdateAllProverTaskProvingStatusOfTaskID(ctx context.Context, proofType message.ProofType, taskID string, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ?", int(proofType), taskID)
if err := db.Update("proving_status", status).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateAllProverTaskProvingStatusOfTaskID error: %w, proof type: %v, taskID: %v, status: %v", err, proofType.String(), taskID, status.String())
return fmt.Errorf("ProverTask.UpdateProverTaskProvingStatus error: %w, uuid:%s, status: %v", err, uuid, status.String())
}
return nil
}
// UpdateProverTaskFailureType updates the prover task's failure type
- func (o *ProverTask) UpdateProverTaskFailureType(ctx context.Context, proofType message.ProofType, taskID string, pk string, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
+ func (o *ProverTask) UpdateProverTaskFailureType(ctx context.Context, uuid uuid.UUID, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_id", taskID).Where("prover_public_key", pk).Where("task_type", int(proofType))
db = db.Where("uuid", uuid)
if err := db.Update("failure_type", int(failureType)).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskFailureType error: %w, proof type: %v, taskID: %v, prover public key: %v, failure type: %v", err, proofType.String(), taskID, pk, failureType.String())
}
return nil
}
// UpdateAllProverTaskFailureTypeOfTaskID update the prover task failure type
func (o *ProverTask) UpdateAllProverTaskFailureTypeOfTaskID(ctx context.Context, proofType message.ProofType, taskID string, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_id", taskID).Where("task_type", int(proofType))
if err := db.Update("failure_type", int(failureType)).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateAllProverTaskFailureTypeOfTaskID error: %w, proof type: %v, taskID: %v, failure type: %v", err, proofType.String(), taskID, failureType.String())
return fmt.Errorf("ProverTask.UpdateProverTaskFailureType error: %w, uuid:%s, failure type: %v", err, uuid.String(), failureType.String())
}
return nil
}
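All of the write methods above accept an optional dbTX, which lets a caller group several prover-task writes into one transaction; a minimal usage sketch (wrapper function ours, assuming this file's existing imports):

// assignTask inserts the task record and marks it assigned atomically by
// threading the same *gorm.DB transaction through the dbTX parameter.
func assignTask(ctx context.Context, db *gorm.DB, o *ProverTask, task *ProverTask, id uuid.UUID) error {
	return db.Transaction(func(tx *gorm.DB) error {
		if err := o.InsertProverTask(ctx, task, tx); err != nil {
			return err
		}
		return o.UpdateProverTaskProvingStatus(ctx, id, types.ProverAssigned, tx)
	})
}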

View File

@@ -2,12 +2,14 @@ package types
// GetTaskParameter is the ProverTasks request parameter
type GetTaskParameter struct {
- ProverHeight int `form:"prover_height" json:"prover_height"`
- TaskType int `form:"task_type" json:"task_type"`
+ ProverHeight int    `form:"prover_height" json:"prover_height"`
+ TaskType     int    `form:"task_type" json:"task_type"`
+ VK           string `form:"vk" json:"vk"`
}
// GetTaskSchema is the schema data returned to the prover for a get-task request
type GetTaskSchema struct {
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
TaskData string `json:"task_data"`

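These parameter structs carry both form and json tags, so a handler can bind them from a query string or a JSON body; a hedged sketch of the coordinator-side binding (handler and wiring ours, assuming the gin framework):

func getTaskHandler(c *gin.Context) {
	var p types.GetTaskParameter
	// ShouldBind selects form or json binding based on the request
	if err := c.ShouldBind(&p); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"errmsg": err.Error()})
		return
	}
	// ... select a pending task for p.TaskType and check p.VK against the expected vk ...
}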
View File

@@ -2,6 +2,8 @@ package types
// SubmitProofParameter is the SubmitProof API request parameter
type SubmitProofParameter struct {
// TODO: once provers have upgraded, change this field to required
UUID string `form:"uuid" json:"uuid"`
TaskID string `form:"task_id" json:"task_id" binding:"required"`
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`

View File

@@ -110,7 +110,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
}
func setEnv(t *testing.T) {
version.Version = "v1.2.3-aaa-bbb-ccc"
version.Version = "v4.1.98-aaa-bbb-ccc"
base = docker.NewDockerApp()
base.RunDBImage(t)

View File

@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
- assert.Equal(t, 8, int(cur))
+ assert.Equal(t, 9, int(cur))
}
func testMigrate(t *testing.T) {

View File

@@ -1,15 +1,15 @@
-- +goose Up
-- +goose StatementBegin
- drop index l1_message_hash_uindex;
+ drop index if exists l1_message_hash_uindex;
- create index l1_message_hash_index
+ create index if not exists l1_message_hash_index
on l1_message (msg_hash) where deleted_at IS NULL;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
- drop index l1_message_hash_index;
+ drop index if exists l1_message_hash_index;
- create unique index l1_message_hash_uindex
+ create unique index if not exists l1_message_hash_uindex
on l1_message (msg_hash) where deleted_at IS NULL;
-- +goose StatementEnd

View File

@@ -0,0 +1,16 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE prover_task ADD COLUMN uuid uuid DEFAULT gen_random_uuid() NOT NULL UNIQUE;
create index if not exists idx_uuid on prover_task (uuid) where deleted_at IS NULL;
ALTER TABLE prover_task DROP CONSTRAINT uk_tasktype_taskid_publickey_version;
drop index if exists uk_tasktype_taskid_publickey_version;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
create unique index if not exists uk_tasktype_taskid_publickey_version
on prover_task (task_type, task_id, prover_public_key, prover_version) where deleted_at IS NULL;
-- +goose StatementEnd
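One deployment caveat (ours, not from the diff): gen_random_uuid() is built into PostgreSQL 13+, while older servers only get it from the pgcrypto extension. A pre-flight check could look like this, using plain database/sql:

// ensureGenRandomUUID verifies the function exists, creating pgcrypto if not.
func ensureGenRandomUUID(db *sql.DB) error {
	var available bool
	if err := db.QueryRow(`SELECT count(*) > 0 FROM pg_proc WHERE proname = 'gen_random_uuid'`).Scan(&available); err != nil {
		return err
	}
	if !available {
		_, err := db.Exec(`CREATE EXTENSION IF NOT EXISTS pgcrypto`)
		return err
	}
	return nil
}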

View File

@@ -43,6 +43,7 @@ type LoginResponse struct {
type GetTaskRequest struct {
TaskType message.ProofType `json:"task_type"`
ProverHeight uint64 `json:"prover_height,omitempty"`
VK string `json:"vk"`
}
// GetTaskResponse defines the response structure for GetTask API
@@ -50,6 +51,7 @@ type GetTaskResponse struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data *struct {
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
TaskData string `json:"task_data"`
@@ -58,6 +60,7 @@ type GetTaskResponse struct {
// SubmitProofRequest defines the request structure for the SubmitProof API.
type SubmitProofRequest struct {
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
Status int `json:"status"`

View File

@@ -5,6 +5,7 @@
"db_path": "unique-db-path-for-prover-1",
"core": {
"params_path": "params",
"assets_path": "assets",
"proof_type": 2
},
"coordinator": {

View File

@@ -25,6 +25,7 @@ type Config struct {
// ProverCoreConfig loads the zk prover config.
type ProverCoreConfig struct {
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
ProofType message.ProofType `json:"proof_type,omitempty"` // 1: chunk prover (default type), 2: batch prover
DumpDir string `json:"dump_dir,omitempty"`
}

View File

@@ -42,3 +42,7 @@ func (p *ProverCore) ProveBatch(taskID string, chunkInfos []*message.ChunkInfo,
Vk: _empty[:],
}, nil
}
func (p *ProverCore) GetVk() string {
return ""
}

View File

@@ -28,19 +28,22 @@ import (
// ProverCore sends block-traces to rust-prover through ffi and gets back the zk-proof.
type ProverCore struct {
cfg *config.ProverCoreConfig
vk string
}
// NewProverCore inits a ProverCore object.
func NewProverCore(cfg *config.ProverCoreConfig) (*ProverCore, error) {
paramsPathStr := C.CString(cfg.ParamsPath)
assetsPathStr := C.CString(cfg.AssetsPath)
defer func() {
C.free(unsafe.Pointer(paramsPathStr))
C.free(unsafe.Pointer(assetsPathStr))
}()
if cfg.ProofType == message.ProofTypeBatch {
- C.init_batch_prover(paramsPathStr)
+ C.init_batch_prover(paramsPathStr, assetsPathStr)
} else if cfg.ProofType == message.ProofTypeChunk {
- C.init_chunk_prover(paramsPathStr)
+ C.init_chunk_prover(paramsPathStr, assetsPathStr)
}
if cfg.DumpDir != "" {
@@ -54,6 +57,26 @@ func NewProverCore(cfg *config.ProverCoreConfig) (*ProverCore, error) {
return &ProverCore{cfg: cfg}, nil
}
// GetVk returns the vk in Base64 format.
func (p *ProverCore) GetVk() string {
if p.vk != "" { // cached
return p.vk
}
var raw *C.char
if p.cfg.ProofType == message.ProofTypeBatch {
raw = C.get_batch_vk()
} else if p.cfg.ProofType == message.ProofTypeChunk {
raw = C.get_chunk_vk()
}
if raw != nil {
p.vk = C.GoString(raw) // cache it
}
return p.vk
}
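Since GetVk caches the first successful FFI read, calling it on every task fetch is cheap; a short usage sketch (log message ours):

vk := proverCore.GetVk()
if vk == "" {
	// the FFI has not produced a vk yet; an empty string goes out and the
	// next GetTask call simply retries
	log.Warn("vk unavailable, retrying on next task fetch")
}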
// ProveBatch calls the rust ffi to generate a batch proof.
func (p *ProverCore) ProveBatch(taskID string, chunkInfos []*message.ChunkInfo, chunkProofs []*message.ChunkProof) (*message.BatchProof, error) {
if p.cfg.ProofType != message.ProofTypeBatch {
@@ -68,6 +91,11 @@ func (p *ProverCore) ProveBatch(taskID string, chunkInfos []*message.ChunkInfo,
if err != nil {
return nil, err
}
if !p.checkChunkProofs(chunkProofsByt) {
return nil, fmt.Errorf("Non-match chunk protocol: task-id = %s", taskID)
}
proofByt := p.proveBatch(chunkInfosByt, chunkProofsByt)
err = p.mayDumpProof(taskID, proofByt)
@@ -112,6 +140,20 @@ func (p *ProverCore) TracesToChunkInfo(traces []*types.BlockTrace) (*message.Chu
return chunkInfo, json.Unmarshal(chunkInfoByt, chunkInfo)
}
func (p *ProverCore) checkChunkProofs(chunkProofsByt []byte) bool {
chunkProofsStr := C.CString(string(chunkProofsByt))
defer func() {
C.free(unsafe.Pointer(chunkProofsStr))
}()
log.Info("Start to check chunk proofs ...")
valid := C.check_chunk_proofs(chunkProofsStr)
log.Info("Finish checking chunk proofs!")
return valid != 0
}
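checkChunkProofs above and proveBatch below repeat the same cgo calling convention; here is a self-contained sketch of that pattern, with a toy C function standing in for the real libzkp entry points:

package main

/*
#include <stdlib.h>
static int toy_check(char* s) { return s[0] != '\0'; }
*/
import "C"
import "unsafe"

// toyCheck copies a Go string into C memory, calls the toy FFI function, and
// frees the copy: C.CString allocates with malloc, which the Go GC never
// reclaims, so the explicit free is mandatory.
func toyCheck(payload string) bool {
	cs := C.CString(payload)
	defer C.free(unsafe.Pointer(cs))
	return C.toy_check(cs) != 0
}

func main() { _ = toyCheck("proof") }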
func (p *ProverCore) proveBatch(chunkInfosByt []byte, chunkProofsByt []byte) []byte {
chunkInfosStr := C.CString(string(chunkInfosByt))
chunkProofsStr := C.CString(string(chunkProofsByt))

View File

@@ -4,6 +4,7 @@
package core_test
import (
"encoding/base64"
"encoding/json"
"flag"
"io"
@@ -22,9 +23,12 @@ import (
var (
paramsPath = flag.String("params", "/assets/test_params", "params dir")
assetsPath = flag.String("assets", "/assets/test_assets", "assets dir")
proofDumpPath = flag.String("dump", "/assets/proof_data", "the directory proofs are dumped to")
tracePath1 = flag.String("trace1", "/assets/traces/1_transfer.json", "chunk trace 1")
tracePath2 = flag.String("trace2", "/assets/traces/10_transfer.json", "chunk trace 2")
batchVkPath = flag.String("batch-vk", "/assets/test_assets/agg_vk.vkey", "batch vk")
chunkVkPath = flag.String("chunk-vk", "/assets/test_assets/chunk_vk.vkey", "chunk vk")
)
func TestFFI(t *testing.T) {
@@ -33,6 +37,7 @@ func TestFFI(t *testing.T) {
chunkProverConfig := &config.ProverCoreConfig{
DumpDir: *proofDumpPath,
ParamsPath: *paramsPath,
AssetsPath: *assetsPath,
ProofType: message.ProofTypeChunk,
}
chunkProverCore, err := core.NewProverCore(chunkProverConfig)
@@ -83,9 +88,13 @@ func TestFFI(t *testing.T) {
as.NoError(err)
t.Log("Generated and dumped chunk proof 2")
as.Equal(chunkProverCore.GetVk(), readVk(*chunkVkPath, as))
t.Log("Chunk VKs are equal")
batchProverConfig := &config.ProverCoreConfig{
DumpDir: *proofDumpPath,
ParamsPath: *paramsPath,
AssetsPath: *assetsPath,
ProofType: message.ProofTypeBatch,
}
batchProverCore, err := core.NewProverCore(batchProverConfig)
@@ -96,6 +105,9 @@ func TestFFI(t *testing.T) {
_, err = batchProverCore.ProveBatch("batch_proof", chunkInfos, chunkProofs)
as.NoError(err)
t.Log("Generated and dumped batch proof")
as.Equal(batchProverCore.GetVk(), readVk(*batchVkPath, as))
t.Log("Batch VKs are equal")
}
func readChunkTrace(filePat string, as *assert.Assertions) []*types.BlockTrace {
@@ -109,3 +121,12 @@ func readChunkTrace(filePat string, as *assert.Assertions) []*types.BlockTrace {
return []*types.BlockTrace{trace}
}
func readVk(filePat string, as *assert.Assertions) string {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
return base64.StdEncoding.EncodeToString(byt)
}

View File

@@ -4,6 +4,7 @@ go 1.19
require (
github.com/go-resty/resty/v2 v2.7.0
github.com/google/uuid v1.3.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
@@ -19,7 +20,6 @@ require (
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.3 // indirect
github.com/huin/goupnp v1.0.3 // indirect

View File

@@ -164,7 +164,7 @@ func (r *Prover) proveAndSubmit() error {
log.Error("failed to prove task", "task_type", task.Task.Type, "task-id", task.Task.ID, "err", err)
return r.submitErr(task, message.ProofFailureNoPanic, err)
}
- return r.submitProof(proofMsg)
+ return r.submitProof(proofMsg, task.Task.UUID)
}
// if the task has been tried >= 3 times, it's probably due to a circuit proving panic
@@ -177,6 +177,9 @@ func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
// prepare the request
req := &client.GetTaskRequest{
TaskType: r.Type(),
// we may not be able to get the vk the first time, so we pass the vk to the
// coordinator on every GetTask request instead of only once at login
VK: r.proverCore.GetVk(),
}
if req.TaskType == message.ProofTypeChunk {
@@ -200,6 +203,7 @@ func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
// create a new TaskMsg
taskMsg := message.TaskMsg{
UUID: resp.Data.UUID,
ID: resp.Data.TaskID,
Type: message.ProofType(resp.Data.TaskType),
}
@@ -292,9 +296,10 @@ func (r *Prover) proveBatch(task *store.ProvingTask) (*message.BatchProof, error
return r.proverCore.ProveBatch(task.Task.ID, task.Task.BatchTaskDetail.ChunkInfos, task.Task.BatchTaskDetail.ChunkProofs)
}
- func (r *Prover) submitProof(msg *message.ProofDetail) error {
+ func (r *Prover) submitProof(msg *message.ProofDetail, uuid string) error {
// prepare the submit request
req := &client.SubmitProofRequest{
UUID: uuid,
TaskID: msg.ID,
TaskType: int(msg.Type),
Status: int(msg.Status),
@@ -341,6 +346,7 @@ func (r *Prover) submitProof(msg *message.ProofDetail) error {
func (r *Prover) submitErr(task *store.ProvingTask, proofFailureType message.ProofFailureType, err error) error {
// prepare the submit request
req := &client.SubmitProofRequest{
UUID: task.Task.UUID,
TaskID: task.Task.ID,
TaskType: int(task.Task.Type),
Status: int(message.StatusProofError),

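The uuid thus makes a full round trip: minted by the coordinator's database on insert, returned in the GetTask response, and echoed back verbatim on submit so the coordinator can update the exact prover_task row. A minimal sketch using the client types above (helper ours):

// buildSubmitRequest carries the coordinator-issued uuid back unchanged.
func buildSubmitRequest(resp *client.GetTaskResponse, status int) *client.SubmitProofRequest {
	return &client.SubmitProofRequest{
		UUID:     resp.Data.UUID,
		TaskID:   resp.Data.TaskID,
		TaskType: resp.Data.TaskType,
		Status:   status,
	}
}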
View File

@@ -6,6 +6,7 @@ import (
"strconv"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types/message"
@@ -23,9 +24,12 @@ func TestStack(t *testing.T) {
defer s.Close()
for i := 0; i < 3; i++ {
taskUUID, uuidErr := uuid.NewRandom()
assert.NoError(t, uuidErr)
task := &ProvingTask{
Task: &message.TaskMsg{
- ID: strconv.Itoa(i),
+ UUID: taskUUID.String(),
+ ID:   strconv.Itoa(i),
},
Times: 0,
}
@@ -44,9 +48,12 @@ func TestStack(t *testing.T) {
}
// test times
taskUUID, uuidErr := uuid.NewRandom()
assert.NoError(t, uuidErr)
task := &ProvingTask{
Task: &message.TaskMsg{
- ID: strconv.Itoa(1),
+ UUID: taskUUID.String(),
+ ID:   strconv.Itoa(1),
},
Times: 0,
}

View File

@@ -51,7 +51,7 @@ func TestMain(m *testing.M) {
}
func TestCoordinatorProverInteraction(t *testing.T) {
- // Start postgres docker containers.
+ // Start postgres docker containers
base.RunL2Geth(t)
base.RunDBImage(t)

View File

@@ -113,7 +113,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
var totalL1CommitGas uint64
for _, block := range chunk.Blocks {
totalL2TxGas += block.Header.GasUsed
- totalL2TxNum += block.L2TxsNum()
+ totalL2TxNum += block.NumL2Transactions()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas += block.EstimateL1CommitGas()
}