Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-12 23:48:15 -05:00
Compare commits
9 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 24a0fd08ac | |
| | 2840485f38 | |
| | dab21fc712 | |
| | c44b7f7bf4 | |
| | a8c71b5e36 | |
| | ae2f62df00 | |
| | ce5c6e0aa3 | |
| | e8ddf99184 | |
| | ebf2b429a3 | |
@@ -226,7 +226,7 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
 			log.Info("transaction confirmed in layer2", "confirmation", cfm)
 		}
 	case cfm := <-r.gasOracleSender.ConfirmChan():
-		r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
+		r.metrics.bridgeL1GasOraclerConfirmedTotal.Inc()
 		if !cfm.IsSuccessful {
 			// @discuss: maybe make it pending again?
 			err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
@@ -183,6 +183,10 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
 	}
 
 	for i, chunk := range dbChunks {
+		// metric values
+		lastTotalL1CommitCalldataSize := totalL1CommitCalldataSize
+		lastTotalL1CommitGas := totalL1CommitGas
+
 		totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
 		totalL1CommitGas += chunk.TotalL1CommitGas
 		// adjust batch data hash gas cost
@@ -230,9 +234,9 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
 				"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
 				"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)
 
-			p.totalL1CommitGas.Set(float64(totalL1CommitGas))
-			p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
-			p.batchChunksNum.Set(float64(len(dbChunks)))
+			p.totalL1CommitGas.Set(float64(lastTotalL1CommitGas))
+			p.totalL1CommitCalldataSize.Set(float64(lastTotalL1CommitCalldataSize))
+			p.batchChunksNum.Set(float64(i))
 			return dbChunks[:i], nil
 		}
 	}
@@ -199,6 +199,13 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
 	crc := chunkRowConsumption{}
 
 	for i, block := range blocks {
+		// metric values
+		lastTotalL2TxNum := totalL2TxNum
+		lastTotalL1CommitGas := totalL1CommitGas
+		lastCrcMax := crc.max()
+		lastTotalL1CommitCalldataSize := totalL1CommitCalldataSize
+		lastTotalTxGasUsed := totalTxGasUsed
+
 		totalTxGasUsed += block.Header.GasUsed
 		totalL2TxNum += block.L2TxsNum()
 		totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
@@ -258,18 +265,18 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
 				"totalL2TxNum", totalL2TxNum,
 				"maxL2TxNumPerChunk", p.maxL2TxNumPerChunk,
 				"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
-				"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
+				"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
 				"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
-				"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
+				"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
 				"chunkRowConsumptionMax", crcMax,
 				"chunkRowConsumption", crc,
 				"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)
 
-			p.chunkL2TxNum.Set(float64(totalL2TxNum))
-			p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
-			p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
-			p.maxTxConsumption.Set(float64(crcMax))
-			p.totalTxGasUsed.Set(float64(totalTxGasUsed))
+			p.chunkL2TxNum.Set(float64(lastTotalL2TxNum))
+			p.chunkEstimateL1CommitGas.Set(float64(lastTotalL1CommitGas))
+			p.totalL1CommitCalldataSize.Set(float64(lastTotalL1CommitCalldataSize))
+			p.maxTxConsumption.Set(float64(lastCrcMax))
+			p.totalTxGasUsed.Set(float64(lastTotalTxGasUsed))
 			p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
 			return &chunk, nil
 		}
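
The four proposer hunks above make the same fix in BatchProposer and ChunkProposer: the Prometheus gauges were previously set from running totals that already included the over-limit chunk or block, so the exported values overstated what was actually proposed. The new code snapshots each total before accumulating and, when a limit is hit, reports the snapshot together with the count `i` of items actually kept. A minimal standalone sketch of the pattern (function and metric names here are stand-ins, not the repo's):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// proposeUpTo accumulates per-item costs until adding one more would exceed
// maxCost. On overflow it reports the pre-overflow snapshot, mirroring the
// lastTotal* handling introduced above.
func proposeUpTo(costs []uint64, maxCost uint64, gauge prometheus.Gauge) []uint64 {
	var total uint64
	for i, cost := range costs {
		lastTotal := total // snapshot before accumulating
		total += cost
		if total > maxCost {
			gauge.Set(float64(lastTotal)) // report what was actually kept
			return costs[:i]
		}
	}
	gauge.Set(float64(total))
	return costs
}

func main() {
	g := prometheus.NewGauge(prometheus.GaugeOpts{Name: "demo_commit_gas_total"})
	kept := proposeUpTo([]uint64{30, 40, 50}, 80, g)
	fmt.Println(len(kept)) // 2: the third item would exceed the limit
}
```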
common/libzkp/impl/Cargo.lock (generated, 28 changed lines)
@@ -32,7 +32,7 @@ dependencies = [
 [[package]]
 name = "aggregator"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "ark-std",
  "env_logger 0.10.0",
@@ -433,7 +433,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
 [[package]]
 name = "bus-mapping"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "eth-types",
  "ethers-core",
@@ -1049,7 +1049,7 @@ dependencies = [
 [[package]]
 name = "eth-types"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "ethers-core",
  "ethers-signers",
@@ -1226,7 +1226,7 @@ dependencies = [
 [[package]]
 name = "external-tracer"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "eth-types",
  "geth-utils",
@@ -1439,7 +1439,7 @@ dependencies = [
 [[package]]
 name = "gadgets"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "digest 0.7.6",
  "eth-types",
@@ -1479,7 +1479,7 @@ dependencies = [
 [[package]]
 name = "geth-utils"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "env_logger 0.9.3",
  "gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -2077,7 +2077,7 @@ dependencies = [
 [[package]]
 name = "keccak256"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "env_logger 0.9.3",
  "eth-types",
@@ -2264,7 +2264,7 @@ dependencies = [
 [[package]]
 name = "mock"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "eth-types",
  "ethers-core",
@@ -2279,7 +2279,7 @@ dependencies = [
 [[package]]
 name = "mpt-zktrie"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "bus-mapping",
  "eth-types",
@@ -2754,8 +2754,8 @@ dependencies = [
 
 [[package]]
 name = "prover"
-version = "0.7.2"
-source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.2#a29cbaae9cb52b0eb61a4418caf6fbb6eb5d28f4"
+version = "0.7.5"
+source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.5#9699d40940aed2f14d8e1958167d714bca2c9984"
 dependencies = [
  "aggregator",
  "anyhow",
@@ -4039,8 +4039,8 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
 
 [[package]]
 name = "types"
-version = "0.7.2"
-source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.2#a29cbaae9cb52b0eb61a4418caf6fbb6eb5d28f4"
+version = "0.7.5"
+source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.5#9699d40940aed2f14d8e1958167d714bca2c9984"
 dependencies = [
  "base64 0.13.1",
  "blake2",
@@ -4491,7 +4491,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
 [[package]]
 name = "zkevm-circuits"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "array-init",
  "bus-mapping",
common/libzkp/impl/Cargo.toml
@@ -20,8 +20,8 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
 halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
 
 [dependencies]
-prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.2" }
-types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.2" }
+prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.5" }
+types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.5" }
 halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
 
 base64 = "0.13.0"
@@ -7,7 +7,7 @@ import (
 	"strings"
 )
 
-var tag = "v4.1.97"
+var tag = "v4.1.106"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
@@ -78,7 +78,7 @@ func CheckScrollProverVersionTag(proverVersion string) bool {
 	if remoteTagMinor != 1 {
 		return false
 	}
-	if remoteTagPatch < 96 {
+	if remoteTagPatch < 98 {
 		return false
 	}
 	return true
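
The tag bump and the patch-floor change travel together: with `tag` now at `v4.1.106` and the floor raised from 96 to 98, provers older than `v4.1.98` are rejected. The diff does not show how `remoteTagPatch` is derived; the sketch below is one plausible way to parse a tag of the form `v4.1.98-aaa-bbb-ccc` (my own parsing, not the repo's implementation):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// minPatchOK reports whether a prover tag like "v4.1.98" meets a
// major/minor/patch floor, roughly what CheckScrollProverVersionTag
// enforces. The parsing here is an illustration only.
func minPatchOK(tag string, major, minor, patch int) bool {
	parts := strings.SplitN(strings.TrimPrefix(tag, "v"), ".", 3)
	if len(parts) != 3 {
		return false
	}
	maj, err1 := strconv.Atoi(parts[0])
	mnr, err2 := strconv.Atoi(parts[1])
	// drop any "-aaa-bbb-ccc" suffix before parsing the patch number
	pat, err3 := strconv.Atoi(strings.SplitN(parts[2], "-", 2)[0])
	if err1 != nil || err2 != nil || err3 != nil {
		return false
	}
	return maj == major && mnr == minor && pat >= patch
}

func main() {
	fmt.Println(minPatchOK("v4.1.98-aaa-bbb-ccc", 4, 1, 98)) // true
	fmt.Println(minPatchOK("v4.1.97", 4, 1, 98))             // false
}
```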
@@ -41,7 +41,7 @@ func NewGetTaskController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier
 func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
 	var getTaskParameter coordinatorType.GetTaskParameter
 	if err := ctx.ShouldBind(&getTaskParameter); err != nil {
-		nerr := fmt.Errorf("prover tasks parameter invalid, err:%w", err)
+		nerr := fmt.Errorf("prover task parameter invalid, err:%w", err)
 		coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
 		return
 	}
@@ -49,7 +49,7 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
 	proofType := ptc.proofType(&getTaskParameter)
 	proverTask, isExist := ptc.proverTasks[proofType]
 	if !isExist {
-		nerr := fmt.Errorf("parameter wrong proof type")
+		nerr := fmt.Errorf("parameter wrong proof type:%v", proofType)
 		coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
 		return
 	}
@@ -14,7 +14,7 @@ import (
 	"scroll-tech/coordinator/internal/config"
 	"scroll-tech/coordinator/internal/logic/submitproof"
 	"scroll-tech/coordinator/internal/logic/verifier"
-	coodinatorType "scroll-tech/coordinator/internal/types"
+	coordinatorType "scroll-tech/coordinator/internal/types"
 )
 
 // SubmitProofController the submit proof api controller
@@ -31,10 +31,10 @@ func NewSubmitProofController(cfg *config.Config, db *gorm.DB, vf *verifier.Veri
 
 // SubmitProof prover submit the proof to coordinator
 func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
-	var spp coodinatorType.SubmitProofParameter
+	var spp coordinatorType.SubmitProofParameter
 	if err := ctx.ShouldBind(&spp); err != nil {
 		nerr := fmt.Errorf("parameter invalid, err:%w", err)
-		coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
+		coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
 		return
 	}
 
@@ -52,7 +52,7 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
 		var tmpChunkProof message.ChunkProof
 		if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
 			nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
-			coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
+			coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
 			return
 		}
 		proofMsg.ChunkProof = &tmpChunkProof
@@ -60,7 +60,7 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
 		var tmpBatchProof message.BatchProof
 		if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
 			nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
-			coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
+			coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
 			return
 		}
 		proofMsg.BatchProof = &tmpBatchProof
@@ -69,8 +69,8 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
 
 	if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil {
 		nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
-		coodinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
+		coordinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
 		return
 	}
-	coodinatorType.RenderJSON(ctx, types.Success, nil, nil)
+	coordinatorType.RenderJSON(ctx, types.Success, nil, nil)
 }
@@ -29,10 +29,11 @@ type Collector struct {
 	chunkOrm *orm.Chunk
 	batchOrm *orm.Batch
 
-	timeoutBatchCheckerRunTotal prometheus.Counter
-	batchProverTaskTimeoutTotal prometheus.Counter
-	timeoutChunkCheckerRunTotal prometheus.Counter
-	chunkProverTaskTimeoutTotal prometheus.Counter
+	timeoutBatchCheckerRunTotal     prometheus.Counter
+	batchProverTaskTimeoutTotal     prometheus.Counter
+	timeoutChunkCheckerRunTotal     prometheus.Counter
+	chunkProverTaskTimeoutTotal     prometheus.Counter
+	checkBatchAllChunkReadyRunTotal prometheus.Counter
 }
 
 // NewCollector create a collector to cron collect the data to send to prover
@@ -62,10 +63,15 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
 			Name: "coordinator_chunk_prover_task_timeout_total",
 			Help: "Total number of chunk timeout prover task.",
 		}),
+		checkBatchAllChunkReadyRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "coordinator_check_batch_all_chunk_ready_run_total",
+			Help: "Total number of check batch all chunks ready total",
+		}),
 	}
 
 	go c.timeoutBatchProofTask()
 	go c.timeoutChunkProofTask()
+	go c.checkBatchAllChunkReady()
 
 	log.Info("Start coordinator successfully.")
 
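
`promauto.With(reg)` returns a factory whose metrics are registered on `reg` at construction time, which is why the new counter in the hunk above is just one struct field plus one literal entry. A self-contained sketch of that registration pattern (the metric name is taken from the hunk; everything else is illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	// A dedicated registry, as NewCollector receives via its reg parameter.
	reg := prometheus.NewRegistry()

	// The counter is registered on reg as a side effect of construction;
	// no separate MustRegister call is needed.
	c := promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "coordinator_check_batch_all_chunk_ready_run_total",
		Help: "Total number of check batch all chunks ready total",
	})
	c.Inc()

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	fmt.Println(mfs[0].GetName()) // the counter is already registered
}
```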
@@ -79,7 +85,6 @@ func (c *Collector) Stop() {
 
 // timeoutTask cron check the send task is timeout. if timeout reached, restore the
 // chunk/batch task to unassigned. then the batch/chunk collector can retry it.
-
 func (c *Collector) timeoutBatchProofTask() {
 	defer func() {
 		if err := recover(); err != nil {
@@ -189,3 +194,60 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
 		}
 	}
 }
+
+func (c *Collector) checkBatchAllChunkReady() {
+	defer func() {
+		if err := recover(); err != nil {
+			nerr := fmt.Errorf("check batch all chunk ready panic error:%v", err)
+			log.Warn(nerr.Error())
+		}
+	}()
+
+	ticker := time.NewTicker(time.Second * 10)
+	for {
+		select {
+		case <-ticker.C:
+			c.checkBatchAllChunkReadyRunTotal.Inc()
+			page := 1
+			pageSize := 50
+			for {
+				offset := (page - 1) * pageSize
+				batches, err := c.batchOrm.GetUnassignedAndChunksUnreadyBatches(c.ctx, offset, pageSize)
+				if err != nil {
+					log.Warn("checkBatchAllChunkReady GetUnassignedAndChunksUnreadyBatches", "error", err)
+					break
+				}
+
+				for _, batch := range batches {
+					allReady, checkErr := c.chunkOrm.CheckIfBatchChunkProofsAreReady(c.ctx, batch.Hash)
+					if checkErr != nil {
+						log.Warn("checkBatchAllChunkReady CheckIfBatchChunkProofsAreReady failure", "error", checkErr, "hash", batch.Hash)
+						continue
+					}
+
+					if !allReady {
+						continue
+					}
+
+					if updateErr := c.batchOrm.UpdateChunkProofsStatusByBatchHash(c.ctx, batch.Hash, types.ChunkProofsStatusReady); updateErr != nil {
+						log.Warn("checkBatchAllChunkReady UpdateChunkProofsStatusByBatchHash failure", "error", updateErr, "hash", batch.Hash)
+					}
+				}
+
+				if len(batches) < pageSize {
+					break
+				}
+				page++
+			}
+
+		case <-c.ctx.Done():
+			if c.ctx.Err() != nil {
+				log.Error("manager context canceled with error", "error", c.ctx.Err())
+			}
+			return
+		case <-c.stopTimeoutChan:
+			log.Info("the coordinator run loop exit")
+			return
+		}
+	}
+}
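
The new background loop polls every ten seconds and pages through candidate batches with an offset/limit scan, stopping when a short page signals the end. Extracted into a generic helper, the loop shape looks like this (`forEachPage` and its fetch callback are hypothetical, not repo code):

```go
package main

import "fmt"

// forEachPage visits successive offset/limit windows until a short page
// signals the end, the same shape as the polling loop added above. fetch is
// a stand-in for a call like GetUnassignedAndChunksUnreadyBatches.
func forEachPage(pageSize int, fetch func(offset, limit int) (n int, err error)) error {
	for page := 1; ; page++ {
		n, err := fetch((page-1)*pageSize, pageSize)
		if err != nil {
			return err
		}
		if n < pageSize {
			return nil // short page: nothing further to scan
		}
	}
}

func main() {
	items := make([]int, 120)
	err := forEachPage(50, func(offset, limit int) (int, error) {
		end := offset + limit
		if end > len(items) {
			end = len(items)
		}
		fmt.Printf("processing items[%d:%d]\n", offset, end)
		return end - offset, nil
	})
	fmt.Println("done, err =", err) // three pages: 50, 50, 20
}
```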
@@ -101,7 +101,8 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	}
 
 	if len(batchTasks) != 1 {
-		return nil, fmt.Errorf("get unassigned batch proving task len not 1, batch tasks:%v", batchTasks)
+		log.Error("get unassigned batch proving task len not 1", "length", len(batchTasks), "batch tasks", batchTasks)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	batchTask := batchTasks[0]
@@ -109,7 +110,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 
 	if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
 		bp.batchAttemptsExceedTotal.Inc()
-		return nil, fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
+		// TODO: retry fetching unassigned batch proving task
+		log.Error("batch task proving attempts reach the maximum", "hash", batchTask.Hash)
+		return nil, nil
 	}
 
 	proverTask := orm.ProverTask{
@@ -127,13 +130,15 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	// Store session info.
 	if err = bp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
 		bp.recoverProvingStatus(ctx, batchTask)
-		return nil, fmt.Errorf("db set session info fail, session id:%s, error:%w", proverTask.TaskID, err)
+		log.Error("db set session info fail", "task hash", batchTask.Hash, "prover name", proverName.(string), "prover pubKey", publicKey.(string), "err", err)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	taskMsg, err := bp.formatProverTask(ctx, batchTask.Hash)
 	if err != nil {
 		bp.recoverProvingStatus(ctx, batchTask)
-		return nil, fmt.Errorf("format prover failure, id:%s error:%w", batchTask.Hash, err)
+		log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	bp.batchTaskGetTaskTotal.Inc()
@@ -22,6 +22,9 @@ import (
 	coordinatorType "scroll-tech/coordinator/internal/types"
 )
 
+// ErrCoordinatorInternalFailure coordinator internal db failure
+var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
+
 // ChunkProverTask the chunk prover task
 type ChunkProverTask struct {
 	BaseProverTask
@@ -94,7 +97,8 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	// load and send chunk tasks
 	chunkTasks, err := cp.chunkOrm.UpdateUnassignedChunkReturning(ctx, getTaskParameter.ProverHeight, 1)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get unassigned chunk proving tasks, error:%w", err)
+		log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", err)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	if len(chunkTasks) == 0 {
@@ -102,7 +106,8 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	}
 
 	if len(chunkTasks) != 1 {
-		return nil, fmt.Errorf("get unassigned chunk proving task len not 1, chunk tasks:%v", chunkTasks)
+		log.Error("get unassigned chunk proving task len not 1", "length", len(chunkTasks), "chunk tasks", chunkTasks)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	chunkTask := chunkTasks[0]
@@ -111,7 +116,9 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 
 	if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
 		cp.chunkAttemptsExceedTotal.Inc()
-		return nil, fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
+		// TODO: retry fetching unassigned chunk proving task
+		log.Error("chunk task proving attempts reach the maximum", "hash", chunkTask.Hash)
+		return nil, nil
 	}
 
 	proverTask := orm.ProverTask{
@@ -127,13 +134,15 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	}
 	if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
 		cp.recoverProvingStatus(ctx, chunkTask)
-		return nil, fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, publicKey, err)
+		log.Error("db set session info fail", "task hash", chunkTask.Hash, "prover name", proverName.(string), "prover pubKey", publicKey.(string), "err", err)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	taskMsg, err := cp.formatProverTask(ctx, chunkTask.Hash)
 	if err != nil {
 		cp.recoverProvingStatus(ctx, chunkTask)
-		return nil, fmt.Errorf("format prover task failure, id:%s error:%w", chunkTask.Hash, err)
+		log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	cp.chunkTaskGetTaskTotal.Inc()
@@ -34,6 +34,12 @@ var (
 	ErrValidatorFailureProofTimeout = errors.New("validator failure submit proof timeout")
 	// ErrValidatorFailureTaskHaveVerifiedSuccess have proved success and verified success
 	ErrValidatorFailureTaskHaveVerifiedSuccess = errors.New("validator failure chunk/batch have proved and verified success")
+	// ErrValidatorFailureVerifiedFailed failed to verify and the verifier returns error
+	ErrValidatorFailureVerifiedFailed = fmt.Errorf("verification failed, verifier returns error")
+	// ErrValidatorSuccessInvalidProof successful verified and the proof is invalid
+	ErrValidatorSuccessInvalidProof = fmt.Errorf("verification succeeded, it's an invalid proof")
+	// ErrCoordinatorInternalFailure coordinator internal db failure
+	ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
 )
 
 // ProofReceiverLogic the proof receiver logic
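
Collapsing failure modes into package-level sentinel errors lets the HTTP layer branch on the cause with `errors.Is` while the detailed context stays in the coordinator's logs. A sketch of how a caller might map these sentinels to response codes (the sentinels mirror the hunk above; the numeric codes are placeholders, not the ones in scroll-tech's types package):

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinels mirroring the ones the hunk introduces.
var (
	ErrValidatorFailureVerifiedFailed = errors.New("verification failed, verifier returns error")
	ErrValidatorSuccessInvalidProof   = errors.New("verification succeeded, it's an invalid proof")
	ErrCoordinatorInternalFailure     = errors.New("coordinator internal error")
)

// codeFor maps a HandleZkProof error to a response code. errors.Is keeps
// the comparison robust even if the sentinel is later wrapped with %w.
func codeFor(err error) int {
	switch {
	case err == nil:
		return 0
	case errors.Is(err, ErrValidatorSuccessInvalidProof):
		return 1
	case errors.Is(err, ErrValidatorFailureVerifiedFailed):
		return 2
	default:
		return 3 // internal failure or unknown
	}
}

func main() {
	fmt.Println(codeFor(ErrValidatorSuccessInvalidProof)) // 1
	fmt.Println(codeFor(ErrCoordinatorInternalFailure))   // 3
}
```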
@@ -162,10 +168,10 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
 		log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
 			"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
 
-		if verifyErr == nil {
-			verifyErr = fmt.Errorf("verification succeeded and it's an invalid proof")
+		if verifyErr != nil {
+			return ErrValidatorFailureVerifiedFailed
 		}
-		return verifyErr
+		return ErrValidatorSuccessInvalidProof
 	}
 
 	m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())
@@ -176,7 +182,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
 	if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg, proofTimeSec); err != nil {
 		m.proofSubmitFailure.Inc()
 		m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
-		return err
+		return ErrCoordinatorInternalFailure
 	}
 
 	return nil
@@ -377,7 +383,7 @@ func (m *ProofReceiverLogic) processProverErr(ctx context.Context, taskID, pk st
 		log.Error("update prover task proving status failure", "taskID", taskID, "proverPublicKey", pk, "taskType", taskType, "error", updateErr)
 	}
 
-	proverTasks, err := m.proverTaskOrm.GetValidOrAssignedTaskOfOtherProvers(ctx, taskType, taskID, pk)
+	proverTasks, err := m.proverTaskOrm.GetAssignedTaskOfOtherProvers(ctx, taskType, taskID, pk)
 	if err != nil {
 		log.Warn("checkIsAssignedToOtherProver failure", "taskID", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
 		return
@@ -91,6 +91,26 @@ func (o *Batch) GetUnassignedBatches(ctx context.Context, limit int) ([]*Batch,
 	return batches, nil
 }
 
+// GetUnassignedAndChunksUnreadyBatches get the batches which is unassigned and chunks is not ready
+func (o *Batch) GetUnassignedAndChunksUnreadyBatches(ctx context.Context, offset, limit int) ([]*Batch, error) {
+	if offset < 0 || limit < 0 {
+		return nil, errors.New("limit and offset must not be smaller than 0")
+	}
+
+	db := o.db.WithContext(ctx)
+	db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
+	db = db.Where("chunk_proofs_status = ?", types.ChunkProofsStatusPending)
+	db = db.Order("index ASC")
+	db = db.Offset(offset)
+	db = db.Limit(limit)
+
+	var batches []*Batch
+	if err := db.Find(&batches).Error; err != nil {
+		return nil, fmt.Errorf("Batch.GetUnassignedAndChunksUnreadyBatches error: %w", err)
+	}
+	return batches, nil
+}
+
 // GetAssignedBatches retrieves all batches whose proving_status is either types.ProvingTaskAssigned.
 func (o *Batch) GetAssignedBatches(ctx context.Context) ([]*Batch, error) {
 	db := o.db.WithContext(ctx)
@@ -131,14 +131,14 @@ func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskTyp
 	return &proverTask, nil
 }
 
-// GetValidOrAssignedTaskOfOtherProvers get the chunk/batch task assigned other provers
-func (o *ProverTask) GetValidOrAssignedTaskOfOtherProvers(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey string) ([]ProverTask, error) {
+// GetAssignedTaskOfOtherProvers get the chunk/batch task assigned other provers
+func (o *ProverTask) GetAssignedTaskOfOtherProvers(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey string) ([]ProverTask, error) {
 	db := o.db.WithContext(ctx)
 	db = db.Model(&ProverTask{})
 	db = db.Where("task_type", int(taskType))
 	db = db.Where("task_id", taskID)
 	db = db.Where("prover_public_key != ?", proverPublicKey)
-	db = db.Where("proving_status in (?)", []int{int(types.ProverAssigned), int(types.ProverProofValid)})
+	db = db.Where("proving_status = ?", int(types.ProverAssigned))
 
 	var proverTasks []ProverTask
 	if err := db.Find(&proverTasks).Error; err != nil {
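
Each reassigned `db = db.Where(...)` adds one more AND clause to a single statement, so the rewrite narrows the match from two proving statuses to `ProverAssigned` only. GORM's dry-run mode makes the generated SQL visible; a sketch (hand-built model and values, with explicit `?` placeholders rather than the shorthand the repo uses):

```go
package main

import (
	"fmt"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// ProverTask is a stripped-down stand-in for the ORM model above.
type ProverTask struct {
	TaskType        int
	TaskID          string
	ProverPublicKey string
	ProvingStatus   int
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}

	// DryRun builds the statement without executing it, exposing how the
	// chained Where calls AND together into one WHERE clause.
	var tasks []ProverTask
	stmt := db.Session(&gorm.Session{DryRun: true}).
		Model(&ProverTask{}).
		Where("task_type = ?", 1).
		Where("task_id = ?", "task-hash").
		Where("prover_public_key != ?", "pk").
		Where("proving_status = ?", 1). // was: proving_status in (?) with two statuses
		Find(&tasks).Statement

	fmt.Println(stmt.SQL.String())
}
```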
@@ -110,7 +110,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
 }
 
 func setEnv(t *testing.T) {
-	version.Version = "v4.1.97-aaa-bbb-ccc"
+	version.Version = "v4.1.98-aaa-bbb-ccc"
 
 	base = docker.NewDockerApp()
 	base.RunDBImage(t)