Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 07:28:08 -05:00)
Compare commits
24 Commits
| SHA1 |
|---|
| 9ee65119d8 |
| fb1c800532 |
| 2baad2ecad |
| bdf2968771 |
| 4d09e13b0c |
| a98a2ff4b5 |
| 2a0c7ae6b5 |
| b0b6a3db5e |
| 604a4407a2 |
| fe0ecc366d |
| 8bb9b22741 |
| 27857b6fbb |
| d7ce6c0b6f |
| 0b5b0dde4c |
| f44ec751a0 |
| 8013ab8529 |
| f16a300d87 |
| 559ad6a5f1 |
| ab34ff0315 |
| d183125c59 |
| a36b4bb8ed |
| 9ce3d4188c |
| 51fa5c823c |
| 7fce3c30c6 |
Makefile (14 changed lines)

@@ -15,18 +15,22 @@ lint: ## The code's format and security checks.

update: ## update dependencies
go work sync
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
goimports -local $(PWD)/database/ -w .
goimports -local $(PWD)/prover/ -w .
goimports -local $(PWD)/prover-stats-api/ -w .
goimports -local $(PWD)/tests/integration-test/ -w .

dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
@@ -53,6 +53,7 @@ func (m *MsgProofUpdater) Start() {
continue
}
latestBatchIndexWithProof, err := m.l2SentMsgOrm.GetLatestL2SentMsgBatchIndex(m.ctx)
log.Info("latest batch with proof", "batch_index", latestBatchIndexWithProof)
if err != nil {
log.Error("MsgProofUpdater: Can not get latest L2SentMsgBatchIndex: ", "err", err)
continue
@@ -75,8 +76,16 @@ func (m *MsgProofUpdater) Start() {
log.Error("MsgProofUpdater: can not append l2messages", "startBlockNumber", batch.StartBlockNumber, "endBlockNumber", batch.EndBlockNumber, "err", err)
break
}
// here we update batch withdraw root
err = m.rollupOrm.UpdateRollupBatchWithdrawRoot(m.ctx, batch.BatchIndex, m.withdrawTrie.MessageRoot().Hex())
if err != nil {
// if failed better restart the binary
log.Error("MsgProofUpdater: can not update batch withdraw root", "err", err)
break
}
err = m.updateMsgProof(msgs, proofs, batch.BatchIndex)
if err != nil {
// if failed better restart the binary
log.Error("MsgProofUpdater: can not update msg proof", "err", err)
break
}
@@ -174,7 +183,10 @@ func (m *MsgProofUpdater) initializeWithdrawTrie() error {
if err != nil {
return err
}

err = m.rollupOrm.UpdateRollupBatchWithdrawRoot(m.ctx, b.BatchIndex, m.withdrawTrie.MessageRoot().Hex())
if err != nil {
return err
}
err = m.updateMsgProof(msgs, proofs, b.BatchIndex)
if err != nil {
return err
@@ -62,7 +62,7 @@ require (
github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.2 // indirect
github.com/holiman/uint256 v1.2.3 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect

@@ -214,8 +214,8 @@ github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
bridge-history-api/internal/controller/batch_controller.go (new file, 37 lines)

@@ -0,0 +1,37 @@
package controller

import (
    "github.com/gin-gonic/gin"
    "gorm.io/gorm"

    "bridge-history-api/internal/logic"
    "bridge-history-api/internal/types"
)

// BatchController contains the query claimable txs service
type BatchController struct {
    batchLogic *logic.BatchLogic
}

// NewBatchController return NewBatchController instance
func NewBatchController(db *gorm.DB) *BatchController {
    return &BatchController{
        batchLogic: logic.NewBatchLogic(db),
    }
}

// GetWithdrawRootByBatchIndex defines the http get method behavior
func (b *BatchController) GetWithdrawRootByBatchIndex(ctx *gin.Context) {
    var req types.QueryByBatchIndexRequest
    if err := ctx.ShouldBind(&req); err != nil {
        types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
        return
    }
    result, err := b.batchLogic.GetWithdrawRootByBatchIndex(ctx, req.BatchIndex)
    if err != nil {
        types.RenderJSON(ctx, types.ErrGetWithdrawRootByBatchIndexFailure, err, nil)
        return
    }

    types.RenderJSON(ctx, types.Success, nil, result)
}
@@ -8,7 +8,9 @@ import (

var (
// HistoryCtrler is controller instance
HistoryCtrler *HistoryController
HistoryCtrler *HistoryController
// BatchCtrler is controller instance
BatchCtrler *BatchController
initControllerOnce sync.Once
)

@@ -16,5 +18,6 @@ var (
func InitController(db *gorm.DB) {
initControllerOnce.Do(func() {
HistoryCtrler = NewHistoryController(db)
BatchCtrler = NewBatchController(db)
})
}
bridge-history-api/internal/logic/batch_logic.go (new file, 35 lines)

@@ -0,0 +1,35 @@
package logic

import (
    "context"

    "github.com/ethereum/go-ethereum/log"
    "gorm.io/gorm"

    "bridge-history-api/orm"
)

// BatchLogic example service.
type BatchLogic struct {
    rollupOrm *orm.RollupBatch
}

// NewBatchLogic returns services backed with a "db"
func NewBatchLogic(db *gorm.DB) *BatchLogic {
    logic := &BatchLogic{rollupOrm: orm.NewRollupBatch(db)}
    return logic
}

// GetWithdrawRootByBatchIndex get withdraw root by batch index from db
func (b *BatchLogic) GetWithdrawRootByBatchIndex(ctx context.Context, batchIndex uint64) (string, error) {
    batch, err := b.rollupOrm.GetRollupBatchByIndex(ctx, batchIndex)
    if err != nil {
        log.Debug("getWithdrawRootByBatchIndex failed", "error", err)
        return "", err
    }
    if batch == nil {
        log.Debug("getWithdrawRootByBatchIndex failed", "error", "batch not found")
        return "", nil
    }
    return batch.WithdrawRoot, nil
}
@@ -112,6 +112,8 @@ func (h *HistoryLogic) GetClaimableTxsByAddress(ctx context.Context, address com
txInfo.To = crossMsg.Target
txInfo.BlockTimestamp = crossMsg.Timestamp
txInfo.CreatedAt = crossMsg.CreatedAt
txInfo.L1Token = crossMsg.Layer1Token
txInfo.L2Token = crossMsg.Layer2Token
}
txHistories = append(txHistories, txInfo)
}
@@ -136,6 +138,8 @@ func (h *HistoryLogic) GetTxsByAddress(ctx context.Context, address common.Addre
Hash: msg.Layer1Hash + msg.Layer2Hash,
Amount: msg.Amount,
To: msg.Target,
L1Token: msg.Layer1Token,
L2Token: msg.Layer2Token,
IsL1: msg.MsgType == int(orm.Layer1Msg),
BlockNumber: msg.Height,
BlockTimestamp: msg.Timestamp,
@@ -166,6 +170,8 @@ func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, hashes []string) ([]*
Amount: l1result.Amount,
To: l1result.Target,
IsL1: true,
L1Token: l1result.Layer1Token,
L2Token: l1result.Layer2Token,
BlockNumber: l1result.Height,
BlockTimestamp: l1result.Timestamp,
CreatedAt: l1result.CreatedAt,
@@ -187,6 +193,8 @@
Amount: l2result.Amount,
To: l2result.Target,
IsL1: false,
L1Token: l2result.Layer1Token,
L2Token: l2result.Layer2Token,
BlockNumber: l2result.Height,
BlockTimestamp: l2result.Timestamp,
CreatedAt: l2result.CreatedAt,
@@ -15,6 +15,7 @@ func Route(router *gin.Engine, conf *config.Config) {
router.Use(cors.New(cors.Config{
AllowOrigins: []string{"*"},
AllowMethods: []string{"GET", "POST", "PUT", "DELETE"},
AllowHeaders: []string{"Origin", "Content-Type", "Authorization"},
AllowCredentials: true,
MaxAge: 12 * time.Hour,
}))
@@ -23,4 +24,5 @@ func Route(router *gin.Engine, conf *config.Config) {
r.GET("/txs", controller.HistoryCtrler.GetAllTxsByAddr)
r.POST("/txsbyhashes", controller.HistoryCtrler.PostQueryTxsByHash)
r.GET("/claimable", controller.HistoryCtrler.GetAllClaimableTxsByAddr)
r.GET("/withdraw_root", controller.BatchCtrler.GetWithdrawRootByBatchIndex)
}
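For reference, a minimal Go sketch of querying the withdraw-root endpoint registered above. Only the `/withdraw_root` path and the `batch_index` query parameter come from this change; the host, port, route-group prefix, and the shape of the JSON envelope are assumptions.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical base URL and route prefix; adjust to the actual deployment.
	// batch_index must be non-zero (see QueryByBatchIndexRequest below).
	url := "http://localhost:8080/api/withdraw_root?batch_index=42"

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The handler responds via types.RenderJSON; print the raw JSON envelope here.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```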
@@ -18,6 +18,8 @@ const (
ErrGetTxsByHashFailure = 40003
// ErrGetTxsByAddrFailure is getting txs by address error
ErrGetTxsByAddrFailure = 40004
// ErrGetWithdrawRootByBatchIndexFailure is getting withdraw root by batch index error
ErrGetWithdrawRootByBatchIndexFailure = 40005
)

// QueryByAddressRequest the request parameter of address api
@@ -32,6 +34,12 @@ type QueryByHashRequest struct {
Txs []string `raw:"txs" binding:"required"`
}

// QueryByBatchIndexRequest the request parameter of batch index api
type QueryByBatchIndexRequest struct {
// BatchIndex can not be 0, because we dont decode the genesis block
BatchIndex uint64 `form:"batch_index" binding:"required"`
}

// ResultData contains return txs and total
type ResultData struct {
Result []*TxHistoryInfo `json:"result"`
@@ -73,6 +81,8 @@ type TxHistoryInfo struct {
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
L1Token string `json:"l1Token"`
L2Token string `json:"l2Token"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
FinalizeTx *Finalized `json:"finalizeTx"`
@@ -19,6 +19,7 @@ type RollupBatch struct {
CommitHeight uint64 `json:"commit_height" gorm:"column:commit_height"`
StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"`
EndBlockNumber uint64 `json:"end_block_number" gorm:"column:end_block_number"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root;default:NULL"`
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
@@ -92,3 +93,12 @@ func (r *RollupBatch) InsertRollupBatch(ctx context.Context, batches []*RollupBa
}
return nil
}

// UpdateRollupBatchWithdrawRoot updates the withdraw_root column in rollup_batch table
func (r *RollupBatch) UpdateRollupBatchWithdrawRoot(ctx context.Context, batchIndex uint64, withdrawRoot string) error {
err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_index = ?", batchIndex).Update("withdraw_root", withdrawRoot).Error
if err != nil {
return fmt.Errorf("RollupBatch.UpdateRollupBatchWithdrawRoot error: %w", err)
}
return nil
}
@@ -106,7 +106,7 @@ func (l *L2SentMsg) GetLatestL2SentMsgBatchIndex(ctx context.Context) (int64, er
return -1, fmt.Errorf("L2SentMsg.GetLatestL2SentMsgBatchIndex error: %w", err)
}
// Watch for overflow, tho its not likely to happen
return int64(result.Height), nil
return int64(result.BatchIndex), nil
}

// GetL2SentMsgMsgHashByHeightRange get l2 sent msg msg hash by height range
@@ -8,6 +8,7 @@ create table rollup_batch
start_block_number BIGINT NOT NULL,
end_block_number BIGINT NOT NULL,
batch_hash VARCHAR NOT NULL,
withdraw_root TEXT DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
@@ -79,6 +79,7 @@
"max_l1_commit_calldata_size_per_chunk": 112345,
"min_l1_commit_calldata_size_per_chunk": 11234,
"chunk_timeout_sec": 300,
"max_row_consumption_per_chunk": 1048319,
"gas_cost_increase_multiplier": 1.2
},
"batch_proposer_config": {
@@ -6,7 +6,7 @@ require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
@@ -26,7 +26,7 @@ require (
github.com/google/uuid v1.3.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.2 // indirect
github.com/holiman/uint256 v1.2.3 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
@@ -43,7 +43,7 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.5.3 // indirect
github.com/scroll-tech/zktrie v0.6.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/smartystreets/assertions v1.13.1 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
@@ -43,8 +43,8 @@ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
@@ -104,10 +104,10 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY=
github.com/scroll-tech/zktrie v0.5.3 h1:jjzQchGU6XPL5s1C5bwwivSadefSRuYASE9OL7UKAdE=
github.com/scroll-tech/zktrie v0.5.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca h1:aQKsL9FiCPB6Eo3nuyxmw6aZpAhmFvJsRb7XGtXnOUc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/zktrie v0.6.0 h1:xLrMAO31Yo2BiPg1jtYKzcjpEFnXy8acbB7iIsyshPs=
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -34,6 +34,7 @@ type ChunkProposerConfig struct {
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
}
@@ -10,6 +10,7 @@ import (

"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
@@ -171,9 +172,10 @@ func (r *Layer2Relayer) initializeGenesis() error {

chunk := &types.Chunk{
Blocks: []*types.WrappedBlock{{
Header: genesis,
Transactions: nil,
WithdrawRoot: common.Hash{},
Header: genesis,
Transactions: nil,
WithdrawRoot: common.Hash{},
RowConsumption: &gethTypes.RowConsumption{},
}},
}

@@ -266,8 +268,8 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
if err != nil {
log.Error("Failed to GetLatestBatch", "err", err)
if batch == nil || err != nil {
log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
return
}
@@ -28,6 +28,7 @@ func testBatchProposer(t *testing.T) {
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db)
cp.TryProposeChunk()
@@ -2,9 +2,11 @@ package watcher

import (
"context"
"errors"
"fmt"
"time"

gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"

@@ -14,6 +16,31 @@ import (
"scroll-tech/bridge/internal/orm"
)

// chunkRowConsumption is map(sub-circuit name => sub-circuit row count)
type chunkRowConsumption map[string]uint64

// add accumulates row consumption per sub-circuit
func (crc *chunkRowConsumption) add(rowConsumption *gethTypes.RowConsumption) error {
if rowConsumption == nil {
return errors.New("rowConsumption is <nil>")
}
for _, subCircuit := range *rowConsumption {
(*crc)[subCircuit.Name] += subCircuit.RowNumber
}
return nil
}

// max finds the maximum row consumption among all sub-circuits
func (crc *chunkRowConsumption) max() uint64 {
var max uint64
for _, value := range *crc {
if value > max {
max = value
}
}
return max
}

// ChunkProposer proposes chunks based on available unchunked blocks.
type ChunkProposer struct {
ctx context.Context
@@ -27,6 +54,7 @@ type ChunkProposer struct {
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
minL1CommitCalldataSizePerChunk uint64
maxRowConsumptionPerChunk uint64
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
}
@@ -43,6 +71,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
}
@@ -95,8 +124,13 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
totalTxGasUsed := firstBlock.Header.GasUsed
totalL2TxNum := firstBlock.L2TxsNum()
totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
crc := chunkRowConsumption{}
totalL1CommitGas := chunk.EstimateL1CommitGas()

if err := crc.add(firstBlock.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}

// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if totalL2TxNum > p.maxL2TxNumPerChunk {
@@ -135,6 +169,15 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"max gas limit", p.maxTxGasPerChunk,
)
}
if max := crc.max(); max > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
firstBlock.Header.Number,
crc,
max,
p.maxRowConsumptionPerChunk,
)
}

for _, block := range blocks[1:] {
chunk.Blocks = append(chunk.Blocks, block)
@@ -142,10 +185,16 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas = chunk.EstimateL1CommitGas()

if err := crc.add(block.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}

if totalTxGasUsed > p.maxTxGasPerChunk ||
totalL2TxNum > p.maxL2TxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) ||
crc.max() > p.maxRowConsumptionPerChunk {
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // remove the last block from chunk
break
}
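A self-contained sketch of how the chunkRowConsumption helpers introduced above aggregate per-sub-circuit row usage and feed the new max-row check. The RowConsumption types here are simplified stand-ins for the scroll-tech/go-ethereum types used by the chunk proposer, not their real definitions.

```go
package main

import "fmt"

// Simplified stand-ins for gethTypes.RowConsumption; assumed shape (Name, RowNumber).
type SubCircuitRowUsage struct {
	Name      string
	RowNumber uint64
}
type RowConsumption []SubCircuitRowUsage

// chunkRowConsumption mirrors the proposer's map:
// sub-circuit name -> accumulated row count across all blocks in the chunk.
type chunkRowConsumption map[string]uint64

func (crc chunkRowConsumption) add(rc RowConsumption) {
	for _, sc := range rc {
		crc[sc.Name] += sc.RowNumber
	}
}

func (crc chunkRowConsumption) max() uint64 {
	var m uint64
	for _, v := range crc {
		if v > m {
			m = v
		}
	}
	return m
}

func main() {
	crc := chunkRowConsumption{}
	// Two blocks' worth of row usage, as a worked example.
	crc.add(RowConsumption{{"evm", 100}, {"keccak", 40}})
	crc.add(RowConsumption{{"evm", 50}, {"state", 120}})

	// The proposer compares the busiest sub-circuit against maxRowConsumptionPerChunk.
	fmt.Println(crc.max() > 1048319) // false: 150 rows is far below the limit
}
```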
@@ -28,6 +28,7 @@ func testChunkProposer(t *testing.T) {
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db)
cp.TryProposeChunk()
@@ -44,3 +45,28 @@ func testChunkProposer(t *testing.T) {
assert.Len(t, chunks, 1)
assert.Equal(t, expectedHash.Hex(), chunks[0].Hash)
}

func testChunkProposerRowConsumption(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 0, // !
ChunkTimeoutSec: 300,
}, db)
cp.TryProposeChunk()

chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
assert.NoError(t, err)
assert.Len(t, chunks, 0)
}
@@ -162,9 +162,12 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
var blocks []*types.WrappedBlock
for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number)
block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number)))
if err2 != nil {
return fmt.Errorf("failed to GetBlockByNumber: %v. number: %v", err2, number)
block, err := w.GetBlockByNumberOrHash(ctx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(number)))
if err != nil {
return fmt.Errorf("failed to GetBlockByNumberOrHash: %v. number: %v", err, number)
}
if block.RowConsumption == nil {
return fmt.Errorf("fetched block does not contain RowConsumption. number: %v", number)
}

log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String())
@@ -173,11 +176,11 @@
if err3 != nil {
return fmt.Errorf("failed to get withdrawRoot: %v. number: %v", err3, number)
}

blocks = append(blocks, &types.WrappedBlock{
Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()),
WithdrawRoot: common.BytesToHash(withdrawRoot),
Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()),
WithdrawRoot: common.BytesToHash(withdrawRoot),
RowConsumption: block.RowConsumption,
})
}
@@ -112,6 +112,7 @@ func TestFunction(t *testing.T) {

// Run chunk-proposer test cases.
t.Run("TestChunkProposer", testChunkProposer)
t.Run("TestChunkProposerRowConsumption", testChunkProposerRowConsumption)

// Run batch-proposer test cases.
t.Run("TestBatchProposer", testBatchProposer)
@@ -29,6 +29,7 @@ type L2Block struct {
TxNum uint32 `json:"tx_num" gorm:"tx_num"`
GasUsed uint64 `json:"gas_used" gorm:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
RowConsumption string `json:"row_consumption" gorm:"row_consumption"`

// chunk
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
@@ -68,7 +69,7 @@ func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) {
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_root")
db = db.Select("header, transactions, withdraw_root, row_consumption")
db = db.Where("chunk_hash IS NULL")
db = db.Order("number ASC")

@@ -91,6 +92,11 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
}

wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)

if err := json.Unmarshal([]byte(v.RowConsumption), &wrappedBlock.RowConsumption); err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
}

wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
}

@@ -134,7 +140,7 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6

db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_root")
db = db.Select("header, transactions, withdraw_root, row_consumption")
db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
db = db.Order("number ASC")

@@ -162,6 +168,11 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6
}

wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)

if err := json.Unmarshal([]byte(v.RowConsumption), &wrappedBlock.RowConsumption); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}

wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
}

@@ -184,6 +195,12 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}

rc, err := json.Marshal(block.RowConsumption)
if err != nil {
log.Error("failed to marshal RowConsumption", "hash", block.Header.Hash().String(), "err", err)
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}

l2Block := L2Block{
Number: block.Header.Number.Uint64(),
Hash: block.Header.Hash().String(),
@@ -194,6 +211,7 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
TxNum: uint32(len(block.Transactions)),
GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time,
RowConsumption: string(rc),
Header: string(header),
}
l2Blocks = append(l2Blocks, l2Block)
@@ -218,6 +218,7 @@ func TestBatchOrm(t *testing.T) {

updatedBatch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus))
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus))
@@ -227,6 +228,7 @@ func TestBatchOrm(t *testing.T) {
assert.NoError(t, err)
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
assert.Equal(t, types.RollupCommitted, types.RollupStatus(updatedBatch.RollupStatus))

@@ -235,6 +237,7 @@ func TestBatchOrm(t *testing.T) {

updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash)
assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus))
}
bridge/testdata/blockTrace_02.json (vendored, 6 changed lines)

@@ -25,6 +25,12 @@
"baseFeePerGas": "0x1de9",
"hash": "0xc7b6c7022c8386cdaf6fcd3d4f8d03dce257ae3664a072fdce511ecefce73ad0"
},
"row_consumption": [
{ "name": "dummy", "row_number": 1 }
],
"transactions": [
{
"type": 0,
bridge/testdata/blockTrace_03.json (vendored, 6 changed lines)

@@ -25,6 +25,12 @@
"baseFeePerGas": "0x1a2c",
"hash": "0x13ddd94de9c585c50c6885d4ef649292c2624ae7c8fc73781ee8785f2564b44c"
},
"row_consumption": [
{ "name": "dummy", "row_number": 1 }
],
"transactions": [
{
"type": 2,
@@ -80,8 +80,9 @@ func testImportL2GasPrice(t *testing.T) {
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
Transactions: nil,
WithdrawRoot: common.Hash{},
Transactions: nil,
WithdrawRoot: common.Hash{},
RowConsumption: &gethTypes.RowConsumption{},
},
},
}
@@ -95,6 +96,7 @@ func testImportL2GasPrice(t *testing.T) {
// check db status
batch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.Empty(t, batch.OracleTxHash)
assert.Equal(t, types.GasOracleStatus(batch.OracleStatus), types.GasOraclePending)

@@ -102,6 +104,7 @@ func testImportL2GasPrice(t *testing.T) {
l2Relayer.ProcessGasPriceOracle()
batch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.NotEmpty(t, batch.OracleTxHash)
assert.Equal(t, types.GasOracleStatus(batch.OracleStatus), types.GasOracleImporting)
}
@@ -45,9 +45,10 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
BaseFee: big.NewInt(0),
}
wrappedBlocks = append(wrappedBlocks, &types.WrappedBlock{
Header: &header,
Transactions: nil,
WithdrawRoot: common.Hash{},
Header: &header,
Transactions: nil,
WithdrawRoot: common.Hash{},
RowConsumption: &gethTypes.RowConsumption{},
})
}

@@ -61,6 +62,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db)
cp.TryProposeChunk()
@@ -84,6 +86,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)
batchHash := batch.Hash
assert.NotEmpty(t, batch.CommitTxHash)
assert.Equal(t, types.RollupCommitting, types.RollupStatus(batch.RollupStatus))
@@ -122,6 +125,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {

batch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.NotEmpty(t, batch.FinalizeTxHash)

finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.FinalizeTxHash))
@@ -83,7 +83,7 @@ func (c *Cmd) Write(data []byte) (int, error) {
out := string(data)
if verbose || c.openLog {
fmt.Printf("%s:\n\t%v", c.name, out)
} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
} else if strings.Contains(strings.ToLower(out), "error") || strings.Contains(strings.ToLower(out), "warning") || strings.Contains(strings.ToLower(out), "info") {
fmt.Printf("%s:\n\t%v", c.name, out)
}
go c.checkFuncs.IterCb(func(_ string, value interface{}) {
@@ -1,4 +1,4 @@
FROM scrolltech/l2geth:scroll-v4.1.0
FROM scrolltech/l2geth:scroll-v4.3.18

RUN mkdir -p /l2geth/keystore
@@ -12,6 +12,8 @@
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0,
"archimedesBlock": 0,
"shanghaiBlock": 0,
"clique": {
"period": 3,
"epoch": 30000
@@ -19,6 +21,7 @@
"scroll": {
"useZktrie": true,
"maxTxPerBlock": 44,
"maxTxPayloadBytesPerBlock": 122880,
"feeVaultAddress": "0x5300000000000000000000000000000000000005",
"enableEIP2718": false,
"enableEIP1559": false
@@ -14,4 +14,5 @@ fi
exec geth --mine --datadir "." --unlock 0 --password "./password" --allow-insecure-unlock --gcmode archive --verbosity 3 \
--http --http.addr "0.0.0.0" --http.port 8545 --http.api "eth,scroll,net,web3,debug" \
--ws --ws.addr "0.0.0.0" --ws.port 8546 --ws.api "eth,scroll,net,web3,debug" \
--ccc \
--ipcpath ${IPC_PATH}
@@ -10,7 +10,7 @@ require (
github.com/mattn/go-isatty v0.0.19
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
gorm.io/driver/postgres v1.5.0
@@ -48,7 +48,7 @@ require (
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.2 // indirect
github.com/holiman/uint256 v1.2.3 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect
@@ -83,7 +83,7 @@ require (
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.5.3 // indirect
github.com/scroll-tech/zktrie v0.6.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
@@ -187,8 +187,8 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
@@ -374,10 +374,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY=
github.com/scroll-tech/zktrie v0.5.3 h1:jjzQchGU6XPL5s1C5bwwivSadefSRuYASE9OL7UKAdE=
github.com/scroll-tech/zktrie v0.5.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca h1:aQKsL9FiCPB6Eo3nuyxmw6aZpAhmFvJsRb7XGtXnOUc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/zktrie v0.6.0 h1:xLrMAO31Yo2BiPg1jtYKzcjpEFnXy8acbB7iIsyshPs=
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
common/libzkp/impl/Cargo.lock (generated, 73 changed lines)

@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#7bca679c45debd8a033f29888af1293a2dbf4feb"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"ark-std",
"env_logger 0.10.0",
@@ -432,7 +432,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#7bca679c45debd8a033f29888af1293a2dbf4feb"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"eth-types",
"ethers-core",
@@ -1045,7 +1045,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#7bca679c45debd8a033f29888af1293a2dbf4feb"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"ethers-core",
"ethers-signers",
@@ -1223,7 +1223,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#7bca679c45debd8a033f29888af1293a2dbf4feb"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"eth-types",
"geth-utils",
@@ -1436,7 +1436,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#7bca679c45debd8a033f29888af1293a2dbf4feb"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"digest 0.7.6",
"eth-types",
@@ -1476,7 +1476,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#7bca679c45debd8a033f29888af1293a2dbf4feb"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -1611,10 +1611,26 @@ dependencies = [
"serde_json",
]

[[package]]
name = "halo2-gate-generator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/halo2gategen.git#35b137de2f71c37dfbd236842b868013c46739d1"
dependencies = [
"halo2_proofs",
"lazy_static",
"num-bigint",
"rand",
"serde",
"serde_json",
"strum",
"strum_macros",
"subtle",
]

[[package]]
name = "halo2-mpt-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=v0.4#d8f957ffaf2f8f8f7a824d89e4ad4193307644bf"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=v0.5#1c11b6c9b1245073a76c3ce7100b6798060f7cb8"
dependencies = [
"ethers-core",
"halo2_proofs",
@@ -2058,7 +2074,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#7bca679c45debd8a033f29888af1293a2dbf4feb"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"env_logger 0.9.3",
"eth-types",
@@ -2245,7 +2261,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#7bca679c45debd8a033f29888af1293a2dbf4feb"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"eth-types",
"ethers-core",
@@ -2260,7 +2276,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#7bca679c45debd8a033f29888af1293a2dbf4feb"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"bus-mapping",
"eth-types",
@@ -2618,11 +2634,14 @@ dependencies = [
[[package]]
name = "poseidon-circuit"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=scroll-dev-0619#50015b7cfbd5fe3263ec683cb291396c1b99cd1f"
source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=scroll-dev-0723#1652d54bf7ca9d8f286b53fe077d9efefdcf6d5f"
dependencies = [
"bitvec 1.0.1",
"halo2_proofs",
"lazy_static",
"log",
"rand",
"rand_xorshift",
"thiserror",
]

@@ -2733,7 +2752,7 @@ dependencies = [
[[package]]
name = "prover"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?rev=b877e67#b877e675e2d8939d03ff8d1f31e567c4f03e6f8d"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.3#337089ac40bac756d88b9ae30a3be1f82538b216"
dependencies = [
"aggregator",
"anyhow",
@@ -3134,6 +3153,23 @@ dependencies = [
"syn 1.0.109",
]

[[package]]
name = "rmd160-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/misc-precompiled-circuit.git?branch=integration#31c41ca4365dcf2b6ed4f2cdcd3dc8d2e8f080df"
dependencies = [
"halo2-gate-generator",
"halo2_proofs",
"lazy_static",
"num-bigint",
"rand",
"serde",
"serde_json",
"strum",
"strum_macros",
"subtle",
]

[[package]]
name = "ruint"
version = "1.9.0"
@@ -3585,7 +3621,7 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
[[package]]
name = "snark-verifier"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#6eeba7783d337e0964d0d89de3d4ee8a7856fde6"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#12c306ec57849921e690221b10b8a08189868d4a"
dependencies = [
"bytes",
"ethereum-types 0.14.1",
@@ -3609,7 +3645,7 @@ dependencies = [
[[package]]
name = "snark-verifier-sdk"
version = "0.0.1"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#6eeba7783d337e0964d0d89de3d4ee8a7856fde6"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#12c306ec57849921e690221b10b8a08189868d4a"
dependencies = [
"bincode",
"env_logger 0.10.0",
@@ -4001,7 +4037,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?rev=b877e67#b877e675e2d8939d03ff8d1f31e567c4f03e6f8d"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.3#337089ac40bac756d88b9ae30a3be1f82538b216"
dependencies = [
"base64 0.13.1",
"blake2",
@@ -4446,7 +4482,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#7bca679c45debd8a033f29888af1293a2dbf4feb"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"array-init",
"bus-mapping",
@@ -4475,6 +4511,7 @@ dependencies = [
"rand_chacha",
"rand_xorshift",
"rayon",
"rmd160-circuits",
"serde",
"serde_json",
"sha3 0.10.8",
@@ -4503,8 +4540,8 @@ dependencies = [

[[package]]
name = "zktrie"
version = "0.1.2"
source = "git+https://github.com/scroll-tech/zktrie.git?branch=scroll-dev-0226#1a5562f663a81ff903383db69dc6c9404b63e69d"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/zktrie.git?branch=v0.6#83318659773604fa565e2ebeb810a6d3746f0af4"
dependencies = [
"gobuild 0.1.0-alpha.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -18,8 +18,8 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }

[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", rev = "b877e67" }
types = { git = "https://github.com/scroll-tech/scroll-prover", rev = "b877e67" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.3" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.3" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }

log = "0.4"
@@ -33,8 +33,6 @@ once_cell = "1.8.0"

[profile.test]
opt-level = 3
# debug-assertions = true

[profile.release]
opt-level = 3
# debug-assertions = true
@@ -3,7 +3,7 @@ use libc::c_char;
use prover::{
aggregator::{Prover, Verifier},
utils::{chunk_trace_to_witness_block, init_env_and_log},
ChunkHash, ChunkProof, Proof,
BatchProof, ChunkHash, ChunkProof,
};
use std::{cell::OnceCell, panic, ptr::null};
use types::eth::BlockTrace;
@@ -69,9 +69,9 @@ pub unsafe extern "C" fn gen_batch_proof(
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(proof: *const c_char) -> c_char {
let proof = c_char_to_vec(proof);
let proof = serde_json::from_slice::<Proof>(proof.as_slice()).unwrap();
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();

let verified = panic::catch_unwind(|| VERIFIER.get().unwrap().verify_agg_evm_proof(&proof));
let verified = panic::catch_unwind(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
verified.unwrap_or(false) as c_char
}
common/testdata/blockTrace_02.json (vendored, 46 changed lines)

@@ -25,6 +25,52 @@
"baseFeePerGas": "0x1de9",
"hash": "0xc7b6c7022c8386cdaf6fcd3d4f8d03dce257ae3664a072fdce511ecefce73ad0"
},
"row_consumption": [
{ "name": "evm", "row_number": 1 },
{ "name": "state", "row_number": 2 },
{ "name": "bytecode", "row_number": 3 },
{ "name": "copy", "row_number": 4 },
{ "name": "keccak", "row_number": 5 },
{ "name": "tx", "row_number": 6 },
{ "name": "rlp", "row_number": 7 },
{ "name": "exp", "row_number": 8 },
{ "name": "pi", "row_number": 9 },
{ "name": "poseidon", "row_number": 10 },
{ "name": "mpt", "row_number": 11 }
],
"transactions": [
{
"type": 0,
common/testdata/blockTrace_03.json (vendored, 2 changed lines)

@@ -25,6 +25,8 @@
"baseFeePerGas": "0x1a2c",
"hash": "0x13ddd94de9c585c50c6885d4ef649292c2624ae7c8fc73781ee8785f2564b44c"
},
"row_consumption": [
],
"transactions": [
{
"type": 2,
common/testdata/blockTrace_04.json (vendored, 2 changed lines)

@@ -26,6 +26,8 @@
"baseFeePerGas": null,
"hash": "0x09f75bc27efe18cd77a82491370442ea5a6066e910b73dc99fe1caff950c357b"
},
"row_consumption": [
],
"transactions": [
{
"type": 126,
common/testdata/blockTrace_05.json (vendored, 2 changed lines)

@@ -28,6 +28,8 @@
"baseFeePerGas": null,
"hash": "0x003fee335455c0c293dda17ea9365fe0caa94071ed7216baf61f7aeb808e8a28"
},
"row_consumption": [
],
"transactions": [
{
"type": 126,
common/testdata/blockTrace_06.json (vendored, 2 changed lines)

@@ -28,6 +28,8 @@
"baseFeePerGas": null,
"hash": "0x003fee335455c0c293dda17ea9365fe0caa94071ed7216baf61f7aeb808e8a28"
},
"row_consumption": [
],
"transactions": [
{
"type": 126,
common/testdata/blockTrace_07.json (vendored, 2 changed lines)

@@ -28,6 +28,8 @@
"baseFeePerGas": null,
"hash": "0x003fee335455c0c293dda17ea9365fe0caa94071ed7216baf61f7aeb808e8a28"
},
"row_consumption": [
],
"transactions": [
{
"type": 126,
2
common/testdata/blockTrace_delegate.json
vendored
@@ -24,6 +24,8 @@
|
||||
"baseFeePerGas": "0x1a2c",
|
||||
"hash": "0x13ddd94de9c585c50c6885d4ef649292c2624ae7c8fc73781ee8785f2564b44c"
|
||||
},
|
||||
"row_consumption": [
|
||||
],
|
||||
"transactions": [
|
||||
{
|
||||
"type": 2,
|
||||
|
||||
@@ -14,8 +14,9 @@ import (
type WrappedBlock struct {
    Header *types.Header `json:"header"`
    // Transactions is only used to recover types.Transactions; the `from` field of types.TransactionData is missing.
    Transactions []*types.TransactionData `json:"transactions"`
    WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
    Transactions []*types.TransactionData `json:"transactions"`
    WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
    RowConsumption *types.RowConsumption `json:"row_consumption"`
}
// NumL1Messages returns the number of L1 messages in this block.
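The `row_consumption` field added above pairs each sub-circuit name with the number of rows it consumed, as in the blockTrace_02.json entries earlier in this diff. A minimal decoding sketch follows; the Go field names are assumptions derived from the JSON keys, since the canonical types live in the scroll-tech/go-ethereum `types` package referenced by the struct.

package main

import (
    "encoding/json"
    "fmt"
)

// SubCircuitRowUsage mirrors one entry of "row_consumption",
// e.g. {"name": "evm", "row_number": 1}. Hypothetical field names.
type SubCircuitRowUsage struct {
    Name      string `json:"name"`
    RowNumber uint64 `json:"row_number"`
}

// RowConsumption is the per-block list of sub-circuit row usages.
type RowConsumption []SubCircuitRowUsage

func main() {
    raw := []byte(`[{"name":"evm","row_number":1},{"name":"state","row_number":2}]`)
    var rc RowConsumption
    if err := json.Unmarshal(raw, &rc); err != nil {
        panic(err)
    }
    fmt.Println(rc) // prints: [{evm 1} {state 2}]
}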
20
common/types/errno.go
Normal file
@@ -0,0 +1,20 @@
package types

const (
    // Success shows OK.
    Success = 0

    // ErrJWTCommonErr jwt common error
    ErrJWTCommonErr = 50000
    // ErrJWTTokenExpired jwt token expired
    ErrJWTTokenExpired = 50001

    // ErrCoordinatorParameterInvalidNo is invalid params
    ErrCoordinatorParameterInvalidNo = 20001
    // ErrCoordinatorGetTaskFailure is getting prover task error
    ErrCoordinatorGetTaskFailure = 20002
    // ErrCoordinatorHandleZkProofFailure is handle submit proof error
    ErrCoordinatorHandleZkProofFailure = 20003
    // ErrCoordinatorEmptyProofData get empty proof data
    ErrCoordinatorEmptyProofData = 20004
)
@@ -33,7 +33,7 @@ func (r ProofType) String() string {
    case ProofTypeBatch:
        return "proof type batch"
    default:
        return "illegal proof type"
        return fmt.Sprintf("illegal proof type: %d", r)
    }
}
@@ -58,15 +58,12 @@ type AuthMsg struct {
|
||||
|
||||
// Identity contains all the fields to be signed by the prover.
|
||||
type Identity struct {
|
||||
// Prover name
|
||||
Name string `json:"name"`
|
||||
// Prover ProverType
|
||||
ProverType ProofType `json:"prover_type,omitempty"`
|
||||
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version.
|
||||
// curl -sL https://api.github.com/repos/scroll-tech/scroll-prover/commits | jq -r ".[0].sha"
|
||||
Version string `json:"version"`
|
||||
// Random unique token generated by manager
|
||||
Token string `json:"token"`
|
||||
// ProverName the prover name
|
||||
ProverName string `json:"prover_name"`
|
||||
// ProverVersion the prover version
|
||||
ProverVersion string `json:"prover_version"`
|
||||
// Challenge unique challenge generated by manager
|
||||
Challenge string `json:"challenge"`
|
||||
}
|
||||
|
||||
// GenerateToken generates token
|
||||
|
||||
@@ -15,9 +15,9 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
|
||||
|
||||
authMsg := &AuthMsg{
|
||||
Identity: &Identity{
|
||||
Name: "testName",
|
||||
Version: "testVersion",
|
||||
Token: "testToken",
|
||||
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzgxNzUsIm9yaWdfaWF0IjoxNjkxMDM0NTc1fQ.HybBMsEJFhyZqtIa2iVcHUP7CEFttf708jmTMAImAWA",
|
||||
ProverName: "test",
|
||||
ProverVersion: "v1.0.0",
|
||||
},
|
||||
}
|
||||
assert.NoError(t, authMsg.SignWithKey(privkey))
|
||||
@@ -46,15 +46,15 @@ func TestGenerateToken(t *testing.T) {
|
||||
|
||||
func TestIdentityHash(t *testing.T) {
|
||||
identity := &Identity{
|
||||
Name: "testName",
|
||||
ProverType: ProofTypeChunk,
|
||||
Version: "testVersion",
|
||||
Token: "testToken",
|
||||
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzM0MTksIm9yaWdfaWF0IjoxNjkxMDI5ODE5fQ.EhkLZsj__rNPVC3ZDYBtvdh0nB8mmM_Hl82hObaIWOs",
|
||||
ProverName: "test",
|
||||
ProverVersion: "v1.0.0",
|
||||
}
|
||||
|
||||
hash, err := identity.Hash()
|
||||
assert.NoError(t, err)
|
||||
|
||||
expectedHash := "c0411a19531fb8c6133b2bae91f361c14e65f2d318aef72b83519e6061cad001"
|
||||
expectedHash := "83f5e0ad023e9c1de639ab07b9b4cb972ec9dbbd2524794c533a420a5b137721"
|
||||
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
|
||||
}
|
||||
|
||||
@@ -118,7 +118,7 @@ func TestProveTypeString(t *testing.T) {
|
||||
assert.Equal(t, "proof type batch", proofTypeBatch.String())
|
||||
|
||||
illegalProof := ProofType(3)
|
||||
assert.Equal(t, "illegal proof type", illegalProof.String())
|
||||
assert.Equal(t, "illegal proof type: 3", illegalProof.String())
|
||||
}
|
||||
|
||||
func TestProofMsgPublicKey(t *testing.T) {
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var tag = "v4.0.36"
|
||||
var tag = "v4.1.6"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
|
||||
77
contracts/integration-test/ZkEvmVerifierV1.spec.ts
Normal file
@@ -0,0 +1,77 @@
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import { expect } from "chai";
|
||||
import { hexlify } from "ethers/lib/utils";
|
||||
import { ethers } from "hardhat";
|
||||
import { ZkEvmVerifierV1 } from "../typechain";
|
||||
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
|
||||
import fs from "fs";
|
||||
|
||||
describe("ZkEvmVerifierV1", async () => {
|
||||
let deployer: SignerWithAddress;
|
||||
|
||||
let zkEvmVerifier: ZkEvmVerifierV1;
|
||||
|
||||
beforeEach(async () => {
|
||||
[deployer] = await ethers.getSigners();
|
||||
|
||||
const bytecode = hexlify(fs.readFileSync("./src/libraries/verifier/plonk-verifier/plonk_verifier_0.5.1.bin"));
|
||||
const tx = await deployer.sendTransaction({ data: bytecode });
|
||||
const receipt = await tx.wait();
|
||||
|
||||
const ZkEvmVerifierV1 = await ethers.getContractFactory("ZkEvmVerifierV1", deployer);
|
||||
zkEvmVerifier = await ZkEvmVerifierV1.deploy(receipt.contractAddress);
|
||||
await zkEvmVerifier.deployed();
|
||||
});
|
||||
|
||||
it("should succeed", async () => {
|
||||
const proof = hexlify(fs.readFileSync("./integration-test/testdata/plonk_verifier_0.5.1_proof.data"));
|
||||
const instances = fs.readFileSync("./integration-test/testdata/plonk_verifier_0.5.1_pi.data");
|
||||
|
||||
const publicInputHash = new Uint8Array(32);
|
||||
for (let i = 0; i < 32; i++) {
|
||||
publicInputHash[i] = instances[i * 32 + 31];
|
||||
}
|
||||
|
||||
// chunk1: https://github.com/scroll-tech/test-traces/blob/674ad743beab04b57da369fa5958fb6824155bfe/erc20/1_transfer.json
|
||||
// 0000000000000005 blockNumber
|
||||
// 0000000064c3ca7c timestamp
|
||||
// 0000000000000000000000000000000000000000000000000000000000000000 baseFee
|
||||
// 00000000007a1200 gasLimit
|
||||
// 0001 numTransactions
|
||||
// 8da3fedb103b6da8ccc2514094336d1a76df166238f4d8e8558fbe54cce2516a tx hash 0
|
||||
// chunk2: https://github.com/scroll-tech/test-traces/blob/674ad743beab04b57da369fa5958fb6824155bfe/erc20/10_transfer.json
|
||||
// 0000000000000006 blockNumber
|
||||
// 0000000064c3ca7f timestamp
|
||||
// 0000000000000000000000000000000000000000000000000000000000000000 baseFee
|
||||
// 00000000007a1200 gasLimit
|
||||
// 000a numTransactions
|
||||
// 419164c1a7213e4e52f8578463c47a01549f69a7ff220d93221ce02909f5b919 tx hash 0
|
||||
// 6c1b03d1a9b5156e189ad2e7ba73ba71d9a83b24f9830f38dd7a597fe1e67167 tx hash 1
|
||||
// 94f981938d02b2c1d91ff370b3ed759dadc617c7347cd4b8552b275edbffd767 tx hash 2
|
||||
// bfe98147fc808a916bdff90e838e77609fd59634787443f6fc58f9a371790d09 tx hash 3
|
||||
// beb9dd0259e7c4f0a8d5ac3ba6aa3940c3e53947395f64e8ee88c7067c6d210e tx hash 4
|
||||
// 208c6c767356552ad8085fa77a99d9154e0c8cf8777e329cb76bcbc969d21fca tx hash 5
|
||||
// 37c8969833fbc6cbb88a63ccef324d7b42d0607ac0094f14e1f6d4e50f84d87f tx hash 6
|
||||
// 088c5ad45a990694ac783207fe6bda9bf97da40e1f3eb468c73941d51b99932c tx hash 7
|
||||
// c3d8ddbdfc67877a253255b9357aabfd062ce80d39eba67547f964c288660065 tx hash 8
|
||||
// ff26ca52c02b97b1a6677263d5d6dec0321fb7b49be44ae0a66ba5482b1180b4 tx hash 9
|
||||
// => chunk 0 data hash: 9390886a7d22aa43aae87e62a350c904fabc5db4487d9b25bdca446ba7ed15a1
|
||||
// => chunk 1 data hash: a8846bf9bc53f30a391ae452b5fd456cb86a99ab7bd2e1e47898ffbe3509e8eb
|
||||
// => batch data hash: ee64d77c2f2e0b2c4ac952a0f54fdba4a217c42eb26a07b28de9fbc7b009acae
|
||||
// 000000000000cf55 layer2ChainId
|
||||
// 02040e949809e8d2e56d35b4dfb876e08ee7b4608d22f23f52052425857c31ba prevStateRoot
|
||||
// 1532cdb7732da0a4ca3044914c6959b7e2b7ba4e913a9f5f0b55051e467412d9 postStateRoot
|
||||
// 0000000000000000000000000000000000000000000000000000000000000000 withdrawRoot
|
||||
// ee64d77c2f2e0b2c4ac952a0f54fdba4a217c42eb26a07b28de9fbc7b009acae batchDataHash
|
||||
// public input hash: 9ea439164727042e029464a40901e52800095c1ade301b63b4b7453880f5723e
|
||||
expect(hexlify(publicInputHash)).to.eq("0x9ea439164727042e029464a40901e52800095c1ade301b63b4b7453880f5723e");
|
||||
|
||||
// verify ok
|
||||
await zkEvmVerifier.verify(proof, publicInputHash);
|
||||
console.log("Gas Usage:", (await zkEvmVerifier.estimateGas.verify(proof, publicInputHash)).toString());
|
||||
|
||||
// verify failed
|
||||
await expect(zkEvmVerifier.verify(proof, publicInputHash.reverse())).to.reverted;
|
||||
});
|
||||
});
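The comment block in this test spells out the batch public input: the layer-2 chain id, the previous and post state roots, the withdraw root, and the batch data hash, followed by the resulting public input hash. A small sketch of that derivation, assuming the hash is keccak256 over exactly the concatenation listed (an inference from the comments, not verified against the circuit here):

package main

import (
    "encoding/binary"
    "encoding/hex"
    "fmt"

    "golang.org/x/crypto/sha3"
)

// batchPublicInputHash sketches the concatenation listed in the comment block
// above: layer2ChainId (8 bytes) || prevStateRoot || postStateRoot ||
// withdrawRoot || batchDataHash, hashed with keccak256. The exact field
// layout is an assumption inferred from those comments.
func batchPublicInputHash(chainID uint64, prevStateRoot, postStateRoot, withdrawRoot, batchDataHash [32]byte) [32]byte {
    buf := make([]byte, 0, 8+4*32)
    buf = binary.BigEndian.AppendUint64(buf, chainID)
    buf = append(buf, prevStateRoot[:]...)
    buf = append(buf, postStateRoot[:]...)
    buf = append(buf, withdrawRoot[:]...)
    buf = append(buf, batchDataHash[:]...)

    h := sha3.NewLegacyKeccak256()
    h.Write(buf)
    var out [32]byte
    copy(out[:], h.Sum(nil))
    return out
}

func mustHash(s string) (h [32]byte) {
    b, err := hex.DecodeString(s)
    if err != nil || len(b) != 32 {
        panic("bad hex")
    }
    copy(h[:], b)
    return
}

func main() {
    pi := batchPublicInputHash(
        0xcf55, // 000000000000cf55 layer2ChainId
        mustHash("02040e949809e8d2e56d35b4dfb876e08ee7b4608d22f23f52052425857c31ba"),
        mustHash("1532cdb7732da0a4ca3044914c6959b7e2b7ba4e913a9f5f0b55051e467412d9"),
        mustHash("0000000000000000000000000000000000000000000000000000000000000000"),
        mustHash("ee64d77c2f2e0b2c4ac952a0f54fdba4a217c42eb26a07b28de9fbc7b009acae"),
    )
    fmt.Println(hex.EncodeToString(pi[:])) // expected, per the spec comments, to match the asserted value
}

If the inference is right, running this with the values quoted above reproduces the public input hash the test asserts.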
|
||||
BIN
contracts/integration-test/testdata/plonk_verifier_0.5.1_pi.data
vendored
Normal file
Binary file not shown.
BIN
contracts/integration-test/testdata/plonk_verifier_0.5.1_proof.data
vendored
Normal file
Binary file not shown.
@@ -17,10 +17,12 @@ import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
|
||||
import {L1ScrollMessenger} from "../../src/L1/L1ScrollMessenger.sol";
|
||||
import {L1StandardERC20Gateway} from "../../src/L1/gateways/L1StandardERC20Gateway.sol";
|
||||
import {L1WETHGateway} from "../../src/L1/gateways/L1WETHGateway.sol";
|
||||
import {L1DAIGateway} from "../../src/L1/gateways/L1DAIGateway.sol";
|
||||
import {L2GasPriceOracle} from "../../src/L1/rollup/L2GasPriceOracle.sol";
|
||||
import {MultipleVersionRollupVerifier} from "../../src/L1/rollup/MultipleVersionRollupVerifier.sol";
|
||||
import {ScrollChain} from "../../src/L1/rollup/ScrollChain.sol";
|
||||
import {Whitelist} from "../../src/L2/predeploys/Whitelist.sol";
|
||||
import {ZkEvmVerifierV1} from "../../src/libraries/verifier/ZkEvmVerifierV1.sol";
|
||||
|
||||
contract DeployL1BridgeContracts is Script {
|
||||
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
|
||||
@@ -29,13 +31,16 @@ contract DeployL1BridgeContracts is Script {
|
||||
|
||||
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
|
||||
address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
|
||||
address L1_ZKEVM_VERIFIER_ADDR = vm.envAddress("L1_ZKEVM_VERIFIER_ADDR");
|
||||
|
||||
address L1_PLONK_VERIFIER_ADDR = vm.envAddress("L1_PLONK_VERIFIER_ADDR");
|
||||
|
||||
ZkEvmVerifierV1 zkEvmVerifierV1;
|
||||
ProxyAdmin proxyAdmin;
|
||||
|
||||
function run() external {
|
||||
vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);
|
||||
|
||||
deployZkEvmVerifierV1();
|
||||
deployMultipleVersionRollupVerifier();
|
||||
deployProxyAdmin();
|
||||
deployL1Whitelist();
|
||||
@@ -51,12 +56,19 @@ contract DeployL1BridgeContracts is Script {
|
||||
deployL1CustomERC20Gateway();
|
||||
deployL1ERC721Gateway();
|
||||
deployL1ERC1155Gateway();
|
||||
deployL1DAIGateway();
|
||||
|
||||
vm.stopBroadcast();
|
||||
}
|
||||
|
||||
function deployZkEvmVerifierV1() internal {
|
||||
zkEvmVerifierV1 = new ZkEvmVerifierV1(L1_PLONK_VERIFIER_ADDR);
|
||||
|
||||
logAddress("L1_ZKEVM_VERIFIER_V1_ADDR", address(zkEvmVerifierV1));
|
||||
}
|
||||
|
||||
function deployMultipleVersionRollupVerifier() internal {
|
||||
MultipleVersionRollupVerifier rollupVerifier = new MultipleVersionRollupVerifier(L1_ZKEVM_VERIFIER_ADDR);
|
||||
MultipleVersionRollupVerifier rollupVerifier = new MultipleVersionRollupVerifier(address(zkEvmVerifierV1));
|
||||
|
||||
logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));
|
||||
}
|
||||
@@ -192,6 +204,18 @@ contract DeployL1BridgeContracts is Script {
|
||||
logAddress("L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR", address(proxy));
|
||||
}
|
||||
|
||||
function deployL1DAIGateway() internal {
|
||||
L1DAIGateway impl = new L1DAIGateway();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(
|
||||
address(impl),
|
||||
address(proxyAdmin),
|
||||
new bytes(0)
|
||||
);
|
||||
|
||||
logAddress("L1_DAI_GATEWAY_IMPLEMENTATION_ADDR", address(impl));
|
||||
logAddress("L1_DAI_GATEWAY_PROXY_ADDR", address(proxy));
|
||||
}
|
||||
|
||||
function deployL1ERC721Gateway() internal {
|
||||
L1ERC721Gateway impl = new L1ERC721Gateway();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(
|
||||
|
||||
@@ -15,6 +15,7 @@ import {L2GatewayRouter} from "../../src/L2/gateways/L2GatewayRouter.sol";
|
||||
import {L2ScrollMessenger} from "../../src/L2/L2ScrollMessenger.sol";
|
||||
import {L2StandardERC20Gateway} from "../../src/L2/gateways/L2StandardERC20Gateway.sol";
|
||||
import {L2WETHGateway} from "../../src/L2/gateways/L2WETHGateway.sol";
|
||||
import {L2DAIGateway} from "../../src/L2/gateways/L2DAIGateway.sol";
|
||||
import {L1GasPriceOracle} from "../../src/L2/predeploys/L1GasPriceOracle.sol";
|
||||
import {L2MessageQueue} from "../../src/L2/predeploys/L2MessageQueue.sol";
|
||||
import {L2TxFeeVault} from "../../src/L2/predeploys/L2TxFeeVault.sol";
|
||||
@@ -59,6 +60,7 @@ contract DeployL2BridgeContracts is Script {
|
||||
deployL2CustomERC20Gateway();
|
||||
deployL2ERC721Gateway();
|
||||
deployL2ERC1155Gateway();
|
||||
deployL2DAIGateway();
|
||||
|
||||
vm.stopBroadcast();
|
||||
}
|
||||
@@ -199,6 +201,18 @@ contract DeployL2BridgeContracts is Script {
|
||||
logAddress("L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR", address(proxy));
|
||||
}
|
||||
|
||||
function deployL2DAIGateway() internal {
|
||||
L2DAIGateway impl = new L2DAIGateway();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(
|
||||
address(impl),
|
||||
address(proxyAdmin),
|
||||
new bytes(0)
|
||||
);
|
||||
|
||||
logAddress("L2_DAI_GATEWAY_IMPLEMENTATION_ADDR", address(impl));
|
||||
logAddress("L2_DAI_GATEWAY_PROXY_ADDR", address(proxy));
|
||||
}
|
||||
|
||||
function deployL2ERC721Gateway() internal {
|
||||
L2ERC721Gateway impl = new L2ERC721Gateway();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(
|
||||
|
||||
@@ -11,6 +11,7 @@ import {L1GatewayRouter} from "../../src/L1/gateways/L1GatewayRouter.sol";
|
||||
import {L1ScrollMessenger} from "../../src/L1/L1ScrollMessenger.sol";
|
||||
import {L1StandardERC20Gateway} from "../../src/L1/gateways/L1StandardERC20Gateway.sol";
|
||||
import {L1WETHGateway} from "../../src/L1/gateways/L1WETHGateway.sol";
|
||||
import {L1DAIGateway} from "../../src/L1/gateways/L1DAIGateway.sol";
|
||||
import {ScrollChain} from "../../src/L1/rollup/ScrollChain.sol";
|
||||
import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
|
||||
import {L2GasPriceOracle} from "../../src/L1/rollup/L2GasPriceOracle.sol";
|
||||
@@ -25,6 +26,9 @@ contract InitializeL1BridgeContracts is Script {
|
||||
address L1_FEE_VAULT_ADDR = vm.envAddress("L1_FEE_VAULT_ADDR");
|
||||
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
|
||||
|
||||
address L1_DAI_ADDR = vm.envAddress("L1_DAI_ADDR");
|
||||
address L2_DAI_ADDR = vm.envAddress("L2_DAI_ADDR");
|
||||
|
||||
address L1_WHITELIST_ADDR = vm.envAddress("L1_WHITELIST_ADDR");
|
||||
address L1_SCROLL_CHAIN_PROXY_ADDR = vm.envAddress("L1_SCROLL_CHAIN_PROXY_ADDR");
|
||||
address L1_MESSAGE_QUEUE_PROXY_ADDR = vm.envAddress("L1_MESSAGE_QUEUE_PROXY_ADDR");
|
||||
@@ -37,6 +41,7 @@ contract InitializeL1BridgeContracts is Script {
|
||||
address L1_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ETH_GATEWAY_PROXY_ADDR");
|
||||
address L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
|
||||
address L1_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_WETH_GATEWAY_PROXY_ADDR");
|
||||
address L1_DAI_GATEWAY_PROXY_ADDR = vm.envAddress("L1_DAI_GATEWAY_PROXY_ADDR");
|
||||
address L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR = vm.envAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR");
|
||||
address L1_ENFORCED_TX_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ENFORCED_TX_GATEWAY_PROXY_ADDR");
|
||||
|
||||
@@ -48,6 +53,7 @@ contract InitializeL1BridgeContracts is Script {
|
||||
address L2_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ETH_GATEWAY_PROXY_ADDR");
|
||||
address L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
|
||||
address L2_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L2_WETH_GATEWAY_PROXY_ADDR");
|
||||
address L2_DAI_GATEWAY_PROXY_ADDR = vm.envAddress("L2_DAI_GATEWAY_PROXY_ADDR");
|
||||
address L2_SCROLL_STANDARD_ERC20_ADDR = vm.envAddress("L2_SCROLL_STANDARD_ERC20_ADDR");
|
||||
address L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR = vm.envAddress("L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR");
|
||||
|
||||
@@ -143,12 +149,22 @@ contract InitializeL1BridgeContracts is Script {
|
||||
L1_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// set WETH gateway in router
|
||||
// initialize L1DAIGateway
|
||||
L1DAIGateway(L1_DAI_GATEWAY_PROXY_ADDR).initialize(
|
||||
L2_DAI_GATEWAY_PROXY_ADDR,
|
||||
L1_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L1_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
L1DAIGateway(L1_DAI_GATEWAY_PROXY_ADDR).updateTokenMapping(L1_DAI_ADDR, L2_DAI_ADDR);
|
||||
|
||||
// set WETH and DAI gateways in router
|
||||
{
|
||||
address[] memory _tokens = new address[](1);
|
||||
address[] memory _tokens = new address[](2);
|
||||
_tokens[0] = L1_WETH_ADDR;
|
||||
address[] memory _gateways = new address[](1);
|
||||
_tokens[1] = L1_DAI_ADDR;
|
||||
address[] memory _gateways = new address[](2);
|
||||
_gateways[0] = L1_WETH_GATEWAY_PROXY_ADDR;
|
||||
_gateways[1] = L1_DAI_GATEWAY_PROXY_ADDR;
|
||||
L1GatewayRouter(L1_GATEWAY_ROUTER_PROXY_ADDR).setERC20Gateway(_tokens, _gateways);
|
||||
}
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import {L2ETHGateway} from "../../src/L2/gateways/L2ETHGateway.sol";
|
||||
import {L2GatewayRouter} from "../../src/L2/gateways/L2GatewayRouter.sol";
|
||||
import {L2StandardERC20Gateway} from "../../src/L2/gateways/L2StandardERC20Gateway.sol";
|
||||
import {L2WETHGateway} from "../../src/L2/gateways/L2WETHGateway.sol";
|
||||
import {L2DAIGateway} from "../../src/L2/gateways/L2DAIGateway.sol";
|
||||
import {L2MessageQueue} from "../../src/L2/predeploys/L2MessageQueue.sol";
|
||||
import {L2TxFeeVault} from "../../src/L2/predeploys/L2TxFeeVault.sol";
|
||||
import {L1GasPriceOracle} from "../../src/L2/predeploys/L1GasPriceOracle.sol";
|
||||
@@ -22,6 +23,9 @@ contract InitializeL2BridgeContracts is Script {
|
||||
|
||||
address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
|
||||
|
||||
address L1_DAI_ADDR = vm.envAddress("L1_DAI_ADDR");
|
||||
address L2_DAI_ADDR = vm.envAddress("L2_DAI_ADDR");
|
||||
|
||||
address L1_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L1_SCROLL_MESSENGER_PROXY_ADDR");
|
||||
address L1_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L1_GATEWAY_ROUTER_PROXY_ADDR");
|
||||
address L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR");
|
||||
@@ -30,6 +34,7 @@ contract InitializeL2BridgeContracts is Script {
|
||||
address L1_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ETH_GATEWAY_PROXY_ADDR");
|
||||
address L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
|
||||
address L1_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_WETH_GATEWAY_PROXY_ADDR");
|
||||
address L1_DAI_GATEWAY_PROXY_ADDR = vm.envAddress("L1_DAI_GATEWAY_PROXY_ADDR");
|
||||
|
||||
address L2_TX_FEE_VAULT_ADDR = vm.envAddress("L2_TX_FEE_VAULT_ADDR");
|
||||
address L1_GAS_PRICE_ORACLE_ADDR = vm.envAddress("L1_GAS_PRICE_ORACLE_ADDR");
|
||||
@@ -44,6 +49,7 @@ contract InitializeL2BridgeContracts is Script {
|
||||
address L2_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ETH_GATEWAY_PROXY_ADDR");
|
||||
address L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
|
||||
address L2_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L2_WETH_GATEWAY_PROXY_ADDR");
|
||||
address L2_DAI_GATEWAY_PROXY_ADDR = vm.envAddress("L2_DAI_GATEWAY_PROXY_ADDR");
|
||||
address L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR = vm.envAddress("L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR");
|
||||
|
||||
function run() external {
|
||||
@@ -111,12 +117,22 @@ contract InitializeL2BridgeContracts is Script {
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// set WETH gateway in router
|
||||
// initialize L2DAIGateway
|
||||
L2DAIGateway(L2_DAI_GATEWAY_PROXY_ADDR).initialize(
|
||||
L1_DAI_GATEWAY_PROXY_ADDR,
|
||||
L2_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
L2DAIGateway(L2_DAI_GATEWAY_PROXY_ADDR).updateTokenMapping(L2_DAI_ADDR, L1_DAI_ADDR);
|
||||
|
||||
// set WETH and DAI gateways in router
|
||||
{
|
||||
address[] memory _tokens = new address[](1);
|
||||
address[] memory _tokens = new address[](2);
|
||||
_tokens[0] = L2_WETH_ADDR;
|
||||
address[] memory _gateways = new address[](1);
|
||||
_tokens[1] = L2_DAI_ADDR;
|
||||
address[] memory _gateways = new address[](2);
|
||||
_gateways[0] = L2_WETH_GATEWAY_PROXY_ADDR;
|
||||
_gateways[1] = L2_DAI_GATEWAY_PROXY_ADDR;
|
||||
L2GatewayRouter(L2_GATEWAY_ROUTER_PROXY_ADDR).setERC20Gateway(_tokens, _gateways);
|
||||
}
|
||||
|
||||
|
||||
11
contracts/src/L1/gateways/L1DAIGateway.sol
Normal file
@@ -0,0 +1,11 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {L1CustomERC20Gateway} from "./L1CustomERC20Gateway.sol";
|
||||
|
||||
// solhint-disable no-empty-blocks
|
||||
|
||||
contract L1DAIGateway is L1CustomERC20Gateway {
|
||||
|
||||
}
|
||||
11
contracts/src/L2/gateways/L2DAIGateway.sol
Normal file
@@ -0,0 +1,11 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {L2CustomERC20Gateway} from "./L2CustomERC20Gateway.sol";
|
||||
|
||||
// solhint-disable no-empty-blocks
|
||||
|
||||
contract L2DAIGateway is L2CustomERC20Gateway {
|
||||
|
||||
}
|
||||
65
contracts/src/libraries/verifier/ZkEvmVerifierV1.sol
Normal file
@@ -0,0 +1,65 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {IZkEvmVerifier} from "./IZkEvmVerifier.sol";
|
||||
|
||||
// solhint-disable no-inline-assembly
|
||||
|
||||
contract ZkEvmVerifierV1 is IZkEvmVerifier {
|
||||
/**********
|
||||
* Errors *
|
||||
**********/
|
||||
|
||||
/// @dev Thrown when aggregate zk proof verification fails.
|
||||
error VerificationFailed();
|
||||
|
||||
/*************
|
||||
* Constants *
|
||||
*************/
|
||||
|
||||
/// @notice The address of highly optimized plonk verifier contract.
|
||||
address public immutable plonkVerifier;
|
||||
|
||||
/***************
|
||||
* Constructor *
|
||||
***************/
|
||||
|
||||
constructor(address _verifier) {
|
||||
plonkVerifier = _verifier;
|
||||
}
|
||||
|
||||
/*************************
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @inheritdoc IZkEvmVerifier
|
||||
function verify(bytes calldata aggrProof, bytes32 publicInputHash) external view override {
|
||||
address _verifier = plonkVerifier;
|
||||
bool success;
|
||||
|
||||
// 1. the first 12 * 32 (0x180) bytes of `aggrProof` is `accumulator`
|
||||
// 2. the rest bytes of `aggrProof` is the actual `batch_aggregated_proof`
|
||||
// 3. each byte of the `public_input_hash` should be converted to a `uint256` and the
|
||||
// 1024 (0x400) resulting bytes should be inserted between `accumulator` and `batch_aggregated_proof`.
|
||||
assembly {
|
||||
let p := mload(0x40)
|
||||
calldatacopy(p, aggrProof.offset, 0x180)
|
||||
for {
|
||||
let i := 0
|
||||
} lt(i, 0x400) {
|
||||
i := add(i, 0x20)
|
||||
} {
|
||||
mstore(add(p, sub(0x560, i)), and(publicInputHash, 0xff))
|
||||
publicInputHash := shr(8, publicInputHash)
|
||||
}
|
||||
calldatacopy(add(p, 0x580), add(aggrProof.offset, 0x180), sub(aggrProof.length, 0x180))
|
||||
|
||||
success := staticcall(gas(), _verifier, p, add(aggrProof.length, 0x400), 0x00, 0x00)
|
||||
}
|
||||
|
||||
if (!success) {
|
||||
revert VerificationFailed();
|
||||
}
|
||||
}
|
||||
}
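The assembly in `verify` lays the verifier input out as the 0x180-byte accumulator copied from the head of `aggrProof`, then 32 words each holding one byte of `publicInputHash` (most significant byte first, each byte widened to a full 32-byte word), then the remaining proof bytes, for a total of `aggrProof.length + 0x400` bytes passed to the staticcall. A byte-level sketch of that layout, written in Go purely for illustration:

// buildVerifierInput mirrors the memory layout assembled in
// ZkEvmVerifierV1.verify before the staticcall to the plonk verifier:
//   [0x000, 0x180)  accumulator (first 0x180 bytes of aggrProof)
//   [0x180, 0x580)  32 words, one byte of publicInputHash per word (MSB first)
//   [0x580, ...)    remaining bytes of aggrProof
// This is an illustrative sketch, not code from the repository.
func buildVerifierInput(aggrProof []byte, publicInputHash [32]byte) []byte {
    out := make([]byte, 0, len(aggrProof)+0x400)
    out = append(out, aggrProof[:0x180]...)
    for _, b := range publicInputHash {
        var word [32]byte
        word[31] = b // each byte becomes the low byte of a 32-byte word
        out = append(out, word[:]...)
    }
    out = append(out, aggrProof[0x180:]...)
    return out
}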
Binary file not shown.
@@ -37,7 +37,7 @@ make lint

## Configure

The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManagerConfig` in [`config/config.go`](config/config.go) for more details.
The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManager` in [`config/config.go`](config/config.go) for more details.

## Start
@@ -1,51 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
)
|
||||
|
||||
// Client defines typed wrappers for the Ethereum RPC API.
|
||||
type Client struct {
|
||||
client *rpc.Client
|
||||
}
|
||||
|
||||
// Dial connects a client to the given URL.
|
||||
func Dial(rawurl string) (*Client, error) {
|
||||
return DialContext(context.Background(), rawurl)
|
||||
}
|
||||
|
||||
// DialContext connects a client to the given URL with a given context.
|
||||
func DialContext(ctx context.Context, rawurl string) (*Client, error) {
|
||||
c, err := rpc.DialContext(ctx, rawurl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewClient(c), nil
|
||||
}
|
||||
|
||||
// NewClient creates a client that uses the given RPC client.
|
||||
func NewClient(c *rpc.Client) *Client {
|
||||
return &Client{client: c}
|
||||
}
|
||||
|
||||
// RequestToken generates token for prover
|
||||
func (c *Client) RequestToken(ctx context.Context, authMsg *message.AuthMsg) (string, error) {
|
||||
var token string
|
||||
err := c.client.CallContext(ctx, &token, "prover_requestToken", authMsg)
|
||||
return token, err
|
||||
}
|
||||
|
||||
// RegisterAndSubscribe subscribe prover and register, verified by sign data.
|
||||
func (c *Client) RegisterAndSubscribe(ctx context.Context, taskCh chan *message.TaskMsg, authMsg *message.AuthMsg) (ethereum.Subscription, error) {
|
||||
return c.client.Subscribe(ctx, "prover", taskCh, "register", authMsg)
|
||||
}
|
||||
|
||||
// SubmitProof get proof from prover.
|
||||
func (c *Client) SubmitProof(ctx context.Context, proof *message.ProofMsg) error {
|
||||
return c.client.CallContext(ctx, nil, "prover_submitProof", proof)
|
||||
}
|
||||
@@ -2,13 +2,17 @@ package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
// enable the pprof
|
||||
_ "net/http/pprof"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
@@ -20,7 +24,7 @@ import (
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/controller/api"
|
||||
"scroll-tech/coordinator/internal/controller/cron"
|
||||
"scroll-tech/coordinator/internal/logic/provermanager"
|
||||
"scroll-tech/coordinator/internal/route"
|
||||
)
|
||||
|
||||
var app *cli.App
|
||||
@@ -49,15 +53,12 @@ func action(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
subCtx, cancel := context.WithCancel(ctx.Context)
|
||||
db, err := database.InitDB(cfg.DBConfig)
|
||||
db, err := database.InitDB(cfg.DB)
|
||||
if err != nil {
|
||||
log.Crit("failed to init db connection", "err", err)
|
||||
}
|
||||
|
||||
proofCollector := cron.NewCollector(subCtx, db, cfg)
|
||||
|
||||
provermanager.InitProverManager(db)
|
||||
|
||||
defer func() {
|
||||
proofCollector.Stop()
|
||||
cancel()
|
||||
@@ -66,34 +67,24 @@ func action(ctx *cli.Context) error {
|
||||
}
|
||||
}()
|
||||
|
||||
router := gin.Default()
|
||||
api.InitController(cfg, db)
|
||||
route.Route(router, cfg)
|
||||
port := ctx.String(httpPortFlag.Name)
|
||||
srv := &http.Server{
|
||||
Addr: fmt.Sprintf(":%s", port),
|
||||
Handler: router,
|
||||
ReadHeaderTimeout: time.Minute,
|
||||
}
|
||||
|
||||
// Start metrics server.
|
||||
metrics.Serve(subCtx, ctx)
|
||||
|
||||
apis := api.RegisterAPIs(cfg, db)
|
||||
// Register api and start rpc service.
|
||||
if ctx.Bool(httpEnabledFlag.Name) {
|
||||
handler, addr, err := utils.StartHTTPEndpoint(fmt.Sprintf("%s:%d", ctx.String(httpListenAddrFlag.Name), ctx.Int(httpPortFlag.Name)), apis)
|
||||
if err != nil {
|
||||
log.Crit("Could not start RPC api", "error", err)
|
||||
go func() {
|
||||
if runServerErr := srv.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
|
||||
log.Crit("run coordinator http server failure", "error", runServerErr)
|
||||
}
|
||||
defer func() {
|
||||
_ = handler.Shutdown(ctx.Context)
|
||||
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
|
||||
}()
|
||||
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
|
||||
}
|
||||
// Register api and start ws service.
|
||||
if ctx.Bool(wsEnabledFlag.Name) {
|
||||
handler, addr, err := utils.StartWSEndpoint(fmt.Sprintf("%s:%d", ctx.String(wsListenAddrFlag.Name), ctx.Int(wsPortFlag.Name)), apis, cfg.ProverManagerConfig.CompressionLevel)
|
||||
if err != nil {
|
||||
log.Crit("Could not start WS api", "error", err)
|
||||
}
|
||||
defer func() {
|
||||
_ = handler.Shutdown(ctx.Context)
|
||||
log.Info("WS endpoint closed", "url", fmt.Sprintf("ws://%v/", addr))
|
||||
}()
|
||||
log.Info("WS endpoint opened", "url", fmt.Sprintf("ws://%v/", addr))
|
||||
}
|
||||
}()
|
||||
|
||||
// Catch CTRL-C to ensure a graceful shutdown.
|
||||
interrupt := make(chan os.Signal, 1)
|
||||
@@ -101,7 +92,17 @@ func action(ctx *cli.Context) error {
|
||||
|
||||
// Wait until the interrupt signal is received from an OS signal.
|
||||
<-interrupt
|
||||
log.Info("start shutdown coordinator server ...")
|
||||
|
||||
closeCtx, cancelExit := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancelExit()
|
||||
if err = srv.Shutdown(closeCtx); err != nil {
|
||||
log.Warn("shutdown coordinator server failure", "error", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
<-closeCtx.Done()
|
||||
log.Info("coordinator server exiting success")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
wsStartPort int64 = 40000
|
||||
httpStartPort int64 = 40000
|
||||
)
|
||||
|
||||
// CoordinatorApp coordinator-test client manager.
|
||||
@@ -29,7 +29,7 @@ type CoordinatorApp struct {
|
||||
|
||||
originFile string
|
||||
coordinatorFile string
|
||||
WSPort int64
|
||||
HTTPPort int64
|
||||
|
||||
args []string
|
||||
docker.AppAPI
|
||||
@@ -39,13 +39,13 @@ type CoordinatorApp struct {
|
||||
func NewCoordinatorApp(base *docker.App, file string) *CoordinatorApp {
|
||||
coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", base.Timestamp)
|
||||
port, _ := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
wsPort := port.Int64() + wsStartPort
|
||||
httpPort := port.Int64() + httpStartPort
|
||||
coordinatorApp := &CoordinatorApp{
|
||||
base: base,
|
||||
originFile: file,
|
||||
coordinatorFile: coordinatorFile,
|
||||
WSPort: wsPort,
|
||||
args: []string{"--log.debug", "--config", coordinatorFile, "--ws", "--ws.port", strconv.Itoa(int(wsPort))},
|
||||
HTTPPort: httpPort,
|
||||
args: []string{"--log.debug", "--config", coordinatorFile, "--http", "--http.port", strconv.Itoa(int(httpPort))},
|
||||
}
|
||||
if err := coordinatorApp.MockConfig(true); err != nil {
|
||||
panic(err)
|
||||
@@ -67,9 +67,9 @@ func (c *CoordinatorApp) Free() {
|
||||
_ = os.Remove(c.coordinatorFile)
|
||||
}
|
||||
|
||||
// WSEndpoint returns ws endpoint.
|
||||
func (c *CoordinatorApp) WSEndpoint() string {
|
||||
return fmt.Sprintf("ws://localhost:%d", c.WSPort)
|
||||
// HTTPEndpoint returns the http endpoint.
|
||||
func (c *CoordinatorApp) HTTPEndpoint() string {
|
||||
return fmt.Sprintf("http://localhost:%d", c.HTTPPort)
|
||||
}
|
||||
|
||||
// MockConfig creates a new coordinator config.
|
||||
@@ -80,14 +80,15 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
|
||||
return err
|
||||
}
|
||||
// Reset prover manager config for manager test cases.
|
||||
cfg.ProverManagerConfig = &coordinatorConfig.ProverManagerConfig{
|
||||
ProversPerSession: 1,
|
||||
Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
|
||||
CollectionTime: 1,
|
||||
TokenTimeToLive: 1,
|
||||
cfg.ProverManager = &coordinatorConfig.ProverManager{
|
||||
ProversPerSession: 1,
|
||||
Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
|
||||
CollectionTimeSec: 60,
|
||||
SessionAttempts: 10,
|
||||
MaxVerifierWorkers: 4,
|
||||
}
|
||||
cfg.DBConfig.DSN = base.DBImg.Endpoint()
|
||||
cfg.L2Config.ChainID = 111
|
||||
cfg.DB.DSN = base.DBImg.Endpoint()
|
||||
cfg.L2.ChainID = 111
|
||||
c.Config = cfg
|
||||
|
||||
if !store {
|
||||
|
||||
@@ -1,25 +1,27 @@
|
||||
{
|
||||
"prover_manager_config": {
|
||||
"compression_level": 9,
|
||||
"prover_manager": {
|
||||
"provers_per_session": 1,
|
||||
"session_attempts": 2,
|
||||
"collection_time": 180,
|
||||
"token_time_to_live": 60,
|
||||
"session_attempts": 5,
|
||||
"collection_time_sec": 180,
|
||||
"verifier": {
|
||||
"mock_mode": true,
|
||||
"params_path": "",
|
||||
"assets_path": ""
|
||||
},
|
||||
"max_verifier_workers": 10,
|
||||
"order_session": "ASC"
|
||||
"max_verifier_workers": 4
|
||||
},
|
||||
"db_config": {
|
||||
"db": {
|
||||
"driver_name": "postgres",
|
||||
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable",
|
||||
"maxOpenNum": 200,
|
||||
"maxIdleNum": 20
|
||||
},
|
||||
"l2_config": {
|
||||
"l2": {
|
||||
"chain_id": 111
|
||||
},
|
||||
"auth": {
|
||||
"secret": "prover secret key",
|
||||
"challenge_expire_duration_sec": 3600,
|
||||
"login_expire_duration_sec": 10
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,52 +3,67 @@ module scroll-tech/coordinator
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/go-resty/resty/v2 v2.7.0
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
|
||||
github.com/shopspring/decimal v1.3.1
|
||||
github.com/stretchr/testify v1.8.3
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
golang.org/x/sync v0.3.0
|
||||
gorm.io/gorm v1.25.2
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/bytedance/sonic v1.9.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.14.1 // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rogpeppe/go-internal v1.10.0 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
golang.org/x/arch v0.4.0 // indirect
|
||||
golang.org/x/net v0.12.0 // indirect
|
||||
golang.org/x/text v0.11.0 // indirect
|
||||
google.golang.org/protobuf v1.31.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/deckarep/golang-set v1.8.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/gopherjs/gopherjs v1.17.2 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
|
||||
github.com/jtolds/gls v4.20.0+incompatible // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.5.3 // indirect
|
||||
github.com/scroll-tech/zktrie v0.6.0 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/smartystreets/assertions v1.13.1 // indirect
|
||||
github.com/smartystreets/goconvey v1.8.0
|
||||
github.com/tklauser/go-sysconf v0.3.11 // indirect
|
||||
github.com/tklauser/numcpus v0.6.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.11.0 // indirect
|
||||
golang.org/x/sys v0.10.0 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0 h1:PDiKKybR596O6FHW+RVSG0Z7uGCBNbmbUXh3uCNQ7Hc=
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1 h1:l29et8iLW6omcHltsOP6LLk4s3v4g2FbFs0koxGWVZs=
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1/go.mod h1:jwcPZJ92uoC9nOUTOKWoN/f6JZOgMSKlFSHw5/FrRUk=
|
||||
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
|
||||
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
@@ -11,29 +13,62 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
|
||||
github.com/bytedance/sonic v1.9.2 h1:GDaNjuWSGu09guE9Oql0MSTNhNCLlWwO8y/xM5BzcbM=
|
||||
github.com/bytedance/sonic v1.9.2/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
|
||||
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
|
||||
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
|
||||
github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
|
||||
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
|
||||
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
|
||||
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
|
||||
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
|
||||
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
|
||||
@@ -44,10 +79,15 @@ github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkr
|
||||
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
@@ -55,16 +95,30 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
|
||||
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
|
||||
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
@@ -73,64 +127,132 @@ github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY=
|
||||
github.com/scroll-tech/zktrie v0.5.3 h1:jjzQchGU6XPL5s1C5bwwivSadefSRuYASE9OL7UKAdE=
|
||||
github.com/scroll-tech/zktrie v0.5.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca h1:aQKsL9FiCPB6Eo3nuyxmw6aZpAhmFvJsRb7XGtXnOUc=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
|
||||
github.com/scroll-tech/zktrie v0.6.0 h1:xLrMAO31Yo2BiPg1jtYKzcjpEFnXy8acbB7iIsyshPs=
|
||||
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
|
||||
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU=
|
||||
github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
|
||||
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
|
||||
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
|
||||
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
|
||||
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
|
||||
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
|
||||
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
|
||||
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
|
||||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
|
||||
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
|
||||
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
|
||||
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
|
||||
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
@@ -2,50 +2,46 @@ package config

import (
"encoding/json"
"errors"
"os"
"path/filepath"
"strings"

"scroll-tech/common/database"
)

const (
defaultNumberOfVerifierWorkers = 10
defaultNumberOfSessionRetryAttempts = 2
)

// ProverManagerConfig loads sequencer configuration items.
type ProverManagerConfig struct {
CompressionLevel int `json:"compression_level,omitempty"`
// asc or desc (default: asc)
OrderSession string `json:"order_session,omitempty"`
// ProverManager loads sequencer configuration items.
type ProverManager struct {
// The amount of provers to pick per proof generation session.
ProversPerSession uint8 `json:"provers_per_session"`
// Number of attempts that a session can be retried if previous attempts failed.
// Currently we only consider proving timeout as failure here.
SessionAttempts uint8 `json:"session_attempts,omitempty"`
SessionAttempts uint8 `json:"session_attempts"`
// Zk verifier config.
Verifier *VerifierConfig `json:"verifier,omitempty"`
// Proof collection time (in minutes).
CollectionTime int `json:"collection_time"`
// Token time to live (in seconds)
TokenTimeToLive int `json:"token_time_to_live"`
Verifier *VerifierConfig `json:"verifier"`
// Proof collection time (in seconds).
CollectionTimeSec int `json:"collection_time_sec"`
// Max number of workers in verifier worker pool
MaxVerifierWorkers int `json:"max_verifier_workers,omitempty"`
MaxVerifierWorkers int `json:"max_verifier_workers"`
}

// L2Config loads l2geth configuration items.
type L2Config struct {
// L2 loads l2geth configuration items.
type L2 struct {
// l2geth chain_id.
ChainID uint64 `json:"chain_id"`
}

// Auth provides the auth of prover-stats-api
type Auth struct {
Secret string `json:"secret"`
ChallengeExpireDurationSec int `json:"challenge_expire_duration_sec"`
LoginExpireDurationSec int `json:"token_expire_duration_sec"`
}

// Config load configuration items.
type Config struct {
ProverManagerConfig *ProverManagerConfig `json:"prover_manager_config"`
DBConfig *database.Config `json:"db_config"`
L2Config *L2Config `json:"l2_config"`
ProverManager *ProverManager `json:"prover_manager"`
DB *database.Config `json:"db"`
L2 *L2 `json:"l2"`
Auth *Auth `json:"auth"`
}

// VerifierConfig load zk verifier config.
@@ -68,19 +64,5 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}

// Check prover's order session
order := strings.ToUpper(cfg.ProverManagerConfig.OrderSession)
if len(order) > 0 && !(order == "ASC" || order == "DESC") {
return nil, errors.New("prover config's order session is invalid")
}
cfg.ProverManagerConfig.OrderSession = order

if cfg.ProverManagerConfig.MaxVerifierWorkers == 0 {
cfg.ProverManagerConfig.MaxVerifierWorkers = defaultNumberOfVerifierWorkers
}
if cfg.ProverManagerConfig.SessionAttempts == 0 {
cfg.ProverManagerConfig.SessionAttempts = defaultNumberOfSessionRetryAttempts
}

return cfg, nil
}
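The renamed keys above (prover_manager, db, l2, auth) and the switch from collection_time to collection_time_sec change how downstream code reads the file. A minimal usage sketch, assuming the config package sits at scroll-tech/coordinator/internal/config as the other files in this change import it; the file path is a placeholder and only the field and key names are taken from the diff above:

package main

import (
	"fmt"
	"time"

	"scroll-tech/coordinator/internal/config" // assumed package path, matching the imports elsewhere in this change
)

func main() {
	cfg, err := config.NewConfig("./conf/config.json") // placeholder path
	if err != nil {
		panic(err)
	}
	// "collection_time" (minutes) is replaced by "collection_time_sec" (seconds).
	timeout := time.Duration(cfg.ProverManager.CollectionTimeSec) * time.Second
	fmt.Println("proof collection timeout:", timeout)
}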
@@ -12,29 +12,31 @@ import (

func TestConfig(t *testing.T) {
configTemplate := `{
"prover_manager_config": {
"compression_level": 9,
"prover_manager": {
"provers_per_session": 1,
"session_attempts": %d,
"collection_time": 180,
"token_time_to_live": 60,
"session_attempts": 5,
"collection_time_sec": 180,
"verifier": {
"mock_mode": true,
"params_path": "",
"agg_vk_path": ""
},
"max_verifier_workers": %d,
"order_session": "%s"
"max_verifier_workers": 4
},
"db_config": {
"db": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"l2_config": {
"l2": {
"chain_id": 111
}
},
"auth": {
"secret": "prover secret key",
"challenge_expire_duration_sec": 3600,
"login_expire_duration_sec": 3600
}
}`

t.Run("Success Case", func(t *testing.T) {
@@ -44,8 +46,7 @@ func TestConfig(t *testing.T) {
assert.NoError(t, tmpFile.Close())
assert.NoError(t, os.Remove(tmpFile.Name()))
}()
config := fmt.Sprintf(configTemplate, defaultNumberOfSessionRetryAttempts, defaultNumberOfVerifierWorkers, "ASC")
_, err = tmpFile.WriteString(config)
_, err = tmpFile.WriteString(configTemplate)
assert.NoError(t, err)

cfg, err := NewConfig(tmpFile.Name())
@@ -86,52 +87,4 @@ func TestConfig(t *testing.T) {
_, err = NewConfig(tmpFile.Name())
assert.Error(t, err)
})

t.Run("Invalid Order Session", func(t *testing.T) {
tmpFile, err := os.CreateTemp("", "example")
assert.NoError(t, err)
defer func() {
assert.NoError(t, tmpFile.Close())
assert.NoError(t, os.Remove(tmpFile.Name()))
}()
config := fmt.Sprintf(configTemplate, defaultNumberOfSessionRetryAttempts, defaultNumberOfVerifierWorkers, "INVALID")
_, err = tmpFile.WriteString(config)
assert.NoError(t, err)

_, err = NewConfig(tmpFile.Name())
assert.Error(t, err)
assert.Contains(t, err.Error(), "prover config's order session is invalid")
})

t.Run("Default MaxVerifierWorkers", func(t *testing.T) {
tmpFile, err := os.CreateTemp("", "example")
assert.NoError(t, err)
defer func() {
assert.NoError(t, tmpFile.Close())
assert.NoError(t, os.Remove(tmpFile.Name()))
}()
config := fmt.Sprintf(configTemplate, defaultNumberOfSessionRetryAttempts, 0, "ASC")
_, err = tmpFile.WriteString(config)
assert.NoError(t, err)

cfg, err := NewConfig(tmpFile.Name())
assert.NoError(t, err)
assert.Equal(t, defaultNumberOfVerifierWorkers, cfg.ProverManagerConfig.MaxVerifierWorkers)
})

t.Run("Default SessionAttempts", func(t *testing.T) {
tmpFile, err := os.CreateTemp("", "example")
assert.NoError(t, err)
defer func() {
assert.NoError(t, tmpFile.Close())
assert.NoError(t, os.Remove(tmpFile.Name()))
}()
config := fmt.Sprintf(configTemplate, 0, defaultNumberOfVerifierWorkers, "ASC")
_, err = tmpFile.WriteString(config)
assert.NoError(t, err)

cfg, err := NewConfig(tmpFile.Name())
assert.NoError(t, err)
assert.Equal(t, uint8(defaultNumberOfSessionRetryAttempts), cfg.ProverManagerConfig.SessionAttempts)
})
}
93
coordinator/internal/controller/api/auth.go
Normal file
@@ -0,0 +1,93 @@
package api

import (
"fmt"

jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"gorm.io/gorm"

"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/logic/auth"
"scroll-tech/coordinator/internal/types"
)

// AuthController is login API
type AuthController struct {
loginLogic *auth.LoginLogic
}

// NewAuthController returns an LoginController instance
func NewAuthController(db *gorm.DB) *AuthController {
return &AuthController{
loginLogic: auth.NewLoginLogic(db),
}
}

// Login the api controller for login
func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
var login types.LoginParameter
if err := c.ShouldBind(&login); err != nil {
return "", fmt.Errorf("missing the public_key, err:%w", err)
}

// check login parameter's token is equal to bearer token, the Authorization must be existed
// if not exist, the jwt token will intercept it
brearToken := c.GetHeader("Authorization")
if brearToken != "Bearer "+login.Message.Challenge {
return "", fmt.Errorf("check challenge failure for the not equal challenge string")
}

// check the challenge is used, if used, return failure
if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
return "", fmt.Errorf("login insert challenge string failure:%w", err)
}
return login, nil
}

// PayloadFunc returns jwt.MapClaims with {public key, prover name}.
func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
v, ok := data.(types.LoginParameter)
if !ok {
return jwt.MapClaims{}
}

// recover the public key
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
},
Signature: v.Signature,
}

publicKey, err := authMsg.PublicKey()
if err != nil {
return jwt.MapClaims{}
}

return jwt.MapClaims{
types.PublicKey: publicKey,
types.ProverName: v.Message.ProverName,
types.ProverVersion: v.Message.ProverVersion,
}
}

// IdentityHandler replies to client for /login
func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
claims := jwt.ExtractClaims(c)
if proverName, ok := claims[types.ProverName]; ok {
c.Set(types.ProverName, proverName)
}

if publicKey, ok := claims[types.PublicKey]; ok {
c.Set(types.PublicKey, publicKey)
}

if proverVersion, ok := claims[types.ProverVersion]; ok {
c.Set(types.ProverVersion, proverVersion)
}
return nil
}
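AuthController's Login, PayloadFunc, and IdentityHandler line up with the hook points exposed by github.com/appleboy/gin-jwt/v2, which this file imports. A minimal wiring sketch under that assumption; the realm, signing key, and timeout values are placeholders rather than values taken from this change:

package api

import (
	"time"

	jwt "github.com/appleboy/gin-jwt/v2"
)

// newJWTMiddleware sketches how the controller's hooks could be plugged into gin-jwt.
func newJWTMiddleware(authCtrl *AuthController, secret string, tokenTTL time.Duration) (*jwt.GinJWTMiddleware, error) {
	return jwt.New(&jwt.GinJWTMiddleware{
		Realm:           "coordinator",            // placeholder realm
		Key:             []byte(secret),           // e.g. the Auth.Secret field from the renamed config
		Timeout:         tokenTTL,                 // e.g. Auth.LoginExpireDurationSec seconds
		PayloadFunc:     authCtrl.PayloadFunc,     // public key, prover name and version become JWT claims
		IdentityHandler: authCtrl.IdentityHandler, // claims are copied back onto the gin context
		Authenticator:   authCtrl.Login,           // bearer-challenge comparison plus challenge replay check
	})
}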
32
coordinator/internal/controller/api/controller.go
Normal file
@@ -0,0 +1,32 @@
package api

import (
"sync"

"gorm.io/gorm"

"scroll-tech/coordinator/internal/config"
)

var (
// GetTask the prover task controller
GetTask *GetTaskController
// SubmitProof the submit proof controller
SubmitProof *SubmitProofController
// HealthCheck the health check controller
HealthCheck *HealthCheckController
// Auth the auth controller
Auth *AuthController

initControllerOnce sync.Once
)

// InitController inits Controller with database
func InitController(cfg *config.Config, db *gorm.DB) {
initControllerOnce.Do(func() {
Auth = NewAuthController(db)
HealthCheck = NewHealthCheckController()
GetTask = NewGetTaskController(cfg, db)
SubmitProof = NewSubmitProofController(cfg, db)
})
}
86
coordinator/internal/controller/api/get_task.go
Normal file
@@ -0,0 +1,86 @@
package api

import (
"fmt"
"math/rand"

"github.com/gin-gonic/gin"
"gorm.io/gorm"

"scroll-tech/common/types"
"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provertask"
coordinatorType "scroll-tech/coordinator/internal/types"
)

// GetTaskController the get prover task api controller
type GetTaskController struct {
proverTasks map[message.ProofType]provertask.ProverTask
}

// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, db *gorm.DB) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, db)
batchProverTask := provertask.NewBatchProverTask(cfg, db)

ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
}

ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
ptc.proverTasks[message.ProofTypeBatch] = batchProverTask

return ptc
}

// GetTasks get assigned chunk/batch task
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
var getTaskParameter coordinatorType.GetTaskParameter
if err := ctx.ShouldBind(&getTaskParameter); err != nil {
nerr := fmt.Errorf("prover tasks parameter invalid, err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}

proofType := ptc.proofType(&getTaskParameter)
proverTask, isExist := ptc.proverTasks[proofType]
if !isExist {
nerr := fmt.Errorf("parameter wrong proof type")
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}

result, err := proverTask.Assign(ctx, &getTaskParameter)
if err != nil {
nerr := fmt.Errorf("return prover task err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorGetTaskFailure, nerr, nil)
return
}

if result == nil {
nerr := fmt.Errorf("get empty prover task")
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorEmptyProofData, nerr, nil)
return
}

coordinatorType.RenderJSON(ctx, types.Success, nil, result)
}

func (ptc *GetTaskController) proofType(para *coordinatorType.GetTaskParameter) message.ProofType {
proofType := message.ProofType(para.TaskType)

proofTypes := []message.ProofType{
message.ProofTypeChunk,
message.ProofTypeBatch,
}

if proofType == message.ProofTypeUndefined {
rand.Shuffle(len(proofTypes), func(i, j int) {
proofTypes[i], proofTypes[j] = proofTypes[j], proofTypes[i]
})
proofType = proofTypes[0]
}
return proofType
}
23
coordinator/internal/controller/api/health_check.go
Normal file
@@ -0,0 +1,23 @@
package api

import (
"github.com/gin-gonic/gin"

ctypes "scroll-tech/common/types"

"scroll-tech/coordinator/internal/types"
)

// HealthCheckController is health check API
type HealthCheckController struct {
}

// NewHealthCheckController returns an HealthCheckController instance
func NewHealthCheckController() *HealthCheckController {
return &HealthCheckController{}
}

// HealthCheck the api controller for coordinator health check
func (a *HealthCheckController) HealthCheck(c *gin.Context) {
types.RenderJSON(c, ctypes.Success, nil, nil)
}
@@ -1,115 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/proof"
|
||||
)
|
||||
|
||||
// ProverController the prover api controller
|
||||
type ProverController struct {
|
||||
tokenCache *cache.Cache
|
||||
proofReceiver *proof.ZKProofReceiver
|
||||
taskWorker *proof.TaskWorker
|
||||
}
|
||||
|
||||
// NewProverController create a prover controller
|
||||
func NewProverController(cfg *config.ProverManagerConfig, db *gorm.DB) *ProverController {
|
||||
return &ProverController{
|
||||
proofReceiver: proof.NewZKProofReceiver(cfg, db),
|
||||
taskWorker: proof.NewTaskWorker(),
|
||||
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
|
||||
}
|
||||
}
|
||||
|
||||
// RequestToken get request token of authMsg
|
||||
func (r *ProverController) RequestToken(authMsg *message.AuthMsg) (string, error) {
|
||||
if ok, err := authMsg.Verify(); !ok {
|
||||
if err != nil {
|
||||
log.Error("failed to verify auth message", "error", err)
|
||||
}
|
||||
return "", errors.New("signature verification failed")
|
||||
}
|
||||
pubkey, err := authMsg.PublicKey()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("RequestToken auth msg public key error:%w", err)
|
||||
}
|
||||
if token, ok := r.tokenCache.Get(pubkey); ok {
|
||||
return token.(string), nil
|
||||
}
|
||||
token, err := message.GenerateToken()
|
||||
if err != nil {
|
||||
return "", errors.New("token generation failed")
|
||||
}
|
||||
r.tokenCache.SetDefault(pubkey, token)
|
||||
return token, nil
|
||||
}
|
||||
|
||||
// VerifyToken verifies pubkey for token and expiration time
|
||||
func (r *ProverController) verifyToken(authMsg *message.AuthMsg) (bool, error) {
|
||||
pubkey, err := authMsg.PublicKey()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("verify token auth msg public key error:%w", err)
|
||||
}
|
||||
// GetValue returns nil if value is expired
|
||||
if token, ok := r.tokenCache.Get(pubkey); !ok || token != authMsg.Identity.Token {
|
||||
return false, fmt.Errorf("failed to find corresponding token. prover name: %s prover pk: %s", authMsg.Identity.Name, pubkey)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Register register api for prover
|
||||
func (r *ProverController) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
|
||||
// Verify register message.
|
||||
if ok, err := authMsg.Verify(); !ok {
|
||||
if err != nil {
|
||||
log.Error("failed to verify auth message", "error", err)
|
||||
}
|
||||
return nil, errors.New("signature verification failed")
|
||||
}
|
||||
// Lock here to avoid malicious prover message replay before cleanup of token
|
||||
if ok, err := r.verifyToken(authMsg); !ok {
|
||||
return nil, err
|
||||
}
|
||||
pubkey, err := authMsg.PublicKey()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("register auth msg public key error:%w", err)
|
||||
}
|
||||
// prover successfully registered, remove token associated with this prover
|
||||
r.tokenCache.Delete(pubkey)
|
||||
|
||||
rpcSub, err := r.taskWorker.AllocTaskWorker(ctx, authMsg)
|
||||
if err != nil {
|
||||
return rpcSub, err
|
||||
}
|
||||
return rpcSub, nil
|
||||
}
|
||||
|
||||
// SubmitProof prover pull proof
|
||||
func (r *ProverController) SubmitProof(proof *message.ProofMsg) error {
|
||||
// Verify the signature
|
||||
if ok, err := proof.Verify(); !ok {
|
||||
if err != nil {
|
||||
log.Error("failed to verify proof message", "error", err)
|
||||
}
|
||||
return errors.New("auth signature verify fail")
|
||||
}
|
||||
|
||||
err := r.proofReceiver.HandleZkProof(context.Background(), proof)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,304 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/proof"
|
||||
"scroll-tech/coordinator/internal/logic/provermanager"
|
||||
"scroll-tech/coordinator/internal/logic/verifier"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
func geneAuthMsg(t *testing.T) (*message.AuthMsg, *ecdsa.PrivateKey) {
|
||||
authMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: "prover_test1",
|
||||
},
|
||||
}
|
||||
privKey, err := crypto.GenerateKey()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, authMsg.SignWithKey(privKey))
|
||||
return authMsg, privKey
|
||||
}
|
||||
|
||||
var proverController *ProverController
|
||||
|
||||
func init() {
|
||||
conf := &config.ProverManagerConfig{
|
||||
TokenTimeToLive: 120,
|
||||
}
|
||||
conf.Verifier = &config.VerifierConfig{MockMode: true}
|
||||
proverController = NewProverController(conf, nil)
|
||||
}
|
||||
|
||||
func TestProver_RequestToken(t *testing.T) {
|
||||
convey.Convey("auth msg verify failure", t, func() {
|
||||
tmpAuthMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: "prover_test_request_token",
|
||||
},
|
||||
}
|
||||
token, err := proverController.RequestToken(tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, token)
|
||||
})
|
||||
|
||||
convey.Convey("token has already been distributed", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
key, err := tmpAuthMsg.PublicKey()
|
||||
assert.NoError(t, err)
|
||||
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
|
||||
proverController.tokenCache.Set(key, tokenCacheStored, time.Hour)
|
||||
token, err := proverController.RequestToken(tmpAuthMsg)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, token, tokenCacheStored)
|
||||
})
|
||||
|
||||
convey.Convey("token generation failure", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
|
||||
return "", errors.New("token generation failed")
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
token, err := proverController.RequestToken(tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, token)
|
||||
})
|
||||
|
||||
convey.Convey("token generation success", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
|
||||
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
|
||||
return tokenCacheStored, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
token, err := proverController.RequestToken(tmpAuthMsg)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tokenCacheStored, token)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProver_Register(t *testing.T) {
|
||||
convey.Convey("auth msg verify failure", t, func() {
|
||||
tmpAuthMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: "prover_test_register",
|
||||
},
|
||||
}
|
||||
subscription, err := proverController.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, subscription)
|
||||
})
|
||||
|
||||
convey.Convey("verify token failure", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyPrivateMethod(proverController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return false, errors.New("verify token failure")
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
subscription, err := proverController.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, subscription)
|
||||
})
|
||||
|
||||
convey.Convey("notifier failure", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyPrivateMethod(proverController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
patchGuard.ApplyFunc(rpc.NotifierFromContext, func(ctx context.Context) (*rpc.Notifier, bool) {
|
||||
return nil, false
|
||||
})
|
||||
subscription, err := proverController.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err, rpc.ErrNotificationsUnsupported)
|
||||
assert.Equal(t, *subscription, rpc.Subscription{})
|
||||
})
|
||||
|
||||
convey.Convey("register failure", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyPrivateMethod(proverController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
var taskWorker *proof.TaskWorker
|
||||
patchGuard.ApplyPrivateMethod(taskWorker, "AllocTaskWorker", func(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
|
||||
return nil, errors.New("register error")
|
||||
})
|
||||
subscription, err := proverController.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, subscription)
|
||||
})
|
||||
|
||||
convey.Convey("register success", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyPrivateMethod(proverController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
var taskWorker *proof.TaskWorker
|
||||
patchGuard.ApplyPrivateMethod(taskWorker, "AllocTaskWorker", func(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
|
||||
return nil, nil
|
||||
})
|
||||
_, err := proverController.Register(context.Background(), tmpAuthMsg)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProver_SubmitProof(t *testing.T) {
|
||||
tmpAuthMsg, prvKey := geneAuthMsg(t)
|
||||
pubKey, err := tmpAuthMsg.PublicKey()
|
||||
assert.NoError(t, err)
|
||||
|
||||
id := "provers_info_test"
|
||||
tmpProof := &message.ProofMsg{
|
||||
ProofDetail: &message.ProofDetail{
|
||||
Type: message.ProofTypeChunk,
|
||||
ID: id,
|
||||
Status: message.StatusOk,
|
||||
ChunkProof: &message.ChunkProof{},
|
||||
},
|
||||
}
|
||||
assert.NoError(t, tmpProof.Sign(prvKey))
|
||||
proofPubKey, err := tmpProof.PublicKey()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, pubKey, proofPubKey)
|
||||
|
||||
var proverTaskOrm *orm.ProverTask
|
||||
patchGuard := gomonkey.ApplyMethodFunc(proverTaskOrm, "GetProverTasks", func(ctx context.Context, fields map[string]interface{}, orderByList []string, offset, limit int) ([]orm.ProverTask, error) {
|
||||
return nil, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
provermanager.InitProverManager(nil)
|
||||
|
||||
taskChan, err := provermanager.Manager.Register(context.Background(), pubKey, tmpAuthMsg.Identity)
|
||||
assert.NotNil(t, taskChan)
|
||||
assert.NoError(t, err)
|
||||
|
||||
convey.Convey("verify failure", t, func() {
|
||||
var s *message.ProofMsg
|
||||
patchGuard.ApplyMethodFunc(s, "Verify", func() (bool, error) {
|
||||
return false, errors.New("proof verify error")
|
||||
})
|
||||
err = proverController.SubmitProof(tmpProof)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
var s *message.ProofMsg
|
||||
patchGuard.ApplyMethodFunc(s, "Verify", func() (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
|
||||
var chunkOrm *orm.Chunk
|
||||
patchGuard.ApplyMethodFunc(chunkOrm, "UpdateProofByHash", func(context.Context, string, *message.BatchProof, uint64, ...*gorm.DB) error {
|
||||
return nil
|
||||
})
|
||||
patchGuard.ApplyMethodFunc(chunkOrm, "UpdateProvingStatus", func(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
var batchOrm *orm.Batch
|
||||
patchGuard.ApplyMethodFunc(batchOrm, "UpdateProofByHash", func(ctx context.Context, hash string, proof *message.BatchProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
|
||||
return nil
|
||||
})
|
||||
patchGuard.ApplyMethodFunc(batchOrm, "UpdateProvingStatus", func(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
convey.Convey("get none provers of prover task", t, func() {
|
||||
patchGuard.ApplyMethodFunc(proverTaskOrm, "GetProverTaskByTaskIDAndPubKey", func(ctx context.Context, hash, pubKey string) (*orm.ProverTask, error) {
|
||||
return nil, nil
|
||||
})
|
||||
tmpProof1 := &message.ProofMsg{
|
||||
ProofDetail: &message.ProofDetail{
|
||||
ID: "10001",
|
||||
Status: message.StatusOk,
|
||||
ChunkProof: &message.ChunkProof{},
|
||||
},
|
||||
}
|
||||
privKey, err := crypto.GenerateKey()
|
||||
assert.NoError(t, err)
|
||||
tmpProof1.Sign(privKey)
|
||||
_, err1 := tmpProof1.PublicKey()
|
||||
assert.NoError(t, err1)
|
||||
err2 := proverController.SubmitProof(tmpProof1)
|
||||
fmt.Println(err2)
|
||||
targetErr := fmt.Errorf("validator failure get none prover task for the proof")
|
||||
assert.Equal(t, err2.Error(), targetErr.Error())
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(proverTaskOrm, "GetProverTaskByTaskIDAndPubKey", func(ctx context.Context, hash, pubKey string) (*orm.ProverTask, error) {
|
||||
now := time.Now()
|
||||
s := &orm.ProverTask{
|
||||
TaskID: id,
|
||||
ProverPublicKey: proofPubKey,
|
||||
TaskType: int16(message.ProofTypeChunk),
|
||||
ProverName: "provers_info_test",
|
||||
ProvingStatus: int16(types.ProverAssigned),
|
||||
CreatedAt: now,
|
||||
}
|
||||
return s, nil
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(proverTaskOrm, "UpdateProverTaskProvingStatus", func(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
patchGuard.ApplyPrivateMethod(proverController.proofReceiver, "proofFailure", func(hash string, pubKey string, proofMsgType message.ProofType) {
|
||||
})
|
||||
|
||||
convey.Convey("proof msg status is not ok", t, func() {
|
||||
tmpProof.Status = message.StatusProofError
|
||||
err1 := proverController.SubmitProof(tmpProof)
|
||||
assert.NoError(t, err1)
|
||||
})
|
||||
tmpProof.Status = message.StatusOk
|
||||
|
||||
var db *gorm.DB
|
||||
patchGuard.ApplyMethodFunc(db, "Transaction", func(fc func(tx *gorm.DB) error, opts ...*sql.TxOptions) (err error) {
|
||||
return nil
|
||||
})
|
||||
|
||||
var tmpVerifier *verifier.Verifier
|
||||
convey.Convey("verifier proof failure", t, func() {
|
||||
targetErr := errors.New("verify proof failure")
|
||||
patchGuard.ApplyMethodFunc(tmpVerifier, "VerifyChunkProof", func(proof *message.ChunkProof) (bool, error) {
|
||||
return false, targetErr
|
||||
})
|
||||
err1 := proverController.SubmitProof(tmpProof)
|
||||
assert.Nil(t, err1)
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(tmpVerifier, "VerifyChunkProof", func(proof *message.ChunkProof) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
|
||||
patchGuard.ApplyPrivateMethod(proverController.proofReceiver, "closeProofTask", func(hash string, pubKey string, proofMsg *message.ProofMsg, proversInfo *coordinatorType.ProversInfo) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
err1 := proverController.SubmitProof(tmpProof)
|
||||
assert.Nil(t, err1)
|
||||
}
|
||||
@@ -1,30 +0,0 @@
package api

import (
"context"

"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"

"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/config"
)

// ProverAPI for provers inorder to register and submit proof
type ProverAPI interface {
RequestToken(authMsg *message.AuthMsg) (string, error)
Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error)
SubmitProof(proof *message.ProofMsg) error
}

// RegisterAPIs register api for coordinator
func RegisterAPIs(cfg *config.Config, db *gorm.DB) []rpc.API {
return []rpc.API{
{
Namespace: "prover",
Service: ProverAPI(NewProverController(cfg.ProverManagerConfig, db)),
Public: true,
},
}
}
72
coordinator/internal/controller/api/submit_proof.go
Normal file
@@ -0,0 +1,72 @@
package api

import (
"encoding/json"
"fmt"

"github.com/gin-gonic/gin"
"gorm.io/gorm"

"scroll-tech/common/types"
"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/submitproof"
coodinatorType "scroll-tech/coordinator/internal/types"
)

// SubmitProofController the submit proof api controller
type SubmitProofController struct {
submitProofReceiverLogic *submitproof.ProofReceiverLogic
}

// NewSubmitProofController create the submit proof api controller instance
func NewSubmitProofController(cfg *config.Config, db *gorm.DB) *SubmitProofController {
return &SubmitProofController{
submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, db),
}
}

// SubmitProof prover submit the proof to coordinator
func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
var spp coodinatorType.SubmitProofParameter
if err := ctx.ShouldBind(&spp); err != nil {
nerr := fmt.Errorf("parameter invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}

proofMsg := message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: spp.TaskID,
Type: message.ProofType(spp.TaskType),
Status: message.RespStatus(spp.Status),
},
}

switch message.ProofType(spp.TaskType) {
case message.ProofTypeChunk:
var tmpChunkProof message.ChunkProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.ChunkProof = &tmpChunkProof
case message.ProofTypeBatch:
var tmpBatchProof message.BatchProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.BatchProof = &tmpBatchProof
}

if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg); err != nil {
nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
return
}
coodinatorType.RenderJSON(ctx, types.Success, nil, nil)
}
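For orientation, the request body this controller consumes can be read off the fields it binds and re-parses (TaskID, TaskType, Status, and Proof, where Proof carries a JSON-encoded chunk or batch proof). A prover-side sketch of building such a body; the JSON key names and the helper itself are assumptions for illustration, not part of this diff:

package prover

import (
	"encoding/json"

	"scroll-tech/common/types/message"
)

// buildChunkSubmitBody is a hypothetical helper mirroring SubmitProofParameter above.
func buildChunkSubmitBody(taskID string, chunkProof *message.ChunkProof) (map[string]interface{}, error) {
	proofBytes, err := json.Marshal(chunkProof)
	if err != nil {
		return nil, err
	}
	return map[string]interface{}{
		"task_id":   taskID,                      // bound as spp.TaskID
		"task_type": int(message.ProofTypeChunk), // bound as spp.TaskType
		"status":    int(message.StatusOk),       // bound as spp.Status
		"proof":     string(proofBytes),          // bound as spp.Proof, re-parsed into message.ChunkProof
	}, nil
}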
@@ -12,7 +12,6 @@ import (
"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/collector"
"scroll-tech/coordinator/internal/orm"
)

@@ -22,11 +21,8 @@ type Collector struct {
db *gorm.DB
ctx context.Context

stopRunChan chan struct{}
stopTimeoutChan chan struct{}

collectors map[message.ProofType]collector.Collector

proverTaskOrm *orm.ProverTask
chunkOrm *orm.Chunk
batchOrm *orm.Batch
@@ -38,18 +34,12 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config) *Collect
cfg: cfg,
db: db,
ctx: ctx,
stopRunChan: make(chan struct{}),
stopTimeoutChan: make(chan struct{}),
collectors: make(map[message.ProofType]collector.Collector),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
}

c.collectors[message.ProofTypeBatch] = collector.NewBatchProofCollector(cfg, db)
c.collectors[message.ProofTypeChunk] = collector.NewChunkProofCollector(cfg, db)

go c.run()
go c.timeoutProofTask()

log.Info("Start coordinator successfully.")
@@ -59,40 +49,9 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config) *Collect

// Stop all the collector
func (c *Collector) Stop() {
c.stopRunChan <- struct{}{}
c.stopTimeoutChan <- struct{}{}
}

// run loop and cron collect
func (c *Collector) run() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("collector panic error:%v", err)
log.Warn(nerr.Error())
}
}()

ticker := time.NewTicker(time.Second * 2)
for {
select {
case <-ticker.C:
for _, tmpCollector := range c.collectors {
if err := tmpCollector.Collect(c.ctx); err != nil {
log.Warn("collect data to prover failure", "collector name", tmpCollector.Name(), "error", err)
}
}
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopRunChan:
log.Info("the coordinator run loop exit")
return
}
}
}

// timeoutTask cron check the send task is timeout. if timeout reached, restore the
// chunk/batch task to unassigned. then the batch/chunk collector can retry it.
func (c *Collector) timeoutProofTask() {
@@ -114,7 +73,7 @@ func (c *Collector) timeoutProofTask() {
}

for _, assignedProverTask := range assignedProverTasks {
timeoutDuration := time.Duration(c.cfg.ProverManagerConfig.CollectionTime) * time.Minute
timeoutDuration := time.Duration(c.cfg.ProverManager.CollectionTimeSec) * time.Second
// here not update the block batch proving status failed, because the collector loop will check
// the attempt times. if reach the times, the collector will set the block batch proving status.
if time.Since(assignedProverTask.AssignedAt) >= timeoutDuration {
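Note the unit change in the hunk above: the timeout used to be read from a minutes field and is now read from a seconds field. A tiny equivalence sketch with illustrative values:

package config

import "time"

// equivalentTimeouts shows that an old "collection_time": 3 (minutes) matches a new "collection_time_sec": 180 (seconds).
func equivalentTimeouts() (time.Duration, time.Duration) {
	oldTimeout := time.Duration(3) * time.Minute   // old field, interpreted in minutes
	newTimeout := time.Duration(180) * time.Second // new field, interpreted in seconds
	return oldTimeout, newTimeout                  // both are 3 minutes
}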
25
coordinator/internal/logic/auth/login.go
Normal file
@@ -0,0 +1,25 @@
package auth

import (
"github.com/gin-gonic/gin"
"gorm.io/gorm"

"scroll-tech/coordinator/internal/orm"
)

// LoginLogic the auth logic
type LoginLogic struct {
challengeOrm *orm.Challenge
}

// NewLoginLogic new a LoginLogic
func NewLoginLogic(db *gorm.DB) *LoginLogic {
return &LoginLogic{
challengeOrm: orm.NewChallenge(db),
}
}

// InsertChallengeString insert and check the challenge string is existed
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
return l.challengeOrm.InsertChallenge(ctx, challenge)
}
@@ -1,139 +0,0 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/provermanager"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
// BatchProofCollector is collector implement for batch proof
|
||||
type BatchProofCollector struct {
|
||||
BaseCollector
|
||||
}
|
||||
|
||||
// NewBatchProofCollector new a batch collector
|
||||
func NewBatchProofCollector(cfg *config.Config, db *gorm.DB) *BatchProofCollector {
|
||||
bp := &BatchProofCollector{
|
||||
BaseCollector: BaseCollector{
|
||||
db: db,
|
||||
cfg: cfg,
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
batchOrm: orm.NewBatch(db),
|
||||
proverTaskOrm: orm.NewProverTask(db),
|
||||
},
|
||||
}
|
||||
return bp
|
||||
}
|
||||
|
||||
// Name return the batch proof collector name
|
||||
func (bp *BatchProofCollector) Name() string {
|
||||
return BatchCollectorName
|
||||
}
|
||||
|
||||
// Collect load and send batch tasks
|
||||
func (bp *BatchProofCollector) Collect(ctx context.Context) error {
|
||||
batchTasks, err := bp.batchOrm.GetUnassignedBatches(ctx, 1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get unassigned batch proving tasks, error:%w", err)
|
||||
}
|
||||
|
||||
if len(batchTasks) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(batchTasks) != 1 {
|
||||
return fmt.Errorf("get unassigned batch proving task len not 1, batch tasks:%v", batchTasks)
|
||||
}
|
||||
|
||||
batchTask := batchTasks[0]
|
||||
log.Info("start batch proof generation session", "id", batchTask.Hash)
|
||||
|
||||
if provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch) == 0 {
|
||||
return fmt.Errorf("no idle common prover when starting proof generation session, id:%s", batchTask.Hash)
|
||||
}
|
||||
|
||||
if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
|
||||
return fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
|
||||
}
|
||||
|
||||
proverStatusList, err := bp.sendTask(ctx, batchTask.Hash)
|
||||
if err != nil {
|
||||
return fmt.Errorf("send batch task id:%s err:%w", batchTask.Hash, err)
|
||||
}
|
||||
|
||||
transErr := bp.db.Transaction(func(tx *gorm.DB) error {
|
||||
// Update session proving status as assigned.
|
||||
if err = bp.batchOrm.UpdateProvingStatus(ctx, batchTask.Hash, types.ProvingTaskAssigned, tx); err != nil {
|
||||
return fmt.Errorf("failed to update task status, id:%s, error:%w", batchTask.Hash, err)
|
||||
}
|
||||
|
||||
for _, proverStatus := range proverStatusList {
|
||||
proverTask := orm.ProverTask{
|
||||
TaskID: batchTask.Hash,
|
||||
ProverPublicKey: proverStatus.PublicKey,
|
||||
TaskType: int16(message.ProofTypeBatch),
|
||||
ProverName: proverStatus.Name,
|
||||
ProvingStatus: int16(types.ProverAssigned),
|
||||
FailureType: int16(types.ProverTaskFailureTypeUndefined),
|
||||
// here why need use UTC time. see scroll/common/databased/db.go
|
||||
AssignedAt: utils.NowUTC(),
|
||||
}
|
||||
|
||||
// Store session info.
|
||||
if err = bp.proverTaskOrm.SetProverTask(ctx, &proverTask, tx); err != nil {
|
||||
return fmt.Errorf("db set session info fail, session id:%s, error:%w", proverTask.TaskID, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return transErr
|
||||
}
|
||||
|
||||
func (bp *BatchProofCollector) sendTask(ctx context.Context, hash string) ([]*coordinatorType.ProverStatus, error) {
|
||||
// get chunks from db
|
||||
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, hash)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to get chunk proofs for batch task id:%s err:%w ", hash, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
taskDetail := &message.BatchTaskDetail{}
|
||||
for _, chunk := range chunks {
|
||||
chunkInfo := &message.ChunkInfo{
|
||||
ChainID: bp.cfg.L2Config.ChainID,
|
||||
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
|
||||
PostStateRoot: common.HexToHash(chunk.StateRoot),
|
||||
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
|
||||
DataHash: common.HexToHash(chunk.Hash),
|
||||
IsPadding: false,
|
||||
}
|
||||
taskDetail.ChunkInfos = append(taskDetail.ChunkInfos, chunkInfo)
|
||||
|
||||
chunkProof := &message.ChunkProof{}
|
||||
if err := json.Unmarshal(chunk.Proof, chunkProof); err != nil {
|
||||
return nil, fmt.Errorf("json Unmarshal ChunkProof error: %w, chunk hash: %v", err, chunk.Hash)
|
||||
}
|
||||
taskDetail.ChunkProofs = append(taskDetail.ChunkProofs, chunkProof)
|
||||
}
|
||||
|
||||
taskMsg := &message.TaskMsg{
|
||||
ID: hash,
|
||||
Type: message.ProofTypeBatch,
|
||||
ChunkTaskDetail: nil,
|
||||
BatchTaskDetail: taskDetail,
|
||||
}
|
||||
return bp.BaseCollector.sendTask(taskMsg)
|
||||
}
|
||||
@@ -1,122 +0,0 @@
package collector

import (
"context"
"fmt"

"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"

"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)

// ChunkProofCollector the chunk proof collector
type ChunkProofCollector struct {
BaseCollector
}

// NewChunkProofCollector new a chunk proof collector
func NewChunkProofCollector(cfg *config.Config, db *gorm.DB) *ChunkProofCollector {
cp := &ChunkProofCollector{
BaseCollector: BaseCollector{
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
},
}
return cp
}

// Name return a block batch collector name
func (cp *ChunkProofCollector) Name() string {
return ChunkCollectorName
}

// Collect the chunk proof which need to prove
func (cp *ChunkProofCollector) Collect(ctx context.Context) error {
// load and send chunk tasks
chunkTasks, err := cp.chunkOrm.GetUnassignedChunks(ctx, 1)
if err != nil {
return fmt.Errorf("failed to get unassigned chunk proving tasks, error:%w", err)
}

if len(chunkTasks) == 0 {
return nil
}

if len(chunkTasks) != 1 {
return fmt.Errorf("get unassigned chunk proving task len not 1, chunk tasks:%v", chunkTasks)
}

chunkTask := chunkTasks[0]

log.Info("start chunk generation session", "id", chunkTask.Hash)

if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
return fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
}

if provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk) == 0 {
return fmt.Errorf("no idle chunk prover when starting proof generation session, id:%s", chunkTask.Hash)
}

proverStatusList, err := cp.sendTask(ctx, chunkTask.Hash)
if err != nil {
return fmt.Errorf("send task failure, id:%s error:%w", chunkTask.Hash, err)
}

transErr := cp.db.Transaction(func(tx *gorm.DB) error {
// Update session proving status as assigned.
if err = cp.chunkOrm.UpdateProvingStatus(ctx, chunkTask.Hash, types.ProvingTaskAssigned, tx); err != nil {
log.Error("failed to update task status", "id", chunkTask.Hash, "err", err)
return err
}

for _, proverStatus := range proverStatusList {
proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: proverStatus.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: proverStatus.Name,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/databased/db.go
AssignedAt: utils.NowUTC(),
}
if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask, tx); err != nil {
return fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, proverStatus.PublicKey, err)
}
}
return nil
})
return transErr
}

func (cp *ChunkProofCollector) sendTask(ctx context.Context, hash string) ([]*coordinatorType.ProverStatus, error) {
// Get block hashes.
wrappedBlocks, err := cp.blockOrm.GetL2BlocksByChunkHash(ctx, hash)
if err != nil {
return nil, fmt.Errorf("failed to fetch wrapped blocks, batch hash:%s err:%w", hash, err)
}
blockHashes := make([]common.Hash, len(wrappedBlocks))
for i, wrappedBlock := range wrappedBlocks {
blockHashes[i] = wrappedBlock.Header.Hash()
}
taskMsg := &message.TaskMsg{
ID: hash,
Type: message.ProofTypeChunk,
ChunkTaskDetail: &message.ChunkTaskDetail{BlockHashes: blockHashes},
BatchTaskDetail: nil,
}
return cp.BaseCollector.sendTask(taskMsg)
}
@@ -1,79 +0,0 @@
package proof

import (
"context"
"fmt"

"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"

"scroll-tech/common/metrics"
"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/logic/provermanager"
)

var coordinatorProversDisconnectsTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/provers/disconnects/total", metrics.ScrollRegistry)

// TaskWorker held the prover task connection
type TaskWorker struct{}

// NewTaskWorker create a task worker
func NewTaskWorker() *TaskWorker {
return &TaskWorker{}
}

// AllocTaskWorker alloc a task worker goroutine
func (t *TaskWorker) AllocTaskWorker(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
}

pubKey, err := authMsg.PublicKey()
if err != nil {
return &rpc.Subscription{}, fmt.Errorf("AllocTaskWorker auth msg public key error:%w", err)
}

identity := authMsg.Identity

// create or get the prover message channel
taskCh, err := provermanager.Manager.Register(ctx, pubKey, identity)
if err != nil {
return &rpc.Subscription{}, err
}

rpcSub := notifier.CreateSubscription()

go t.worker(rpcSub, notifier, pubKey, identity, taskCh)

log.Info("prover register", "name", identity.Name, "pubKey", pubKey, "version", identity.Version)

return rpcSub, nil
}

// TODO worker add metrics
func (t *TaskWorker) worker(rpcSub *rpc.Subscription, notifier *rpc.Notifier, pubKey string, identity *message.Identity, taskCh <-chan *message.TaskMsg) {
defer func() {
if err := recover(); err != nil {
log.Error("task worker subId:%d panic for:%v", err)
}

provermanager.Manager.FreeProver(pubKey)
log.Info("prover unregister", "name", identity.Name, "pubKey", pubKey)
}()

for {
select {
case task := <-taskCh:
notifier.Notify(rpcSub.ID, task) //nolint
case err := <-rpcSub.Err():
coordinatorProversDisconnectsTotalCounter.Inc(1)
log.Warn("client stopped the ws connection", "name", identity.Name, "pubkey", pubKey, "err", err)
return
case <-notifier.Closed():
return
}
}
}
@@ -1,60 +0,0 @@
package provermanager

import (
"time"

gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
)

type proverMetrics struct {
proverProofsVerifiedSuccessTimeTimer gethMetrics.Timer
proverProofsVerifiedFailedTimeTimer gethMetrics.Timer
proverProofsGeneratedFailedTimeTimer gethMetrics.Timer
proverProofsLastAssignedTimestampGauge gethMetrics.Gauge
proverProofsLastFinishedTimestampGauge gethMetrics.Gauge
}

func (r *proverManager) UpdateMetricProverProofsLastFinishedTimestampGauge(pk string) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
}
}
}

func (r *proverManager) UpdateMetricProverProofsLastAssignedTimestampGauge(pk string) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
}
}
}

func (r *proverManager) UpdateMetricProverProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsVerifiedSuccessTimeTimer.Update(d)
}
}
}

func (r *proverManager) UpdateMetricProverProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsVerifiedFailedTimeTimer.Update(d)
}
}
}

func (r *proverManager) UpdateMetricProverProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsGeneratedFailedTimeTimer.Update(d)
}
}
}
@@ -1,203 +0,0 @@
package provermanager

import (
"context"
"crypto/rand"
"errors"
"fmt"
"math/big"
"sync"
"time"

cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"

"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/orm"
)

var (
once sync.Once
// Manager the global prover manager
Manager *proverManager
)

// ProverNode is the interface that controls the provers
type proverNode struct {
// Prover name
Name string
// Prover type
Type message.ProofType
// Prover public key
PublicKey string
// Prover version
Version string

// task channel
taskChan chan *message.TaskMsg
// session id list which delivered to prover.
TaskIDs cmap.ConcurrentMap

// Time of message creation
registerTime time.Time

metrics *proverMetrics
}

type proverManager struct {
proverPool cmap.ConcurrentMap
proverTaskOrm *orm.ProverTask
}

// InitProverManager init a prover manager
func InitProverManager(db *gorm.DB) {
once.Do(func() {
Manager = &proverManager{
proverPool: cmap.New(),
proverTaskOrm: orm.NewProverTask(db),
}
})
}

// Register the identity message to prover manager with the public key
func (r *proverManager) Register(ctx context.Context, proverPublicKey string, identity *message.Identity) (<-chan *message.TaskMsg, error) {
node, ok := r.proverPool.Get(proverPublicKey)
if !ok {
taskIDs, err := r.reloadProverAssignedTasks(ctx, proverPublicKey)
if err != nil {
return nil, fmt.Errorf("register error:%w", err)
}

rMs := &proverMetrics{
proverProofsVerifiedSuccessTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("prover/proofs/verified/success/time/%s", proverPublicKey), metrics.ScrollRegistry),
proverProofsVerifiedFailedTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("prover/proofs/verified/failed/time/%s", proverPublicKey), metrics.ScrollRegistry),
proverProofsGeneratedFailedTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("prover/proofs/generated/failed/time/%s", proverPublicKey), metrics.ScrollRegistry),
proverProofsLastAssignedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("prover/proofs/last/assigned/timestamp/%s", proverPublicKey), metrics.ScrollRegistry),
proverProofsLastFinishedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("prover/proofs/last/finished/timestamp/%s", proverPublicKey), metrics.ScrollRegistry),
}
node = &proverNode{
Name: identity.Name,
Type: identity.ProverType,
Version: identity.Version,
PublicKey: proverPublicKey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
metrics: rMs,
}
r.proverPool.Set(proverPublicKey, node)
}
prover := node.(*proverNode)
// avoid reconnection too frequently.
if time.Since(prover.registerTime) < 60 {
log.Warn("prover reconnect too frequently", "prover_name", identity.Name, "prover_type", identity.ProverType, "public key", proverPublicKey)
return nil, fmt.Errorf("prover reconnect too frequently")
}
// update register time and status
prover.registerTime = time.Now()

return prover.taskChan, nil
}

func (r *proverManager) reloadProverAssignedTasks(ctx context.Context, proverPublicKey string) (*cmap.ConcurrentMap, error) {
var assignedProverTasks []orm.ProverTask
page := 0
limit := 100
for {
page++
whereFields := make(map[string]interface{})
whereFields["proving_status"] = int16(types.ProverAssigned)
orderBy := []string{"id asc"}
offset := (page - 1) * limit
batchAssignedProverTasks, err := r.proverTaskOrm.GetProverTasks(ctx, whereFields, orderBy, offset, limit)
if err != nil {
log.Warn("reloadProverAssignedTasks get all assigned failure", "error", err)
return nil, fmt.Errorf("reloadProverAssignedTasks error:%w", err)
}
if len(batchAssignedProverTasks) < limit {
break
}
assignedProverTasks = append(assignedProverTasks, batchAssignedProverTasks...)
}

taskIDs := cmap.New()
for _, assignedProverTask := range assignedProverTasks {
if assignedProverTask.ProverPublicKey == proverPublicKey && assignedProverTask.ProvingStatus == int16(types.ProverAssigned) {
taskIDs.Set(assignedProverTask.TaskID, struct{}{})
}
}
return &taskIDs, nil
}

// SendTask send the need proved message to prover
func (r *proverManager) SendTask(proverType message.ProofType, msg *message.TaskMsg) (string, string, error) {
tmpProver := r.selectProver(proverType)
if tmpProver == nil {
return "", "", errors.New("selectProver returns nil")
}

select {
case tmpProver.taskChan <- msg:
tmpProver.TaskIDs.Set(msg.ID, struct{}{})
default:
err := fmt.Errorf("prover channel is full, proverName:%s, publicKey:%s", tmpProver.Name, tmpProver.PublicKey)
return "", "", err
}

r.UpdateMetricProverProofsLastAssignedTimestampGauge(tmpProver.PublicKey)

return tmpProver.PublicKey, tmpProver.Name, nil
}

// ExistTaskIDForProver check the task exist
func (r *proverManager) ExistTaskIDForProver(pk string, id string) bool {
node, ok := r.proverPool.Get(pk)
if !ok {
return false
}
prover := node.(*proverNode)
return prover.TaskIDs.Has(id)
}

// FreeProver free the prover with the pk key
func (r *proverManager) FreeProver(pk string) {
r.proverPool.Pop(pk)
}

// FreeTaskIDForProver free a task of the pk prover
func (r *proverManager) FreeTaskIDForProver(pk string, id string) {
if node, ok := r.proverPool.Get(pk); ok {
prover := node.(*proverNode)
prover.TaskIDs.Pop(id)
}
}

// GetNumberOfIdleProvers return the count of idle provers.
func (r *proverManager) GetNumberOfIdleProvers(proverType message.ProofType) (count int) {
for item := range r.proverPool.IterBuffered() {
prover := item.Val.(*proverNode)
if prover.TaskIDs.Count() == 0 && prover.Type == proverType {
count++
}
}
return count
}

func (r *proverManager) selectProver(proverType message.ProofType) *proverNode {
pubkeys := r.proverPool.Keys()
for len(pubkeys) > 0 {
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
if val, ok := r.proverPool.Get(pubkeys[idx.Int64()]); ok {
rn := val.(*proverNode)
if rn.TaskIDs.Count() == 0 && rn.Type == proverType {
return rn
}
}
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
}
return nil
}
151
coordinator/internal/logic/provertask/batch_prover_task.go
Normal file
@@ -0,0 +1,151 @@
package provertask

import (
"context"
"encoding/json"
"fmt"

"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"

"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)

// BatchProverTask is the prover task implementation for batch proofs
type BatchProverTask struct {
BaseProverTask
}

// NewBatchProverTask new a batch prover task
func NewBatchProverTask(cfg *config.Config, db *gorm.DB) *BatchProverTask {
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
},
}
return bp
}

// Assign loads and assigns a batch proving task
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return nil, fmt.Errorf("get public key from context failed")
}

proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return nil, fmt.Errorf("get prover name from context failed")
}

batchTasks, err := bp.batchOrm.UpdateUnassignedBatchReturning(ctx, 1)
if err != nil {
return nil, fmt.Errorf("failed to get unassigned batch proving tasks, error:%w", err)
}

if len(batchTasks) == 0 {
return nil, nil
}

if len(batchTasks) != 1 {
return nil, fmt.Errorf("get unassigned batch proving task len not 1, batch tasks:%v", batchTasks)
}

batchTask := batchTasks[0]
log.Info("start batch proof generation session", "id", batchTask.Hash)

if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
return nil, fmt.Errorf("the batch task id:%s check attempts have reached the maximum", batchTask.Hash)
}

proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: publicKey.(string),
TaskType: int16(message.ProofTypeBatch),
ProverName: proverName.(string),
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// UTC time is used here on purpose; see scroll/common/database/db.go for the reason.
AssignedAt: utils.NowUTC(),
}

// Store session info.
if err = bp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
bp.recoverProvingStatus(ctx, batchTask)
return nil, fmt.Errorf("db set session info fail, session id:%s, error:%w", proverTask.TaskID, err)
}

taskMsg, err := bp.formatProverTask(ctx, batchTask.Hash)
if err != nil {
bp.recoverProvingStatus(ctx, batchTask)
return nil, fmt.Errorf("format prover task failure, id:%s error:%w", batchTask.Hash, err)
}

return taskMsg, nil
}

func (bp *BatchProverTask) formatProverTask(ctx context.Context, taskID string) (*coordinatorType.GetTaskSchema, error) {
// get chunk from db
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, taskID)
if err != nil {
err = fmt.Errorf("failed to get chunk proofs for batch task id:%s err:%w ", taskID, err)
return nil, err
}

var chunkProofs []*message.ChunkProof
var chunkInfos []*message.ChunkInfo
for _, chunk := range chunks {
var proof message.ChunkProof
if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, taskID, chunk.Hash)
}
chunkProofs = append(chunkProofs, &proof)

chunkInfo := message.ChunkInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
PostStateRoot: common.HexToHash(chunk.StateRoot),
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
DataHash: common.HexToHash(chunk.Hash),
IsPadding: false,
}
chunkInfos = append(chunkInfos, &chunkInfo)
}

taskDetail := message.BatchTaskDetail{
ChunkInfos: chunkInfos,
ChunkProofs: chunkProofs,
}

chunkProofsBytes, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", taskID, err)
}

taskMsg := &coordinatorType.GetTaskSchema{
TaskID: taskID,
TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes),
}
return taskMsg, nil
}

// recoverProvingStatus sets the batch task's proving status back to unassigned
// when the task could not be delivered to the prover successfully.
func (bp *BatchProverTask) recoverProvingStatus(ctx *gin.Context, batchTask *orm.Batch) {
if err := bp.batchOrm.UpdateProvingStatus(ctx, batchTask.Hash, types.ProvingTaskUnassigned); err != nil {
log.Warn("failed to recover batch proving status", "hash:", batchTask.Hash, "error", err)
}
}
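A minimal sketch (not part of the diff) of how a prover client might consume the schema produced by formatProverTask above: for batch tasks, GetTaskSchema.TaskData carries a JSON-encoded message.BatchTaskDetail. The helper and the way the schema is obtained here are illustrative only; in practice the schema comes from the /coordinator/v1/get_task response.

package main

import (
	"encoding/json"
	"fmt"

	"scroll-tech/common/types/message"
	coordinatorType "scroll-tech/coordinator/internal/types"
)

// decodeBatchTask unpacks the TaskData field of a batch task schema.
func decodeBatchTask(schema *coordinatorType.GetTaskSchema) (*message.BatchTaskDetail, error) {
	if schema.TaskType != int(message.ProofTypeBatch) {
		return nil, fmt.Errorf("not a batch task, task type: %d", schema.TaskType)
	}
	var detail message.BatchTaskDetail
	if err := json.Unmarshal([]byte(schema.TaskData), &detail); err != nil {
		return nil, fmt.Errorf("decode task %s: %w", schema.TaskID, err)
	}
	return &detail, nil
}

func main() {
	// In practice this value is returned by the coordinator; here it is a stub.
	schema := &coordinatorType.GetTaskSchema{TaskType: int(message.ProofTypeBatch), TaskData: `{}`}
	detail, err := decodeBatchTask(schema)
	fmt.Println(detail, err)
}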
134
coordinator/internal/logic/provertask/chunk_prover_task.go
Normal file
@@ -0,0 +1,134 @@
package provertask

import (
"context"
"encoding/json"
"fmt"

"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"

"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)

// ChunkProverTask the chunk prover task
type ChunkProverTask struct {
BaseProverTask
}

// NewChunkProverTask new a chunk prover task
func NewChunkProverTask(cfg *config.Config, db *gorm.DB) *ChunkProverTask {
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
},
}
return cp
}

// Assign assigns a chunk proving task that still needs to be proven
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return nil, fmt.Errorf("get public key from context failed")
}

proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return nil, fmt.Errorf("get prover name from context failed")
}

// load and send chunk tasks
chunkTasks, err := cp.chunkOrm.UpdateUnassignedChunkReturning(ctx, getTaskParameter.ProverHeight, 1)
if err != nil {
return nil, fmt.Errorf("failed to get unassigned chunk proving tasks, error:%w", err)
}

if len(chunkTasks) == 0 {
return nil, nil
}

if len(chunkTasks) != 1 {
return nil, fmt.Errorf("get unassigned chunk proving task len not 1, chunk tasks:%v", chunkTasks)
}

chunkTask := chunkTasks[0]

log.Info("start chunk generation session", "id", chunkTask.Hash)

if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
return nil, fmt.Errorf("chunk proof hash id:%s check attempts have reached the maximum", chunkTask.Hash)
}

proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: publicKey.(string),
TaskType: int16(message.ProofTypeChunk),
ProverName: proverName.(string),
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// UTC time is used here on purpose; see scroll/common/database/db.go for the reason.
AssignedAt: utils.NowUTC(),
}
if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
cp.recoverProvingStatus(ctx, chunkTask)
return nil, fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, publicKey, err)
}

taskMsg, err := cp.formatProverTask(ctx, chunkTask.Hash)
if err != nil {
cp.recoverProvingStatus(ctx, chunkTask)
return nil, fmt.Errorf("format prover task failure, id:%s error:%w", chunkTask.Hash, err)
}

return taskMsg, nil
}

func (cp *ChunkProverTask) formatProverTask(ctx context.Context, hash string) (*coordinatorType.GetTaskSchema, error) {
// Get block hashes.
wrappedBlocks, wrappedErr := cp.blockOrm.GetL2BlocksByChunkHash(ctx, hash)
if wrappedErr != nil || len(wrappedBlocks) == 0 {
return nil, fmt.Errorf("failed to fetch wrapped blocks, batch hash:%s err:%w", hash, wrappedErr)
}

blockHashes := make([]common.Hash, len(wrappedBlocks))
for i, wrappedBlock := range wrappedBlocks {
blockHashes[i] = wrappedBlock.Header.Hash()
}

taskDetail := message.ChunkTaskDetail{
BlockHashes: blockHashes,
}
blockHashesBytes, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", hash, err)
}

proverTaskSchema := &coordinatorType.GetTaskSchema{
TaskID: hash,
TaskType: int(message.ProofTypeChunk),
TaskData: string(blockHashesBytes),
}

return proverTaskSchema, nil
}

// recoverProvingStatus sets the chunk task's proving status back to unassigned
// when the task could not be delivered to the prover successfully.
func (cp *ChunkProverTask) recoverProvingStatus(ctx *gin.Context, chunkTask *orm.Chunk) {
if err := cp.chunkOrm.UpdateProvingStatus(ctx, chunkTask.Hash, types.ProvingTaskUnassigned); err != nil {
log.Warn("failed to recover chunk proving status", "hash:", chunkTask.Hash, "error", err)
}
}
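A companion sketch to the batch example above (not part of the diff): for chunk tasks, TaskData is simply the JSON-encoded message.ChunkTaskDetail built by formatProverTask, i.e. the ordered block hashes the prover must execute. The helper below mirrors decodeBatchTask and assumes the same imports as the previous sketch.

// decodeChunkTask unpacks the TaskData field of a chunk task schema.
func decodeChunkTask(schema *coordinatorType.GetTaskSchema) (*message.ChunkTaskDetail, error) {
	if schema.TaskType != int(message.ProofTypeChunk) {
		return nil, fmt.Errorf("not a chunk task, task type: %d", schema.TaskType)
	}
	var detail message.ChunkTaskDetail
	if err := json.Unmarshal([]byte(schema.TaskData), &detail); err != nil {
		return nil, fmt.Errorf("decode task %s: %w", schema.TaskID, err)
	}
	// detail.BlockHashes now lists the blocks of the chunk to be proven.
	return &detail, nil
}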
@@ -1,8 +1,9 @@
package collector
package provertask

import (
"context"

"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
@@ -12,28 +13,19 @@ import (
"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)

const (
// BatchCollectorName the name of batch collector
BatchCollectorName = "batch_collector"
// ChunkCollectorName the name of chunk collector
ChunkCollectorName = "chunk_collector"
)

var coordinatorSessionsTimeoutTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/sessions/timeout/total", metrics.ScrollRegistry)

// Collector the interface of a collector who send data to prover
type Collector interface {
Name() string
Collect(ctx context.Context) error
// ProverTask the interface of a collector who send data to prover
type ProverTask interface {
Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
}

// BaseCollector a base collector which contain series functions
type BaseCollector struct {
// BaseProverTask a base prover task which contain series functions
type BaseProverTask struct {
cfg *config.Config
ctx context.Context
db *gorm.DB
@@ -45,7 +37,7 @@ type BaseCollector struct {
}

// checkAttempts use the count of prover task info to check the attempts
func (b *BaseCollector) checkAttemptsExceeded(hash string, taskType message.ProofType) bool {
func (b *BaseProverTask) checkAttemptsExceeded(hash string, taskType message.ProofType) bool {
whereFields := make(map[string]interface{})
whereFields["task_id"] = hash
whereFields["task_type"] = int16(taskType)
@@ -55,17 +47,11 @@ func (b *BaseCollector) checkAttemptsExceeded(hash string, taskType message.Proo
return true
}

if len(proverTasks) >= int(b.cfg.ProverManagerConfig.SessionAttempts) {
if len(proverTasks) >= int(b.cfg.ProverManager.SessionAttempts) {
coordinatorSessionsTimeoutTotalCounter.Inc(1)

log.Warn("proof generation prover task %s ended because reach the max attempts", hash)

for _, proverTask := range proverTasks {
if types.ProvingStatus(proverTask.ProvingStatus) == types.ProvingTaskFailed {
provermanager.Manager.FreeTaskIDForProver(proverTask.ProverPublicKey, hash)
}
}

transErr := b.db.Transaction(func(tx *gorm.DB) error {
switch message.ProofType(proverTasks[0].TaskType) {
case message.ProofTypeChunk:
@@ -89,29 +75,3 @@ func (b *BaseCollector) checkAttemptsExceeded(hash string, taskType message.Proo
}
return true
}

func (b *BaseCollector) sendTask(taskMsg *message.TaskMsg) ([]*coordinatorType.ProverStatus, error) {
var err error
var proverStatusList []*coordinatorType.ProverStatus
for i := uint8(0); i < b.cfg.ProverManagerConfig.ProversPerSession; i++ {
proverPubKey, proverName, sendErr := provermanager.Manager.SendTask(taskMsg.Type, taskMsg)
if sendErr != nil {
err = sendErr
continue
}

provermanager.Manager.UpdateMetricProverProofsLastAssignedTimestampGauge(proverPubKey)

proverStatus := &coordinatorType.ProverStatus{
PublicKey: proverPubKey,
Name: proverName,
Status: types.ProverAssigned,
}
proverStatusList = append(proverStatusList, proverStatus)
}

if err != nil {
return nil, err
}
return proverStatusList, nil
}
@@ -1,4 +1,4 @@
package proof
package submitproof

import (
"context"
@@ -6,6 +6,7 @@ import (
"fmt"
"time"

"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
@@ -15,9 +16,9 @@ import (
"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)

var (
@@ -37,25 +38,25 @@ var (
ErrValidatorFailureProverInfoHasProofValid = errors.New("validator failure prover task info has proof valid")
)

// ZKProofReceiver the proof receiver
type ZKProofReceiver struct {
// ProofReceiverLogic the proof receiver logic
type ProofReceiverLogic struct {
chunkOrm *orm.Chunk
batchOrm *orm.Batch
proverTaskOrm *orm.ProverTask

db *gorm.DB
cfg *config.ProverManagerConfig
cfg *config.ProverManager

verifier *verifier.Verifier
}

// NewZKProofReceiver create a proof receiver
func NewZKProofReceiver(cfg *config.ProverManagerConfig, db *gorm.DB) *ZKProofReceiver {
// NewSubmitProofReceiverLogic create a proof receiver logic
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB) *ProofReceiverLogic {
vf, err := verifier.NewVerifier(cfg.Verifier)
if err != nil {
panic("proof receiver new verifier failure")
}
return &ZKProofReceiver{
return &ProofReceiverLogic{
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
@@ -70,9 +71,11 @@ func NewZKProofReceiver(cfg *config.ProverManagerConfig, db *gorm.DB) *ZKProofRe
// HandleZkProof handle a ZkProof submitted from a prover.
// For now only proving/verifying error will lead to setting status as skipped.
// db/unmarshal errors will not because they are errors on the business logic side.
func (m *ZKProofReceiver) HandleZkProof(ctx context.Context, proofMsg *message.ProofMsg) error {
pk, _ := proofMsg.PublicKey()
provermanager.Manager.UpdateMetricProverProofsLastFinishedTimestampGauge(pk)
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg) error {
pk := ctx.GetString(coordinatorType.PublicKey)
if len(pk) == 0 {
return fmt.Errorf("get public key from context failed")
}

proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, proofMsg.ID, pk)
if proverTask == nil || err != nil {
@@ -84,7 +87,7 @@ func (m *ZKProofReceiver) HandleZkProof(ctx context.Context, proofMsg *message.P
if errors.Is(err, ErrValidatorFailureProofMsgStatusNotOk) {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
}
return nil
return err
}

proofTime := time.Since(proverTask.CreatedAt)
@@ -131,34 +134,29 @@ func (m *ZKProofReceiver) HandleZkProof(ctx context.Context, proofMsg *message.P
}

if verifyErr != nil || !success {
if verifyErr != nil {
// TODO: this is only a temp workaround for testnet, we should return err in real cases
log.Error("failed to verify zk proof", "proof id", proofMsg.ID, "prover pk", pk, "prove type",
proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
}
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)

// TODO: Prover needs to be slashed if proof is invalid.
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)

provermanager.Manager.UpdateMetricProverProofsVerifiedFailedTimeTimer(pk, proofTime)

log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
return nil

if verifyErr == nil {
verifyErr = fmt.Errorf("verification succeeded and it's an invalid proof")
}
return verifyErr
}

if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg); err != nil {
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg.Type)
return err
}

coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
provermanager.Manager.UpdateMetricProverProofsVerifiedSuccessTimeTimer(pk, proofTime)

return nil
}

func (m *ZKProofReceiver) checkAreAllChunkProofsReady(ctx context.Context, chunkHash string) error {
func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, chunkHash string) error {
batchHash, err := m.chunkOrm.GetChunkBatchHash(ctx, chunkHash)
if err != nil {
return err
@@ -177,7 +175,7 @@ func (m *ZKProofReceiver) checkAreAllChunkProofsReady(ctx context.Context, chunk
return nil
}

func (m *ZKProofReceiver) validator(proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
func (m *ProofReceiverLogic) validator(proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
// Ensure this prover is eligible to participate in the prover task.
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
@@ -198,8 +196,6 @@ func (m *ZKProofReceiver) validator(proverTask *orm.ProverTask, pk string, proof
if proofMsg.Status != message.StatusOk {
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)

provermanager.Manager.UpdateMetricProverProofsGeneratedFailedTimeTimer(pk, proofTime)

log.Info("proof generated by prover failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", proofMsg.Error)
return ErrValidatorFailureProofMsgStatusNotOk
@@ -207,31 +203,29 @@ func (m *ZKProofReceiver) validator(proverTask *orm.ProverTask, pk string, proof
return nil
}

func (m *ZKProofReceiver) proofFailure(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskFailed); err != nil {
log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}

func (m *ZKProofReceiver) proofRecover(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
}
}

func (m *ZKProofReceiver) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) error {
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) error {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg.Type, types.ProvingTaskVerified); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "pubKey", pubKey, "error", err)
return err
}

provermanager.Manager.FreeTaskIDForProver(pubKey, hash)
return nil
}

// UpdateProofStatus update the chunk/batch task and session info status
func (m *ZKProofReceiver) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsgType message.ProofType, status types.ProvingStatus) error {
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsgType message.ProofType, status types.ProvingStatus) error {
// if the prover task failure type is SessionInfoFailureTimeout,
// just skip update the status because the proof result come too late.
if m.checkIsTimeoutFailure(ctx, hash, proverPublicKey) {
@@ -285,7 +279,7 @@ func (m *ZKProofReceiver) updateProofStatus(ctx context.Context, hash string, pr
return nil
}

func (m *ZKProofReceiver) checkIsTaskSuccess(ctx context.Context, hash string, proofType message.ProofType) bool {
func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string, proofType message.ProofType) bool {
var provingStatus types.ProvingStatus
var err error

@@ -305,7 +299,7 @@ func (m *ZKProofReceiver) checkIsTaskSuccess(ctx context.Context, hash string, p
return provingStatus == types.ProvingTaskVerified
}

func (m *ZKProofReceiver) checkIsTimeoutFailure(ctx context.Context, hash, proverPublicKey string) bool {
func (m *ProofReceiverLogic) checkIsTimeoutFailure(ctx context.Context, hash, proverPublicKey string) bool {
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, hash, proverPublicKey)
if err != nil {
return false
@@ -14,44 +14,68 @@ import (
"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/verifier"
)

var (
paramsPath = flag.String("params", "/assets/test_params", "params dir")
assetsPath = flag.String("assets", "/assets/test_assets", "assets dir")
batchProofPath = flag.String("batch_proof", "/assets/proof_data/batch_proof", "batch proof file path")
chunkProofPath = flag.String("chunk_proof", "/assets/proof_data/chunk_proof", "chunk proof file path")
paramsPath = flag.String("params", "/assets/test_params", "params dir")
assetsPath = flag.String("assets", "/assets/test_assets", "assets dir")
batchProofPath = flag.String("batch_proof", "/assets/proof_data/batch_proof", "batch proof file path")
chunkProofPath1 = flag.String("chunk_proof1", "/assets/proof_data/chunk_proof1", "chunk proof file path 1")
chunkProofPath2 = flag.String("chunk_proof2", "/assets/proof_data/chunk_proof2", "chunk proof file path 2")
)

func TestFFI(t *testing.T) {
as := assert.New(t)

cfg := &config.VerifierConfig{
MockMode: false,
ParamsPath: *paramsPath,
AssetsPath: *assetsPath,
}
v, err := NewVerifier(cfg)

v, err := verifier.NewVerifier(cfg)
as.NoError(err)

chunkProofFile, err := os.Open(*chunkProofPath)
chunkProof1 := readChunkProof(*chunkProofPath1, as)
chunkOk1, err := v.VerifyChunkProof(chunkProof1)
as.NoError(err)
chunkProofByt, err := io.ReadAll(chunkProofFile)
as.NoError(err)
chunkProof := &message.ChunkProof{}
as.NoError(json.Unmarshal(chunkProofByt, chunkProof))
as.True(chunkOk1)
t.Log("Verified chunk proof 1")

chunkOk, err := v.VerifyChunkProof(chunkProof)
chunkProof2 := readChunkProof(*chunkProofPath2, as)
chunkOk2, err := v.VerifyChunkProof(chunkProof2)
as.NoError(err)
as.True(chunkOk)

batchProofFile, err := os.Open(*batchProofPath)
as.NoError(err)
batchProofByt, err := io.ReadAll(batchProofFile)
as.NoError(err)
batchProof := &message.BatchProof{}
as.NoError(json.Unmarshal(batchProofByt, batchProof))
as.True(chunkOk2)
t.Log("Verified chunk proof 2")

batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof)
as.NoError(err)
as.True(batchOk)
t.Log("Verified batch proof")
}

func readBatchProof(filePat string, as *assert.Assertions) *message.BatchProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)

proof := &message.BatchProof{}
as.NoError(json.Unmarshal(byt, proof))

return proof
}

func readChunkProof(filePat string, as *assert.Assertions) *message.ChunkProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)

proof := &message.ChunkProof{}
as.NoError(json.Unmarshal(byt, proof))

return proof
}
50
coordinator/internal/middleware/challenge_jwt.go
Normal file
@@ -0,0 +1,50 @@
package middleware

import (
"crypto/rand"
"encoding/base64"
"time"

"github.com/gin-gonic/gin"

jwt "github.com/appleboy/gin-jwt/v2"
"github.com/scroll-tech/go-ethereum/log"

"scroll-tech/coordinator/internal/config"
)

// ChallengeMiddleware jwt challenge middleware
func ChallengeMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
Authenticator: func(c *gin.Context) (interface{}, error) {
return nil, nil
},
PayloadFunc: func(data interface{}) jwt.MapClaims {
b := make([]byte, 32)
_, err := rand.Read(b)
if err != nil {
return jwt.MapClaims{}
}
return jwt.MapClaims{
"random": base64.URLEncoding.EncodeToString(b),
}
},
Unauthorized: unauthorized,
Key: []byte(conf.Auth.Secret),
Timeout: time.Second * time.Duration(conf.Auth.ChallengeExpireDurationSec),
TokenLookup: "header: Authorization, query: token, cookie: jwt",
TokenHeadName: "Bearer",
TimeFunc: time.Now,
LoginResponse: loginResponse,
})

if err != nil {
log.Crit("new jwt middleware panic", "error", err)
}

if errInit := jwtMiddleware.MiddlewareInit(); errInit != nil {
log.Crit("init jwt middleware panic", "error", errInit)
}

return jwtMiddleware
}
34
coordinator/internal/middleware/jwt.go
Normal file
@@ -0,0 +1,34 @@
package middleware

import (
"errors"
"strings"
"time"

jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"

"scroll-tech/common/types"

coordinatorType "scroll-tech/coordinator/internal/types"
)

func unauthorized(c *gin.Context, _ int, message string) {
lower := strings.ToLower(message)
var errCode int
err := errors.New(lower)
if jwt.ErrExpiredToken.Error() == lower {
errCode = types.ErrJWTTokenExpired
} else {
errCode = types.ErrJWTCommonErr
}
coordinatorType.RenderJSON(c, errCode, err, nil)
}

func loginResponse(c *gin.Context, code int, message string, time time.Time) {
resp := coordinatorType.LoginSchema{
Time: time,
Token: message,
}
coordinatorType.RenderJSON(c, types.Success, nil, resp)
}
39
coordinator/internal/middleware/login_jwt.go
Normal file
@@ -0,0 +1,39 @@
package middleware

import (
"time"

jwt "github.com/appleboy/gin-jwt/v2"
"github.com/scroll-tech/go-ethereum/log"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/types"
)

// LoginMiddleware jwt auth middleware
func LoginMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
PayloadFunc: api.Auth.PayloadFunc,
IdentityHandler: api.Auth.IdentityHandler,
IdentityKey: types.PublicKey,
Key: []byte(conf.Auth.Secret),
Timeout: time.Second * time.Duration(conf.Auth.LoginExpireDurationSec),
Authenticator: api.Auth.Login,
Unauthorized: unauthorized,
TokenLookup: "header: Authorization, query: token, cookie: jwt",
TokenHeadName: "Bearer",
TimeFunc: time.Now,
LoginResponse: loginResponse,
})

if err != nil {
log.Crit("new jwt middleware panic", "error", err)
}

if errInit := jwtMiddleware.MiddlewareInit(); errInit != nil {
log.Crit("init jwt middleware panic", "error", errInit)
}

return jwtMiddleware
}
@@ -7,12 +7,13 @@ import (
"fmt"
"time"

"scroll-tech/common/types"
"scroll-tech/common/types/message"

"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"gorm.io/gorm/clause"

"scroll-tech/common/types"
"scroll-tech/common/types/message"
)

const defaultBatchHeaderVersion = 0
@@ -273,3 +274,28 @@ func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *messa
}
return nil
}

// UpdateUnassignedBatchReturning update the unassigned batch and return the update record
func (o *Batch) UpdateUnassignedBatchReturning(ctx context.Context, limit int) ([]*Batch, error) {
if limit < 0 {
return nil, errors.New("limit must not be smaller than zero")
}
if limit == 0 {
return nil, nil
}

db := o.db.WithContext(ctx)

subQueryDB := db.Model(&Batch{}).Select("index")
subQueryDB = subQueryDB.Where("proving_status = ? AND chunk_proofs_status = ?", types.ProvingTaskUnassigned, types.ChunkProofsStatusReady)
subQueryDB = subQueryDB.Order("index ASC")
subQueryDB = subQueryDB.Limit(limit)

var batches []*Batch
db = db.Model(&batches).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
return nil, fmt.Errorf("Batch.UpdateUnassignedBatchReturning error: %w", err)
}
return batches, nil
}
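Not part of the diff: an approximate raw-SQL equivalent of the GORM chain above, useful for reasoning about what the statement does — pick the oldest ready, unassigned batch, flip it to assigned, and return the updated row. It assumes a PostgreSQL-style RETURNING clause and the "batch" table, and is written as if it lived in the same orm package (so Batch, types and gorm are already in scope).

// updateUnassignedBatchReturningRaw is an illustrative raw-SQL twin of
// UpdateUnassignedBatchReturning; not used anywhere in the codebase.
func updateUnassignedBatchReturningRaw(db *gorm.DB, limit int) ([]*Batch, error) {
	var batches []*Batch
	err := db.Raw(`
		UPDATE batch
		SET proving_status = ?
		WHERE index = (
			SELECT index FROM batch
			WHERE proving_status = ? AND chunk_proofs_status = ?
			ORDER BY index ASC
			LIMIT ?
		)
		RETURNING *`,
		types.ProvingTaskAssigned, types.ProvingTaskUnassigned, types.ChunkProofsStatusReady, limit,
	).Scan(&batches).Error
	return batches, err
}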
57
coordinator/internal/orm/challenge.go
Normal file
@@ -0,0 +1,57 @@
package orm

import (
"context"
"fmt"
"time"

"gorm.io/gorm"
)

// Challenge store the challenge string from prover client
type Challenge struct {
db *gorm.DB `gorm:"column:-"`

ID int64 `json:"id" gorm:"column:id"`
Challenge string `json:"challenge" gorm:"column:challenge"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at"`
}

// NewChallenge creates a new challenge instance.
func NewChallenge(db *gorm.DB) *Challenge {
return &Challenge{db: db}
}

// TableName returns the name of the "challenge" table.
func (r *Challenge) TableName() string {
return "challenge"
}

// InsertChallenge checks whether the challenge string already exists; if it does,
// an error is returned, otherwise the string is inserted.
func (r *Challenge) InsertChallenge(ctx context.Context, challengeString string) error {
challenge := Challenge{
Challenge: challengeString,
}

db := r.db.WithContext(ctx)
db = db.Model(&Challenge{})
db = db.Where("challenge = ?", challengeString)
result := db.FirstOrCreate(&challenge)
if result.Error != nil {
return result.Error
}

if result.RowsAffected == 1 {
return nil
}

if result.RowsAffected == 0 {
return fmt.Errorf("the challenge string:%s has been used", challengeString)
}

return fmt.Errorf("insert challenge string affected rows more than 1")
}
@@ -7,11 +7,12 @@ import (
"fmt"
"time"

"scroll-tech/common/types"
"scroll-tech/common/types/message"

"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"gorm.io/gorm/clause"

"scroll-tech/common/types"
"scroll-tech/common/types/message"
)

// Chunk represents a chunk of blocks in the database.
@@ -119,7 +120,7 @@ func (o *Chunk) GetProofsByBatchHash(ctx context.Context, batchHash string) ([]*
for _, chunk := range chunks {
var proof message.ChunkProof
if err := json.Unmarshal(chunk.Proof, &proof); err != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash error: %w, batch hash: %v, chunk hash: %v", err, batchHash, chunk.Hash)
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", err, batchHash, chunk.Hash)
}
proofs = append(proofs, &proof)
}
@@ -339,3 +340,32 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
}
return nil
}

// UpdateUnassignedChunkReturning update the unassigned chunk whose end_block_number <= height and return the updated record
func (o *Chunk) UpdateUnassignedChunkReturning(ctx context.Context, height, limit int) ([]*Chunk, error) {
if height <= 0 {
return nil, errors.New("Chunk.UpdateUnassignedChunkReturning error: height must be larger than zero")
}
if limit < 0 {
return nil, errors.New("Chunk.UpdateUnassignedChunkReturning error: limit must not be smaller than zero")
}
if limit == 0 {
return nil, nil
}

db := o.db.WithContext(ctx)

subQueryDB := db.Model(&Chunk{}).Select("index")
subQueryDB = subQueryDB.Where("proving_status = ?", types.ProvingTaskUnassigned)
subQueryDB = subQueryDB.Where("end_block_number <= ?", height)
subQueryDB = subQueryDB.Order("index ASC")
subQueryDB = subQueryDB.Limit(limit)

var chunks []*Chunk
db = db.Model(&chunks).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
return nil, fmt.Errorf("Chunk.UpdateUnassignedChunkReturning error: %w", err)
}
return chunks, nil
}
@@ -29,6 +29,7 @@ type L2Block struct {
TxNum uint32 `json:"tx_num" gorm:"tx_num"`
GasUsed uint64 `json:"gas_used" gorm:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
RowConsumption string `json:"row_consumption" gorm:"row_consumption"`

// chunk
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
@@ -54,7 +55,7 @@ func (*L2Block) TableName() string {
func (o *L2Block) GetL2BlocksByChunkHash(ctx context.Context, chunkHash string) ([]*types.WrappedBlock, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_root")
db = db.Select("header, transactions, withdraw_root, row_consumption")
db = db.Where("chunk_hash = ?", chunkHash)
db = db.Order("number ASC")

@@ -77,6 +78,10 @@ func (o *L2Block) GetL2BlocksByChunkHash(ctx context.Context, chunkHash string)
}

wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
if err := json.Unmarshal([]byte(v.RowConsumption), &wrappedBlock.RowConsumption); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksByChunkHash error: %w, chunk hash: %v", err, chunkHash)
}

wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
}

@@ -100,6 +105,12 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}

rc, err := json.Marshal(block.RowConsumption)
if err != nil {
log.Error("failed to marshal RowConsumption", "hash", block.Header.Hash().String(), "err", err)
return fmt.Errorf("L2Block.InsertL2Blocks error: %w, block hash: %v", err, block.Header.Hash().String())
}

l2Block := L2Block{
Number: block.Header.Number.Uint64(),
Hash: block.Header.Hash().String(),
@@ -111,6 +122,7 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time,
Header: string(header),
RowConsumption: string(rc),
}
l2Blocks = append(l2Blocks, l2Block)
}
@@ -123,3 +135,18 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
}
return nil
}

// UpdateChunkHashInRange updates the chunk hash for l2 blocks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end indices.
// for unit test
func (o *L2Block) UpdateChunkHashInRange(ctx context.Context, startNumber uint64, endNumber uint64, chunkHash string) error {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Where("number >= ? AND number <= ?", startNumber, endNumber)

if err := db.Update("chunk_hash", chunkHash).Error; err != nil {
return fmt.Errorf("L2Block.UpdateChunkHashInRange error: %w, start number: %v, end number: %v, chunk hash: %v",
err, startNumber, endNumber, chunkHash)
}
return nil
}
34
coordinator/internal/route/route.go
Normal file
@@ -0,0 +1,34 @@
package route

import (
"github.com/gin-gonic/gin"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/middleware"
)

// Route register route for coordinator
func Route(router *gin.Engine, cfg *config.Config) {
r := router.Group("coordinator")
v1(r, cfg)
}

func v1(router *gin.RouterGroup, conf *config.Config) {
r := router.Group("/v1")

r.GET("/health", api.HealthCheck.HealthCheck)

challengeMiddleware := middleware.ChallengeMiddleware(conf)
r.GET("/challenge", challengeMiddleware.LoginHandler)

loginMiddleware := middleware.LoginMiddleware(conf)
r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)

// need jwt token api
r.Use(loginMiddleware.MiddlewareFunc())
{
r.POST("/get_task", api.GetTask.GetTasks)
r.POST("/submit_proof", api.SubmitProof.SubmitProof)
}
}
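Illustrative client-side sketch (not part of the diff) of the call sequence these routes imply: GET /challenge for a short-lived challenge token, POST /login with a signed LoginParameter using that token, then POST /get_task with the returned login token. The coordinator address, the response envelope produced by coordinatorType.RenderJSON (not shown in this diff), and the signature value are all assumptions or placeholders.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

const base = "http://localhost:8390/coordinator/v1" // hypothetical coordinator address

// call issues a JSON request with an optional Bearer token and decodes the body
// into a generic map, since the exact response envelope is not shown in this diff.
func call(method, path, token string, body any) (map[string]any, error) {
	var buf bytes.Buffer
	if body != nil {
		if err := json.NewEncoder(&buf).Encode(body); err != nil {
			return nil, err
		}
	}
	req, err := http.NewRequest(method, base+path, &buf)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	if token != "" {
		req.Header.Set("Authorization", "Bearer "+token)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	out := map[string]any{}
	return out, json.NewDecoder(resp.Body).Decode(&out)
}

func main() {
	// 1. Challenge token: ChallengeMiddleware's LoginHandler issues a JWT whose
	//    claims carry a random string (response field name assumed).
	challenge, _ := call(http.MethodGet, "/challenge", "", nil)
	challengeToken := fmt.Sprint(challenge["token"])

	// 2. Login with a LoginParameter; signing the message with the prover key is elided.
	login := map[string]any{
		"message": map[string]any{
			"challenge":      challengeToken,
			"prover_version": "v1.0.0",
			"prover_name":    "example-prover",
		},
		"signature": "0x...", // placeholder
	}
	session, _ := call(http.MethodPost, "/login", challengeToken, login)
	loginToken := fmt.Sprint(session["token"])

	// 3. Ask for work with the login token (GetTaskParameter fields).
	task, _ := call(http.MethodPost, "/get_task", loginToken,
		map[string]any{"prover_height": 100, "task_type": 1})
	fmt.Println(task)
}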
31
coordinator/internal/types/auth.go
Normal file
@@ -0,0 +1,31 @@
package types

import "time"

const (
// PublicKey the public key for context
PublicKey = "public_key"
// ProverName the prover name key for context
ProverName = "prover_name"
// ProverVersion the prover version for context
ProverVersion = "prover_version"
)

// Message the login message struct
type Message struct {
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
}

// LoginParameter for /login api
type LoginParameter struct {
Message Message `form:"message" json:"message" binding:"required"`
Signature string `form:"signature" json:"signature" binding:"required"`
}

// LoginSchema for /login response
type LoginSchema struct {
Time time.Time `json:"time"`
Token string `json:"token"`
}
14
coordinator/internal/types/get_task.go
Normal file
@@ -0,0 +1,14 @@
package types

// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
ProverHeight int `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
}

// GetTaskSchema the schema data return to prover for get prover task
type GetTaskSchema struct {
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
TaskData string `json:"task_data"`
}
Some files were not shown because too many files have changed in this diff.