Compare commits


3 Commits

Author SHA1 Message Date
colin
faec817d34 feat(coordinator): upgrade coordinator to rollup v2 (#610)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-07-07 16:23:26 +02:00
ChuhanJin
72ef2cc80e fix(bridge-history-api): fix insert string slice and db type (#614)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2023-07-06 10:48:36 +08:00
Ahmed Castro
8f0690be41 refactor: turn L1ERC721Gateway and L1ERC1155Gateway internal functions virtual (#552)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Xi Lin <zimpha@gmail.com>
2023-07-05 09:48:36 +02:00
100 changed files with 1891 additions and 3965 deletions

View File

@@ -8,11 +8,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
backendabi "bridge-history-api/abi"
"bridge-history-api/db"
"bridge-history-api/db/orm"
"bridge-history-api/utils"
)
@@ -100,19 +98,11 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
log.Warn("Failed to get l1 event logs", "err", err)
return err
}
depositL1CrossMsgs, msgHashes, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
depositL1CrossMsgs, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
if err != nil {
log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
for i := range depositL1CrossMsgs {
for _, msgHash := range msgHashes {
if depositL1CrossMsgs[i].Layer1Hash == msgHash.TxHash.Hex() {
depositL1CrossMsgs[i].MsgHash = msgHash.MsgHash.Hex()
break
}
}
}
dbTx, err := database.Beginx()
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
@@ -161,22 +151,12 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
log.Warn("Failed to get l2 event logs", "err", err)
return err
}
depositL2CrossMsgs, relayedMsg, L2SentMsgWrappers, err := utils.ParseBackendL2EventLogs(logs)
depositL2CrossMsgs, relayedMsg, l2SentMsgs, err := utils.ParseBackendL2EventLogs(logs)
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
var l2SentMsgs []*orm.L2SentMsg
for i := range depositL2CrossMsgs {
for _, l2SentMsgWrapper := range L2SentMsgWrappers {
if depositL2CrossMsgs[i].Layer2Hash == l2SentMsgWrapper.TxHash.Hex() {
depositL2CrossMsgs[i].MsgHash = l2SentMsgWrapper.L2SentMsg.MsgHash
l2SentMsgWrapper.L2SentMsg.TxSender = depositL2CrossMsgs[i].Sender
l2SentMsgs = append(l2SentMsgs, l2SentMsgWrapper.L2SentMsg)
break
}
}
}
dbTx, err := database.Beginx()
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
@@ -194,12 +174,10 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
}
if len(l2SentMsgs) > 0 {
err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs)
if err != nil {
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
}
err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs)
if err != nil {
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
}
err = dbTx.Commit()
@@ -251,25 +229,3 @@ func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, datab
}
return nil
}
func updateL1CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
for _, msgHash := range msgHashes {
err := database.UpdateL1CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
if err != nil {
log.Error("updateL1CrossMsgMsgHash: can not update layer1 cross msg MsgHash", "layer1 hash", msgHash.TxHash, "err", err)
continue
}
}
return nil
}
func updateL2CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
for _, msgHash := range msgHashes {
err := database.UpdateL2CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
if err != nil {
log.Error("updateL2CrossMsgMsgHash: can not update layer2 cross msg MsgHash", "layer2 hash", msgHash.TxHash, "err", err)
continue
}
}
return nil
}
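
With msg_hash now set during parsing, the matching loop and the two update helpers removed above become unnecessary. A minimal sketch of the resulting flow, assuming the signatures shown in this diff (BatchInsertRelayedMsgDBTx is an assumed helper name, not confirmed here):

package fetcher

import (
	"github.com/ethereum/go-ethereum/core/types"

	"bridge-history-api/db"
	"bridge-history-api/utils"
)

// l1FetchAndSave sketches the simplified flow: ParseBackendL1EventLogs now
// returns CrossMsgs with MsgHash already populated, so everything can be
// inserted inside a single transaction with no post-hoc hash updates.
func l1FetchAndSave(database db.OrmFactory, logs []types.Log) error {
	msgs, relayedMsgs, err := utils.ParseBackendL1EventLogs(logs)
	if err != nil {
		return err
	}
	dbTx, err := database.Beginx()
	if err != nil {
		return err
	}
	if err = database.BatchInsertL1CrossMsgDBTx(dbTx, msgs); err != nil {
		dbTx.Rollback()
		return err
	}
	// assumed helper name, not shown in this diff
	if err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsgs); err != nil {
		dbTx.Rollback()
		return err
	}
	return dbTx.Commit()
}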

View File

@@ -3,7 +3,7 @@
create table cross_message
(
id BIGSERIAL PRIMARY KEY,
msg_hash VARCHAR NOT NULL DEFAULT '',
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
@@ -14,10 +14,8 @@ create table cross_message
layer2_token VARCHAR NOT NULL DEFAULT '',
asset SMALLINT NOT NULL,
msg_type SMALLINT NOT NULL,
-- use array to support nft bridge
token_ids VARCHAR[] NOT NULL DEFAULT '{}',
-- use array to support nft bridge
token_amounts VARCHAR[] NOT NULL DEFAULT '{}',
token_ids TEXT NOT NULL DEFAULT '',
token_amounts TEXT NOT NULL DEFAULT '',
block_timestamp TIMESTAMP(0) DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
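
Since token_ids and token_amounts are now plain TEXT, the Go side has to flatten string slices itself. A minimal sketch of one plausible encoding; the comma separator is an assumption, not something this diff confirms:

package main

import (
	"fmt"
	"strings"
)

// joinTokenIDs flattens a []string into a single TEXT column value.
func joinTokenIDs(ids []string) string {
	return strings.Join(ids, ",")
}

// splitTokenIDs recovers the slice from the stored TEXT value.
func splitTokenIDs(s string) []string {
	if s == "" {
		return nil
	}
	return strings.Split(s, ",")
}

func main() {
	stored := joinTokenIDs([]string{"1", "2", "3"})
	fmt.Println(stored)                // 1,2,3
	fmt.Println(splitTokenIDs(stored)) // [1 2 3]
}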

View File

@@ -3,7 +3,7 @@
create table l2_sent_msg
(
id BIGSERIAL PRIMARY KEY,
tx_sender VARCHAR NOT NULL,
original_sender VARCHAR NOT NULL DEFAULT '',
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,

View File

@@ -50,8 +50,8 @@ type CrossMsg struct {
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Layer1Token string `json:"layer1_token" db:"layer1_token"`
Layer2Token string `json:"layer2_token" db:"layer2_token"`
TokenIDs []string `json:"token_ids" db:"token_ids"`
TokenAmounts []string `json:"token_amounts" db:"token_amounts"`
TokenIDs string `json:"token_ids" db:"token_ids"`
TokenAmounts string `json:"token_amounts" db:"token_amounts"`
Asset int `json:"asset" db:"asset"`
MsgType int `json:"msg_type" db:"msg_type"`
Timestamp *time.Time `json:"timestamp" db:"block_timestamp"`

View File

@@ -65,6 +65,7 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
"target": msg.Target,
"amount": msg.Amount,
"asset": msg.Asset,
"msg_hash": msg.MsgHash,
"layer1_hash": msg.Layer1Hash,
"layer1_token": msg.Layer1Token,
"layer2_token": msg.Layer2Token,
@@ -72,7 +73,7 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
"msg_type": Layer1Msg,
}
}
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_ids, amount, msg_type) values(:height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type);`, messageMaps)
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, amount, asset, msg_hash, layer1_hash, layer1_token, layer2_token, token_ids, msg_type) values(:height, :sender, :target, :amount, :asset, :msg_hash, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :msg_type);`, messageMaps)
if err != nil {
log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "err", err)
return err
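
For reference, a small self-contained sketch of how sqlx's NamedExec performs this kind of batch insert from a slice of maps; the column set and DSN here are illustrative:

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func main() {
	// Placeholder DSN; adjust for a real database.
	db, err := sqlx.Connect("postgres", "postgres://user:pass@localhost/bridge?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	rows := []map[string]interface{}{
		{"msg_hash": "0xaaa", "height": 1, "sender": "0x1", "target": "0x2"},
		{"msg_hash": "0xbbb", "height": 2, "sender": "0x3", "target": "0x4"},
	}
	// NamedExec expands a slice of maps into a multi-row INSERT,
	// binding each :name placeholder from the matching map key.
	_, err = db.NamedExec(`INSERT INTO cross_message (msg_hash, height, sender, target)
		VALUES (:msg_hash, :height, :sender, :target)`, rows)
	if err != nil {
		log.Fatal(err)
	}
}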

View File

@@ -71,12 +71,12 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
var err error
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"height": msg.Height,
"sender": msg.Sender,
"target": msg.Target,
"asset": msg.Asset,
"msg_hash": msg.MsgHash,
"layer2_hash": msg.Layer2Hash,
"layer1_token": msg.Layer1Token,
"layer2_token": msg.Layer2Token,
@@ -85,7 +85,7 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
"msg_type": Layer2Msg,
}
}
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_ids, amount, msg_type) values(:height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type);`, messageMaps)
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, msg_hash, layer2_hash, layer1_token, layer2_token, token_ids, amount, msg_type) values(:height, :sender, :target, :asset, :msg_hash, :layer2_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type);`, messageMaps)
if err != nil {
log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "err", err)
return err

View File

@@ -10,20 +10,20 @@ import (
)
type L2SentMsg struct {
ID uint64 `json:"id" db:"id"`
TxSender string `json:"tx_sender" db:"tx_sender"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Value string `json:"value" db:"value"`
Height uint64 `json:"height" db:"height"`
Nonce uint64 `json:"nonce" db:"nonce"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
MsgProof string `json:"msg_proof" db:"msg_proof"`
MsgData string `json:"msg_data" db:"msg_data"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
ID uint64 `json:"id" db:"id"`
OriginalSender string `json:"original_sender" db:"original_sender"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Value string `json:"value" db:"value"`
Height uint64 `json:"height" db:"height"`
Nonce uint64 `json:"nonce" db:"nonce"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
MsgProof string `json:"msg_proof" db:"msg_proof"`
MsgData string `json:"msg_data" db:"msg_data"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
}
type l2SentMsgOrm struct {
@@ -52,19 +52,19 @@ func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2Sen
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"tx_sender": msg.TxSender,
"sender": msg.Sender,
"target": msg.Target,
"value": msg.Value,
"msg_hash": msg.MsgHash,
"height": msg.Height,
"nonce": msg.Nonce,
"batch_index": msg.BatchIndex,
"msg_proof": msg.MsgProof,
"msg_data": msg.MsgData,
"original_sender": msg.OriginalSender,
"sender": msg.Sender,
"target": msg.Target,
"value": msg.Value,
"msg_hash": msg.MsgHash,
"height": msg.Height,
"nonce": msg.Nonce,
"batch_index": msg.BatchIndex,
"msg_proof": msg.MsgProof,
"msg_data": msg.MsgData,
}
}
_, err = dbTx.NamedExec(`insert into l2_sent_msg(tx_sender, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:tx_sender, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
_, err = dbTx.NamedExec(`insert into l2_sent_msg(original_sender, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:original_sender, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
if err != nil {
log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err)
return err
@@ -95,7 +95,7 @@ func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sql
}
func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {
row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE msg_proof != '' AND deleted_at IS NULL ORDER BY batch_index DESC LIMIT 1;`)
row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE batch_index != 0 AND deleted_at IS NULL ORDER BY batch_index DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {

View File

@@ -3,7 +3,6 @@ package utils
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
@@ -13,16 +12,6 @@ import (
"bridge-history-api/db/orm"
)
type MsgHashWrapper struct {
MsgHash common.Hash
TxHash common.Hash
}
type L2SentMsgWrapper struct {
L2SentMsg *orm.L2SentMsg
TxHash common.Hash
}
type CachedParsedTxCalldata struct {
CallDataIndex uint64
BatchIndices []uint64
@@ -30,13 +19,13 @@ type CachedParsedTxCalldata struct {
EndBlocks []uint64
}
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrapper, []*orm.RelayedMsg, error) {
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, error) {
// Need to use the contract ABI to parse the event logs
// Can only be tested after we have our contracts set up
var l1CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg
var msgHashes []MsgHashWrapper
var msgHash string
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1DepositETHSig:
@@ -44,7 +33,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
if err != nil {
log.Warn("Failed to unpack DepositETH event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
@@ -53,13 +42,14 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer1Hash: vlog.TxHash.Hex(),
MsgHash: msgHash,
})
case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC20 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
@@ -70,13 +60,14 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
MsgHash: msgHash,
})
case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC721 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
@@ -86,14 +77,15 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: []string{event.TokenID.String()},
TokenIDs: event.TokenID.String(),
MsgHash: msgHash,
})
case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
@@ -103,26 +95,26 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: []string{event.TokenID.String()},
TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(),
MsgHash: msgHash,
})
case backendabi.L1SentMessageEventSignature:
event := backendabi.L1SentMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
return l1CrossMsg, relayedMsgs, err
}
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
msgHashes = append(msgHashes, MsgHashWrapper{
MsgHash: msgHash,
TxHash: vlog.TxHash})
// since every deposit event is emitted after its SentMessage event, this msg_hash can be used as the next deposit event's msg_hash
msgHash = ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).Hex()
case backendabi.L1RelayedMessageEventSignature:
event := backendabi.L1RelayedMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
return l1CrossMsg, relayedMsgs, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
@@ -133,17 +125,17 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
}
}
return l1CrossMsg, msgHashes, relayedMsgs, nil
return l1CrossMsg, relayedMsgs, nil
}
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []L2SentMsgWrapper, error) {
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
// Need to use the contract ABI to parse the event logs
// Can only be tested after we have our contracts set up
var l2CrossMsg []*orm.CrossMsg
// this is used to confirm finalized L1 msgs
var relayedMsgs []*orm.RelayedMsg
var l2SentMsg []L2SentMsgWrapper
var l2SentMsgs []*orm.L2SentMsg
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2WithdrawETHSig:
@@ -151,8 +143,9 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawETH event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
@@ -160,14 +153,16 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer2Hash: vlog.TxHash.Hex(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
@@ -183,8 +178,9 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
@@ -193,15 +189,16 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: []string{event.TokenID.String()},
TokenIDs: event.TokenID.String(),
})
case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
@@ -210,7 +207,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: []string{event.TokenID.String()},
TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(),
})
case backendabi.L2SentMessageEventSignature:
@@ -218,28 +215,26 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
// since every withdraw event is emitted after its SentMessage event, this msg_hash can be used as the next withdraw event's msg_hash
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
l2SentMsg = append(l2SentMsg,
L2SentMsgWrapper{
TxHash: vlog.TxHash,
L2SentMsg: &orm.L2SentMsg{
Sender: event.Sender.Hex(),
Target: event.Target.Hex(),
Value: event.Value.String(),
MsgHash: msgHash.Hex(),
Height: vlog.BlockNumber,
Nonce: event.MessageNonce.Uint64(),
MsgData: hexutil.Encode(event.Message),
},
l2SentMsgs = append(l2SentMsgs,
&orm.L2SentMsg{
Sender: event.Sender.Hex(),
Target: event.Target.Hex(),
Value: event.Value.String(),
MsgHash: msgHash.Hex(),
Height: vlog.BlockNumber,
Nonce: event.MessageNonce.Uint64(),
MsgData: hexutil.Encode(event.Message),
})
case backendabi.L2RelayedMessageEventSignature:
event := backendabi.L2RelayedMessageEvent{}
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
@@ -249,7 +244,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
}
}
return l2CrossMsg, relayedMsgs, l2SentMsg, nil
return l2CrossMsg, relayedMsgs, l2SentMsgs, nil
}
func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
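
The rewritten parsers rely on log ordering within a transaction: the messenger's SentMessage event is emitted before the gateway event it belongs to, so the most recently computed hash can be attached to the next deposit or withdraw entry. A stripped-down sketch of that pairing, with placeholder event names:

package main

import "fmt"

// event is a stand-in for a decoded log; kind and field names are illustrative.
type event struct {
	kind string // "sent" or "withdraw"
	hash string // message hash, set for "sent" entries
	from string // sender, set for "withdraw" entries
}

func main() {
	logs := []event{
		{kind: "sent", hash: "0xaaa"},
		{kind: "withdraw", from: "0x111"},
		{kind: "sent", hash: "0xbbb"},
		{kind: "withdraw", from: "0x222"},
	}
	var lastHash string
	for _, e := range logs {
		switch e.kind {
		case "sent":
			// the messenger's SentMessage precedes its gateway event
			lastHash = e.hash
		case "withdraw":
			fmt.Printf("withdraw from %s -> msg_hash %s\n", e.from, lastHash)
		}
	}
}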

View File

@@ -4,11 +4,8 @@ go 1.19
require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.7
github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/pressly/goose/v3 v3.7.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.2
@@ -25,7 +22,6 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
@@ -45,7 +41,7 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
@@ -64,7 +60,6 @@ require (
golang.org/x/sys v0.9.0 // indirect
golang.org/x/text v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.8.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -29,9 +29,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -69,12 +66,9 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -86,9 +80,6 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -96,10 +87,8 @@ github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp9
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -113,10 +102,7 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf8=
github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -175,7 +161,6 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -218,8 +203,6 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -240,13 +223,3 @@ gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64=
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
modernc.org/cc/v3 v3.36.1 h1:CICrjwr/1M4+6OQ4HJZ/AHxjcwe67r5vPUF518MkO8A=
modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs=
modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=

View File

@@ -15,9 +15,10 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/orm/migrate"
bridgeUtils "scroll-tech/bridge/internal/utils"
)

View File

@@ -22,7 +22,6 @@ import (
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
)
var (
@@ -171,8 +170,8 @@ func (r *Layer2Relayer) initializeGenesis() error {
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
chunk := &bridgeTypes.Chunk{
Blocks: []*bridgeTypes.WrappedBlock{{
chunk := &types.Chunk{
Blocks: []*types.WrappedBlock{{
Header: genesis,
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
@@ -191,7 +190,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
}
var batch *orm.Batch
batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*bridgeTypes.Chunk{chunk}, dbTX)
batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk}, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
@@ -319,7 +318,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
}
for _, batch := range pendingBatches {
// get current header and parent header.
currentBatchHeader, err := bridgeTypes.DecodeBatchHeader(batch.BatchHeader)
currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
if err != nil {
log.Error("Failed to decode batch header", "index", batch.Index, "error", err)
return
@@ -346,7 +345,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks {
var wrappedBlocks []*bridgeTypes.WrappedBlock
var wrappedBlocks []*types.WrappedBlock
wrappedBlocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil {
log.Error("Failed to fetch wrapped blocks",
@@ -354,7 +353,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
"end number", c.EndBlockNumber, "error", err)
return
}
chunk := &bridgeTypes.Chunk{
chunk := &types.Chunk{
Blocks: wrappedBlocks,
}
var chunkBytes []byte

View File

@@ -16,10 +16,10 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
bridgeUtils "scroll-tech/bridge/internal/utils"
)
@@ -49,7 +49,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
@@ -57,7 +57,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*bridgeTypes.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)
relayer.ProcessPendingBatches()
@@ -76,7 +76,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
@@ -118,7 +118,7 @@ func testL2RelayerSkipBatches(t *testing.T) {
batchOrm := orm.NewBatch(db)
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus) string {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, rollupStatus)
@@ -187,7 +187,7 @@ func testL2RelayerRollupConfirm(t *testing.T) {
batchOrm := orm.NewBatch(db)
batchHashes := make([]string, len(processingKeys))
for i := range batchHashes {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)
batchHashes[i] = batch.Hash
}
@@ -235,10 +235,10 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
defer bridgeUtils.CloseDB(db)
batchOrm := orm.NewBatch(db)
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
assert.NoError(t, err)
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
assert.NoError(t, err)
// Create and set up the Layer2 Relayer.

View File

@@ -10,9 +10,9 @@ import (
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
bridgeTypes "scroll-tech/bridge/internal/types"
)
var (
@@ -25,12 +25,12 @@ var (
l2Cli *ethclient.Client
// l2 block
wrappedBlock1 *bridgeTypes.WrappedBlock
wrappedBlock2 *bridgeTypes.WrappedBlock
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
// chunk
chunk1 *bridgeTypes.Chunk
chunk2 *bridgeTypes.Chunk
chunk1 *types.Chunk
chunk2 *types.Chunk
chunkHash1 common.Hash
chunkHash2 common.Hash
)
@@ -58,19 +58,19 @@ func setupEnv(t *testing.T) {
templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock1 = &bridgeTypes.WrappedBlock{}
wrappedBlock1 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace1, wrappedBlock1)
assert.NoError(t, err)
chunk1 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1}}
chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}}
chunkHash1, err = chunk1.Hash(0)
assert.NoError(t, err)
templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock2 = &bridgeTypes.WrappedBlock{}
wrappedBlock2 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace2, wrappedBlock2)
assert.NoError(t, err)
chunk2 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock2}}
chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}}
chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
assert.NoError(t, err)
}

View File

@@ -8,9 +8,10 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
)
// BatchProposer proposes batches based on available unbatched chunks.
@@ -154,8 +155,8 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
return dbChunks, nil
}
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*bridgeTypes.Chunk, error) {
chunks := make([]*bridgeTypes.Chunk, len(dbChunks))
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
chunks := make([]*types.Chunk, len(dbChunks))
for i, c := range dbChunks {
wrappedBlocks, err := p.l2Block.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil {
@@ -163,7 +164,7 @@ func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*bridge
"start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return nil, err
}
chunks[i] = &bridgeTypes.Chunk{
chunks[i] = &types.Chunk{
Blocks: wrappedBlocks,
}
}

View File

@@ -10,7 +10,6 @@ import (
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
)
@@ -20,7 +19,7 @@ func testBatchProposer(t *testing.T) {
defer utils.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{

View File

@@ -8,9 +8,10 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
)
// ChunkProposer proposes chunks based on available unchunked blocks.
@@ -58,7 +59,7 @@ func (p *ChunkProposer) TryProposeChunk() {
}
}
func (p *ChunkProposer) updateChunkInfoInDB(chunk *bridgeTypes.Chunk) error {
func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
if chunk == nil {
log.Warn("proposed chunk is nil, cannot update in DB")
return nil
@@ -78,7 +79,7 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *bridgeTypes.Chunk) error {
return err
}
func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx)
if err != nil {
return nil, err
@@ -166,5 +167,5 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
)
return nil, nil
}
return &bridgeTypes.Chunk{Blocks: blocks}, nil
return &types.Chunk{Blocks: blocks}, nil
}

View File

@@ -6,9 +6,10 @@ import (
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
)
@@ -18,7 +19,7 @@ func testChunkProposer(t *testing.T) {
defer utils.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -31,8 +32,8 @@ func testChunkProposer(t *testing.T) {
}, db)
cp.TryProposeChunk()
expectedChunk := &bridgeTypes.Chunk{
Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2},
expectedChunk := &types.Chunk{
Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2},
}
expectedHash, err := expectedChunk.Hash(0)
assert.NoError(t, err)

View File

@@ -22,7 +22,6 @@ import (
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
)
@@ -160,7 +159,7 @@ func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
}
func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var blocks []*bridgeTypes.WrappedBlock
var blocks []*types.WrappedBlock
for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number)
block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number)))
@@ -175,7 +174,7 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number)
}
blocks = append(blocks, &bridgeTypes.WrappedBlock{
blocks = append(blocks, &types.WrappedBlock{
Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()),
WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot),

View File

@@ -10,10 +10,11 @@ import (
"gorm.io/gorm"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
)
@@ -27,8 +28,8 @@ var (
l2Cli *ethclient.Client
// block trace
wrappedBlock1 *bridgeTypes.WrappedBlock
wrappedBlock2 *bridgeTypes.WrappedBlock
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
)
func setupEnv(t *testing.T) (err error) {
@@ -56,7 +57,7 @@ func setupEnv(t *testing.T) (err error) {
return err
}
// unmarshal blockTrace
wrappedBlock1 = &bridgeTypes.WrappedBlock{}
wrappedBlock1 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err
}
@@ -66,7 +67,7 @@ func setupEnv(t *testing.T) (err error) {
return err
}
// unmarshal blockTrace
wrappedBlock2 = &bridgeTypes.WrappedBlock{}
wrappedBlock2 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err
}

View File

@@ -10,8 +10,6 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
bridgeTypes "scroll-tech/bridge/internal/types"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
@@ -35,6 +33,7 @@ type Batch struct {
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
// proof
ChunkProofsReady int16 `json:"chunk_proofs_ready" gorm:"column:chunk_proofs_ready;default:0"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
@@ -107,14 +106,16 @@ func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) {
// GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash.
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.AggProof, error) {
var batch Batch
err := o.db.WithContext(ctx).Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified).First(&batch).Error
if err != nil {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("proof")
db = db.Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified)
if err := db.Find(&batch).Error; err != nil {
return nil, err
}
var proof message.AggProof
err = json.Unmarshal(batch.Proof, &proof)
if err != nil {
if err := json.Unmarshal(batch.Proof, &proof); err != nil {
return nil, err
}
@@ -134,12 +135,15 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
// GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes.
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) {
if len(hashes) == 0 {
return []types.RollupStatus{}, nil
return nil, nil
}
var batches []Batch
err := o.db.WithContext(ctx).Where("hash IN ?", hashes).Find(&batches).Error
if err != nil {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("hash, rollup_status")
db = db.Where("hash IN ?", hashes)
if err := db.Find(&batches).Error; err != nil {
return nil, err
}
@@ -189,7 +193,7 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
}
// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
if len(chunks) == 0 {
return nil, errors.New("invalid args")
}
@@ -217,8 +221,8 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
batchIndex = parentBatch.Index + 1
parentBatchHash = common.HexToHash(parentBatch.Hash)
var parentBatchHeader *bridgeTypes.BatchHeader
parentBatchHeader, err = bridgeTypes.DecodeBatchHeader(parentBatch.BatchHeader)
var parentBatchHeader *types.BatchHeader
parentBatchHeader, err = types.DecodeBatchHeader(parentBatch.BatchHeader)
if err != nil {
log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
return nil, err
@@ -228,7 +232,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
version = parentBatchHeader.Version()
}
batchHeader, err := bridgeTypes.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks)
batchHeader, err := types.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks)
if err != nil {
log.Error("failed to create batch header",
"index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
@@ -240,17 +244,18 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
lastChunkBlockNum := len(chunks[numChunks-1].Blocks)
newBatch := Batch{
Index: batchIndex,
Hash: batchHeader.Hash().Hex(),
StartChunkHash: startChunkHash,
StartChunkIndex: startChunkIndex,
EndChunkHash: endChunkHash,
EndChunkIndex: endChunkIndex,
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
BatchHeader: batchHeader.Encode(),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
Index: batchIndex,
Hash: batchHeader.Hash().Hex(),
StartChunkHash: startChunkHash,
StartChunkIndex: startChunkIndex,
EndChunkHash: endChunkHash,
EndChunkIndex: endChunkIndex,
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
BatchHeader: batchHeader.Encode(),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
ChunkProofsReady: 0,
}
if err := db.WithContext(ctx).Create(&newBatch).Error; err != nil {
@@ -267,7 +272,7 @@ func (o *Batch) UpdateSkippedBatches(ctx context.Context) (uint64, error) {
int(types.ProvingTaskSkipped),
int(types.ProvingTaskFailed),
}
result := o.db.Model(&Batch{}).Where("rollup_status", int(types.RollupCommitted)).
result := o.db.WithContext(ctx).Model(&Batch{}).Where("rollup_status", int(types.RollupCommitted)).
Where("proving_status IN (?)", provingStatusList).Update("rollup_status", int(types.RollupFinalizationSkipped))
if result.Error != nil {
return 0, result.Error
@@ -303,10 +308,9 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
default:
}
if err := db.Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
if err := db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err
}
return nil
@@ -328,7 +332,7 @@ func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status type
case types.RollupFinalized:
updateFields["finalized_at"] = time.Now()
}
if err := db.Model(&Batch{}).WithContext(ctx).Where("hash", hash).Updates(updateFields).Error; err != nil {
if err := db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err
}
return nil
@@ -362,7 +366,7 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
return nil
}
// UpdateProofByHash updates the block batch proof by hash.
// UpdateProofByHash updates the batch proof by hash.
// for unit test.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
proofBytes, err := json.Marshal(proof)
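
One behavioral nuance in the rewritten GetVerifiedProofByHash: gorm's First returns ErrRecordNotFound when no row matches, while Find leaves the struct zero-valued and returns a nil error. A minimal illustration, using an in-memory SQLite database and an illustrative model:

package main

import (
	"errors"
	"fmt"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

type Batch struct {
	ID    uint
	Hash  string
	Proof []byte
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err = db.AutoMigrate(&Batch{}); err != nil {
		panic(err)
	}

	var b Batch
	// First errors when no row matches.
	err = db.Where("hash = ?", "0xmissing").First(&b).Error
	fmt.Println(errors.Is(err, gorm.ErrRecordNotFound)) // true

	// Find leaves b zero-valued and returns a nil error.
	err = db.Where("hash = ?", "0xmissing").Find(&b).Error
	fmt.Println(err == nil, b.Proof == nil) // true true
}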

View File

@@ -7,8 +7,6 @@ import (
"scroll-tech/common/types"
bridgeTypes "scroll-tech/bridge/internal/types"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
)
@@ -33,7 +31,7 @@ type Chunk struct {
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int16 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
ProofTimeSec int `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
@@ -107,7 +105,7 @@ func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
}
// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
return nil, errors.New("invalid args")
}
@@ -193,7 +191,6 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
default:
}
if err := db.Model(&Chunk{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
@@ -211,8 +208,5 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
}
db = db.Model(&Chunk{}).Where("index >= ? AND index <= ?", startIndex, endIndex)
if err := db.Update("batch_hash", batchHash).Error; err != nil {
return err
}
return nil
return db.Update("batch_hash", batchHash).Error
}

View File

@@ -11,7 +11,7 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/bridge/internal/types"
"scroll-tech/common/types"
)
// L2Block represents a l2 block in the database.
@@ -56,10 +56,12 @@ func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (int64, error) {
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
var l2Blocks []L2Block
if err := o.db.WithContext(ctx).Select("header, transactions, withdraw_trie_root").
Where("chunk_hash IS NULL").
Order("number asc").
Find(&l2Blocks).Error; err != nil {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Where("chunk_hash IS NULL")
db = db.Order("number ASC")
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, err
}
@@ -119,6 +121,8 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6
var l2Blocks []L2Block
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
db = db.Order("number ASC")

View File

@@ -1,61 +0,0 @@
package migrate
import (
"database/sql"
"embed"
"os"
"strconv"
"github.com/pressly/goose/v3"
)
//go:embed migrations/*.sql
var embedMigrations embed.FS
// MigrationsDir migration dir
const MigrationsDir string = "migrations"
func init() {
goose.SetBaseFS(embedMigrations)
goose.SetSequential(true)
goose.SetTableName("scroll_migrations")
verbose, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))
goose.SetVerbose(verbose)
}
// Migrate migrate db
func Migrate(db *sql.DB) error {
return goose.Up(db, MigrationsDir, goose.WithAllowMissing())
}
// Rollback rollback to the given version
func Rollback(db *sql.DB, version *int64) error {
if version != nil {
return goose.DownTo(db, MigrationsDir, *version)
}
return goose.Down(db, MigrationsDir)
}
// ResetDB clean and migrate db.
func ResetDB(db *sql.DB) error {
if err := Rollback(db, new(int64)); err != nil {
return err
}
return Migrate(db)
}
// Current get current version
func Current(db *sql.DB) (int64, error) {
return goose.GetDBVersion(db)
}
// Status is normal or not
func Status(db *sql.DB) error {
return goose.Version(db, MigrationsDir)
}
// Create a new migration folder
func Create(db *sql.DB, name, migrationType string) error {
return goose.Create(db, MigrationsDir, name, migrationType)
}

View File

@@ -1,86 +0,0 @@
package migrate
import (
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/database"
)
var (
base *docker.App
pgDB *sqlx.DB
)
func initEnv(t *testing.T) error {
// Start db container.
base.RunDBImage(t)
// Create db orm handler.
factory, err := database.NewOrmFactory(base.DBConfig)
if err != nil {
return err
}
pgDB = factory.GetDB()
return nil
}
func TestMigrate(t *testing.T) {
base = docker.NewDockerApp()
if err := initEnv(t); err != nil {
t.Fatal(err)
}
t.Run("testCurrent", testCurrent)
t.Run("testStatus", testStatus)
t.Run("testResetDB", testResetDB)
t.Run("testMigrate", testMigrate)
t.Run("testRollback", testRollback)
t.Cleanup(func() {
base.Free()
})
}
func testCurrent(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
assert.Equal(t, 0, int(cur))
}
func testStatus(t *testing.T) {
status := Status(pgDB.DB)
assert.NoError(t, status)
}
func testResetDB(t *testing.T) {
assert.NoError(t, ResetDB(pgDB.DB))
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// after a reset, the version equals the total number of migrations.
assert.Equal(t, 5, int(cur))
}
func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB.DB))
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
assert.Equal(t, true, cur > 0)
}
func testRollback(t *testing.T) {
version, err := Current(pgDB.DB)
assert.NoError(t, err)
assert.Equal(t, true, version > 0)
assert.NoError(t, Rollback(pgDB.DB, nil))
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
assert.Equal(t, true, cur+1 == version)
}

View File

@@ -13,9 +13,9 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
)
@@ -27,10 +27,10 @@ var (
chunkOrm *Chunk
batchOrm *Batch
wrappedBlock1 *bridgeTypes.WrappedBlock
wrappedBlock2 *bridgeTypes.WrappedBlock
chunk1 *bridgeTypes.Chunk
chunk2 *bridgeTypes.Chunk
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
chunk1 *types.Chunk
chunk2 *types.Chunk
chunkHash1 common.Hash
chunkHash2 common.Hash
)
@@ -64,28 +64,22 @@ func setupEnv(t *testing.T) {
l2BlockOrm = NewL2Block(db)
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
if err != nil {
t.Fatalf("failed to read file: %v", err)
}
wrappedBlock1 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace, wrappedBlock1); err != nil {
t.Fatalf("failed to unmarshal block trace: %v", err)
}
assert.NoError(t, err)
wrappedBlock1 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock1)
assert.NoError(t, err)
templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json")
if err != nil {
t.Fatalf("failed to read file: %v", err)
}
wrappedBlock2 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace, wrappedBlock2); err != nil {
t.Fatalf("failed to unmarshal block trace: %v", err)
}
assert.NoError(t, err)
wrappedBlock2 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock2)
assert.NoError(t, err)
chunk1 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1}}
chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}}
chunkHash1, err = chunk1.Hash(0)
assert.NoError(t, err)
chunk2 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock2}}
chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}}
chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
assert.NoError(t, err)
}
@@ -102,7 +96,7 @@ func TestL2BlockOrm(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
height, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
@@ -135,9 +129,6 @@ func TestChunkOrm(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
@@ -177,35 +168,24 @@ func TestBatchOrm(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
assert.NoError(t, err)
hash1 := batch1.Hash
batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
assert.NoError(t, err)
batchHeader1, err := bridgeTypes.DecodeBatchHeader(batch1.BatchHeader)
batchHeader1, err := types.DecodeBatchHeader(batch1.BatchHeader)
assert.NoError(t, err)
batchHash1 := batchHeader1.Hash().Hex()
assert.Equal(t, hash1, batchHash1)
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
assert.NoError(t, err)
hash2 := batch2.Hash
batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err)
batchHeader2, err := bridgeTypes.DecodeBatchHeader(batch2.BatchHeader)
batchHeader2, err := types.DecodeBatchHeader(batch2.BatchHeader)
assert.NoError(t, err)
batchHash2 := batchHeader2.Hash().Hex()
assert.Equal(t, hash2, batchHash2)
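
Note how `chunkHash2` in the setup above is computed with `chunk1.NumL1Messages(0)` as input: each chunk is hashed against the cumulative count of L1 messages popped by the chunks before it. A sketch of that chaining for an arbitrary slice of chunks (an illustrative helper, not part of the diff):

```go
package orm

import (
	"github.com/scroll-tech/go-ethereum/common"

	"scroll-tech/bridge/internal/types"
)

// hashChunks hashes each chunk with the running total of L1 messages popped
// by all earlier chunks, mirroring the test setup above.
func hashChunks(chunks []*types.Chunk) ([]common.Hash, error) {
	var hashes []common.Hash
	var totalPopped uint64
	for _, c := range chunks {
		h, err := c.Hash(totalPopped)
		if err != nil {
			return nil, err
		}
		hashes = append(hashes, h)
		totalPopped += c.NumL1Messages(totalPopped)
	}
	return hashes, nil
}
```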

View File

@@ -1,136 +0,0 @@
package types
import (
"encoding/binary"
"errors"
"math"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
)
const nonZeroByteGas uint64 = 16
const zeroByteGas uint64 = 4
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used to recover types.Transactions; the From field of types.TransactionData is missing.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}
// NumL1Messages returns the number of L1 messages in this block.
// This number is the sum of included and skipped L1 messages.
func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
var lastQueueIndex *uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
lastQueueIndex = &txData.Nonce
}
}
if lastQueueIndex == nil {
return 0
}
// note: last queue index included before this block is totalL1MessagePoppedBefore - 1
// TODO: cache results
return *lastQueueIndex - totalL1MessagePoppedBefore + 1
}
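A worked example of the arithmetic: if five L1 messages (queue indices 0 through 4) were popped before this block, totalL1MessagePoppedBefore is 5; if the block's last L1 message carries queue index 7, the function returns 7 - 5 + 1 = 3, which counts indices 5 and 6 even when those messages were skipped rather than included.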
// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
bytes := make([]byte, 60)
if !w.Header.Number.IsUint64() {
return nil, errors.New("block number is not uint64")
}
if len(w.Transactions) > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
numL1Messages := w.NumL1Messages(totalL1MessagePoppedBefore)
if numL1Messages > math.MaxUint16 {
return nil, errors.New("number of L1 messages exceeds max uint16")
}
binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
binary.BigEndian.PutUint64(bytes[8:], w.Header.Time)
// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions)))
binary.BigEndian.PutUint16(bytes[58:], uint16(numL1Messages))
return bytes, nil
}
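
The 60-byte layout written by Encode is: block number at [0:8], timestamp at [8:16], a 32-byte base fee (currently zero) at [16:48], gas limit at [48:56], transaction count at [56:58], and L1-message count at [58:60]. A minimal decoder for sanity-checking that layout; the function and type are illustrative, not part of the repo:

```go
package types

import (
	"encoding/binary"
	"errors"
)

// blockContext holds the fields packed by Encode; names are illustrative.
type blockContext struct {
	Number, Timestamp, GasLimit uint64
	NumTxs, NumL1Messages       uint16
}

// decodeBlockContext reverses the fixed offsets used in Encode above.
// b[16:48] is the 32-byte base fee, zero while EIP-1559 is disabled.
func decodeBlockContext(b []byte) (*blockContext, error) {
	if len(b) != 60 {
		return nil, errors.New("block context must be exactly 60 bytes")
	}
	return &blockContext{
		Number:        binary.BigEndian.Uint64(b[0:8]),
		Timestamp:     binary.BigEndian.Uint64(b[8:16]),
		GasLimit:      binary.BigEndian.Uint64(b[48:56]),
		NumTxs:        binary.BigEndian.Uint16(b[56:58]),
		NumL1Messages: binary.BigEndian.Uint16(b[58:60]),
	}, nil
}
```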
// EstimateL1CommitCalldataSize approximates the calldata size of the L1 commit transaction.
// TODO: the calculation could be made more accurate by using 58 + len(l2TxDataBytes) (see Chunk);
// this needs to be adjusted in the future.
func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
var size uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
size += uint64(len(txData.Data))
}
return size
}
// EstimateL1CommitGas approximates the calldata gas of the L1 commit transaction.
// TODO: this will need to be adjusted; only the calldata cost is counted here,
// while there is also execution cost for verifying blocks / chunks / batches
// and for storing the batch hash.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
var total uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
data, _ := hexutil.Decode(txData.Data)
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
for _, b := range rlpTxData {
if b == 0 {
total += zeroByteGas
} else {
total += nonZeroByteGas
}
}
var txLen [4]byte
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
for _, b := range txLen {
if b == 0 {
total += zeroByteGas
} else {
total += nonZeroByteGas
}
}
}
return total
}
// L2TxsNum calculates the number of l2 txs.
func (w *WrappedBlock) L2TxsNum() uint64 {
var count uint64
for _, txData := range w.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
}
return count
}

View File

@@ -11,7 +11,7 @@ import (
// InitDB init the db handler
func InitDB(config *config.DBConfig) (*gorm.DB, error) {
db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
Logger: logger.Default.LogMode(logger.Info),
Logger: logger.Default.LogMode(logger.Warn),
})
if err != nil {
return nil, err
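
Switching the logger from `logger.Info` to `logger.Warn` stops gorm from printing every SQL statement; only slow queries and errors remain. If statement logging is still wanted in development, the level could be made configurable, e.g. as below (the environment variable name is hypothetical):

```go
package utils

import (
	"os"

	"gorm.io/gorm/logger"
)

// gormLogLevel picks the gorm log level: every statement when DB_LOG_SQL is
// set, otherwise only slow queries and errors.
func gormLogLevel() logger.LogLevel {
	if os.Getenv("DB_LOG_SQL") == "true" {
		return logger.Info
	}
	return logger.Warn
}
```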

View File

@@ -13,9 +13,10 @@ import (
"scroll-tech/common/docker"
"scroll-tech/database/migrate"
bcmd "scroll-tech/bridge/cmd"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
"scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/mock_bridge"
)

View File

@@ -14,7 +14,6 @@ import (
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
)
@@ -72,8 +71,8 @@ func testImportL2GasPrice(t *testing.T) {
assert.NoError(t, err)
// add fake chunk
chunk := &bridgeTypes.Chunk{
Blocks: []*bridgeTypes.WrappedBlock{
chunk := &types.Chunk{
Blocks: []*types.WrappedBlock{
{
Header: &gethTypes.Header{
Number: big.NewInt(1),
@@ -90,7 +89,7 @@ func testImportL2GasPrice(t *testing.T) {
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*bridgeTypes.Chunk{chunk})
_, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*types.Chunk{chunk})
assert.NoError(t, err)
// check db status

View File

@@ -17,7 +17,6 @@ import (
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
)
@@ -37,7 +36,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// add some blocks to db
var wrappedBlocks []*bridgeTypes.WrappedBlock
var wrappedBlocks []*types.WrappedBlock
for i := 0; i < 10; i++ {
header := gethTypes.Header{
Number: big.NewInt(int64(i)),
@@ -45,7 +44,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
}
wrappedBlocks = append(wrappedBlocks, &bridgeTypes.WrappedBlock{
wrappedBlocks = append(wrappedBlocks, &types.WrappedBlock{
Header: &header,
Transactions: nil,
WithdrawTrieRoot: common.Hash{},

View File

@@ -1,236 +0,0 @@
package types
import (
"bufio"
"bytes"
"encoding/binary"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
abi "scroll-tech/bridge/abi"
)
// PublicInputHashConfig is the configuration of how to compute the public input hash.
type PublicInputHashConfig struct {
MaxTxNum int `json:"max_tx_num"`
PaddingTxHash common.Hash `json:"padding_tx_hash"`
}
const defaultMaxTxNum = 44
var defaultPaddingTxHash = [32]byte{}
// BatchData contains info of batch to be committed.
type BatchData struct {
Batch abi.IScrollChainBatch
TxHashes []common.Hash
TotalTxNum uint64
TotalL1TxNum uint64
TotalL2Gas uint64
// cache for the BatchHash
hash *common.Hash
// The config to compute the public input hash, or the block hash.
// If it is nil, the hash calculation will use `defaultMaxTxNum` and `defaultPaddingTxHash`.
piCfg *PublicInputHashConfig
}
// Timestamp returns the timestamp of the first block in the BlockData.
func (b *BatchData) Timestamp() uint64 {
if len(b.Batch.Blocks) == 0 {
return 0
}
return b.Batch.Blocks[0].Timestamp
}
// Hash calculates the hash of this batch.
func (b *BatchData) Hash() *common.Hash {
if b.hash != nil {
return b.hash
}
buf := make([]byte, 8)
hasher := crypto.NewKeccakState()
// 1. hash PrevStateRoot, NewStateRoot, WithdrawTrieRoot
// @todo: panic on error here.
_, _ = hasher.Write(b.Batch.PrevStateRoot[:])
_, _ = hasher.Write(b.Batch.NewStateRoot[:])
_, _ = hasher.Write(b.Batch.WithdrawTrieRoot[:])
// 2. hash all block contexts
for _, block := range b.Batch.Blocks {
// write BlockHash & ParentHash
_, _ = hasher.Write(block.BlockHash[:])
_, _ = hasher.Write(block.ParentHash[:])
// write BlockNumber
binary.BigEndian.PutUint64(buf, block.BlockNumber)
_, _ = hasher.Write(buf)
// write Timestamp
binary.BigEndian.PutUint64(buf, block.Timestamp)
_, _ = hasher.Write(buf)
// write BaseFee
var baseFee [32]byte
if block.BaseFee != nil {
baseFee = newByte32FromBytes(block.BaseFee.Bytes())
}
_, _ = hasher.Write(baseFee[:])
// write GasLimit
binary.BigEndian.PutUint64(buf, block.GasLimit)
_, _ = hasher.Write(buf)
// write NumTransactions
binary.BigEndian.PutUint16(buf[:2], block.NumTransactions)
_, _ = hasher.Write(buf[:2])
// write NumL1Messages
binary.BigEndian.PutUint16(buf[:2], block.NumL1Messages)
_, _ = hasher.Write(buf[:2])
}
// 3. add all tx hashes
for _, txHash := range b.TxHashes {
_, _ = hasher.Write(txHash[:])
}
// 4. append empty tx hash up to MaxTxNum
maxTxNum := defaultMaxTxNum
paddingTxHash := common.Hash(defaultPaddingTxHash)
if b.piCfg != nil {
maxTxNum = b.piCfg.MaxTxNum
paddingTxHash = b.piCfg.PaddingTxHash
}
for i := len(b.TxHashes); i < maxTxNum; i++ {
_, _ = hasher.Write(paddingTxHash[:])
}
b.hash = new(common.Hash)
_, _ = hasher.Read(b.hash[:])
return b.hash
}
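The padding loop in step 4 absorbs paddingTxHash until exactly maxTxNum transaction-hash slots have been hashed, so the public input always has the same size regardless of how many transactions the batch actually contains; presumably this keeps the input shape fixed for the proving circuit.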
// NewBatchData creates a BatchData given the parent batch information and the traces of the blocks
// included in this batch.
func NewBatchData(parentBatch *BlockBatch, blocks []*WrappedBlock, piCfg *PublicInputHashConfig) *BatchData {
batchData := new(BatchData)
batch := &batchData.Batch
// set BatchIndex, ParentBatchHash
batch.BatchIndex = parentBatch.Index + 1
batch.ParentBatchHash = common.HexToHash(parentBatch.Hash)
batch.Blocks = make([]abi.IScrollChainBlockContext, len(blocks))
var batchTxDataBuf bytes.Buffer
batchTxDataWriter := bufio.NewWriter(&batchTxDataBuf)
for i, block := range blocks {
batchData.TotalTxNum += uint64(len(block.Transactions))
batchData.TotalL2Gas += block.Header.GasUsed
// set baseFee to 0 when it's nil in the block header
baseFee := block.Header.BaseFee
if baseFee == nil {
baseFee = big.NewInt(0)
}
batch.Blocks[i] = abi.IScrollChainBlockContext{
BlockHash: block.Header.Hash(),
ParentHash: block.Header.ParentHash,
BlockNumber: block.Header.Number.Uint64(),
Timestamp: block.Header.Time,
BaseFee: baseFee,
GasLimit: block.Header.GasLimit,
NumTransactions: uint16(len(block.Transactions)),
NumL1Messages: 0, // TODO: currently use 0, will re-enable after we use l2geth to include L1 messages
}
// fill in RLP-encoded transactions
for _, txData := range block.Transactions {
data, _ := hexutil.Decode(txData.Data)
// right now we only support legacy tx
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
var txLen [4]byte
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
_, _ = batchTxDataWriter.Write(txLen[:])
_, _ = batchTxDataWriter.Write(rlpTxData)
batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
}
if i == 0 {
batch.PrevStateRoot = common.HexToHash(parentBatch.StateRoot)
}
// set NewStateRoot & WithdrawTrieRoot from the last block
if i == len(blocks)-1 {
batch.NewStateRoot = block.Header.Root
batch.WithdrawTrieRoot = block.WithdrawTrieRoot
}
}
if err := batchTxDataWriter.Flush(); err != nil {
panic("Buffered I/O flush failed")
}
batch.L2Transactions = batchTxDataBuf.Bytes()
batchData.piCfg = piCfg
return batchData
}
// NewGenesisBatchData generates the batch that contains the genesis block.
func NewGenesisBatchData(genesisBlockTrace *WrappedBlock) *BatchData {
header := genesisBlockTrace.Header
if header.Number.Uint64() != 0 {
panic("invalid genesis block trace: block number is not 0")
}
batchData := new(BatchData)
batch := &batchData.Batch
// fill in batch information
batch.BatchIndex = 0
batch.Blocks = make([]abi.IScrollChainBlockContext, 1)
batch.NewStateRoot = header.Root
// PrevStateRoot, WithdrawTrieRoot, ParentBatchHash should all be 0
// L2Transactions should be empty
// fill in block context
batch.Blocks[0] = abi.IScrollChainBlockContext{
BlockHash: header.Hash(),
ParentHash: header.ParentHash,
BlockNumber: header.Number.Uint64(),
Timestamp: header.Time,
BaseFee: header.BaseFee,
GasLimit: header.GasLimit,
NumTransactions: 0,
NumL1Messages: 0,
}
return batchData
}
// newByte32FromBytes left-pads a big-endian byte slice with zeros to 32 bytes, keeping only the last 32 bytes when the input is longer.
func newByte32FromBytes(b []byte) [32]byte {
var byte32 [32]byte
if len(b) > 32 {
b = b[len(b)-32:]
}
copy(byte32[32-len(b):], b)
return byte32
}
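
A couple of illustrative assertions pinning down the two branches of newByte32FromBytes; the test name and values are hypothetical:

```go
package types

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewByte32FromBytes(t *testing.T) {
	// Short inputs land right-aligned, zero-padded on the left.
	short := newByte32FromBytes([]byte{0x12, 0x34})
	assert.Equal(t, byte(0x00), short[0])
	assert.Equal(t, byte(0x12), short[30])
	assert.Equal(t, byte(0x34), short[31])

	// Longer inputs keep only their last 32 bytes.
	long := make([]byte, 40)
	long[7] = 0xaa // outside the last 32 bytes, dropped
	long[8] = 0xbb // first byte of the kept window
	trunc := newByte32FromBytes(long)
	assert.Equal(t, byte(0xbb), trunc[0])
}
```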

View File

@@ -11,7 +11,7 @@ import (
func TestNewBatchHeader(t *testing.T) {
// Without L1 Msg
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock := &WrappedBlock{}
@@ -36,7 +36,7 @@ func TestNewBatchHeader(t *testing.T) {
assert.Equal(t, 0, len(batchHeader.skippedL1MessageBitmap))
// 1 L1 Msg in 1 bitmap
templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
@@ -54,7 +54,7 @@ func TestNewBatchHeader(t *testing.T) {
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs
templateBlockTrace3, err := os.ReadFile("../../../common/testdata/blockTrace_05.json")
templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_05.json")
assert.NoError(t, err)
wrappedBlock3 := &WrappedBlock{}
@@ -87,7 +87,7 @@ func TestNewBatchHeader(t *testing.T) {
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many sparse L1 Msgs in 1 bitmap
templateBlockTrace4, err := os.ReadFile("../../../common/testdata/blockTrace_06.json")
templateBlockTrace4, err := os.ReadFile("../testdata/blockTrace_06.json")
assert.NoError(t, err)
wrappedBlock4 := &WrappedBlock{}
@@ -106,7 +106,7 @@ func TestNewBatchHeader(t *testing.T) {
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many L1 Msgs in each of 2 bitmaps
templateBlockTrace5, err := os.ReadFile("../../../common/testdata/blockTrace_07.json")
templateBlockTrace5, err := os.ReadFile("../testdata/blockTrace_07.json")
assert.NoError(t, err)
wrappedBlock5 := &WrappedBlock{}
@@ -127,7 +127,7 @@ func TestNewBatchHeader(t *testing.T) {
func TestBatchHeaderEncode(t *testing.T) {
// Without L1 Msg
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock := &WrappedBlock{}
@@ -154,7 +154,7 @@ func TestBatchHeaderEncode(t *testing.T) {
assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes))
// With L1 Msg
templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
@@ -174,7 +174,7 @@ func TestBatchHeaderEncode(t *testing.T) {
func TestBatchHeaderHash(t *testing.T) {
// Without L1 Msg
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock := &WrappedBlock{}
@@ -199,7 +199,7 @@ func TestBatchHeaderHash(t *testing.T) {
hash := batchHeader.Hash()
assert.Equal(t, "d69da4357da0073f4093c76e49f077e21bb52f48f57ee3e1fbd9c38a2881af81", common.Bytes2Hex(hash.Bytes()))
templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json")
templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
@@ -216,7 +216,7 @@ func TestBatchHeaderHash(t *testing.T) {
assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes()))
// With L1 Msg
templateBlockTrace3, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err)
wrappedBlock3 := &WrappedBlock{}

View File

@@ -1,143 +0,0 @@
package types
import (
"encoding/json"
"math/big"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
abi "scroll-tech/bridge/abi"
)
func TestBatchHash(t *testing.T) {
txBytes := common.Hex2Bytes("02f8710582fd14808506e38dccc9825208944d496ccc28058b1d74b7a19541663e21154f9c848801561db11e24a43380c080a0d890606d7a35b2ab0f9b866d62c092d5b163f3e6a55537ae1485aac08c3f8ff7a023997be2d32f53e146b160fff0ba81e81dbb4491c865ab174d15c5b3d28c41ae")
tx := new(geth_types.Transaction)
if err := tx.UnmarshalBinary(txBytes); err != nil {
t.Fatalf("invalid tx hex string: %s", err)
}
batchData := new(BatchData)
batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
batchData.piCfg = &PublicInputHashConfig{
MaxTxNum: 4,
PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
}
batch := &batchData.Batch
batch.PrevStateRoot = common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000cafe")
block := abi.IScrollChainBlockContext{
BlockNumber: 51966,
Timestamp: 123456789,
BaseFee: new(big.Int).SetUint64(0),
GasLimit: 10000000000000000,
NumTransactions: 1,
NumL1Messages: 0,
}
batch.Blocks = append(batch.Blocks, block)
hash := batchData.Hash()
assert.Equal(t, *hash, common.HexToHash("0xa9f2ca3175794f91226a410ba1e60fff07a405c957562675c4149b77e659d805"))
// use a different tx hash
txBytes = common.Hex2Bytes("f8628001830f424094000000000000000000000000000000000000bbbb8080820a97a064e07cd8f939e2117724bdcbadc80dda421381cbc2a1f4e0d093d9cc5c5cf68ea03e264227f80852d88743cd9e43998f2746b619180366a87e4531debf9c3fa5dc")
tx = new(geth_types.Transaction)
if err := tx.UnmarshalBinary(txBytes); err != nil {
t.Fatalf("invalid tx hex string: %s", err)
}
batchData.TxHashes[0] = tx.Hash()
batchData.hash = nil // clear the cache
assert.Equal(t, *batchData.Hash(), common.HexToHash("0x398cb22bbfa1665c1b342b813267538a4c933d7f92d8bd9184aba0dd1122987b"))
}
func TestNewGenesisBatch(t *testing.T) {
genesisBlock := &geth_types.Header{
UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
Root: common.HexToHash("0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5"),
TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
Difficulty: big.NewInt(1),
Number: big.NewInt(0),
GasLimit: 940000000,
GasUsed: 0,
Time: 1639724192,
Extra: common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000214f8d488aa9ebf83e30bad45fb8f9c8ee2509f5511caff794753d07e9dfb218cfc233bb62d2c57022783094e1a7edb6f069f8424bb68496a0926b130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
BaseFee: big.NewInt(1000000000),
}
assert.Equal(
t,
genesisBlock.Hash().Hex(),
"0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
"wrong genesis block header",
)
blockTrace := &WrappedBlock{genesisBlock, nil, common.Hash{}}
batchData := NewGenesisBatchData(blockTrace)
t.Log(batchData.Batch.Blocks[0])
batchData.piCfg = &PublicInputHashConfig{
MaxTxNum: 25,
PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
}
assert.Equal(
t,
batchData.Hash().Hex(),
"0x65cf210e30f75cf8fd198df124255f73bc08d6324759e828a784fa938e7ac43d",
"wrong genesis batch hash",
)
}
func TestNewBatchData(t *testing.T) {
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
parentBatch := &BlockBatch{
Index: 1,
Hash: "0x0000000000000000000000000000000000000000",
StateRoot: "0x0000000000000000000000000000000000000000",
}
batchData1 := NewBatchData(parentBatch, []*WrappedBlock{wrappedBlock}, nil)
assert.NotNil(t, batchData1)
assert.NotNil(t, batchData1.Batch)
assert.Equal(t, "0xac4487c0d8f429dafda3c68cbb8983ac08af83c03c83c365d7df02864f80af37", batchData1.Hash().Hex())
templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock2))
parentBatch2 := &BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
StateRoot: batchData1.Batch.NewStateRoot.Hex(),
}
batchData2 := NewBatchData(parentBatch2, []*WrappedBlock{wrappedBlock2}, nil)
assert.NotNil(t, batchData2)
assert.NotNil(t, batchData2.Batch)
assert.Equal(t, "0x8f1447573740b3e75b979879866b8ad02eecf88e1946275eb8cf14ab95876efc", batchData2.Hash().Hex())
}
func TestBatchDataTimestamp(t *testing.T) {
// Test case 1: when the batch data contains no blocks.
assert.Equal(t, uint64(0), (&BatchData{}).Timestamp())
// Test case 2: when the batch data contains blocks.
batchData := &BatchData{
Batch: abi.IScrollChainBatch{
Blocks: []abi.IScrollChainBlockContext{
{Timestamp: 123456789},
{Timestamp: 234567891},
},
},
}
assert.Equal(t, uint64(123456789), batchData.Timestamp())
}

View File

@@ -6,9 +6,13 @@ import (
"math"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
)
const nonZeroByteGas uint64 = 16
const zeroByteGas uint64 = 4
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
@@ -22,7 +26,7 @@ type WrappedBlock struct {
func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
var lastQueueIndex *uint64
for _, txData := range w.Transactions {
if txData.Type == 0x7E {
if txData.Type == types.L1MessageTxType {
lastQueueIndex = &txData.Nonce
}
}
@@ -59,3 +63,74 @@ func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error)
return bytes, nil
}
// EstimateL1CommitCalldataSize approximates the calldata size of the L1 commit transaction.
// TODO: the calculation could be made more accurate by using 58 + len(l2TxDataBytes) (see Chunk);
// this needs to be adjusted in the future.
func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
var size uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
size += uint64(len(txData.Data))
}
return size
}
// EstimateL1CommitGas approximates the calldata gas of the L1 commit transaction.
// TODO: this will need to be adjusted; only the calldata cost is counted here,
// while there is also execution cost for verifying blocks / chunks / batches
// and for storing the batch hash.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
var total uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
data, _ := hexutil.Decode(txData.Data)
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
for _, b := range rlpTxData {
if b == 0 {
total += zeroByteGas
} else {
total += nonZeroByteGas
}
}
var txLen [4]byte
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
for _, b := range txLen {
if b == 0 {
total += zeroByteGas
} else {
total += nonZeroByteGas
}
}
}
return total
}
// L2TxsNum calculates the number of l2 txs.
func (w *WrappedBlock) L2TxsNum() uint64 {
var count uint64
for _, txData := range w.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
}
return count
}
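
The per-byte pricing above follows Ethereum's calldata rules: 16 gas per non-zero byte and 4 gas per zero byte, applied to each transaction's RLP bytes plus its 4-byte big-endian length prefix. A self-contained check of the arithmetic, with illustrative values:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const nonZeroByteGas, zeroByteGas = 16, 4

// calldataGas prices bytes the same way EstimateL1CommitGas does.
func calldataGas(b []byte) (total uint64) {
	for _, c := range b {
		if c == 0 {
			total += zeroByteGas
		} else {
			total += nonZeroByteGas
		}
	}
	return total
}

func main() {
	// Hypothetical 100-byte RLP payload with 10 zero bytes:
	// 90*16 + 10*4 = 1480 gas.
	rlp := make([]byte, 100)
	for i := 10; i < 100; i++ {
		rlp[i] = 0xff
	}
	// Length prefix 0x00000064 adds 3*4 + 1*16 = 28 gas.
	var prefix [4]byte
	binary.BigEndian.PutUint32(prefix[:], uint32(len(rlp)))
	fmt.Println(calldataGas(rlp) + calldataGas(prefix[:])) // 1508
}
```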

View File

@@ -32,7 +32,7 @@ func TestChunkEncode(t *testing.T) {
assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte")
// Test case 3: when the chunk contains one block.
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock := &WrappedBlock{}
@@ -50,7 +50,7 @@ func TestChunkEncode(t *testing.T) {
assert.Equal(t, "0100000000000000020000000063807b2a0000000000000000000000000000000000000000000000000000000000000000000355418d1e81840002000000000073f87180843b9aec2e8307a12094c0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca28a152d02c7e14af60000008083019ecea0ab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514a034cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b00000073f87101843b9aec2e8307a1209401bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed8a152d02c7e14af60000008083019ecea0f039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316a05a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1", hexString)
// Test case 4: when the chunk contains one block with 1 L1MsgTx
templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
@@ -92,7 +92,7 @@ func TestChunkHash(t *testing.T) {
assert.Contains(t, err.Error(), "number of blocks is 0")
// Test case 2: successfully hashing a chunk on one block
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
@@ -106,7 +106,7 @@ func TestChunkHash(t *testing.T) {
assert.Equal(t, "0x78c839dfc494396c16b40946f32b3f4c3e8c2d4bfd04aefcf235edec474482f8", hash.Hex())
// Test case 3: successfully hashing a chunk on two blocks
templateBlockTrace1, err := os.ReadFile("../../../common/testdata/blockTrace_03.json")
templateBlockTrace1, err := os.ReadFile("../testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock1 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace1, wrappedBlock1))
@@ -121,7 +121,7 @@ func TestChunkHash(t *testing.T) {
assert.Equal(t, "0xaa9e494f72bc6965857856f0fae6916f27b2a6591c714a573b2fab46df03b8ae", hash.Hex())
// Test case 4: successfully hashing a chunk on two blocks each with L1 and L2 txs
templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))

View File

@@ -4,7 +4,6 @@ package types
import (
"database/sql"
"fmt"
"time"
)
// L1BlockStatus represents current l1 block processing status
@@ -157,22 +156,6 @@ type RollerStatus struct {
Status RollerProveStatus `json:"status"`
}
// SessionInfo holds the info of the rollers assigned to a block batch (session)
type SessionInfo struct {
ID int `json:"id" db:"id"`
TaskID string `json:"task_id" db:"task_id"`
RollerPublicKey string `json:"roller_public_key" db:"roller_public_key"`
ProveType int16 `json:"prove_type" db:"prove_type"`
RollerName string `json:"roller_name" db:"roller_name"`
ProvingStatus int16 `json:"proving_status" db:"proving_status"`
FailureType int16 `json:"failure_type" db:"failure_type"`
Reward uint64 `json:"reward" db:"reward"`
Proof []byte `json:"proof" db:"proof"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
}
// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
type ProvingStatus int
@@ -235,44 +218,3 @@ const (
// RollupFinalizeFailed : rollup finalize transaction is confirmed but failed
RollupFinalizeFailed
)
// BlockBatch is structure of stored block_batch
type BlockBatch struct {
Hash string `json:"hash" db:"hash"`
Index uint64 `json:"index" db:"index"`
ParentHash string `json:"parent_hash" db:"parent_hash"`
StartBlockNumber uint64 `json:"start_block_number" db:"start_block_number"`
StartBlockHash string `json:"start_block_hash" db:"start_block_hash"`
EndBlockNumber uint64 `json:"end_block_number" db:"end_block_number"`
EndBlockHash string `json:"end_block_hash" db:"end_block_hash"`
StateRoot string `json:"state_root" db:"state_root"`
TotalTxNum uint64 `json:"total_tx_num" db:"total_tx_num"`
TotalL1TxNum uint64 `json:"total_l1_tx_num" db:"total_l1_tx_num"`
TotalL2Gas uint64 `json:"total_l2_gas" db:"total_l2_gas"`
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
Proof []byte `json:"proof" db:"proof"`
ProofTimeSec uint64 `json:"proof_time_sec" db:"proof_time_sec"`
RollupStatus RollupStatus `json:"rollup_status" db:"rollup_status"`
OracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"`
CommitTxHash sql.NullString `json:"commit_tx_hash" db:"commit_tx_hash"`
FinalizeTxHash sql.NullString `json:"finalize_tx_hash" db:"finalize_tx_hash"`
OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" db:"prover_assigned_at"`
ProvedAt *time.Time `json:"proved_at" db:"proved_at"`
CommittedAt *time.Time `json:"committed_at" db:"committed_at"`
FinalizedAt *time.Time `json:"finalized_at" db:"finalized_at"`
}
// AggTask is a wrapper type around db AggProveTask type.
type AggTask struct {
ID string `json:"id" db:"id"`
StartBatchIndex uint64 `json:"start_batch_index" db:"start_batch_index"`
StartBatchHash string `json:"start_batch_hash" db:"start_batch_hash"`
EndBatchIndex uint64 `json:"end_batch_index" db:"end_batch_index"`
EndBatchHash string `json:"end_batch_hash" db:"end_batch_hash"`
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
Proof []byte `json:"proof" db:"proof"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
}

View File

@@ -23,25 +23,25 @@ const (
StatusProofError
)
// ProveType represents the type of roller.
type ProveType uint8
// ProofType represents the type of proof generated by a roller.
type ProofType uint8
func (r ProveType) String() string {
func (r ProofType) String() string {
switch r {
case BasicProve:
return "Basic Prove"
case AggregatorProve:
return "Aggregator Prove"
case ProofTypeChunk:
return "proof type chunk"
case ProofTypeBatch:
return "proof type batch"
default:
return "Illegal Prove type"
return "illegal proof type"
}
}
const (
// BasicProve is default roller, it only generates zk proof from traces.
BasicProve ProveType = iota
// AggregatorProve generates zk proof from other zk proofs and aggregate them into one proof.
AggregatorProve
// ProofTypeChunk is default roller, it only generates zk proof from traces.
ProofTypeChunk ProofType = iota
// ProofTypeBatch generates zk proof from other zk proofs and aggregate them into one proof.
ProofTypeBatch
)
// AuthMsg is the first message exchanged from the Roller to the Sequencer.
@@ -59,7 +59,7 @@ type Identity struct {
// Roller name
Name string `json:"name"`
// Roller RollerType
RollerType ProveType `json:"roller_type,omitempty"`
RollerType ProofType `json:"roller_type,omitempty"`
// Unverified Unix timestamp of message creation
Timestamp uint32 `json:"timestamp"`
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version.
@@ -203,7 +203,7 @@ func (a *ProofMsg) PublicKey() (string, error) {
// TaskMsg is a wrapper type around db ProveTask type.
type TaskMsg struct {
ID string `json:"id"`
Type ProveType `json:"type,omitempty"`
Type ProofType `json:"type,omitempty"`
// For decentralization, basic rollers will get block hashes from the coordinator, so that they can refer to the block hashes and fetch traces locally. Only applicable to basic rollers.
BlockHashes []common.Hash `json:"block_hashes,omitempty"`
// Only applicable for aggregator rollers.
@@ -214,7 +214,7 @@ type TaskMsg struct {
// the proof generation succeeded, and an error message if proof generation failed.
type ProofDetail struct {
ID string `json:"id"`
Type ProveType `json:"type,omitempty"`
Type ProofType `json:"type,omitempty"`
Status RespStatus `json:"status"`
Proof *AggProof `json:"proof"`
Error string `json:"error,omitempty"`

View File

@@ -49,7 +49,7 @@ func TestGenerateToken(t *testing.T) {
func TestIdentityHash(t *testing.T) {
identity := &Identity{
Name: "testName",
RollerType: BasicProve,
RollerType: ProofTypeChunk,
Timestamp: uint32(1622428800),
Version: "testVersion",
Token: "testToken",
@@ -68,7 +68,7 @@ func TestProofMessageSignVerifyPublicKey(t *testing.T) {
proofMsg := &ProofMsg{
ProofDetail: &ProofDetail{
ID: "testID",
Type: BasicProve,
Type: ProofTypeChunk,
Status: StatusOk,
Proof: &AggProof{
Proof: []byte("testProof"),
@@ -96,7 +96,7 @@ func TestProofMessageSignVerifyPublicKey(t *testing.T) {
func TestProofDetailHash(t *testing.T) {
proofDetail := &ProofDetail{
ID: "testID",
Type: BasicProve,
Type: ProofTypeChunk,
Status: StatusOk,
Proof: &AggProof{
Proof: []byte("testProof"),
@@ -114,14 +114,14 @@ func TestProofDetailHash(t *testing.T) {
}
func TestProveTypeString(t *testing.T) {
basicProve := ProveType(0)
assert.Equal(t, "Basic Prove", basicProve.String())
proofTypeChunk := ProofType(0)
assert.Equal(t, "proof type chunk", proofTypeChunk.String())
aggregatorProve := ProveType(1)
assert.Equal(t, "Aggregator Prove", aggregatorProve.String())
proofTypeBatch := ProofType(1)
assert.Equal(t, "proof type batch", proofTypeBatch.String())
illegalProve := ProveType(3)
assert.Equal(t, "Illegal Prove type", illegalProve.String())
illegalProof := ProofType(3)
assert.Equal(t, "illegal proof type", illegalProof.String())
}
func TestProofMsgPublicKey(t *testing.T) {
@@ -131,7 +131,7 @@ func TestProofMsgPublicKey(t *testing.T) {
proofMsg := &ProofMsg{
ProofDetail: &ProofDetail{
ID: "testID",
Type: BasicProve,
Type: ProofTypeChunk,
Status: StatusOk,
Proof: &AggProof{
Proof: []byte("testProof"),

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.0.7"
var tag = "v4.0.9"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -105,7 +105,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
address _to,
uint256 _tokenId,
uint256 _amount
) external override onlyCallByCounterpart nonReentrant {
) external virtual onlyCallByCounterpart nonReentrant {
require(_l2Token != address(0), "token address cannot be 0");
require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");
@@ -122,7 +122,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
address _to,
uint256[] calldata _tokenIds,
uint256[] calldata _amounts
) external override onlyCallByCounterpart nonReentrant {
) external virtual onlyCallByCounterpart nonReentrant {
require(_l2Token != address(0), "token address cannot be 0");
require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");
@@ -162,7 +162,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
uint256 _tokenId,
uint256 _amount,
uint256 _gasLimit
) internal nonReentrant {
) internal virtual nonReentrant {
require(_amount > 0, "deposit zero amount");
address _l2Token = tokenMapping[_token];
@@ -200,7 +200,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
uint256[] calldata _tokenIds,
uint256[] calldata _amounts,
uint256 _gasLimit
) internal nonReentrant {
) internal virtual nonReentrant {
require(_tokenIds.length > 0, "no token to deposit");
require(_tokenIds.length == _amounts.length, "length mismatch");
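
Marking both the external finalize entry points and the internal deposit helpers `virtual` lets projects that extend these gateways override the token-transfer logic in a subclass while inheriting the messaging and reentrancy plumbing; per the commit, the same change is applied to the ERC721, ETH, and L2-side gateways below.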

View File

@@ -99,7 +99,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _from,
address _to,
uint256 _tokenId
) external override onlyCallByCounterpart nonReentrant {
) external virtual onlyCallByCounterpart nonReentrant {
require(_l2Token != address(0), "token address cannot be 0");
require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");
@@ -115,7 +115,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _from,
address _to,
uint256[] calldata _tokenIds
) external override onlyCallByCounterpart nonReentrant {
) external virtual onlyCallByCounterpart nonReentrant {
require(_l2Token != address(0), "token address cannot be 0");
require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");
@@ -155,7 +155,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _to,
uint256 _tokenId,
uint256 _gasLimit
) internal nonReentrant {
) internal virtual nonReentrant {
address _l2Token = tokenMapping[_token];
require(_l2Token != address(0), "no corresponding l2 token");
@@ -188,7 +188,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _to,
uint256[] calldata _tokenIds,
uint256 _gasLimit
) internal nonReentrant {
) internal virtual nonReentrant {
require(_tokenIds.length > 0, "no token to deposit");
address _l2Token = tokenMapping[_token];

View File

@@ -95,7 +95,7 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
uint256 _amount,
bytes memory _data,
uint256 _gasLimit
) internal nonReentrant {
) internal virtual nonReentrant {
require(_amount > 0, "deposit zero eth");
// 1. Extract real sender if this call is from L1GatewayRouter.

View File

@@ -103,7 +103,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
address _to,
uint256 _tokenId,
uint256 _amount
) external override onlyCallByCounterpart nonReentrant {
) external virtual onlyCallByCounterpart nonReentrant {
require(_l1Token != address(0), "token address cannot be 0");
require(_l1Token == tokenMapping[_l2Token], "l2 token mismatch");
@@ -120,7 +120,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
address _to,
uint256[] calldata _tokenIds,
uint256[] calldata _amounts
) external override onlyCallByCounterpart nonReentrant {
) external virtual onlyCallByCounterpart nonReentrant {
require(_l1Token != address(0), "token address cannot be 0");
require(_l1Token == tokenMapping[_l2Token], "l2 token mismatch");
@@ -160,7 +160,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
uint256 _tokenId,
uint256 _amount,
uint256 _gasLimit
) internal nonReentrant {
) internal virtual nonReentrant {
require(_amount > 0, "withdraw zero amount");
address _l1Token = tokenMapping[_token];
@@ -198,7 +198,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
uint256[] calldata _tokenIds,
uint256[] calldata _amounts,
uint256 _gasLimit
) internal nonReentrant {
) internal virtual nonReentrant {
require(_tokenIds.length > 0, "no token to withdraw");
require(_tokenIds.length == _amounts.length, "length mismatch");

View File

@@ -97,7 +97,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _from,
address _to,
uint256 _tokenId
) external override onlyCallByCounterpart nonReentrant {
) external virtual onlyCallByCounterpart nonReentrant {
require(_l1Token != address(0), "token address cannot be 0");
require(_l1Token == tokenMapping[_l2Token], "l2 token mismatch");
@@ -113,7 +113,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _from,
address _to,
uint256[] calldata _tokenIds
) external override onlyCallByCounterpart nonReentrant {
) external virtual onlyCallByCounterpart nonReentrant {
require(_l1Token != address(0), "token address cannot be 0");
require(_l1Token == tokenMapping[_l2Token], "l2 token mismatch");
@@ -153,7 +153,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _to,
uint256 _tokenId,
uint256 _gasLimit
) internal nonReentrant {
) internal virtual nonReentrant {
address _l1Token = tokenMapping[_token];
require(_l1Token != address(0), "no corresponding l1 token");
@@ -188,7 +188,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _to,
uint256[] calldata _tokenIds,
uint256 _gasLimit
) internal nonReentrant {
) internal virtual nonReentrant {
require(_tokenIds.length > 0, "no token to withdraw");
address _l1Token = tokenMapping[_token];

View File

@@ -88,7 +88,7 @@ contract L2ETHGateway is Initializable, ScrollGatewayBase, IL2ETHGateway {
uint256 _amount,
bytes memory _data,
uint256 _gasLimit
) internal nonReentrant {
) internal virtual nonReentrant {
require(msg.value > 0, "withdraw zero eth");
// 1. Extract real sender if this call is from L1GatewayRouter.

View File

@@ -1,7 +1,7 @@
{
"name": "@scroll-tech/contracts",
"description": "A library for interacting with Scroll contracts.",
"version": "0.0.3",
"version": "0.0.4",
"repository": {
"type": "git",
"url": "https://github.com/scroll-tech/scroll.git"

View File

@@ -51,9 +51,9 @@ func (m *Manager) ListRollers() ([]*RollerInfo, error) {
PublicKey: pk,
}
for id, sess := range m.sessions {
for _, sessionInfo := range sess.sessionInfos {
if sessionInfo.RollerPublicKey == pk {
info.ActiveSessionStartTime = *sessionInfo.CreatedAt
for _, proverTask := range sess.proverTasks {
if proverTask.ProverPublicKey == pk {
info.ActiveSessionStartTime = proverTask.CreatedAt
info.ActiveSession = id
break
}
@@ -68,14 +68,14 @@ func (m *Manager) ListRollers() ([]*RollerInfo, error) {
func newSessionInfo(sess *session, status types.ProvingStatus, errMsg string, finished bool) *SessionInfo {
now := time.Now()
var nameList []string
for _, sessionInfo := range sess.sessionInfos {
nameList = append(nameList, sessionInfo.RollerName)
for _, proverTask := range sess.proverTasks {
nameList = append(nameList, proverTask.ProverName)
}
info := SessionInfo{
ID: sess.taskID,
Status: status.String(),
AssignedRollers: nameList,
StartTime: *sess.sessionInfos[0].CreatedAt,
StartTime: sess.proverTasks[0].CreatedAt,
Error: errMsg,
}
if finished {

View File

@@ -15,7 +15,7 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/config"
"scroll-tech/coordinator/internal/config"
)
func geneAuthMsg(t *testing.T) *message.AuthMsg {
@@ -36,7 +36,7 @@ var rollerManager *Manager
func init() {
rmConfig := config.RollerManagerConfig{}
rmConfig.Verifier = &config.VerifierConfig{MockMode: true}
rollerManager, _ = New(context.Background(), &rmConfig, nil, nil)
rollerManager, _ = New(context.Background(), &rmConfig, nil)
}
func TestManager_RequestToken(t *testing.T) {

View File

@@ -6,78 +6,69 @@ import (
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator"
"scroll-tech/coordinator/config"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/utils"
)
var (
// Set up Coordinator app info.
app *cli.App
)
var app *cli.App
func init() {
// Set up coordinator app info.
app = cli.NewApp()
app.Action = action
app.Name = "coordinator"
app.Usage = "The Scroll L2 Coordinator"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
return cutils.LogSetup(ctx)
}
// Register `coordinator-test` app for integration-test.
utils.RegisterSimulation(app, utils.CoordinatorApp)
cutils.RegisterSimulation(app, cutils.CoordinatorApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// Start metrics server.
metrics.Serve(context.Background(), ctx)
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
db, err := utils.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
return err
}
// Initialize all coordinator modules.
rollerManager, err := coordinator.New(ctx.Context, cfg.RollerManagerConfig, ormFactory, client)
if err != nil {
return err
}
defer func() {
rollerManager.Stop()
err = ormFactory.Close()
if err != nil {
if err = utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
subCtx, cancel := context.WithCancel(ctx.Context)
// Initialize all coordinator modules.
rollerManager, err := coordinator.New(subCtx, cfg.RollerManagerConfig, db)
defer func() {
cancel()
rollerManager.Stop()
}()
if err != nil {
return err
}
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Start all modules.
if err = rollerManager.Start(); err != nil {
log.Crit("couldn't start roller manager", "error", err)
@@ -86,7 +77,7 @@ func action(ctx *cli.Context) error {
apis := rollerManager.APIs()
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
handler, addr, err := cutils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
@@ -103,7 +94,7 @@ func action(ctx *cli.Context) error {
}
// Register api and start ws service.
if ctx.Bool(wsEnabledFlag.Name) {
handler, addr, err := utils.StartWSEndpoint(
handler, addr, err := cutils.StartWSEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(wsListenAddrFlag.Name),

View File

@@ -10,7 +10,7 @@ import (
"testing"
"time"
coordinatorConfig "scroll-tech/coordinator/config"
coordinatorConfig "scroll-tech/coordinator/internal/config"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"

View File

@@ -11,24 +11,23 @@ require (
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
golang.org/x/exp v0.0.0-20230206171751-46f607a40771
golang.org/x/sync v0.1.0
gorm.io/driver/postgres v1.5.0
gorm.io/gorm v1.25.1
)
require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/holiman/uint256 v1.2.2 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/text v0.10.0 // indirect
)
require (
@@ -42,7 +41,6 @@ require (
github.com/gorilla/websocket v1.5.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.5.3 // indirect
@@ -55,7 +53,6 @@ require (
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.10.0 // indirect
golang.org/x/sys v0.9.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -22,10 +22,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
@@ -33,32 +30,34 @@ github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -66,16 +65,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -92,11 +83,10 @@ github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI=
@@ -111,13 +101,14 @@ github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3h
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -125,51 +116,73 @@ github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+Kd
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64=
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=

View File

@@ -6,8 +6,6 @@ import (
"os"
"path/filepath"
"strings"
db_config "scroll-tech/database"
)
const (
@@ -44,7 +42,7 @@ type L2Config struct {
// Config load configuration items.
type Config struct {
RollerManagerConfig *RollerManagerConfig `json:"roller_manager_config"`
DBConfig *db_config.DBConfig `json:"db_config"`
DBConfig *DBConfig `json:"db_config"`
L2Config *L2Config `json:"l2_config"`
}
@@ -55,6 +53,16 @@ type VerifierConfig struct {
AggVkPath string `json:"agg_vk_path"`
}
// DBConfig holds the database configuration.
type DBConfig struct {
// data source name
DSN string `json:"dsn"`
DriverName string `json:"driver_name"`
MaxOpenNum int `json:"maxOpenNum"`
MaxIdleNum int `json:"maxIdleNum"`
}
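
With this change the coordinator can construct its database settings without importing scroll-tech/database. A minimal sketch, with placeholder values that are not taken from the diff:

cfg := &DBConfig{
	DSN:        "postgres://user:password@localhost:5432/scroll", // placeholder DSN
	DriverName: "postgres",
	MaxOpenNum: 200,
	MaxIdleNum: 20,
}
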
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
buf, err := os.ReadFile(filepath.Clean(file))

View File

@@ -0,0 +1,254 @@
package orm
import (
"context"
"encoding/json"
"errors"
"time"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
)
const defaultBatchHeaderVersion = 0
// Batch represents a batch of chunks.
type Batch struct {
db *gorm.DB `gorm:"column:-"`
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
// proof
ChunkProofsReady int16 `json:"chunk_proofs_ready" gorm:"column:chunk_proofs_ready;default:0"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// rollup
RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"`
CommitTxHash string `json:"commit_tx_hash" gorm:"column:commit_tx_hash;default:NULL"`
CommittedAt *time.Time `json:"committed_at" gorm:"column:committed_at;default:NULL"`
FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`
// gas oracle
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewBatch creates a new Batch database instance.
func NewBatch(db *gorm.DB) *Batch {
return &Batch{db: db}
}
// TableName returns the table name for the Batch model.
func (*Batch) TableName() string {
return "batch"
}
// GetUnassignedBatches retrieves unassigned batches based on the specified limit.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetUnassignedBatches(ctx context.Context, limit int) ([]*Batch, error) {
if limit < 0 {
return nil, errors.New("limit must not be smaller than zero")
}
if limit == 0 {
return nil, nil
}
var batches []*Batch
db := o.db.WithContext(ctx)
db = db.Where("proving_status = ? AND chunk_proofs_ready = ?", types.ProvingTaskUnassigned, 1)
db = db.Order("index ASC")
db = db.Limit(limit)
if err := db.Find(&batches).Error; err != nil {
return nil, err
}
return batches, nil
}
// GetAssignedBatches retrieves all batches whose proving_status is either types.ProvingTaskAssigned or types.ProvingTaskProved.
func (o *Batch) GetAssignedBatches(ctx context.Context) ([]*Batch, error) {
var assignedBatches []*Batch
err := o.db.WithContext(ctx).
Where("proving_status IN (?)", []int{int(types.ProvingTaskAssigned), int(types.ProvingTaskProved)}).
Find(&assignedBatches).Error
if err != nil {
return nil, err
}
return assignedBatches, nil
}
// GetProvingStatusByHash retrieves the proving status of a batch given its hash.
func (o *Batch) GetProvingStatusByHash(ctx context.Context, hash string) (types.ProvingStatus, error) {
var batch Batch
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("proving_status")
db = db.Where("hash = ?", hash)
if err := db.Find(&batch).Error; err != nil {
return types.ProvingStatusUndefined, err
}
return types.ProvingStatus(batch.ProvingStatus), nil
}
// GetLatestBatch retrieves the latest batch from the database.
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
var latestBatch Batch
err := o.db.WithContext(ctx).Order("index desc").First(&latestBatch).Error
if err != nil {
return nil, err
}
return &latestBatch, nil
}
// InsertBatch inserts a new batch into the database.
// Note: only used in unit tests.
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
if len(chunks) == 0 {
return nil, errors.New("invalid args")
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
parentBatch, err := o.GetLatestBatch(ctx)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Error("failed to get the latest batch", "err", err)
return nil, err
}
var batchIndex uint64
var parentBatchHash common.Hash
var totalL1MessagePoppedBefore uint64
var version uint8 = defaultBatchHeaderVersion
// if parentBatch == nil then err == gorm.ErrRecordNotFound, which means there is
// no batch record in the db yet; in that case we use default empty values for the new batch.
// if parentBatch != nil then err == nil, and we fill the parentBatch-related data into the new batch.
if parentBatch != nil {
batchIndex = parentBatch.Index + 1
parentBatchHash = common.HexToHash(parentBatch.Hash)
var parentBatchHeader *types.BatchHeader
parentBatchHeader, err = types.DecodeBatchHeader(parentBatch.BatchHeader)
if err != nil {
log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
return nil, err
}
totalL1MessagePoppedBefore = parentBatchHeader.TotalL1MessagePopped()
version = parentBatchHeader.Version()
}
batchHeader, err := types.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks)
if err != nil {
log.Error("failed to create batch header",
"index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
"parent hash", parentBatchHash, "number of chunks", len(chunks), "err", err)
return nil, err
}
numChunks := len(chunks)
lastChunkBlockNum := len(chunks[numChunks-1].Blocks)
newBatch := Batch{
Index: batchIndex,
Hash: batchHeader.Hash().Hex(),
StartChunkHash: startChunkHash,
StartChunkIndex: startChunkIndex,
EndChunkHash: endChunkHash,
EndChunkIndex: endChunkIndex,
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
BatchHeader: batchHeader.Encode(),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
ChunkProofsReady: 0,
}
if err := db.WithContext(ctx).Create(&newBatch).Error; err != nil {
log.Error("failed to insert batch", "batch", newBatch, "err", err)
return nil, err
}
return &newBatch, nil
}
// UpdateChunkProofsStatusByBatchHash updates the chunk_proofs_ready field
// of a given batch hash to the provided status.
func (o *Chunk) UpdateChunkProofsStatusByBatchHash(ctx context.Context, batchHash string, isReady bool) error {
var chunkProofsStatus int16
if isReady {
chunkProofsStatus = 1
} else {
chunkProofsStatus = 0
}
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash = ?", batchHash)
return db.Update("chunk_proofs_ready", chunkProofsStatus).Error
}
// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
if err := db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err
}
return nil
}
// UpdateProofByHash updates the batch proof by hash.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
}
updateFields := make(map[string]interface{})
updateFields["proof"] = proofBytes
updateFields["proof_time_sec"] = proofTimeSec
err = o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error
return err
}
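
Taken together, these methods give the coordinator a simple batch proving lifecycle. A hedged sketch of the happy path (not part of the diff; error handling elided, and ctx, db, proof, and proofTimeSec are assumed to exist):

batchOrm := NewBatch(db)
batches, _ := batchOrm.GetUnassignedBatches(ctx, 1) // only batches with all chunk proofs ready
if len(batches) > 0 {
	hash := batches[0].Hash
	_ = batchOrm.UpdateProvingStatus(ctx, hash, types.ProvingTaskAssigned)
	// ... a roller generates the proof ...
	_ = batchOrm.UpdateProofByHash(ctx, hash, proof, proofTimeSec) // proof is a *message.AggProof
	_ = batchOrm.UpdateProvingStatus(ctx, hash, types.ProvingTaskVerified)
}
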

View File

@@ -0,0 +1,300 @@
package orm
import (
"context"
"encoding/json"
"errors"
"time"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
)
// Chunk represents a chunk of blocks in the database.
type Chunk struct {
db *gorm.DB `gorm:"-"`
// chunk
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"`
StartBlockHash string `json:"start_block_hash" gorm:"column:start_block_hash"`
EndBlockNumber uint64 `json:"end_block_number" gorm:"column:end_block_number"`
EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"`
StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"`
TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
TotalL1MessagesPoppedInChunk uint64 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
// proof
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"`
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewChunk creates a new Chunk database instance.
func NewChunk(db *gorm.DB) *Chunk {
return &Chunk{db: db}
}
// TableName returns the table name for the chunk model.
func (*Chunk) TableName() string {
return "chunk"
}
// GetUnassignedChunks retrieves unassigned chunks based on the specified limit.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetUnassignedChunks(ctx context.Context, limit int) ([]*Chunk, error) {
if limit < 0 {
return nil, errors.New("limit must not be smaller than zero")
}
if limit == 0 {
return nil, nil
}
var chunks []*Chunk
db := o.db.WithContext(ctx)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
db = db.Order("index ASC")
db = db.Limit(limit)
if err := db.Find(&chunks).Error; err != nil {
return nil, err
}
return chunks, nil
}
// GetProofsByBatchHash retrieves the proofs associated with a specific batch hash.
// It returns a slice of decoded proofs (message.AggProof) obtained from the database.
// The returned proofs are sorted in ascending order by their associated chunk index.
func (o *Chunk) GetProofsByBatchHash(ctx context.Context, batchHash string) ([]*message.AggProof, error) {
var chunks []*Chunk
db := o.db.WithContext(ctx)
db = db.Where("batch_hash", batchHash)
db = db.Order("index ASC")
if err := db.Find(&chunks).Error; err != nil {
return nil, err
}
var proofs []*message.AggProof
for _, chunk := range chunks {
var proof message.AggProof
if err := json.Unmarshal(chunk.Proof, &proof); err != nil {
return nil, err
}
proofs = append(proofs, &proof)
}
return proofs, nil
}
// GetLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
var latestChunk Chunk
err := o.db.WithContext(ctx).
Order("index desc").
First(&latestChunk).Error
if err != nil {
return nil, err
}
return &latestChunk, nil
}
// GetProvingStatusByHash retrieves the proving status of a chunk given its hash.
func (o *Chunk) GetProvingStatusByHash(ctx context.Context, hash string) (types.ProvingStatus, error) {
var chunk Chunk
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Select("proving_status")
db = db.Where("hash = ?", hash)
if err := db.Find(&chunk).Error; err != nil {
return types.ProvingStatusUndefined, err
}
return types.ProvingStatus(chunk.ProvingStatus), nil
}
// GetAssignedChunks retrieves all chunks whose proving_status is either types.ProvingTaskAssigned or types.ProvingTaskProved.
func (o *Chunk) GetAssignedChunks(ctx context.Context) ([]*Chunk, error) {
var chunks []*Chunk
err := o.db.WithContext(ctx).Where("proving_status IN (?)", []int{int(types.ProvingTaskAssigned), int(types.ProvingTaskProved)}).
Find(&chunks).Error
if err != nil {
return nil, err
}
return chunks, nil
}
// CheckIfBatchChunkProofsAreReady checks if all proofs for all chunks of a given batchHash are collected.
func (o *Chunk) CheckIfBatchChunkProofsAreReady(ctx context.Context, batchHash string) (bool, error) {
var count int64
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash = ? AND proving_status != ?", batchHash, types.ProvingTaskVerified)
err := db.Count(&count).Error
if err != nil {
return false, err
}
return count == 0, nil
}
// GetChunkBatchHash retrieves the batchHash of a given chunk.
func (o *Chunk) GetChunkBatchHash(ctx context.Context, chunkHash string) (string, error) {
var chunk Chunk
db := o.db.WithContext(ctx)
db = db.Where("hash = ?", chunkHash)
db = db.Select("batch_hash")
if err := db.First(&chunk).Error; err != nil {
return "", err
}
return chunk.BatchHash, nil
}
// InsertChunk inserts a new chunk into the database.
// Note: only used in unit tests.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
return nil, errors.New("invalid args")
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
var chunkIndex uint64
var totalL1MessagePoppedBefore uint64
parentChunk, err := o.GetLatestChunk(ctx)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Error("failed to get latest chunk", "err", err)
return nil, err
}
// if parentChunk == nil then err == gorm.ErrRecordNotFound, which means there is
// no chunk record in the db yet; in that case we use default empty values for the new chunk.
// if parentChunk != nil then err == nil, and we fill the parentChunk-related data into the new chunk.
if parentChunk != nil {
chunkIndex = parentChunk.Index + 1
totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + parentChunk.TotalL1MessagesPoppedInChunk
}
hash, err := chunk.Hash(totalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to get chunk hash", "err", err)
return nil, err
}
var totalL2TxGas uint64
var totalL2TxNum uint64
var totalL1CommitCalldataSize uint64
var totalL1CommitGas uint64
for _, block := range chunk.Blocks {
totalL2TxGas += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas += block.EstimateL1CommitGas()
}
numBlocks := len(chunk.Blocks)
newChunk := Chunk{
Index: chunkIndex,
Hash: hash.Hex(),
StartBlockNumber: chunk.Blocks[0].Header.Number.Uint64(),
StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: totalL2TxGas,
TotalL2TxNum: totalL2TxNum,
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
TotalL1CommitGas: totalL1CommitGas,
StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),
ProvingStatus: int16(types.ProvingTaskUnassigned),
}
if err := db.WithContext(ctx).Create(&newChunk).Error; err != nil {
log.Error("failed to insert chunk", "hash", hash, "err", err)
return nil, err
}
return &newChunk, nil
}
// UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash)
return db.Updates(updateFields).Error
}
// UpdateProofByHash updates the chunk proof by hash.
func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
}
updateFields := make(map[string]interface{})
updateFields["proof"] = proofBytes
updateFields["proof_time_sec"] = proofTimeSec
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash)
return db.Updates(updateFields).Error
}
// UpdateBatchHashInRange updates the batch_hash for chunks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end indices.
// Note: only used in unit tests.
func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.Model(&Chunk{}).Where("index >= ? AND index <= ?", startIndex, endIndex)
if err := db.Update("batch_hash", batchHash).Error; err != nil {
return err
}
return nil
}
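
As with batches, these chunk methods compose when a new batch is formed. A sketch under the assumption that chunk1 and chunk2 are *types.Chunk values and batchHash comes from the freshly inserted batch row (error handling elided):

chunkOrm := NewChunk(db)
first, _ := chunkOrm.InsertChunk(ctx, chunk1)
last, _ := chunkOrm.InsertChunk(ctx, chunk2)
// tie the chunks to the batch over the closed index range [first.Index, last.Index]
_ = chunkOrm.UpdateBatchHashInRange(ctx, first.Index, last.Index, batchHash)
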

View File

@@ -0,0 +1,109 @@
package orm
import (
"context"
"encoding/json"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L2Block represents an L2 block in the database.
type L2Block struct {
db *gorm.DB `gorm:"column:-"`
Number uint64 `json:"number" gorm:"number"`
Hash string `json:"hash" gorm:"hash"`
ParentHash string `json:"parent_hash" gorm:"parent_hash"`
Header string `json:"header" gorm:"header"`
Transactions string `json:"transactions" gorm:"transactions"`
WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"`
TxNum uint64 `json:"tx_num" gorm:"tx_num"`
GasUsed uint64 `json:"gas_used" gorm:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
}
// NewL2Block creates a new L2Block instance.
func NewL2Block(db *gorm.DB) *L2Block {
return &L2Block{db: db}
}
// TableName returns the name of the "l2_block" table.
func (*L2Block) TableName() string {
return "l2_block"
}
// GetL2BlocksByChunkHash retrieves the L2 blocks associated with the specified chunk hash.
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2BlocksByChunkHash(ctx context.Context, chunkHash string) ([]*types.WrappedBlock, error) {
var l2Blocks []L2Block
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Where("chunk_hash = ?", chunkHash)
db = db.Order("number ASC")
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, err
}
var wrappedBlocks []*types.WrappedBlock
for _, v := range l2Blocks {
var wrappedBlock types.WrappedBlock
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
return nil, err
}
wrappedBlock.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
return nil, err
}
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
}
return wrappedBlocks, nil
}
// InsertL2Blocks inserts l2 blocks into the "l2_block" table.
func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlock) error {
var l2Blocks []L2Block
for _, block := range blocks {
header, err := json.Marshal(block.Header)
if err != nil {
log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err)
return err
}
txs, err := json.Marshal(block.Transactions)
if err != nil {
log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err)
return err
}
l2Block := L2Block{
Number: block.Header.Number.Uint64(),
Hash: block.Header.Hash().String(),
ParentHash: block.Header.ParentHash.String(),
Transactions: string(txs),
WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(),
TxNum: uint64(len(block.Transactions)),
GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time,
Header: string(header),
}
l2Blocks = append(l2Blocks, l2Block)
}
if err := o.db.WithContext(ctx).Create(&l2Blocks).Error; err != nil {
log.Error("failed to insert l2Blocks", "err", err)
return err
}
return nil
}
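
A minimal round trip through this table, assuming blocks is a []*types.WrappedBlock produced by the L2 watcher and chunk_hash is assigned elsewhere (error handling elided):

l2BlockOrm := NewL2Block(db)
_ = l2BlockOrm.InsertL2Blocks(ctx, blocks)
// later, reload the blocks belonging to one chunk, ordered by block number
wrappedBlocks, _ := l2BlockOrm.GetL2BlocksByChunkHash(ctx, chunkHash)
// wrappedBlocks then feed chunk hashing and proof generation
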

View File

@@ -0,0 +1,86 @@
package orm
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/utils"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
)
var (
base *docker.App
db *gorm.DB
proverTaskOrm *ProverTask
)
func TestMain(m *testing.M) {
t := &testing.T{}
setupEnv(t)
defer tearDownEnv(t)
m.Run()
}
func setupEnv(t *testing.T) {
base = docker.NewDockerApp()
base.RunDBImage(t)
var err error
db, err = utils.InitDB(
&config.DBConfig{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
},
)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
proverTaskOrm = NewProverTask(db)
}
func tearDownEnv(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
sqlDB.Close()
base.Free()
}
func TestProverTaskOrm(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
proverTask := ProverTask{
TaskID: "test-hash",
ProverName: "roller-0",
ProverPublicKey: "0",
ProvingStatus: int16(types.RollerAssigned),
}
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasks, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasks))
assert.Equal(t, proverTask.ProverName, proverTasks[0].ProverName)
proverTask.ProvingStatus = int16(types.RollerProofValid)
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasks, err = proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasks))
assert.Equal(t, proverTask.ProvingStatus, proverTasks[0].ProvingStatus)
}

View File

@@ -0,0 +1,76 @@
package orm
import (
"context"
"time"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
// ProverTask records the roller assigned to a chunk/batch proving task and the status of that assignment.
type ProverTask struct {
db *gorm.DB `gorm:"column:-"`
ID int64 `json:"id" gorm:"column:id"`
TaskID string `json:"task_id" gorm:"column:task_id"`
ProverPublicKey string `json:"prover_public_key" gorm:"column:prover_public_key"`
ProverName string `json:"prover_name" gorm:"column:prover_name"`
TaskType int16 `json:"task_type" gorm:"column:task_type;default:0"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:0"`
FailureType int16 `json:"failure_type" gorm:"column:failure_type;default:0"`
Reward uint64 `json:"reward" gorm:"column:reward;default:0"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at"`
}
// NewProverTask creates a new ProverTask instance.
func NewProverTask(db *gorm.DB) *ProverTask {
return &ProverTask{db: db}
}
// TableName returns the name of the "prover_task" table.
func (*ProverTask) TableName() string {
return "prover_task"
}
// GetProverTasksByHashes retrieves the ProverTask records associated with the specified hashes.
// The returned prover task objects are sorted in ascending order by their ids.
func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string) ([]*ProverTask, error) {
if len(hashes) == 0 {
return nil, nil
}
var proverTasks []*ProverTask
db := o.db.WithContext(ctx)
db = db.Where("task_id IN ?", hashes)
db = db.Order("id asc")
if err := db.Find(&proverTasks).Error; err != nil {
return nil, err
}
return proverTasks, nil
}
// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, sessionInfo *ProverTask) error {
db := o.db.WithContext(ctx)
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
DoUpdates: clause.AssignmentColumns([]string{"proving_status"}),
})
return db.Create(&sessionInfo).Error
}
// UpdateProverTaskProvingStatus updates the proving_status of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus) error {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", proofType, taskID, pk)
return db.Update("proving_status", status).Error
}
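
Note the ON CONFLICT clause in SetProverTask: a second call with the same (task_type, task_id, prover_public_key) updates only proving_status, which is exactly what TestProverTaskOrm above exercises. A small sketch of that upsert behavior (values are illustrative):

task := &ProverTask{
	TaskID:          "task-1",
	ProverPublicKey: "pk-1",
	ProverName:      "roller-1",
	TaskType:        int16(message.ProofTypeChunk),
	ProvingStatus:   int16(types.RollerAssigned),
}
_ = proverTaskOrm.SetProverTask(ctx, task) // first call inserts the row
task.ProvingStatus = int16(types.RollerProofValid)
_ = proverTaskOrm.SetProverTask(ctx, task) // second call hits the conflict and updates proving_status only
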

View File

@@ -0,0 +1,43 @@
package utils
import (
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"scroll-tech/coordinator/internal/config"
)
// InitDB initializes the db handler.
func InitDB(config *config.DBConfig) (*gorm.DB, error) {
db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
Logger: logger.Default.LogMode(logger.Warn),
})
if err != nil {
return nil, err
}
sqlDB, err := db.DB()
if err != nil {
return nil, err
}
sqlDB.SetMaxOpenConns(config.MaxOpenNum)
sqlDB.SetMaxIdleConns(config.MaxIdleNum)
if err = sqlDB.Ping(); err != nil {
return nil, err
}
return db, nil
}
// CloseDB closes the db handler. Note: the db handler should only be closed when the program exits.
func CloseDB(db *gorm.DB) error {
sqlDB, err := db.DB()
if err != nil {
return err
}
if err := sqlDB.Close(); err != nil {
return err
}
return nil
}
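
Typical usage at coordinator startup, sketched under the assumption that cfg was loaded via config.NewConfig (error handling abbreviated):

db, err := utils.InitDB(cfg.DBConfig)
if err != nil {
	log.Crit("failed to init db connection", "err", err)
}
defer func() {
	if err := utils.CloseDB(db); err != nil {
		log.Error("failed to close db connection", "err", err)
	}
}()
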

View File

@@ -11,20 +11,19 @@ import (
cmap "github.com/orcaman/concurrent-map"
"github.com/patrickmn/go-cache"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"golang.org/x/exp/rand"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils/workerpool"
"scroll-tech/database"
"scroll-tech/coordinator/config"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/verifier"
)
@@ -50,15 +49,15 @@ const (
type rollerProofStatus struct {
id string
typ message.ProveType
typ message.ProofType
pk string
status types.RollerProveStatus
}
// Contains all the information on an ongoing proof generation session.
type session struct {
taskID string
sessionInfos []*types.SessionInfo
taskID string
proverTasks []*orm.ProverTask
// finish channel is used to pass the public key of the rollers who finished proving process.
finishChan chan rollerProofStatus
}
@@ -91,11 +90,11 @@ type Manager struct {
// incoming proofs.
verifier *verifier.Verifier
// db interface
orm database.OrmFactory
// l2geth client
*ethclient.Client
// orm interface
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch
proverTaskOrm *orm.ProverTask
// Token cache
tokenCache *cache.Cache
@@ -108,7 +107,7 @@ type Manager struct {
// New returns a new instance of Manager. The instance will be not fully prepared,
// and still needs to be finalized and ran by calling `manager.Start`.
func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmFactory, client *ethclient.Client) (*Manager, error) {
func New(ctx context.Context, cfg *config.RollerManagerConfig, db *gorm.DB) (*Manager, error) {
v, err := verifier.NewVerifier(cfg.Verifier)
if err != nil {
return nil, err
@@ -122,8 +121,10 @@ func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmF
sessions: make(map[string]*session),
failedSessionInfos: make(map[string]*SessionInfo),
verifier: v,
orm: orm,
Client: client,
l2BlockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
verifierWorkerPool: workerpool.NewWorkerPool(cfg.MaxVerifierWorkers),
}, nil
@@ -162,48 +163,42 @@ func (m *Manager) isRunning() bool {
// Loop keeps the manager running.
func (m *Manager) Loop() {
var (
tick = time.NewTicker(time.Second * 2)
tasks []*types.BlockBatch
aggTasks []*types.AggTask
tick = time.NewTicker(time.Second * 2)
chunkTasks []*orm.Chunk
batchTasks []*orm.Batch
)
defer tick.Stop()
for {
select {
case <-tick.C:
// load and send aggregator tasks
if len(aggTasks) == 0 && m.orm != nil {
// load and send batch tasks
if len(batchTasks) == 0 {
var err error
aggTasks, err = m.orm.GetUnassignedAggTasks()
batchTasks, err = m.batchOrm.GetUnassignedBatches(m.ctx, m.GetNumberOfIdleRollers(message.ProofTypeBatch))
if err != nil {
log.Error("failed to get unassigned aggregator proving tasks", "error", err)
log.Error("failed to get unassigned batch proving tasks", "error", err)
continue
}
}
// Select aggregator type roller and send message
for len(aggTasks) > 0 && m.StartAggProofGenerationSession(aggTasks[0], nil) {
aggTasks = aggTasks[1:]
// Select batch type roller and send message
for len(batchTasks) > 0 && m.StartBatchProofGenerationSession(batchTasks[0], nil) {
batchTasks = batchTasks[1:]
}
// load and send basic tasks
if len(tasks) == 0 && m.orm != nil {
var err error
// load and send chunk tasks
if len(chunkTasks) == 0 {
// TODO: add cache
if tasks, err = m.orm.GetBlockBatches(
map[string]interface{}{"proving_status": types.ProvingTaskUnassigned},
fmt.Sprintf(
"ORDER BY index %s LIMIT %d;",
m.cfg.OrderSession,
m.GetNumberOfIdleRollers(message.BasicProve),
),
); err != nil {
log.Error("failed to get unassigned basic proving tasks", "error", err)
var err error
chunkTasks, err = m.chunkOrm.GetUnassignedChunks(m.ctx, m.GetNumberOfIdleRollers(message.ProofTypeChunk))
if err != nil {
log.Error("failed to get unassigned chunk proving tasks", "error", err)
continue
}
}
// Select basic type roller and send message
for len(tasks) > 0 && m.StartBasicProofGenerationSession(tasks[0], nil) {
tasks = tasks[1:]
// Select chunk type roller and send message
for len(chunkTasks) > 0 && m.StartChunkProofGenerationSession(chunkTasks[0], nil) {
chunkTasks = chunkTasks[1:]
}
case <-m.ctx.Done():
if m.ctx.Err() != nil {
@@ -218,55 +213,50 @@ func (m *Manager) Loop() {
}
func (m *Manager) restorePrevSessions() {
// m.orm may be nil in scroll tests
if m.orm == nil {
return
}
m.mu.Lock()
defer m.mu.Unlock()
var hashes []string
// load assigned aggregator tasks from db
aggTasks, err := m.orm.GetAssignedAggTasks()
// load assigned batch tasks from db
batchTasks, err := m.batchOrm.GetAssignedBatches(m.ctx)
if err != nil {
log.Error("failed to load assigned aggregator tasks from db", "error", err)
log.Error("failed to load assigned batch tasks from db", "error", err)
return
}
for _, aggTask := range aggTasks {
hashes = append(hashes, aggTask.ID)
for _, batchTask := range batchTasks {
hashes = append(hashes, batchTask.Hash)
}
// load assigned basic tasks from db
batchHashes, err := m.orm.GetAssignedBatchHashes()
// load assigned chunk tasks from db
chunkTasks, err := m.chunkOrm.GetAssignedChunks(m.ctx)
if err != nil {
log.Error("failed to get assigned batch batchHashes from db", "error", err)
return
}
hashes = append(hashes, batchHashes...)
prevSessions, err := m.orm.GetSessionInfosByHashes(hashes)
for _, chunkTask := range chunkTasks {
hashes = append(hashes, chunkTask.Hash)
}
prevSessions, err := m.proverTaskOrm.GetProverTasksByHashes(m.ctx, hashes)
if err != nil {
log.Error("failed to recover roller session info from db", "error", err)
return
}
sessionInfosMaps := make(map[string][]*types.SessionInfo)
proverTasksMaps := make(map[string][]*orm.ProverTask)
for _, v := range prevSessions {
log.Info("restore roller info for session", "session start time", v.CreatedAt, "session id", v.TaskID, "roller name",
v.RollerName, "prove type", v.ProveType, "public key", v.RollerPublicKey, "proof status", v.ProvingStatus)
sessionInfosMaps[v.TaskID] = append(sessionInfosMaps[v.TaskID], v)
v.ProverName, "proof type", v.TaskType, "public key", v.ProverPublicKey, "proof status", v.ProvingStatus)
proverTasksMaps[v.TaskID] = append(proverTasksMaps[v.TaskID], v)
}
for taskID, sessionInfos := range sessionInfosMaps {
for taskID, proverTasks := range proverTasksMaps {
sess := &session{
taskID: taskID,
sessionInfos: sessionInfos,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
taskID: taskID,
proverTasks: proverTasks,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.sessions[taskID] = sess
go m.CollectProofs(sess)
}
}
// HandleZkProof handle a ZkProof submitted from a roller.
@@ -286,52 +276,52 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
return fmt.Errorf("proof generation session for id %v does not existID", msg.ID)
}
var tmpSessionInfo *types.SessionInfo
for _, si := range sess.sessionInfos {
var proverTask *orm.ProverTask
for _, si := range sess.proverTasks {
// get the send session info of this proof msg
if si.TaskID == msg.ID && si.RollerPublicKey == pk {
tmpSessionInfo = si
if si.TaskID == msg.ID && si.ProverPublicKey == pk {
proverTask = si
}
}
if tmpSessionInfo == nil {
if proverTask == nil {
return fmt.Errorf("proof generation session for id %v pk:%s does not existID", msg.ID, pk)
}
proofTime := time.Since(*tmpSessionInfo.CreatedAt)
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
// Ensure this roller is eligible to participate in the session.
if types.RollerProveStatus(tmpSessionInfo.ProvingStatus) == types.RollerProofValid {
// Ensure this roller is eligible to participate in the prover task.
if types.RollerProveStatus(proverTask.ProvingStatus) == types.RollerProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the roller for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn(
"roller has already submitted valid proof in proof session",
"roller name", tmpSessionInfo.RollerName,
"roller pk", tmpSessionInfo.RollerPublicKey,
"prove type", tmpSessionInfo.ProveType,
"roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey,
"proof type", proverTask.TaskType,
"proof id", msg.ID,
)
return nil
}
log.Info("handling zk proof", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName, "roller pk",
tmpSessionInfo.RollerPublicKey, "prove type", tmpSessionInfo.ProveType, "proof time", proofTimeSec)
log.Info("handling zk proof", "proof id", msg.ID, "roller name", proverTask.ProverName, "roller pk",
proverTask.ProverPublicKey, "proof type", proverTask.TaskType, "proof time", proofTimeSec)
defer func() {
// TODO: maybe we should use db tx for the whole process?
// Roll back current proof's status.
if dbErr != nil {
if msg.Type == message.BasicProve {
if err := m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset basic task status as Unassigned", "msg.ID", msg.ID)
if msg.Type == message.ProofTypeChunk {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset chunk task status as Unassigned", "msg.ID", msg.ID)
}
}
if msg.Type == message.AggregatorProve {
if err := m.orm.UpdateAggTaskStatus(msg.ID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset aggregator task status as Unassigned", "msg.ID", msg.ID)
if msg.Type == message.ProofTypeBatch {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset batch task status as Unassigned", "msg.ID", msg.ID)
}
}
}
@@ -346,13 +336,13 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
if msg.Status != message.StatusOk {
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
m.updateMetricRollerProofsGeneratedFailedTimeTimer(tmpSessionInfo.RollerPublicKey, proofTime)
m.updateMetricRollerProofsGeneratedFailedTimeTimer(proverTask.ProverPublicKey, proofTime)
log.Info(
"proof generated by roller failed",
"proof id", msg.ID,
"roller name", tmpSessionInfo.RollerName,
"roller pk", tmpSessionInfo.RollerPublicKey,
"prove type", msg.Type,
"roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey,
"proof type", msg.Type,
"proof time", proofTimeSec,
"error", msg.Error,
)
@@ -360,19 +350,23 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
}
// store proof content
if msg.Type == message.BasicProve {
if dbErr = m.orm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
log.Error("failed to store basic proof into db", "error", dbErr)
if msg.Type == message.ProofTypeChunk {
if dbErr = m.chunkOrm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
log.Error("failed to store chunk proof into db", "error", dbErr)
return dbErr
}
if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskProved); dbErr != nil {
log.Error("failed to update basic task status as proved", "error", dbErr)
if dbErr = m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskProved); dbErr != nil {
log.Error("failed to update chunk task status as proved", "error", dbErr)
return dbErr
}
}
if msg.Type == message.AggregatorProve {
if dbErr = m.orm.UpdateProofForAggTask(msg.ID, msg.Proof); dbErr != nil {
log.Error("failed to store aggregator proof into db", "error", dbErr)
if msg.Type == message.ProofTypeBatch {
if dbErr = m.batchOrm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
log.Error("failed to store batch proof into db", "error", dbErr)
return dbErr
}
if dbErr = m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskProved); dbErr != nil {
log.Error("failed to update batch task status as proved", "error", dbErr)
return dbErr
}
}
@@ -380,31 +374,35 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
coordinatorProofsReceivedTotalCounter.Inc(1)
var verifyErr error
// TODO: wrap both basic verifier and aggregator verifier
// TODO: wrap both chunk verifier and batch verifier
success, verifyErr = m.verifyProof(msg.Proof)
if verifyErr != nil {
// TODO: this is only a temp workaround for testnet, we should return err in real cases
success = false
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName,
"roller pk", tmpSessionInfo.RollerPublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
// TODO: Roller needs to be slashed if proof is invalid.
}
if success {
if msg.Type == message.AggregatorProve {
if dbErr = m.orm.UpdateAggTaskStatus(msg.ID, types.ProvingTaskVerified); dbErr != nil {
if msg.Type == message.ProofTypeChunk {
if dbErr = m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskVerified); dbErr != nil {
log.Error(
"failed to update aggregator proving_status",
"failed to update chunk proving_status",
"msg.ID", msg.ID,
"status", types.ProvingTaskVerified,
"error", dbErr)
return dbErr
}
if err := m.checkAreAllChunkProofsReady(msg.ID); err != nil {
log.Error("failed to check are all chunk proofs ready", "error", err)
return err
}
}
if msg.Type == message.BasicProve {
if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskVerified); dbErr != nil {
if msg.Type == message.ProofTypeBatch {
if dbErr = m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskVerified); dbErr != nil {
log.Error(
"failed to update basic proving_status",
"failed to update batch proving_status",
"msg.ID", msg.ID,
"status", types.ProvingTaskVerified,
"error", dbErr)
@@ -413,27 +411,46 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
}
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(tmpSessionInfo.RollerPublicKey, proofTime)
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName,
"roller pk", tmpSessionInfo.RollerPublicKey, "prove type", msg.Type, "proof time", proofTimeSec)
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(proverTask.ProverPublicKey, proofTime)
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec)
} else {
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
m.updateMetricRollerProofsVerifiedFailedTimeTimer(tmpSessionInfo.RollerPublicKey, proofTime)
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName,
"roller pk", tmpSessionInfo.RollerPublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
m.updateMetricRollerProofsVerifiedFailedTimeTimer(proverTask.ProverPublicKey, proofTime)
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
}
return nil
}
// checkAttempts use the count of session info to check the attempts
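// checkAreAllChunkProofsReady looks up the batch that contains the given chunk and, once every chunk proof of that batch has been verified, flags the batch's chunk proofs as ready.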
func (m *Manager) checkAreAllChunkProofsReady(chunkHash string) error {
batchHash, err := m.chunkOrm.GetChunkBatchHash(m.ctx, chunkHash)
if err != nil {
return err
}
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(m.ctx, batchHash)
if err != nil {
return err
}
if allReady {
err := m.chunkOrm.UpdateChunkProofsStatusByBatchHash(m.ctx, batchHash, true)
if err != nil {
return err
}
}
return nil
}
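// For context, a minimal sketch (not part of this diff) of how a dispatcher
// could consume the flag set above to kick off batch proving; the helper
// GetUnassignedBatchesWithReadyChunkProofs is a hypothetical name.
//
// func (m *Manager) dispatchReadyBatches() {
// batches, err := m.batchOrm.GetUnassignedBatchesWithReadyChunkProofs(m.ctx)
// if err != nil {
// log.Error("failed to fetch batches with ready chunk proofs", "err", err)
// return
// }
// for _, batch := range batches {
// // reuses the session starter introduced in this diff
// m.StartBatchProofGenerationSession(batch, nil)
// }
// }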
// checkAttemptsExceeded uses the count of prover task records to check whether the session's attempt limit has been reached
func (m *Manager) checkAttemptsExceeded(hash string) bool {
sessionInfos, err := m.orm.GetSessionInfosByHashes([]string{hash})
proverTasks, err := m.proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{hash})
if err != nil {
log.Error("get session info error", "hash id", hash, "error", err)
return true
}
if len(sessionInfos) >= int(m.cfg.SessionAttempts) {
if len(proverTasks) >= int(m.cfg.SessionAttempts) {
return true
}
return false
@@ -450,15 +467,15 @@ func (m *Manager) CollectProofs(sess *session) {
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
if !m.checkAttemptsExceeded(sess.taskID) {
var success bool
if message.ProveType(sess.sessionInfos[0].ProveType) == message.AggregatorProve {
success = m.StartAggProofGenerationSession(nil, sess)
} else if message.ProveType(sess.sessionInfos[0].ProveType) == message.BasicProve {
success = m.StartBasicProofGenerationSession(nil, sess)
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeBatch {
success = m.StartBatchProofGenerationSession(nil, sess)
} else if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeChunk {
success = m.StartChunkProofGenerationSession(nil, sess)
}
if success {
m.mu.Lock()
for _, v := range sess.sessionInfos {
m.freeTaskIDForRoller(v.RollerPublicKey, v.TaskID)
for _, v := range sess.proverTasks {
m.freeTaskIDForRoller(v.ProverPublicKey, v.TaskID)
}
m.mu.Unlock()
log.Info("Retrying session", "session id:", sess.taskID)
@@ -473,20 +490,20 @@ func (m *Manager) CollectProofs(sess *session) {
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
if message.ProveType(sess.sessionInfos[0].ProveType) == message.BasicProve {
if err := m.orm.UpdateProvingStatus(sess.taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset basic task_status as Unassigned", "id", sess.taskID, "err", err)
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeChunk {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, sess.taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset chunk task_status as Unassigned", "task id", sess.taskID, "err", err)
}
}
if message.ProveType(sess.sessionInfos[0].ProveType) == message.AggregatorProve {
if err := m.orm.UpdateAggTaskStatus(sess.taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset aggregator task_status as Unassigned", "id", sess.taskID, "err", err)
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeBatch {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, sess.taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset batch task_status as Unassigned", "task id", sess.taskID, "err", err)
}
}
m.mu.Lock()
for _, v := range sess.sessionInfos {
m.freeTaskIDForRoller(v.RollerPublicKey, v.TaskID)
for _, v := range sess.proverTasks {
m.freeTaskIDForRoller(v.ProverPublicKey, v.TaskID)
}
delete(m.sessions, sess.taskID)
m.mu.Unlock()
@@ -496,28 +513,29 @@ func (m *Manager) CollectProofs(sess *session) {
// Executed after one of the rollers finishes sending its proof; returns early once all rollers have sent results.
case ret := <-sess.finishChan:
m.mu.Lock()
for idx := range sess.sessionInfos {
if sess.sessionInfos[idx].RollerPublicKey == ret.pk {
sess.sessionInfos[idx].ProvingStatus = int16(ret.status)
for idx := range sess.proverTasks {
if sess.proverTasks[idx].ProverPublicKey == ret.pk {
sess.proverTasks[idx].ProvingStatus = int16(ret.status)
}
}
if sess.isSessionFailed() {
if ret.typ == message.BasicProve {
if err := m.orm.UpdateProvingStatus(ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update basic proving_status as failed", "msg.ID", ret.id, "error", err)
if ret.typ == message.ProofTypeChunk {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
if ret.typ == message.AggregatorProve {
if err := m.orm.UpdateAggTaskStatus(ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update aggregator proving_status as failed", "msg.ID", ret.id, "error", err)
if ret.typ == message.ProofTypeBatch {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}
if err := m.orm.UpdateSessionInfoProvingStatus(m.ctx, ret.typ, ret.id, ret.pk, ret.status); err != nil {
log.Error("db set session info fail", "pk", ret.pk, "error", err)
if err := m.proverTaskOrm.UpdateProverTaskProvingStatus(m.ctx, ret.typ, ret.id, ret.pk, ret.status); err != nil {
log.Error("failed to update session info proving status",
"proof type", ret.typ, "task id", ret.id, "pk", ret.pk, "status", ret.status, "error", err)
}
// Check whether all rollers have finished their tasks; rollers with valid results are indexed by public key.
@@ -529,9 +547,9 @@ func (m *Manager) CollectProofs(sess *session) {
randIndex := rand.Int63n(int64(len(validRollers)))
_ = validRollers[randIndex]
// TODO: reward winner
for _, sessionInfo := range sess.sessionInfos {
m.freeTaskIDForRoller(sessionInfo.RollerPublicKey, sessionInfo.TaskID)
delete(m.sessions, sessionInfo.TaskID)
for _, proverTask := range sess.proverTasks {
m.freeTaskIDForRoller(proverTask.ProverPublicKey, proverTask.TaskID)
delete(m.sessions, proverTask.TaskID)
}
m.mu.Unlock()
@@ -548,9 +566,9 @@ func (m *Manager) CollectProofs(sess *session) {
// validRollers records the public keys of the rollers that have finished their tasks with valid results.
func (s *session) isRollersFinished() (bool, []string) {
var validRollers []string
for _, sessionInfo := range s.sessionInfos {
for _, sessionInfo := range s.proverTasks {
if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofValid {
validRollers = append(validRollers, sessionInfo.RollerPublicKey)
validRollers = append(validRollers, sessionInfo.ProverPublicKey)
continue
}
@@ -565,7 +583,7 @@ func (s *session) isRollersFinished() (bool, []string) {
}
func (s *session) isSessionFailed() bool {
for _, sessionInfo := range s.sessionInfos {
for _, sessionInfo := range s.proverTasks {
if types.RollerProveStatus(sessionInfo.ProvingStatus) != types.RollerProofInvalid {
return false
}
@@ -589,101 +607,100 @@ func (m *Manager) APIs() []rpc.API {
}
}
// StartBasicProofGenerationSession starts a basic proof generation session
func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevSession *session) (success bool) {
// StartChunkProofGenerationSession starts a chunk proof generation session
func (m *Manager) StartChunkProofGenerationSession(task *orm.Chunk, prevSession *session) (success bool) {
var taskID string
if task != nil {
taskID = task.Hash
} else {
taskID = prevSession.taskID
}
if m.GetNumberOfIdleRollers(message.BasicProve) == 0 {
log.Warn("no idle basic roller when starting proof generation session", "id", taskID)
if m.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
log.Warn("no idle chunk roller when starting proof generation session", "id", taskID)
return false
}
log.Info("start basic proof generation session", "id", taskID)
log.Info("start chunk proof generation session", "id", taskID)
defer func() {
if !success {
if task != nil {
if err := m.orm.UpdateProvingStatus(taskID, types.ProvingTaskUnassigned); err != nil {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", taskID, "err", err)
}
} else {
if err := m.orm.UpdateProvingStatus(taskID, types.ProvingTaskFailed); err != nil {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Failed", "id", taskID, "err", err)
}
}
}
}()
// Get block traces.
blockInfos, err := m.orm.GetL2BlockInfos(map[string]interface{}{"batch_hash": taskID})
// Get block hashes.
wrappedBlocks, err := m.l2BlockOrm.GetL2BlocksByChunkHash(m.ctx, taskID)
if err != nil {
log.Error(
"could not GetBlockInfos",
"batch_hash", taskID,
"Failed to fetch wrapped blocks",
"batch hash", taskID,
"error", err,
)
return false
}
blockHashes := make([]common.Hash, len(blockInfos))
for i, blockInfo := range blockInfos {
blockHashes[i] = common.HexToHash(blockInfo.Hash)
blockHashes := make([]common.Hash, len(wrappedBlocks))
for i, wrappedBlock := range wrappedBlocks {
blockHashes[i] = wrappedBlock.Header.Hash()
}
// Dispatch task to basic rollers.
var sessionInfos []*types.SessionInfo
// Dispatch task to chunk rollers.
var proverTasks []*orm.ProverTask
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.BasicProve)
roller := m.selectRoller(message.ProofTypeChunk)
if roller == nil {
log.Info("selectRoller returns nil")
break
}
log.Info("roller is picked", "session id", taskID, "name", roller.Name, "public key", roller.PublicKey)
// send block hashes to roller
if !roller.sendTask(&message.TaskMsg{ID: taskID, Type: message.BasicProve, BlockHashes: blockHashes}) {
if !roller.sendTask(&message.TaskMsg{ID: taskID, Type: message.ProofTypeChunk, BlockHashes: blockHashes}) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
continue
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
now := time.Now()
tmpSessionInfo := types.SessionInfo{
proverTask := orm.ProverTask{
TaskID: taskID,
RollerPublicKey: roller.PublicKey,
ProveType: int16(message.BasicProve),
RollerName: roller.Name,
CreatedAt: &now,
ProverPublicKey: roller.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: roller.Name,
ProvingStatus: int16(types.RollerAssigned),
CreatedAt: time.Now(), // read later from the in-memory proverTasks slice, so it must be assigned explicitly here.
}
// Store session info.
if err = m.orm.SetSessionInfo(&tmpSessionInfo); err != nil {
// Store prover task info.
if err = m.proverTaskOrm.SetProverTask(m.ctx, &proverTask); err != nil {
log.Error("db set session info fail", "session id", taskID, "error", err)
return false
}
sessionInfos = append(sessionInfos, &tmpSessionInfo)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.BasicProve, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", tmpSessionInfo.ProvingStatus)
proverTasks = append(proverTasks, &proverTask)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.ProofTypeChunk, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", proverTask.ProvingStatus)
}
// No roller assigned.
if len(sessionInfos) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle basic rollers", m.GetNumberOfIdleRollers(message.BasicProve))
if len(proverTasks) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle chunk rollers", m.GetNumberOfIdleRollers(message.ProofTypeChunk))
return false
}
// Update session proving status as assigned.
if err = m.orm.UpdateProvingStatus(taskID, types.ProvingTaskAssigned); err != nil {
if err = m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", taskID, "err", err)
return false
}
// Create a proof generation session.
sess := &session{
taskID: taskID,
sessionInfos: sessionInfos,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
taskID: taskID,
proverTasks: proverTasks,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.mu.Lock()
@@ -694,27 +711,27 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
return true
}
// StartAggProofGenerationSession starts an aggregator proof generation.
func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSession *session) (success bool) {
// StartBatchProofGenerationSession starts a batch proof generation session
func (m *Manager) StartBatchProofGenerationSession(task *orm.Batch, prevSession *session) (success bool) {
var taskID string
if task != nil {
taskID = task.ID
taskID = task.Hash
} else {
taskID = prevSession.taskID
}
if m.GetNumberOfIdleRollers(message.AggregatorProve) == 0 {
if m.GetNumberOfIdleRollers(message.ProofTypeBatch) == 0 {
log.Warn("no idle common roller when starting proof generation session", "id", taskID)
return false
}
log.Info("start aggregator proof generation session", "id", taskID)
log.Info("start batch proof generation session", "id", taskID)
defer func() {
if !success {
if task != nil {
if err := m.orm.UpdateAggTaskStatus(taskID, types.ProvingTaskUnassigned); err != nil {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", taskID, "err", err)
} else if err := m.orm.UpdateAggTaskStatus(taskID, types.ProvingTaskFailed); err != nil {
} else if err := m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Failed", "id", taskID, "err", err)
}
}
@@ -722,17 +739,17 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
}()
// get agg task from db
subProofs, err := m.orm.GetSubProofsByAggTaskID(taskID)
// get chunk proofs from db
chunkProofs, err := m.chunkOrm.GetProofsByBatchHash(m.ctx, taskID)
if err != nil {
log.Error("failed to get sub proofs for aggregator task", "id", taskID, "error", err)
log.Error("failed to get chunk proofs for batch task", "session id", taskID, "error", err)
return false
}
// Dispatch task to basic rollers.
var sessionInfos []*types.SessionInfo
// Dispatch task to batch rollers.
var proverTasks []*orm.ProverTask
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.AggregatorProve)
roller := m.selectRoller(message.ProofTypeBatch)
if roller == nil {
log.Info("selectRoller returns nil")
break
@@ -741,50 +758,49 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
// send chunk proofs to roller
if !roller.sendTask(&message.TaskMsg{
ID: taskID,
Type: message.AggregatorProve,
SubProofs: subProofs,
Type: message.ProofTypeBatch,
SubProofs: chunkProofs,
}) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
continue
}
now := time.Now()
tmpSessionInfo := types.SessionInfo{
proverTask := orm.ProverTask{
TaskID: taskID,
RollerPublicKey: roller.PublicKey,
ProveType: int16(message.AggregatorProve),
RollerName: roller.Name,
CreatedAt: &now,
ProverPublicKey: roller.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: roller.Name,
ProvingStatus: int16(types.RollerAssigned),
CreatedAt: time.Now(), // read later from the in-memory proverTasks slice, so it must be assigned explicitly here.
}
// Store prover task info.
if err = m.orm.SetSessionInfo(&tmpSessionInfo); err != nil {
if err = m.proverTaskOrm.SetProverTask(m.ctx, &proverTask); err != nil {
log.Error("db set session info fail", "session id", taskID, "error", err)
return false
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
sessionInfos = append(sessionInfos, &tmpSessionInfo)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.AggregatorProve, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", tmpSessionInfo.ProvingStatus)
proverTasks = append(proverTasks, &proverTask)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.ProofTypeBatch, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", proverTask.ProvingStatus)
}
// No roller assigned.
if len(sessionInfos) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle aggregator rollers", m.GetNumberOfIdleRollers(message.AggregatorProve))
if len(proverTasks) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle batch rollers", m.GetNumberOfIdleRollers(message.ProofTypeBatch))
return false
}
// Update session proving status as assigned.
if err = m.orm.UpdateAggTaskStatus(taskID, types.ProvingTaskAssigned); err != nil {
if err = m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", taskID, "err", err)
return false
}
// Create a proof generation session.
sess := &session{
taskID: taskID,
sessionInfos: sessionInfos,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
taskID: taskID,
proverTasks: proverTasks,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.mu.Lock()

View File

@@ -22,24 +22,36 @@ import (
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
"gorm.io/gorm"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/coordinator"
client2 "scroll-tech/coordinator/client"
coordinator_config "scroll-tech/coordinator/config"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/internal/utils"
"scroll-tech/coordinator/verifier"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
cutils "scroll-tech/common/utils"
)
var (
base *docker.App
batchData *types.BatchData
dbCfg *config.DBConfig
base *docker.App
db *gorm.DB
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
chunk *types.Chunk
)
func TestMain(m *testing.M) {
@@ -53,31 +65,48 @@ func randomURL() string {
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
func setEnv(t *testing.T) (err error) {
func setEnv(t *testing.T) {
base = docker.NewDockerApp()
base.RunDBImage(t)
dbCfg = &config.DBConfig{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
var err error
db, err = utils.InitDB(dbCfg)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
batchOrm = orm.NewBatch(db)
chunkOrm = orm.NewChunk(db)
l2BlockOrm = orm.NewL2Block(db)
templateBlockTrace, err := os.ReadFile("../common/testdata/blockTrace_02.json")
if err != nil {
return err
}
// unmarshal blockTrace
wrappedBlock := &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace, wrappedBlock); err != nil {
return err
}
assert.NoError(t, err)
wrappedBlock1 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock1)
assert.NoError(t, err)
parentBatch := &types.BlockBatch{
Index: 1,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData = types.NewBatchData(parentBatch, []*types.WrappedBlock{wrappedBlock}, nil)
templateBlockTrace, err = os.ReadFile("../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock2 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock2)
assert.NoError(t, err)
return
chunk = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2}}
assert.NoError(t, err)
}
func TestApis(t *testing.T) {
// Set up the test environment.
base = docker.NewDockerApp()
assert.True(t, assert.NoError(t, setEnv(t)), "failed to setup the test environment.")
setEnv(t)
t.Run("TestHandshake", testHandshake)
t.Run("TestFailedHandshake", testFailedHandshake)
@@ -97,30 +126,28 @@ func TestApis(t *testing.T) {
}
func testHandshake(t *testing.T) {
// Reset db.
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
roller := newMockRoller(t, "roller_test", wsURL)
defer roller.close()
roller1 := newMockRoller(t, "roller_test", wsURL, message.ProofTypeChunk)
defer roller1.close()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
roller2 := newMockRoller(t, "roller_test", wsURL, message.ProofTypeBatch)
defer roller2.close()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
}
func testFailedHandshake(t *testing.T) {
// Reset db.
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -174,37 +201,36 @@ func testFailedHandshake(t *testing.T) {
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
assert.Error(t, err)
assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
}
func testSeveralConnections(t *testing.T) {
// Reset db.
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
var (
batch = 100
batch = 200
eg = errgroup.Group{}
rollers = make([]*mockRoller, batch)
)
for i := 0; i < batch; i++ {
for i := 0; i < batch; i += 2 {
idx := i
eg.Go(func() error {
rollers[idx] = newMockRoller(t, "roller_test_"+strconv.Itoa(idx), wsURL)
rollers[idx] = newMockRoller(t, "roller_test_"+strconv.Itoa(idx), wsURL, message.ProofTypeChunk)
rollers[idx+1] = newMockRoller(t, "roller_test_"+strconv.Itoa(idx+1), wsURL, message.ProofTypeBatch)
return nil
})
}
assert.NoError(t, eg.Wait())
// check roller's idle connections
assert.Equal(t, batch, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, batch/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, batch/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
// close connection
for _, roller := range rollers {
@@ -218,7 +244,7 @@ func testSeveralConnections(t *testing.T) {
for {
select {
case <-tick:
if rollerManager.GetNumberOfIdleRollers(message.BasicProve) == 0 {
if rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
return
}
case <-tickStop:
@@ -229,60 +255,65 @@ func testSeveralConnections(t *testing.T) {
}
func testValidProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 3, wsURL)
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
// create mock rollers.
rollers := make([]*mockRoller, 3)
rollers := make([]*mockRoller, 6)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
// only roller 0 submits valid proof.
proofStatus := verifiedSuccess
if i > 0 {
proofStatus = generatedFailed
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL, proofType)
// only roller 0 & 1 submit valid proofs.
proofStatus := generatedFailed
if i <= 1 {
proofStatus = verifiedSuccess
}
rollers[i].waitTaskAndSendProof(t, time.Second, false, proofStatus)
}
defer func() {
// close connection
for _, roller := range rollers {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
assert.NoError(t, err)
for i := range hashes {
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
hashes[i] = batchData.Hash().Hex()
}
assert.NoError(t, dbTx.Commit())
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(hashes) > 0 {
for {
select {
case <-tick:
status, err := l2db.GetProvingStatusByHash(hashes[0])
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
if status == types.ProvingTaskVerified {
hashes = hashes[1:]
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskVerified {
return
}
case <-tickStop:
t.Error("failed to check proof status")
@@ -292,24 +323,24 @@ func testValidProof(t *testing.T) {
}
func testInvalidProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 3, wsURL)
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
// create mock rollers.
rollers := make([]*mockRoller, 3)
rollers := make([]*mockRoller, 6)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL, proofType)
rollers[i].waitTaskAndSendProof(t, time.Second, false, verifiedFailed)
}
defer func() {
@@ -318,29 +349,32 @@ func testInvalidProof(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, true)
assert.NoError(t, err)
for i := range hashes {
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
hashes[i] = batchData.Hash().Hex()
}
assert.NoError(t, dbTx.Commit())
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(hashes) > 0 {
for {
select {
case <-tick:
status, err := l2db.GetProvingStatusByHash(hashes[0])
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
if status == types.ProvingTaskFailed {
hashes = hashes[1:]
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskFailed && batchProofStatus == types.ProvingTaskFailed {
return
}
case <-tickStop:
t.Error("failed to check proof status")
@@ -350,24 +384,24 @@ func testInvalidProof(t *testing.T) {
}
func testProofGeneratedFailed(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 3, wsURL)
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
// create mock rollers.
rollers := make([]*mockRoller, 3)
rollers := make([]*mockRoller, 6)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL, proofType)
rollers[i].waitTaskAndSendProof(t, time.Second, false, generatedFailed)
}
defer func() {
@@ -376,29 +410,32 @@ func testProofGeneratedFailed(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, true)
assert.NoError(t, err)
for i := range hashes {
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
hashes[i] = batchData.Hash().Hex()
}
assert.NoError(t, dbTx.Commit())
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(hashes) > 0 {
for {
select {
case <-tick:
status, err := l2db.GetProvingStatusByHash(hashes[0])
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
if status == types.ProvingTaskFailed {
hashes = hashes[1:]
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskFailed && batchProofStatus == types.ProvingTaskFailed {
return
}
case <-tickStop:
t.Error("failed to check proof status")
@@ -408,87 +445,80 @@ func testProofGeneratedFailed(t *testing.T) {
}
func testTimedoutProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
// create first mock roller, that will not send any proof.
roller1 := newMockRoller(t, "roller_test"+strconv.Itoa(0), wsURL)
// create first chunk & batch mock roller, that will not send any proof.
chunkRoller1 := newMockRoller(t, "roller_test"+strconv.Itoa(0), wsURL, message.ProofTypeChunk)
batchRoller1 := newMockRoller(t, "roller_test"+strconv.Itoa(1), wsURL, message.ProofTypeBatch)
defer func() {
// close connection
roller1.close()
chunkRoller1.close()
batchRoller1.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
var (
hashesAssigned = make([]string, 1)
hashesVerified = make([]string, 1)
)
dbTx, err := l2db.Beginx()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, true)
assert.NoError(t, err)
for i := range hashesAssigned {
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
hashesAssigned[i] = batchData.Hash().Hex()
hashesVerified[i] = batchData.Hash().Hex()
}
assert.NoError(t, dbTx.Commit())
// verify proof status; it should be assigned, because the rollers didn't send any proof
ok := utils.TryTimes(30, func() bool {
status, err := l2db.GetProvingStatusByHash(hashesAssigned[0])
ok := cutils.TryTimes(30, func() bool {
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
if err != nil {
return false
}
if status == types.ProvingTaskAssigned {
hashesAssigned = hashesAssigned[1:]
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
if err != nil {
return false
}
return len(hashesAssigned) == 0
return chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned
})
assert.Truef(t, ok, "failed to check proof status")
// create second mock roller, that will send valid proof.
roller2 := newMockRoller(t, "roller_test"+strconv.Itoa(1), wsURL)
roller2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
chunkRoller2 := newMockRoller(t, "roller_test"+strconv.Itoa(2), wsURL, message.ProofTypeChunk)
chunkRoller2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
batchRoller2 := newMockRoller(t, "roller_test"+strconv.Itoa(3), wsURL, message.ProofTypeBatch)
batchRoller2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
defer func() {
// close connection
roller2.close()
chunkRoller2.close()
batchRoller2.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
// verify proof status; it should be verified now, because the second pair of rollers sent valid proofs
ok = utils.TryTimes(200, func() bool {
status, err := l2db.GetProvingStatusByHash(hashesVerified[0])
ok = cutils.TryTimes(200, func() bool {
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
if err != nil {
return false
}
if status == types.ProvingTaskVerified {
hashesVerified = hashesVerified[1:]
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
if err != nil {
return false
}
return len(hashesVerified) == 0
return chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskVerified
})
assert.Truef(t, ok, "failed to check proof status")
}
func testIdleRollerSelection(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -497,7 +527,13 @@ func testIdleRollerSelection(t *testing.T) {
// create mock rollers.
rollers := make([]*mockRoller, 20)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL, proofType)
rollers[i].waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
}
defer func() {
@@ -507,29 +543,32 @@ func testIdleRollerSelection(t *testing.T) {
}
}()
assert.Equal(t, len(rollers), rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, len(rollers)/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, len(rollers)/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
assert.NoError(t, err)
for i := range hashes {
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
hashes[i] = batchData.Hash().Hex()
}
assert.NoError(t, dbTx.Commit())
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(hashes) > 0 {
for {
select {
case <-tick:
status, err := l2db.GetProvingStatusByHash(hashes[0])
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
if status == types.ProvingTaskVerified {
hashes = hashes[1:]
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskVerified {
return
}
case <-tickStop:
t.Error("failed to check proof status")
@@ -539,77 +578,84 @@ func testIdleRollerSelection(t *testing.T) {
}
func testGracefulRestart(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
assert.NoError(t, err)
for i := range hashes {
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
hashes[i] = batchData.Hash().Hex()
}
assert.NoError(t, dbTx.Commit())
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
assert.NoError(t, err)
// create mock roller
roller := newMockRoller(t, "roller_test", wsURL)
chunkRoller := newMockRoller(t, "roller_test", wsURL, message.ProofTypeChunk)
batchRoller := newMockRoller(t, "roller_test", wsURL, message.ProofTypeBatch)
// wait 10 seconds; the coordinator restarts before the rollers submit proofs
roller.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)
chunkRoller.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)
batchRoller.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)
// wait for coordinator to dispatch task
<-time.After(5 * time.Second)
// the coordinator will delete the roller if the subscription is closed.
roller.close()
chunkRoller.close()
batchRoller.close()
info, err := rollerManager.GetSessionInfo(dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
// Close rollerManager and ws handler.
handler.Shutdown(context.Background())
rollerManager.Stop()
// Setup new coordinator and ws server.
newRollerManager, newHandler := setupCoordinator(t, base.DBConfig, 1, wsURL)
newRollerManager, newHandler := setupCoordinator(t, 1, wsURL, false)
defer func() {
newHandler.Shutdown(context.Background())
newRollerManager.Stop()
}()
for i := range hashes {
info, err := newRollerManager.GetSessionInfo(hashes[i])
assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
assert.NoError(t, err)
info, err = newRollerManager.GetSessionInfo(dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
// at this point, roller haven't submitted
status, err := l2db.GetProvingStatusByHash(hashes[i])
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskAssigned, status)
}
// at this point, the rollers haven't submitted proofs yet
status, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskAssigned, status)
status, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskUnassigned, status) // chunk proofs not ready yet
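// once the chunk proof is verified below, checkAreAllChunkProofsReady flags the batch's chunk proofs as ready, after which the batch task can be dispatched (the dispatcher itself is outside this diff)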
// will overwrite the roller client for `SubmitProof`
roller.waitTaskAndSendProof(t, time.Millisecond*500, true, verifiedSuccess)
defer roller.close()
chunkRoller.waitTaskAndSendProof(t, time.Second, true, verifiedSuccess)
batchRoller.waitTaskAndSendProof(t, time.Second, true, verifiedSuccess)
defer func() {
chunkRoller.close()
batchRoller.close()
}()
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(15 * time.Second)
)
for len(hashes) > 0 {
for {
select {
case <-tick:
// this proves that the roller submits to the new coordinator,
// because the roller client for `submitProof` has been overwritten
status, err := l2db.GetProvingStatusByHash(hashes[0])
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
if status == types.ProvingTaskVerified {
hashes = hashes[1:]
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskVerified {
return
}
case <-tickStop:
t.Error("failed to check proof status")
return
@@ -618,12 +664,9 @@ func testGracefulRestart(t *testing.T) {
}
func testListRollers(t *testing.T) {
// Create db handler and reset db.
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -633,11 +676,13 @@ func testListRollers(t *testing.T) {
"roller_test_1",
"roller_test_2",
"roller_test_3",
"roller_test_4",
}
roller1 := newMockRoller(t, names[0], wsURL)
roller2 := newMockRoller(t, names[1], wsURL)
roller3 := newMockRoller(t, names[2], wsURL)
roller1 := newMockRoller(t, names[0], wsURL, message.ProofTypeChunk)
roller2 := newMockRoller(t, names[1], wsURL, message.ProofTypeBatch)
roller3 := newMockRoller(t, names[2], wsURL, message.ProofTypeChunk)
roller4 := newMockRoller(t, names[3], wsURL, message.ProofTypeBatch)
defer func() {
roller1.close()
roller2.close()
@@ -653,8 +698,9 @@ func testListRollers(t *testing.T) {
sort.Strings(rollersName)
assert.True(t, reflect.DeepEqual(names, rollersName))
// test ListRollers if one roller closed.
// test ListRollers after two rollers are closed.
roller3.close()
roller4.close()
// wait coordinator free completely
time.Sleep(time.Second * 5)
@@ -668,24 +714,28 @@ func testListRollers(t *testing.T) {
assert.True(t, reflect.DeepEqual(names[:2], newRollersName))
}
func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, rollersPerSession uint8, wsURL string) (rollerManager *coordinator.Manager, handler *http.Server) {
// Get db handler.
db, err := database.NewOrmFactory(dbCfg)
assert.True(t, assert.NoError(t, err), "failed to get db handler.")
func setupCoordinator(t *testing.T, rollersPerSession uint8, wsURL string, resetDB bool) (rollerManager *coordinator.Manager, handler *http.Server) {
db, err := utils.InitDB(dbCfg)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
if resetDB {
assert.NoError(t, migrate.ResetDB(sqlDB))
}
rollerManager, err = coordinator.New(context.Background(), &coordinator_config.RollerManagerConfig{
rollerManager, err = coordinator.New(context.Background(), &config.RollerManagerConfig{
RollersPerSession: rollersPerSession,
Verifier: &coordinator_config.VerifierConfig{MockMode: true},
Verifier: &config.VerifierConfig{MockMode: true},
CollectionTime: 1,
TokenTimeToLive: 5,
MaxVerifierWorkers: 10,
SessionAttempts: 2,
}, db, nil)
}, db)
assert.NoError(t, err)
assert.NoError(t, rollerManager.Start())
// start ws service
handler, _, err = utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs(), flate.NoCompression)
handler, _, err = cutils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs(), flate.NoCompression)
assert.NoError(t, err)
return rollerManager, handler
@@ -694,6 +744,7 @@ func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, rollersPerSession
type mockRoller struct {
rollerName string
privKey *ecdsa.PrivateKey
proofType message.ProofType
wsURL string
client *client2.Client
@@ -705,13 +756,14 @@ type mockRoller struct {
stopCh chan struct{}
}
func newMockRoller(t *testing.T, rollerName string, wsURL string) *mockRoller {
func newMockRoller(t *testing.T, rollerName string, wsURL string, proofType message.ProofType) *mockRoller {
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
roller := &mockRoller{
rollerName: rollerName,
privKey: privKey,
proofType: proofType,
wsURL: wsURL,
taskCh: make(chan *message.TaskMsg, 4),
stopCh: make(chan struct{}),
@@ -733,8 +785,9 @@ func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscript
// create a new ws connection
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.rollerName,
Timestamp: uint32(time.Now().Unix()),
Name: r.rollerName,
Timestamp: uint32(time.Now().Unix()),
RollerType: r.proofType,
},
}
_ = authMsg.SignWithKey(r.privKey)
@@ -804,6 +857,7 @@ func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.D
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: task.ID,
Type: r.proofType,
Status: message.StatusOk,
Proof: &message.AggProof{},
},

View File

@@ -16,7 +16,7 @@ type rollerMetrics struct {
func (m *Manager) updateMetricRollerProofsLastFinishedTimestampGauge(pk string) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
}
@@ -25,7 +25,7 @@ func (m *Manager) updateMetricRollerProofsLastFinishedTimestampGauge(pk string)
func (m *Manager) updateMetricRollerProofsLastAssignedTimestampGauge(pk string) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
}
@@ -34,7 +34,7 @@ func (m *Manager) updateMetricRollerProofsLastAssignedTimestampGauge(pk string)
func (m *Manager) updateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedSuccessTimeTimer.Update(d)
}
@@ -43,7 +43,7 @@ func (m *Manager) updateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d
func (m *Manager) updateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedFailedTimeTimer.Update(d)
}
@@ -52,7 +52,7 @@ func (m *Manager) updateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d t
func (m *Manager) updateMetricRollerProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsGeneratedFailedTimeTimer.Update(d)
}

View File

@@ -20,7 +20,7 @@ type rollerNode struct {
// Roller name
Name string
// Roller type
Type message.ProveType
Type message.ProofType
// Roller public key
PublicKey string
// Roller version
@@ -34,7 +34,7 @@ type rollerNode struct {
// Time of message creation
registerTime time.Time
*rollerMetrics
metrics *rollerMetrics
}
func (r *rollerNode) sendTask(msg *message.TaskMsg) bool {
@@ -53,8 +53,8 @@ func (m *Manager) reloadRollerAssignedTasks(pubkey string) *cmap.ConcurrentMap {
defer m.mu.RUnlock()
taskIDs := cmap.New()
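// rebuild the roller's in-flight task set from sessions that still mark it as assigned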
for id, sess := range m.sessions {
for _, sessionInfo := range sess.sessionInfos {
if sessionInfo.RollerPublicKey == pubkey && sessionInfo.ProvingStatus == int16(types.RollerAssigned) {
for _, proverTask := range sess.proverTasks {
if proverTask.ProverPublicKey == pubkey && proverTask.ProvingStatus == int16(types.RollerAssigned) {
taskIDs.Set(id, struct{}{})
}
}
@@ -74,20 +74,20 @@ func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *m
rollerProofsLastFinishedTimestampGauge: geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/finished/timestamp/%s", pubkey), metrics.ScrollRegistry),
}
node = &rollerNode{
Name: identity.Name,
Type: identity.RollerType,
Version: identity.Version,
PublicKey: pubkey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
rollerMetrics: rMs,
Name: identity.Name,
Type: identity.RollerType,
Version: identity.Version,
PublicKey: pubkey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
metrics: rMs,
}
m.rollerPool.Set(pubkey, node)
}
roller := node.(*rollerNode)
// avoid reconnection too frequently.
if time.Since(roller.registerTime) < 60 {
log.Warn("roller reconnect too frequently", "roller_name", identity.Name, "roller_type", identity.RollerType, "public key", pubkey)
log.Warn("roller reconnect too frequently", "prover_name", identity.Name, "roller_type", identity.RollerType, "public key", pubkey)
return nil, fmt.Errorf("roller reconnect too frequently")
}
// update register time and status
@@ -116,7 +116,7 @@ func (m *Manager) freeTaskIDForRoller(pk string, id string) {
}
// GetNumberOfIdleRollers return the count of idle rollers.
func (m *Manager) GetNumberOfIdleRollers(rollerType message.ProveType) (count int) {
func (m *Manager) GetNumberOfIdleRollers(rollerType message.ProofType) (count int) {
for _, pk := range m.rollerPool.Keys() {
if val, ok := m.rollerPool.Get(pk); ok {
r := val.(*rollerNode)
@@ -128,7 +128,7 @@ func (m *Manager) GetNumberOfIdleRollers(rollerType message.ProveType) (count in
return count
}
func (m *Manager) selectRoller(rollerType message.ProveType) *rollerNode {
func (m *Manager) selectRoller(rollerType message.ProofType) *rollerNode {
pubkeys := m.rollerPool.Keys()
for len(pubkeys) > 0 {
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))

View File

@@ -5,7 +5,7 @@ package verifier
import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/config"
"scroll-tech/coordinator/internal/config"
)
const InvalidTestProof = "this is a invalid proof"

View File

@@ -16,7 +16,7 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/coordinator/config"
"scroll-tech/coordinator/internal/config"
"scroll-tech/common/types/message"
)

View File

@@ -11,7 +11,7 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/config"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/verifier"
"github.com/stretchr/testify/assert"

View File

@@ -5,7 +5,6 @@ go 1.19
require (
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.7
github.com/mattn/go-sqlite3 v1.14.14
github.com/pressly/goose/v3 v3.7.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/stretchr/testify v1.8.2
@@ -19,11 +18,11 @@ require (
github.com/go-stack/stack v1.8.1 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
golang.org/x/crypto v0.10.0 // indirect
golang.org/x/sys v0.9.0 // indirect
golang.org/x/tools v0.8.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect

View File

@@ -52,8 +52,6 @@ github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bC
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=

View File

@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, 7, int(cur))
assert.Equal(t, 6, int(cur))
}
func testMigrate(t *testing.T) {

View File

@@ -1,38 +0,0 @@
-- +goose Up
-- +goose StatementBegin
-- TODO: use foreign key for batch_id?
-- TODO: why tx_num is bigint?
create table block_trace
(
number BIGINT NOT NULL,
hash VARCHAR NOT NULL,
parent_hash VARCHAR NOT NULL,
trace JSON NOT NULL,
batch_hash VARCHAR DEFAULT NULL,
tx_num INTEGER NOT NULL,
gas_used BIGINT NOT NULL,
block_timestamp NUMERIC NOT NULL
);
create unique index block_trace_hash_uindex
on block_trace (hash);
create unique index block_trace_number_uindex
on block_trace (number);
create unique index block_trace_parent_uindex
on block_trace (number, parent_hash);
create unique index block_trace_parent_hash_uindex
on block_trace (hash, parent_hash);
create index block_trace_batch_hash_index
on block_trace (batch_hash);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists block_trace;
-- +goose StatementEnd

View File

@@ -12,7 +12,7 @@ create table l1_message
calldata TEXT NOT NULL,
layer1_hash VARCHAR NOT NULL,
layer2_hash VARCHAR DEFAULT NULL,
status INTEGER DEFAULT 1,
status INTEGER NOT NULL DEFAULT 1,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);
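The only change in this hunk is tightening status from DEFAULT 1 to NOT NULL DEFAULT 1. The distinction matters: a bare DEFAULT applies only when the column is omitted from an INSERT, while an explicit NULL is still stored; NOT NULL turns that into a constraint violation. A toy demonstration with a throwaway temp table (illustrative, not part of the diff):

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost:5432/scroll_test?sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	db.Exec(`CREATE TEMP TABLE demo (id INT, status INTEGER NOT NULL DEFAULT 1)`)
	// Omitting the column uses the default: the row gets status = 1.
	_, err = db.Exec(`INSERT INTO demo (id) VALUES (1)`)
	fmt.Println("implicit default:", err)
	// An explicit NULL bypasses DEFAULT; with NOT NULL in place this
	// now fails with a not-null violation instead of storing NULL.
	_, err = db.Exec(`INSERT INTO demo (id, status) VALUES (2, NULL)`)
	fmt.Println("explicit NULL:", err)
}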

View File

@@ -1,50 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table l1_message
(
queue_index BIGINT NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
gas_limit BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
calldata TEXT NOT NULL,
layer1_hash VARCHAR NOT NULL,
layer2_hash VARCHAR DEFAULT NULL,
status INTEGER DEFAULT 1,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);
comment
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';
create unique index l1_message_hash_uindex
on l1_message (msg_hash);
create unique index l1_message_nonce_uindex
on l1_message (queue_index);
create index l1_message_height_index
on l1_message (height);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l1_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_message;
-- +goose StatementEnd

View File

@@ -1,50 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table l2_message
(
nonce BIGINT NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
calldata TEXT NOT NULL,
layer2_hash VARCHAR NOT NULL,
layer1_hash VARCHAR DEFAULT NULL,
proof TEXT DEFAULT NULL,
status INTEGER DEFAULT 1,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);
comment
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';
create unique index l2_message_hash_uindex
on l2_message (msg_hash);
create unique index l2_message_nonce_uindex
on l2_message (nonce);
create index l2_message_height_index
on l2_message (height);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l2_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l2_message;
-- +goose StatementEnd

View File

@@ -1,49 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table block_batch
(
hash VARCHAR NOT NULL,
index BIGINT NOT NULL,
start_block_number BIGINT NOT NULL,
start_block_hash VARCHAR NOT NULL,
end_block_number BIGINT NOT NULL,
end_block_hash VARCHAR NOT NULL,
parent_hash VARCHAR NOT NULL,
state_root VARCHAR NOT NULL,
total_tx_num BIGINT NOT NULL,
total_l1_tx_num BIGINT NOT NULL,
total_l2_gas BIGINT NOT NULL,
proving_status INTEGER DEFAULT 1,
proof BYTEA DEFAULT NULL,
proof_time_sec INTEGER DEFAULT 0,
rollup_status INTEGER DEFAULT 1,
commit_tx_hash VARCHAR DEFAULT NULL,
finalize_tx_hash VARCHAR DEFAULT NULL,
oracle_status INTEGER DEFAULT 1,
oracle_tx_hash VARCHAR DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
prover_assigned_at TIMESTAMP(0) DEFAULT NULL,
proved_at TIMESTAMP(0) DEFAULT NULL,
committed_at TIMESTAMP(0) DEFAULT NULL,
finalized_at TIMESTAMP(0) DEFAULT NULL
);
comment
on column block_batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';
comment
on column block_batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped, commit_failed, finalize_failed';
comment
on column block_batch.oracle_status is 'undefined, pending, importing, imported, failed';
create unique index block_batch_hash_uindex
on block_batch (hash);
create unique index block_batch_index_uindex
on block_batch (index);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists block_batch;
-- +goose StatementEnd

View File

@@ -15,6 +15,7 @@ create table batch
batch_header BYTEA NOT NULL,
-- proof
chunk_proofs_ready SMALLINT NOT NULL DEFAULT 0,
proving_status SMALLINT NOT NULL DEFAULT 1,
proof BYTEA DEFAULT NULL,
prover_assigned_at TIMESTAMP(0) DEFAULT NULL,

View File

@@ -1,27 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table session_info
(
id BIGSERIAL PRIMARY KEY,
task_id VARCHAR NOT NULL,
roller_public_key VARCHAR NOT NULL,
prove_type SMALLINT DEFAULT 0,
roller_name VARCHAR NOT NULL,
proving_status SMALLINT DEFAULT 1,
failure_type SMALLINT DEFAULT 0,
reward BIGINT DEFAULT 0,
proof BYTEA DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL,
CONSTRAINT uk_session_unique UNIQUE (task_id, roller_public_key)
);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists session_info;
-- +goose StatementEnd

View File

@@ -1,33 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table l1_block
(
number BIGINT NOT NULL,
hash VARCHAR NOT NULL,
header_rlp TEXT NOT NULL,
base_fee BIGINT NOT NULL,
block_status INTEGER DEFAULT 1,
import_tx_hash VARCHAR DEFAULT NULL,
oracle_status INTEGER DEFAULT 1,
oracle_tx_hash VARCHAR DEFAULT NULL
);
comment
on column l1_block.block_status is 'undefined, pending, importing, imported, failed';
comment
on column l1_block.oracle_status is 'undefined, pending, importing, imported, failed';
create unique index l1_block_hash_uindex
on l1_block (hash);
create unique index l1_block_number_uindex
on l1_block (number);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_block;
-- +goose StatementEnd

View File

@@ -0,0 +1,30 @@
-- +goose Up
-- +goose StatementBegin
create table prover_task
(
id BIGSERIAL PRIMARY KEY,
task_id VARCHAR NOT NULL,
prover_public_key VARCHAR NOT NULL,
prover_name VARCHAR NOT NULL,
task_type SMALLINT NOT NULL DEFAULT 0,
proving_status SMALLINT NOT NULL DEFAULT 0,
failure_type SMALLINT NOT NULL DEFAULT 0,
reward BIGINT NOT NULL DEFAULT 0,
proof BYTEA DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL,
CONSTRAINT uk_tasktype_taskid_publickey UNIQUE (task_type, task_id, prover_public_key)
);
comment
on column prover_task.proving_status is 'roller assigned, roller proof valid, roller proof invalid';
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists prover_task;
-- +goose StatementEnd
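prover_task replaces session_info (deleted further down) and keeps the same upsert-friendly shape: the composite unique constraint makes re-assigning a task to the same prover an update rather than a duplicate row. A hypothetical helper, not part of the diff, showing how writes can lean on that constraint:

package example

import "github.com/jmoiron/sqlx"

// upsertProverTask inserts an assignment, or refreshes proving_status
// when the (task_type, task_id, prover_public_key) row already exists.
func upsertProverTask(db *sqlx.DB, taskType int16, taskID, pubKey, name string, status int16) error {
	_, err := db.Exec(`INSERT INTO prover_task (task_id, prover_public_key, prover_name, task_type, proving_status)
	    VALUES ($1, $2, $3, $4, $5)
	    ON CONFLICT ON CONSTRAINT uk_tasktype_taskid_publickey
	    DO UPDATE SET proving_status = EXCLUDED.proving_status, updated_at = CURRENT_TIMESTAMP`,
		taskID, pubKey, name, taskType, status)
	return err
}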

View File

@@ -1,38 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table agg_task
(
id VARCHAR NOT NULL,
start_batch_index BIGINT NOT NULL,
start_batch_hash VARCHAR NOT NULL,
end_batch_index BIGINT NOT NULL,
end_batch_hash VARCHAR NOT NULL,
proving_status SMALLINT DEFAULT 1,
proof BYTEA DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP
);
create unique index agg_task_hash_uindex
on agg_task (id);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON agg_task FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists agg_task;
-- +goose StatementEnd

View File

@@ -1,104 +0,0 @@
package orm
import (
"encoding/json"
"github.com/jmoiron/sqlx"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
type aggTaskOrm struct {
db *sqlx.DB
}
var _ AggTaskOrm = (*aggTaskOrm)(nil)
// NewAggTaskOrm creates an AggTaskOrm instance
func NewAggTaskOrm(db *sqlx.DB) AggTaskOrm {
return &aggTaskOrm{db: db}
}
func (a *aggTaskOrm) GetSubProofsByAggTaskID(id string) ([]*message.AggProof, error) {
var (
startIdx uint64
endIdx uint64
)
row := a.db.QueryRow("SELECT start_batch_index, end_batch_index FROM agg_task where id = $1", id)
err := row.Scan(&startIdx, &endIdx)
if err != nil {
return nil, err
}
rows, err := a.db.Queryx("SELECT proof FROM block_batch WHERE index>=$1 AND index<=$2 and proving_status = $3", startIdx, endIdx, types.ProvingTaskVerified)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
var subProofs []*message.AggProof
for rows.Next() {
var proofByt []byte
err = rows.Scan(&proofByt)
if err != nil {
return nil, err
}
var proof message.AggProof
if err := json.Unmarshal(proofByt, &proof); err != nil {
return nil, err
}
subProofs = append(subProofs, &proof)
}
return subProofs, nil
}
func (a *aggTaskOrm) GetUnassignedAggTasks() ([]*types.AggTask, error) {
rows, err := a.db.Queryx("SELECT * FROM agg_task where proving_status = 1;")
if err != nil {
return nil, err
}
return a.rowsToAggTask(rows)
}
func (a *aggTaskOrm) GetAssignedAggTasks() ([]*types.AggTask, error) {
rows, err := a.db.Queryx(`SELECT * FROM agg_task WHERE proving_status IN ($1, $2)`, types.ProvingTaskAssigned, types.ProvingTaskProved)
if err != nil {
return nil, err
}
return a.rowsToAggTask(rows)
}
func (a *aggTaskOrm) InsertAggTask(id string, startBatchIndex uint64, startBatchHash string, endBatchIndex uint64, endBatchHash string) error {
sqlStr := "INSERT INTO agg_task (id, start_batch_index, start_batch_hash, end_batch_index, end_batch_hash) VALUES ($1, $2, $3, $4, $5)"
_, err := a.db.Exec(sqlStr, id, startBatchIndex, startBatchHash, endBatchIndex, endBatchHash)
return err
}
func (a *aggTaskOrm) UpdateAggTaskStatus(aggTaskID string, status types.ProvingStatus) error {
_, err := a.db.Exec(a.db.Rebind("update agg_task set proving_status = ? where id = ?;"), status, aggTaskID)
return err
}
func (a *aggTaskOrm) UpdateProofForAggTask(aggTaskID string, proof *message.AggProof) error {
proofByt, err := json.Marshal(proof)
if err != nil {
return err
}
_, err = a.db.Exec(a.db.Rebind("update agg_task set proving_status = ?, proof = ? where id = ?;"), types.ProvingTaskProved, proofByt, aggTaskID)
return err
}
func (a *aggTaskOrm) rowsToAggTask(rows *sqlx.Rows) ([]*types.AggTask, error) {
var tasks []*types.AggTask
for rows.Next() {
task := new(types.AggTask)
err := rows.StructScan(task)
if err != nil {
return nil, err
}
tasks = append(tasks, task)
}
return tasks, nil
}
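The deleted aggregator ORM stored proofs as JSON blobs in BYTEA columns and rehydrated them row by row (GetSubProofsByAggTaskID above; blockBatchOrm.GetVerifiedProofByHash below uses the same pattern). The core scan-then-unmarshal step, extracted as a standalone sketch with an illustrative stand-in for message.AggProof:

package example

import (
	"database/sql"
	"encoding/json"
)

// aggProof mirrors the stored proof's shape for illustration only.
type aggProof struct {
	Proof     []byte `json:"proof"`
	FinalPair []byte `json:"final_pair"`
}

// decodeProofs scans each BYTEA proof column into raw bytes, then
// JSON-decodes the bytes into a typed struct.
func decodeProofs(rows *sql.Rows) ([]*aggProof, error) {
	defer rows.Close()
	var proofs []*aggProof
	for rows.Next() {
		var raw []byte
		if err := rows.Scan(&raw); err != nil {
			return nil, err
		}
		p := new(aggProof)
		if err := json.Unmarshal(raw, p); err != nil {
			return nil, err
		}
		proofs = append(proofs, p)
	}
	return proofs, rows.Err()
}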

View File

@@ -1,413 +0,0 @@
package orm
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
type blockBatchOrm struct {
db *sqlx.DB
}
var _ BlockBatchOrm = (*blockBatchOrm)(nil)
// NewBlockBatchOrm creates a blockBatchOrm instance
func NewBlockBatchOrm(db *sqlx.DB) BlockBatchOrm {
return &blockBatchOrm{db: db}
}
func (o *blockBatchOrm) GetBlockBatches(fields map[string]interface{}, args ...string) ([]*types.BlockBatch, error) {
query := "SELECT * FROM block_batch WHERE 1 = 1 "
for key := range fields {
query += fmt.Sprintf("AND %s=:%s ", key, key)
}
query = strings.Join(append([]string{query}, args...), " ")
db := o.db
rows, err := db.NamedQuery(db.Rebind(query), fields)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
var batches []*types.BlockBatch
for rows.Next() {
batch := &types.BlockBatch{}
if err = rows.StructScan(batch); err != nil {
break
}
batches = append(batches, batch)
}
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, err
}
return batches, nil
}
func (o *blockBatchOrm) GetProvingStatusByHash(hash string) (types.ProvingStatus, error) {
row := o.db.QueryRow(`SELECT proving_status FROM block_batch WHERE hash = $1`, hash)
var status types.ProvingStatus
if err := row.Scan(&status); err != nil {
return types.ProvingStatusUndefined, err
}
return status, nil
}
func (o *blockBatchOrm) GetVerifiedProofByHash(hash string) (*message.AggProof, error) {
var proofBytes []byte
row := o.db.QueryRow(`SELECT proof FROM block_batch WHERE hash = $1 and proving_status = $2`, hash, types.ProvingTaskVerified)
if err := row.Scan(&proofBytes); err != nil {
return nil, err
}
var proof message.AggProof
if err := json.Unmarshal(proofBytes, &proof); err != nil {
return nil, err
}
return &proof, nil
}
func (o *blockBatchOrm) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
}
db := o.db
if _, err := db.ExecContext(ctx,
db.Rebind(`UPDATE block_batch set proof = ?, proof_time_sec = ? where hash = ?;`),
proofBytes, proofTimeSec, hash,
); err != nil {
log.Error("failed to update proof", "err", err)
}
return nil
}
func (o *blockBatchOrm) UpdateProvingStatus(hash string, status types.ProvingStatus) error {
switch status {
case types.ProvingTaskAssigned:
_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ?, prover_assigned_at = ? where hash = ?;"), status, time.Now(), hash)
return err
case types.ProvingTaskUnassigned:
_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ?, prover_assigned_at = null where hash = ?;"), status, hash)
return err
case types.ProvingTaskProved, types.ProvingTaskVerified:
_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ?, proved_at = ? where hash = ?;"), status, time.Now(), hash)
return err
default:
_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ? where hash = ?;"), status, hash)
return err
}
}
func (o *blockBatchOrm) ResetProvingStatusFor(before types.ProvingStatus) error {
_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ? where proving_status = ?;"), types.ProvingTaskUnassigned, before)
return err
}
// func (o *blockBatchOrm) NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, endBlock *BlockInfo, parentHash string, totalTxNum uint64, totalL2Gas uint64) (string, error) {
func (o *blockBatchOrm) NewBatchInDBTx(dbTx *sqlx.Tx, batchData *types.BatchData) error {
numBlocks := len(batchData.Batch.Blocks)
if _, err := dbTx.NamedExec(`INSERT INTO public.block_batch (hash, index, parent_hash, start_block_number, start_block_hash, end_block_number, end_block_hash, total_tx_num, total_l2_gas, state_root, total_l1_tx_num) VALUES (:hash, :index, :parent_hash, :start_block_number, :start_block_hash, :end_block_number, :end_block_hash, :total_tx_num, :total_l2_gas, :state_root, :total_l1_tx_num)`,
map[string]interface{}{
"hash": batchData.Hash().Hex(),
"index": batchData.Batch.BatchIndex,
"parent_hash": batchData.Batch.ParentBatchHash.Hex(),
"start_block_number": batchData.Batch.Blocks[0].BlockNumber,
"start_block_hash": batchData.Batch.Blocks[0].BlockHash.Hex(),
"end_block_number": batchData.Batch.Blocks[numBlocks-1].BlockNumber,
"end_block_hash": batchData.Batch.Blocks[numBlocks-1].BlockHash.Hex(),
"total_tx_num": batchData.TotalTxNum,
"total_l1_tx_num": batchData.TotalL1TxNum,
"total_l2_gas": batchData.TotalL2Gas,
"state_root": batchData.Batch.NewStateRoot.Hex(),
"created_at": time.Now(),
// "proving_status": ProvingTaskUnassigned, // actually no need, because we have default value in DB schema
// "rollup_status": RollupPending, // actually no need, because we have default value in DB schema
}); err != nil {
return err
}
return nil
}
func (o *blockBatchOrm) BatchRecordExist(hash string) (bool, error) {
var res int
err := o.db.QueryRow(o.db.Rebind(`SELECT 1 FROM block_batch where hash = ? limit 1;`), hash).Scan(&res)
if err != nil {
if err != sql.ErrNoRows {
return false, err
}
return false, nil
}
return true, nil
}
func (o *blockBatchOrm) GetPendingBatches(limit uint64) ([]string, error) {
rows, err := o.db.Queryx(`SELECT hash FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, types.RollupPending, limit)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
var hashes []string
for rows.Next() {
var hash string
if err = rows.Scan(&hash); err != nil {
break
}
hashes = append(hashes, hash)
}
if len(hashes) == 0 || errors.Is(err, sql.ErrNoRows) {
// log.Warn("no pending batches in db", "err", err)
} else if err != nil {
return nil, err
}
return hashes, nil
}
func (o *blockBatchOrm) GetLatestBatch() (*types.BlockBatch, error) {
row := o.db.QueryRowx(`select * from block_batch where index = (select max(index) from block_batch);`)
batch := &types.BlockBatch{}
if err := row.StructScan(batch); err != nil {
return nil, err
}
return batch, nil
}
func (o *blockBatchOrm) GetLatestCommittedBatch() (*types.BlockBatch, error) {
row := o.db.QueryRowx(`select * from block_batch where index = (select max(index) from block_batch where rollup_status = $1);`, types.RollupCommitted)
batch := &types.BlockBatch{}
if err := row.StructScan(batch); err != nil {
return nil, err
}
return batch, nil
}
func (o *blockBatchOrm) GetLatestFinalizedBatch() (*types.BlockBatch, error) {
row := o.db.QueryRowx(`select * from block_batch where index = (select max(index) from block_batch where rollup_status = $1);`, types.RollupFinalized)
batch := &types.BlockBatch{}
if err := row.StructScan(batch); err != nil {
return nil, err
}
return batch, nil
}
func (o *blockBatchOrm) GetLatestFinalizingOrFinalizedBatch() (*types.BlockBatch, error) {
row := o.db.QueryRowx(`select * from block_batch where index = (select max(index) from block_batch where rollup_status = $1 or rollup_status = $2);`, types.RollupFinalizing, types.RollupFinalized)
batch := &types.BlockBatch{}
if err := row.StructScan(batch); err != nil {
return nil, err
}
return batch, nil
}
func (o *blockBatchOrm) GetCommittedBatches(limit uint64) ([]string, error) {
rows, err := o.db.Queryx(`SELECT hash FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, types.RollupCommitted, limit)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
var hashes []string
for rows.Next() {
var hash string
if err = rows.Scan(&hash); err != nil {
break
}
hashes = append(hashes, hash)
}
if len(hashes) == 0 || errors.Is(err, sql.ErrNoRows) {
// log.Warn("no committed batches in db", "err", err)
} else if err != nil {
return nil, err
}
return hashes, nil
}
func (o *blockBatchOrm) GetRollupStatus(hash string) (types.RollupStatus, error) {
row := o.db.QueryRow(`SELECT rollup_status FROM block_batch WHERE hash = $1`, hash)
var status types.RollupStatus
if err := row.Scan(&status); err != nil {
return types.RollupUndefined, err
}
return status, nil
}
func (o *blockBatchOrm) GetRollupStatusByHashList(hashes []string) ([]types.RollupStatus, error) {
if len(hashes) == 0 {
return make([]types.RollupStatus, 0), nil
}
query, args, err := sqlx.In("SELECT hash, rollup_status FROM block_batch WHERE hash IN (?);", hashes)
if err != nil {
return make([]types.RollupStatus, 0), err
}
// sqlx.In returns queries with the `?` bindvar, we can rebind it for our backend
query = o.db.Rebind(query)
rows, err := o.db.Query(query, args...)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
statusMap := make(map[string]types.RollupStatus)
for rows.Next() {
var hash string
var status types.RollupStatus
if err = rows.Scan(&hash, &status); err != nil {
break
}
statusMap[hash] = status
}
if err = rows.Err(); err != nil {
return nil, err
}
var statuses []types.RollupStatus
if err != nil {
return statuses, err
}
for _, hash := range hashes {
statuses = append(statuses, statusMap[hash])
}
return statuses, nil
}
func (o *blockBatchOrm) GetCommitTxHash(hash string) (sql.NullString, error) {
row := o.db.QueryRow(`SELECT commit_tx_hash FROM block_batch WHERE hash = $1`, hash)
var commitTXHash sql.NullString
if err := row.Scan(&commitTXHash); err != nil {
return sql.NullString{}, err
}
return commitTXHash, nil
}
func (o *blockBatchOrm) GetFinalizeTxHash(hash string) (sql.NullString, error) {
row := o.db.QueryRow(`SELECT finalize_tx_hash FROM block_batch WHERE hash = $1`, hash)
var finalizeTxHash sql.NullString
if err := row.Scan(&finalizeTxHash); err != nil {
return sql.NullString{}, err
}
return finalizeTxHash, nil
}
func (o *blockBatchOrm) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error {
switch status {
case types.RollupCommitted:
_, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ?, committed_at = ? where hash = ?;"), status, time.Now(), hash)
return err
case types.RollupFinalized:
_, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ?, finalized_at = ? where hash = ?;"), status, time.Now(), hash)
return err
default:
_, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ? where hash = ?;"), status, hash)
return err
}
}
func (o *blockBatchOrm) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error {
switch status {
case types.RollupCommitted:
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ?, committed_at = ? where hash = ?;"), commitTxHash, status, time.Now(), hash)
return err
default:
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ? where hash = ?;"), commitTxHash, status, hash)
return err
}
}
func (o *blockBatchOrm) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error {
switch status {
case types.RollupFinalized:
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ?, finalized_at = ? where hash = ?;"), finalizeTxHash, status, time.Now(), hash)
return err
default:
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ? where hash = ?;"), finalizeTxHash, status, hash)
return err
}
}
func (o *blockBatchOrm) GetAssignedBatchHashes() ([]string, error) {
rows, err := o.db.Queryx(`SELECT hash FROM block_batch WHERE proving_status IN ($1, $2)`, types.ProvingTaskAssigned, types.ProvingTaskProved)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
var hashes []string
for rows.Next() {
var hash string
if err = rows.Scan(&hash); err != nil {
break
}
hashes = append(hashes, hash)
}
return hashes, nil
}
func (o *blockBatchOrm) GetBatchCount() (int64, error) {
row := o.db.QueryRow(`select count(*) from block_batch`)
var count int64
if err := row.Scan(&count); err != nil {
return -1, err
}
return count, nil
}
func (o *blockBatchOrm) UpdateSkippedBatches() (int64, error) {
res, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ? where (proving_status = ? or proving_status = ?) and rollup_status = ?;"), types.RollupFinalizationSkipped, types.ProvingTaskSkipped, types.ProvingTaskFailed, types.RollupCommitted)
if err != nil {
return 0, err
}
count, err := res.RowsAffected()
if err != nil {
return 0, err
}
return count, nil
}
func (o *blockBatchOrm) UpdateL2OracleTxHash(ctx context.Context, hash, txHash string) error {
if _, err := o.db.ExecContext(ctx, o.db.Rebind("update block_batch set oracle_tx_hash = ? where hash = ?;"), txHash, hash); err != nil {
return err
}
return nil
}
func (o *blockBatchOrm) UpdateL2GasOracleStatus(ctx context.Context, hash string, status types.GasOracleStatus) error {
if _, err := o.db.ExecContext(ctx, o.db.Rebind("update block_batch set oracle_status = ? where hash = ?;"), status, hash); err != nil {
return err
}
return nil
}
func (o *blockBatchOrm) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
if _, err := o.db.ExecContext(ctx, o.db.Rebind("update block_batch set oracle_status = ?, oracle_tx_hash = ? where hash = ?;"), status, txHash, hash); err != nil {
return err
}
return nil
}
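A pattern worth noting throughout the file above: every write goes through db.Rebind, which rewrites the portable `?` bindvars into the driver's native placeholder style ($1, $2, ... for lib/pq), so the query text itself stays backend-agnostic. The package-level helpers show the rewrite in isolation; this snippet runs without a database:

package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
)

func main() {
	q := "update block_batch set rollup_status = ? where hash = ?;"
	// BindType maps a driver name to its placeholder style;
	// (*sqlx.DB).Rebind does the same using the connection's driver.
	fmt.Println(sqlx.Rebind(sqlx.BindType("postgres"), q))
	// prints: update block_batch set rollup_status = $1 where hash = $2;
}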

View File

@@ -1,209 +0,0 @@
package orm
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"strings"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types"
)
type blockTraceOrm struct {
db *sqlx.DB
}
var _ BlockTraceOrm = (*blockTraceOrm)(nil)
// NewBlockTraceOrm creates a blockTraceOrm instance
func NewBlockTraceOrm(db *sqlx.DB) BlockTraceOrm {
return &blockTraceOrm{db: db}
}
func (o *blockTraceOrm) IsL2BlockExists(number uint64) (bool, error) {
var res int
err := o.db.QueryRow(o.db.Rebind(`SELECT 1 from block_trace where number = ? limit 1;`), number).Scan(&res)
if err != nil {
if err != sql.ErrNoRows {
return false, err
}
return false, nil
}
return true, nil
}
func (o *blockTraceOrm) GetL2BlocksLatestHeight() (int64, error) {
row := o.db.QueryRow("SELECT COALESCE(MAX(number), -1) FROM block_trace;")
var height int64
if err := row.Scan(&height); err != nil {
return -1, err
}
return height, nil
}
func (o *blockTraceOrm) GetL2WrappedBlocks(fields map[string]interface{}, args ...string) ([]*types.WrappedBlock, error) {
type Result struct {
Trace string
}
query := "SELECT trace FROM block_trace WHERE 1 = 1 "
for key := range fields {
query += fmt.Sprintf("AND %s=:%s ", key, key)
}
query = strings.Join(append([]string{query}, args...), " ")
db := o.db
rows, err := db.NamedQuery(db.Rebind(query), fields)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
var wrappedBlocks []*types.WrappedBlock
for rows.Next() {
result := &Result{}
if err = rows.StructScan(result); err != nil {
break
}
wrappedBlock := types.WrappedBlock{}
err = json.Unmarshal([]byte(result.Trace), &wrappedBlock)
if err != nil {
break
}
wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
}
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, err
}
return wrappedBlocks, nil
}
func (o *blockTraceOrm) GetL2BlockInfos(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error) {
query := "SELECT number, hash, parent_hash, batch_hash, tx_num, gas_used, block_timestamp FROM block_trace WHERE 1 = 1 "
for key := range fields {
query += fmt.Sprintf("AND %s=:%s ", key, key)
}
query = strings.Join(append([]string{query}, args...), " ")
db := o.db
rows, err := db.NamedQuery(db.Rebind(query), fields)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
var blocks []*types.BlockInfo
for rows.Next() {
block := &types.BlockInfo{}
if err = rows.StructScan(block); err != nil {
break
}
blocks = append(blocks, block)
}
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, err
}
return blocks, nil
}
func (o *blockTraceOrm) GetUnbatchedL2Blocks(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error) {
query := "SELECT number, hash, parent_hash, batch_hash, tx_num, gas_used, block_timestamp FROM block_trace WHERE batch_hash is NULL "
for key := range fields {
query += fmt.Sprintf("AND %s=:%s ", key, key)
}
query = strings.Join(append([]string{query}, args...), " ")
db := o.db
rows, err := db.NamedQuery(db.Rebind(query), fields)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
var blocks []*types.BlockInfo
for rows.Next() {
block := &types.BlockInfo{}
if err = rows.StructScan(block); err != nil {
break
}
blocks = append(blocks, block)
}
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, err
}
return blocks, nil
}
func (o *blockTraceOrm) GetL2BlockHashByNumber(number uint64) (*common.Hash, error) {
row := o.db.QueryRow(`SELECT hash FROM block_trace WHERE number = $1`, number)
var hashStr string
if err := row.Scan(&hashStr); err != nil {
return nil, err
}
hash := common.HexToHash(hashStr)
return &hash, nil
}
func (o *blockTraceOrm) InsertWrappedBlocks(blocks []*types.WrappedBlock) error {
blockMaps := make([]map[string]interface{}, len(blocks))
for i, block := range blocks {
number, hash, txNum, mtime := block.Header.Number.Int64(),
block.Header.Hash().String(),
len(block.Transactions),
block.Header.Time
gasCost := block.Header.GasUsed
data, err := json.Marshal(block)
if err != nil {
log.Error("failed to marshal block", "hash", hash, "err", err)
return err
}
blockMaps[i] = map[string]interface{}{
"number": number,
"hash": hash,
"parent_hash": block.Header.ParentHash.String(),
"trace": string(data),
"tx_num": txNum,
"gas_used": gasCost,
"block_timestamp": mtime,
}
}
_, err := o.db.NamedExec(`INSERT INTO public.block_trace (number, hash, parent_hash, trace, tx_num, gas_used, block_timestamp) VALUES (:number, :hash, :parent_hash, :trace, :tx_num, :gas_used, :block_timestamp);`, blockMaps)
if err != nil {
log.Error("failed to insert blockTraces", "err", err)
}
return err
}
func (o *blockTraceOrm) DeleteTracesByBatchHash(batchHash string) error {
if _, err := o.db.Exec(o.db.Rebind("update block_trace set trace = ? where batch_hash = ?;"), "{}", batchHash); err != nil {
return err
}
return nil
}
// http://jmoiron.github.io/sqlx/#inQueries
// https://stackoverflow.com/questions/56568799/how-to-update-multiple-rows-using-sqlx
func (o *blockTraceOrm) SetBatchHashForL2BlocksInDBTx(dbTx *sqlx.Tx, numbers []uint64, batchHash string) error {
query := "UPDATE block_trace SET batch_hash=? WHERE number IN (?)"
qry, args, err := sqlx.In(query, batchHash, numbers)
if err != nil {
return err
}
if _, err := dbTx.Exec(dbTx.Rebind(qry), args...); err != nil {
return err
}
return nil
}
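SetBatchHashForL2BlocksInDBTx (and the two links above it) uses the standard sqlx.In dance for IN-clauses: expand a slice into one bindvar per element, then Rebind for the target driver before executing. A compact standalone sketch:

package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
)

func main() {
	numbers := []uint64{3, 4, 5}
	// sqlx.In replaces the `(?)` that receives a slice with `(?, ?, ?)`
	// and flattens the slice into the args list.
	query, args, err := sqlx.In("UPDATE block_trace SET batch_hash=? WHERE number IN (?)", "0xabc", numbers)
	if err != nil {
		panic(err)
	}
	query = sqlx.Rebind(sqlx.BindType("postgres"), query)
	fmt.Println(query) // UPDATE block_trace SET batch_hash=$1 WHERE number IN ($2, $3, $4)
	fmt.Println(args)  // [0xabc 3 4 5]
}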

View File

@@ -1,124 +0,0 @@
package orm
import (
"context"
"database/sql"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/common"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
// L1BlockOrm l1_block operation interface
type L1BlockOrm interface {
GetL1BlockInfos(fields map[string]interface{}, args ...string) ([]*types.L1BlockInfo, error)
InsertL1Blocks(ctx context.Context, blocks []*types.L1BlockInfo) error
DeleteHeaderRLPByBlockHash(ctx context.Context, blockHash string) error
UpdateImportTxHash(ctx context.Context, blockHash, txHash string) error
UpdateL1BlockStatus(ctx context.Context, blockHash string, status types.L1BlockStatus) error
UpdateL1BlockStatusAndImportTxHash(ctx context.Context, blockHash string, status types.L1BlockStatus, txHash string) error
UpdateL1OracleTxHash(ctx context.Context, blockHash, txHash string) error
UpdateL1GasOracleStatus(ctx context.Context, blockHash string, status types.GasOracleStatus) error
UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error
GetLatestL1BlockHeight() (uint64, error)
GetLatestImportedL1Block() (*types.L1BlockInfo, error)
}
// BlockTraceOrm block_trace operation interface
type BlockTraceOrm interface {
IsL2BlockExists(number uint64) (bool, error)
GetL2BlocksLatestHeight() (int64, error)
GetL2WrappedBlocks(fields map[string]interface{}, args ...string) ([]*types.WrappedBlock, error)
GetL2BlockInfos(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error)
// GetUnbatchedL2Blocks is added because `GetL2BlockInfos` cannot support the query "batch_hash is NULL"
GetUnbatchedL2Blocks(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error)
GetL2BlockHashByNumber(number uint64) (*common.Hash, error)
DeleteTracesByBatchHash(batchHash string) error
InsertWrappedBlocks(blockTraces []*types.WrappedBlock) error
SetBatchHashForL2BlocksInDBTx(dbTx *sqlx.Tx, numbers []uint64, batchHash string) error
}
// SessionInfoOrm sessions info operation interface
type SessionInfoOrm interface {
GetSessionInfosByHashes(hashes []string) ([]*types.SessionInfo, error)
SetSessionInfo(rollersInfo *types.SessionInfo) error
UpdateSessionInfoProvingStatus(ctx context.Context, proveType message.ProveType, taskID string, pk string, status types.RollerProveStatus) error
}
// AggTaskOrm is aggregator task
type AggTaskOrm interface {
GetAssignedAggTasks() ([]*types.AggTask, error)
GetUnassignedAggTasks() ([]*types.AggTask, error)
GetSubProofsByAggTaskID(id string) ([]*message.AggProof, error)
InsertAggTask(id string, startBatchIndex uint64, startBatchHash string, endBatchIndex uint64, endBatchHash string) error
UpdateAggTaskStatus(aggTaskID string, status types.ProvingStatus) error
UpdateProofForAggTask(aggTaskID string, proof *message.AggProof) error
}
// BlockBatchOrm block_batch operation interface
type BlockBatchOrm interface {
GetBlockBatches(fields map[string]interface{}, args ...string) ([]*types.BlockBatch, error)
GetProvingStatusByHash(hash string) (types.ProvingStatus, error)
GetVerifiedProofByHash(hash string) (*message.AggProof, error)
UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error
UpdateProvingStatus(hash string, status types.ProvingStatus) error
ResetProvingStatusFor(before types.ProvingStatus) error
NewBatchInDBTx(dbTx *sqlx.Tx, batchData *types.BatchData) error
BatchRecordExist(hash string) (bool, error)
GetPendingBatches(limit uint64) ([]string, error)
GetCommittedBatches(limit uint64) ([]string, error)
GetRollupStatus(hash string) (types.RollupStatus, error)
GetRollupStatusByHashList(hashes []string) ([]types.RollupStatus, error)
GetLatestBatch() (*types.BlockBatch, error)
GetLatestCommittedBatch() (*types.BlockBatch, error)
GetLatestFinalizedBatch() (*types.BlockBatch, error)
GetLatestFinalizingOrFinalizedBatch() (*types.BlockBatch, error)
UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error
UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error
UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error
GetAssignedBatchHashes() ([]string, error)
UpdateSkippedBatches() (int64, error)
GetBatchCount() (int64, error)
UpdateL2OracleTxHash(ctx context.Context, hash, txHash string) error
UpdateL2GasOracleStatus(ctx context.Context, hash string, status types.GasOracleStatus) error
UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error
GetCommitTxHash(hash string) (sql.NullString, error) // for unit tests only
GetFinalizeTxHash(hash string) (sql.NullString, error) // for unit tests only
}
// L1MessageOrm is layer1 message db interface
type L1MessageOrm interface {
GetL1MessageByQueueIndex(queueIndex uint64) (*types.L1Message, error)
GetL1MessageByMsgHash(msgHash string) (*types.L1Message, error)
GetL1MessagesByStatus(status types.MsgStatus, limit uint64) ([]*types.L1Message, error)
GetL1ProcessedQueueIndex() (int64, error)
SaveL1Messages(ctx context.Context, messages []*types.L1Message) error
UpdateLayer2Hash(ctx context.Context, msgHash string, layer2Hash string) error
UpdateLayer1Status(ctx context.Context, msgHash string, status types.MsgStatus) error
UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer2Hash string) error
GetLayer1LatestWatchedHeight() (int64, error)
GetRelayL1MessageTxHash(queueIndex uint64) (sql.NullString, error) // for unit tests only
}
// L2MessageOrm is layer2 message db interface
type L2MessageOrm interface {
GetL2MessageByNonce(nonce uint64) (*types.L2Message, error)
GetL2MessageByMsgHash(msgHash string) (*types.L2Message, error)
MessageProofExist(nonce uint64) (bool, error)
GetMessageProofByNonce(nonce uint64) (string, error)
GetL2Messages(fields map[string]interface{}, args ...string) ([]*types.L2Message, error)
GetL2ProcessedNonce() (int64, error)
SaveL2Messages(ctx context.Context, messages []*types.L2Message) error
UpdateLayer1Hash(ctx context.Context, msgHash string, layer1Hash string) error
UpdateLayer2Status(ctx context.Context, msgHash string, status types.MsgStatus) error
UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer1Hash string) error
UpdateMessageProof(ctx context.Context, nonce uint64, proof string) error
GetLayer2LatestWatchedHeight() (int64, error)
GetRelayL2MessageTxHash(nonce uint64) (sql.NullString, error) // for unit tests only
}

View File

@@ -1,150 +0,0 @@
package orm
import (
"context"
"database/sql"
"errors"
"fmt"
"strings"
"scroll-tech/common/types"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/log"
)
type l1BlockOrm struct {
db *sqlx.DB
}
var _ L1BlockOrm = (*l1BlockOrm)(nil)
// NewL1BlockOrm create an l1BlockOrm instance
func NewL1BlockOrm(db *sqlx.DB) L1BlockOrm {
return &l1BlockOrm{db: db}
}
func (l *l1BlockOrm) GetL1BlockInfos(fields map[string]interface{}, args ...string) ([]*types.L1BlockInfo, error) {
query := "SELECT * FROM l1_block WHERE 1 = 1 "
for key := range fields {
query += fmt.Sprintf("AND %s=:%s ", key, key)
}
query = strings.Join(append([]string{query}, args...), " ")
query += " ORDER BY number ASC"
db := l.db
rows, err := db.NamedQuery(db.Rebind(query), fields)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
var blocks []*types.L1BlockInfo
for rows.Next() {
block := &types.L1BlockInfo{}
if err = rows.StructScan(block); err != nil {
break
}
blocks = append(blocks, block)
}
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, err
}
return blocks, nil
}
func (l *l1BlockOrm) InsertL1Blocks(ctx context.Context, blocks []*types.L1BlockInfo) error {
if len(blocks) == 0 {
return nil
}
blockMaps := make([]map[string]interface{}, len(blocks))
for i, block := range blocks {
blockMaps[i] = map[string]interface{}{
"number": block.Number,
"hash": block.Hash,
"header_rlp": block.HeaderRLP,
"base_fee": block.BaseFee,
}
}
_, err := l.db.NamedExec(`INSERT INTO public.l1_block (number, hash, header_rlp, base_fee) VALUES (:number, :hash, :header_rlp, :base_fee);`, blockMaps)
if err != nil {
log.Error("failed to insert L1 Blocks", "err", err)
}
return err
}
func (l *l1BlockOrm) DeleteHeaderRLPByBlockHash(ctx context.Context, blockHash string) error {
if _, err := l.db.Exec(l.db.Rebind("update l1_block set header_rlp = ? where hash = ?;"), "", blockHash); err != nil {
return err
}
return nil
}
func (l *l1BlockOrm) UpdateImportTxHash(ctx context.Context, blockHash, txHash string) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update l1_block set import_tx_hash = ? where hash = ?;"), txHash, blockHash); err != nil {
return err
}
return nil
}
func (l *l1BlockOrm) UpdateL1BlockStatus(ctx context.Context, blockHash string, status types.L1BlockStatus) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update l1_block set block_status = ? where hash = ?;"), status, blockHash); err != nil {
return err
}
return nil
}
func (l *l1BlockOrm) UpdateL1BlockStatusAndImportTxHash(ctx context.Context, blockHash string, status types.L1BlockStatus, txHash string) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update l1_block set block_status = ?, import_tx_hash = ? where hash = ?;"), status, txHash, blockHash); err != nil {
return err
}
return nil
}
func (l *l1BlockOrm) UpdateL1OracleTxHash(ctx context.Context, blockHash, txHash string) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update l1_block set oracle_tx_hash = ? where hash = ?;"), txHash, blockHash); err != nil {
return err
}
return nil
}
func (l *l1BlockOrm) UpdateL1GasOracleStatus(ctx context.Context, blockHash string, status types.GasOracleStatus) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update l1_block set oracle_status = ? where hash = ?;"), status, blockHash); err != nil {
return err
}
return nil
}
func (l *l1BlockOrm) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update l1_block set oracle_status = ?, oracle_tx_hash = ? where hash = ?;"), status, txHash, blockHash); err != nil {
return err
}
return nil
}
func (l *l1BlockOrm) GetLatestL1BlockHeight() (uint64, error) {
row := l.db.QueryRow("SELECT COALESCE(MAX(number), 0) FROM l1_block;")
var height uint64
if err := row.Scan(&height); err != nil {
return 0, err
}
return height, nil
}
func (l *l1BlockOrm) GetLatestImportedL1Block() (*types.L1BlockInfo, error) {
row := l.db.QueryRowx(`SELECT * FROM l1_block WHERE block_status = $1 ORDER BY index DESC;`, types.L1BlockImported)
block := &types.L1BlockInfo{}
if err := row.StructScan(block); err != nil {
return nil, err
}
return block, nil
}

View File

@@ -1,180 +0,0 @@
package orm
import (
"context"
"database/sql"
"errors"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types"
)
type l1MessageOrm struct {
db *sqlx.DB
}
var _ L1MessageOrm = (*l1MessageOrm)(nil)
// NewL1MessageOrm create an L1MessageOrm instance
func NewL1MessageOrm(db *sqlx.DB) L1MessageOrm {
return &l1MessageOrm{db: db}
}
// GetL1MessageByMsgHash fetch message by msg_hash
func (m *l1MessageOrm) GetL1MessageByMsgHash(msgHash string) (*types.L1Message, error) {
msg := types.L1Message{}
row := m.db.QueryRowx(`SELECT queue_index, msg_hash, height, sender, target, value, gas_limit, calldata, layer1_hash, status FROM l1_message WHERE msg_hash = $1`, msgHash)
if err := row.StructScan(&msg); err != nil {
return nil, err
}
return &msg, nil
}
// GetL1MessageByQueueIndex fetch message by queue_index
func (m *l1MessageOrm) GetL1MessageByQueueIndex(queueIndex uint64) (*types.L1Message, error) {
msg := types.L1Message{}
row := m.db.QueryRowx(`SELECT queue_index, msg_hash, height, sender, target, value, calldata, layer1_hash, status FROM l1_message WHERE queue_index = $1`, queueIndex)
if err := row.StructScan(&msg); err != nil {
return nil, err
}
return &msg, nil
}
// GetL1MessagesByStatus fetch list of unprocessed messages given msg status
func (m *l1MessageOrm) GetL1MessagesByStatus(status types.MsgStatus, limit uint64) ([]*types.L1Message, error) {
rows, err := m.db.Queryx(`SELECT queue_index, msg_hash, height, sender, target, value, calldata, layer1_hash, status FROM l1_message WHERE status = $1 ORDER BY queue_index ASC LIMIT $2;`, status, limit)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
var msgs []*types.L1Message
for rows.Next() {
msg := &types.L1Message{}
if err = rows.StructScan(msg); err != nil {
break
}
msgs = append(msgs, msg)
}
if len(msgs) == 0 || errors.Is(err, sql.ErrNoRows) {
// log.Warn("no unprocessed layer1 messages in db", "err", err)
} else if err != nil {
return nil, err
}
return msgs, nil
}
// GetL1ProcessedQueueIndex fetch latest processed message queue_index
func (m *l1MessageOrm) GetL1ProcessedQueueIndex() (int64, error) {
row := m.db.QueryRow(`SELECT MAX(queue_index) FROM l1_message WHERE status = $1;`, types.MsgConfirmed)
var queueIndex sql.NullInt64
if err := row.Scan(&queueIndex); err != nil {
if err == sql.ErrNoRows || !queueIndex.Valid {
// no row means no message
// since queueIndex starts with 0, return -1 as the processed queueIndex
return -1, nil
}
return 0, err
}
if queueIndex.Valid {
return queueIndex.Int64, nil
}
return -1, nil
}
// SaveL1Messages batch save a list of layer1 messages
func (m *l1MessageOrm) SaveL1Messages(ctx context.Context, messages []*types.L1Message) error {
if len(messages) == 0 {
return nil
}
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"queue_index": msg.QueueIndex,
"msg_hash": msg.MsgHash,
"height": msg.Height,
"sender": msg.Sender,
"target": msg.Target,
"value": msg.Value,
"gas_limit": msg.GasLimit,
"calldata": msg.Calldata,
"layer1_hash": msg.Layer1Hash,
}
}
_, err := m.db.NamedExec(`INSERT INTO public.l1_message (queue_index, msg_hash, height, sender, target, value, gas_limit, calldata, layer1_hash) VALUES (:queue_index, :msg_hash, :height, :sender, :target, :value, :gas_limit, :calldata, :layer1_hash);`, messageMaps)
if err != nil {
queueIndices := make([]uint64, 0, len(messages))
heights := make([]uint64, 0, len(messages))
for _, msg := range messages {
queueIndices = append(queueIndices, msg.QueueIndex)
heights = append(heights, msg.Height)
}
log.Error("failed to insert l1Messages", "queueIndices", queueIndices, "heights", heights, "err", err)
}
return err
}
// UpdateLayer2Hash update corresponding layer2 hash, given message hash
func (m *l1MessageOrm) UpdateLayer2Hash(ctx context.Context, msgHash, layer2Hash string) error {
if _, err := m.db.ExecContext(ctx, m.db.Rebind("update l1_message set layer2_hash = ? where msg_hash = ?;"), layer2Hash, msgHash); err != nil {
return err
}
return nil
}
// UpdateLayer1Status updates message status, given message hash
func (m *l1MessageOrm) UpdateLayer1Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
if _, err := m.db.ExecContext(ctx, m.db.Rebind("update l1_message set status = ? where msg_hash = ?;"), status, msgHash); err != nil {
return err
}
return nil
}
// UpdateLayer1StatusAndLayer2Hash updates message status and layer2 transaction hash, given message hash
func (m *l1MessageOrm) UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer2Hash string) error {
if _, err := m.db.ExecContext(ctx, m.db.Rebind("update l1_message set status = ?, layer2_hash = ? where msg_hash = ?;"), status, layer2Hash, msgHash); err != nil {
return err
}
return nil
}
// GetLayer1LatestWatchedHeight returns latest height stored in the table
func (m *l1MessageOrm) GetLayer1LatestWatchedHeight() (int64, error) {
// @note This is not exact, since some blocks may contain no messages.
// But it is only called at startup, so some redundancy is acceptable.
row := m.db.QueryRow("SELECT MAX(height) FROM l1_message;")
var height sql.NullInt64
if err := row.Scan(&height); err != nil {
if err == sql.ErrNoRows || !height.Valid {
return -1, nil
}
return 0, err
}
if height.Valid {
return height.Int64, nil
}
return -1, nil
}
func (m *l1MessageOrm) GetRelayL1MessageTxHash(queueIndex uint64) (sql.NullString, error) {
row := m.db.QueryRow(`SELECT layer2_hash FROM l1_message WHERE queue_index = $1`, queueIndex)
var hash sql.NullString
if err := row.Scan(&hash); err != nil {
return sql.NullString{}, err
}
return hash, nil
}
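SaveL1Messages above batches its insert by handing NamedExec a slice of maps, which sqlx expands into a single multi-row VALUES statement. The same expansion works for a slice of structs with `db` tags; a sketch with a hypothetical row type (columns abbreviated for illustration):

package example

import "github.com/jmoiron/sqlx"

// msgRow is an illustrative stand-in; `db` tags bind the named parameters.
type msgRow struct {
	QueueIndex uint64 `db:"queue_index"`
	MsgHash    string `db:"msg_hash"`
	Height     uint64 `db:"height"`
}

// saveAll inserts every row in one round trip: NamedExec expands a
// slice argument into `(...), (...), ...` value tuples automatically.
func saveAll(db *sqlx.DB, rows []msgRow) error {
	if len(rows) == 0 {
		return nil
	}
	_, err := db.NamedExec(
		`INSERT INTO l1_message (queue_index, msg_hash, height)
		 VALUES (:queue_index, :msg_hash, :height)`, rows)
	return err
}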

View File

@@ -1,217 +0,0 @@
package orm
import (
"context"
"database/sql"
"errors"
"fmt"
"strings"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types"
)
type layer2MessageOrm struct {
db *sqlx.DB
}
var _ L2MessageOrm = (*layer2MessageOrm)(nil)
// NewL2MessageOrm create an L2MessageOrm instance
func NewL2MessageOrm(db *sqlx.DB) L2MessageOrm {
return &layer2MessageOrm{db: db}
}
// GetL2MessageByNonce fetch message by nonce
func (m *layer2MessageOrm) GetL2MessageByNonce(nonce uint64) (*types.L2Message, error) {
msg := types.L2Message{}
row := m.db.QueryRowx(`SELECT nonce, msg_hash, height, sender, target, value, calldata, layer2_hash, status FROM l2_message WHERE nonce = $1`, nonce)
if err := row.StructScan(&msg); err != nil {
return nil, err
}
return &msg, nil
}
// GetL2MessageByMsgHash fetch message by message hash
func (m *layer2MessageOrm) GetL2MessageByMsgHash(msgHash string) (*types.L2Message, error) {
msg := types.L2Message{}
row := m.db.QueryRowx(`SELECT nonce, msg_hash, height, sender, target, value, calldata, layer2_hash, status FROM l2_message WHERE msg_hash = $1`, msgHash)
if err := row.StructScan(&msg); err != nil {
return nil, err
}
return &msg, nil
}
// GetMessageProofByNonce fetch message proof by nonce
func (m *layer2MessageOrm) GetMessageProofByNonce(nonce uint64) (string, error) {
row := m.db.QueryRow(`SELECT proof FROM l2_message WHERE nonce = $1`, nonce)
var proof sql.NullString
if err := row.Scan(&proof); err != nil {
return "", err
}
if proof.Valid {
return proof.String, nil
}
return "", nil
}
// MessageProofExist check whether a message proof exists for the given nonce
func (m *layer2MessageOrm) MessageProofExist(nonce uint64) (bool, error) {
err := m.db.QueryRow(`SELECT nonce FROM l2_message WHERE nonce = $1 and proof IS NOT NULL`, nonce).Scan(&nonce)
if err != nil {
if err != sql.ErrNoRows {
return false, err
}
return false, nil
}
return true, nil
}
// GetL2ProcessedNonce fetch latest processed message nonce
func (m *layer2MessageOrm) GetL2ProcessedNonce() (int64, error) {
row := m.db.QueryRow(`SELECT MAX(nonce) FROM l2_message WHERE status = $1;`, types.MsgConfirmed)
// no row means no message
// since nonce starts with 0, return -1 as the processed nonce
var nonce sql.NullInt64
if err := row.Scan(&nonce); err != nil {
if err == sql.ErrNoRows || !nonce.Valid {
return -1, nil
}
return 0, err
}
if nonce.Valid {
return nonce.Int64, nil
}
return -1, nil
}
// GetL2Messages fetch list of messages matching the given query fields
func (m *layer2MessageOrm) GetL2Messages(fields map[string]interface{}, args ...string) ([]*types.L2Message, error) {
query := "SELECT nonce, msg_hash, height, sender, target, value, calldata, layer2_hash FROM l2_message WHERE 1 = 1 "
for key := range fields {
query += fmt.Sprintf("AND %s=:%s ", key, key)
}
query = strings.Join(append([]string{query}, args...), " ")
db := m.db
rows, err := db.NamedQuery(db.Rebind(query), fields)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
var msgs []*types.L2Message
for rows.Next() {
msg := &types.L2Message{}
if err = rows.StructScan(msg); err != nil {
break
}
msgs = append(msgs, msg)
}
if len(msgs) == 0 || errors.Is(err, sql.ErrNoRows) {
// log.Warn("no unprocessed layer2 messages in db", "err", err)
} else if err != nil {
return nil, err
}
return msgs, nil
}
// SaveL2Messages batch save a list of layer2 messages
func (m *layer2MessageOrm) SaveL2Messages(ctx context.Context, messages []*types.L2Message) error {
if len(messages) == 0 {
return nil
}
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"nonce": msg.Nonce,
"msg_hash": msg.MsgHash,
"height": msg.Height,
"sender": msg.Sender,
"target": msg.Target,
"value": msg.Value,
"calldata": msg.Calldata,
"layer2_hash": msg.Layer2Hash,
}
}
_, err := m.db.NamedExec(`INSERT INTO public.l2_message (nonce, msg_hash, height, sender, target, value, calldata, layer2_hash) VALUES (:nonce, :msg_hash, :height, :sender, :target, :value, :calldata, :layer2_hash);`, messageMaps)
if err != nil {
nonces := make([]uint64, 0, len(messages))
heights := make([]uint64, 0, len(messages))
for _, msg := range messages {
nonces = append(nonces, msg.Nonce)
heights = append(heights, msg.Height)
}
log.Error("failed to insert layer2Messages", "nonces", nonces, "heights", heights, "err", err)
}
return err
}
// UpdateLayer1Hash update corresponding layer1 hash, given message hash
func (m *layer2MessageOrm) UpdateLayer1Hash(ctx context.Context, msgHash, layer1Hash string) error {
if _, err := m.db.ExecContext(ctx, m.db.Rebind("update l2_message set layer1_hash = ? where msg_hash = ?;"), layer1Hash, msgHash); err != nil {
return err
}
return nil
}
// UpdateMessageProof update corresponding message proof, given message nonce
func (m *layer2MessageOrm) UpdateMessageProof(ctx context.Context, nonce uint64, proof string) error {
if _, err := m.db.ExecContext(ctx, m.db.Rebind("update l2_message set proof = ? where nonce = ?;"), proof, nonce); err != nil {
return err
}
return nil
}
// UpdateLayer2Status updates message status, given message hash
func (m *layer2MessageOrm) UpdateLayer2Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
if _, err := m.db.ExecContext(ctx, m.db.Rebind("update l2_message set status = ? where msg_hash = ?;"), status, msgHash); err != nil {
return err
}
return nil
}
// UpdateLayer2StatusAndLayer1Hash updates message status and layer1 transaction hash, given message hash
func (m *layer2MessageOrm) UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer1Hash string) error {
if _, err := m.db.ExecContext(ctx, m.db.Rebind("update l2_message set status = ?, layer1_hash = ? where msg_hash = ?;"), status, layer1Hash, msgHash); err != nil {
return err
}
return nil
}
// GetLayer2LatestWatchedHeight returns latest height stored in the table
func (m *layer2MessageOrm) GetLayer2LatestWatchedHeight() (int64, error) {
// @note This is not exact, since some blocks may contain no messages.
// But it is only called at startup, so some redundancy is acceptable.
row := m.db.QueryRow("SELECT COALESCE(MAX(height), -1) FROM l2_message;")
var height int64
if err := row.Scan(&height); err != nil {
return -1, err
}
if height < 0 {
return -1, fmt.Errorf("could not get height: database returned a negative value")
}
return height, nil
}
func (m *layer2MessageOrm) GetRelayL2MessageTxHash(nonce uint64) (sql.NullString, error) {
row := m.db.QueryRow(`SELECT layer1_hash FROM l2_message WHERE nonce = $1`, nonce)
var hash sql.NullString
if err := row.Scan(&hash); err != nil {
return sql.NullString{}, err
}
return hash, nil
}
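Two conventions for the empty-table case sit side by side in this file: GetL2ProcessedNonce scans MAX(nonce) into a sql.NullInt64 and maps NULL to -1 in Go, while GetLayer2LatestWatchedHeight pushes the same mapping into SQL with COALESCE(MAX(height), -1). Both guard against MAX over zero rows yielding NULL, which a plain int64 scan rejects. A reduced side-by-side sketch:

package example

import "database/sql"

// maxNonce handles the NULL in Go via sql.NullInt64.
func maxNonce(db *sql.DB) (int64, error) {
	var n sql.NullInt64
	if err := db.QueryRow(`SELECT MAX(nonce) FROM l2_message`).Scan(&n); err != nil {
		return 0, err
	}
	if !n.Valid { // zero rows: MAX(...) is NULL
		return -1, nil
	}
	return n.Int64, nil
}

// maxHeight handles the NULL in SQL, so a plain int64 scan is safe.
func maxHeight(db *sql.DB) (int64, error) {
	var h int64
	err := db.QueryRow(`SELECT COALESCE(MAX(height), -1) FROM l2_message`).Scan(&h)
	return h, err
}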

View File

@@ -1,65 +0,0 @@
package orm
import (
"context"
"github.com/jmoiron/sqlx"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
type sessionInfoOrm struct {
db *sqlx.DB
}
var _ SessionInfoOrm = (*sessionInfoOrm)(nil)
// NewSessionInfoOrm creates a sessionInfoOrm instance
func NewSessionInfoOrm(db *sqlx.DB) SessionInfoOrm {
return &sessionInfoOrm{db: db}
}
func (o *sessionInfoOrm) GetSessionInfosByHashes(hashes []string) ([]*types.SessionInfo, error) {
if len(hashes) == 0 {
return nil, nil
}
query, args, err := sqlx.In("SELECT * FROM session_info WHERE task_id IN (?);", hashes)
if err != nil {
return nil, err
}
rows, err := o.db.Queryx(o.db.Rebind(query), args...)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
var sessionInfos []*types.SessionInfo
for rows.Next() {
var sessionInfo types.SessionInfo
if err = rows.StructScan(&sessionInfo); err != nil {
return nil, err
}
sessionInfos = append(sessionInfos, &sessionInfo)
}
if err = rows.Err(); err != nil {
return nil, err
}
return sessionInfos, nil
}
func (o *sessionInfoOrm) SetSessionInfo(rollersInfo *types.SessionInfo) error {
sqlStr := "INSERT INTO session_info (task_id, roller_public_key, prove_type, roller_name, proving_status, failure_type, reward, proof, created_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) ON CONFLICT (task_id, roller_public_key) DO UPDATE SET proving_status = EXCLUDED.proving_status;"
_, err := o.db.Exec(sqlStr, rollersInfo.TaskID, rollersInfo.RollerPublicKey, rollersInfo.ProveType, rollersInfo.RollerName,
rollersInfo.ProvingStatus, rollersInfo.FailureType, rollersInfo.Reward, rollersInfo.Proof, rollersInfo.CreatedAt)
return err
}
// UpdateSessionInfoProvingStatus update the session info proving status
func (o *sessionInfoOrm) UpdateSessionInfoProvingStatus(ctx context.Context, proveType message.ProveType, taskID string, pk string, status types.RollerProveStatus) error {
if _, err := o.db.ExecContext(ctx, o.db.Rebind("update session_info set proving_status = ? where prove_type = ? and task_id = ? and roller_public_key = ? ;"), int(proveType), int(status), taskID, pk); err != nil {
return err
}
return nil
}

View File

@@ -3,33 +3,16 @@ package database
import (
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //nolint:golint
"scroll-tech/database/orm"
)
// OrmFactory include all ormFactory interface
type OrmFactory interface {
orm.BlockTraceOrm
orm.BlockBatchOrm
orm.L1BlockOrm
orm.L1MessageOrm
orm.L2MessageOrm
orm.SessionInfoOrm
orm.AggTaskOrm
GetDB() *sqlx.DB
Beginx() (*sqlx.Tx, error)
Close() error
}
type ormFactory struct {
orm.BlockTraceOrm
orm.BlockBatchOrm
orm.L1BlockOrm
orm.L1MessageOrm
orm.L2MessageOrm
orm.SessionInfoOrm
orm.AggTaskOrm
*sqlx.DB
db *sqlx.DB
}
// NewOrmFactory creates an ormFactory that includes all orm interfaces
@@ -47,21 +30,14 @@ func NewOrmFactory(cfg *DBConfig) (OrmFactory, error) {
}
return &ormFactory{
BlockTraceOrm: orm.NewBlockTraceOrm(db),
BlockBatchOrm: orm.NewBlockBatchOrm(db),
L1MessageOrm: orm.NewL1MessageOrm(db),
L2MessageOrm: orm.NewL2MessageOrm(db),
L1BlockOrm: orm.NewL1BlockOrm(db),
SessionInfoOrm: orm.NewSessionInfoOrm(db),
AggTaskOrm: orm.NewAggTaskOrm(db),
DB: db,
db: db,
}, nil
}
func (o *ormFactory) GetDB() *sqlx.DB {
return o.DB
return o.db
}
func (o *ormFactory) Beginx() (*sqlx.Tx, error) {
return o.DB.Beginx()
return o.db.Beginx()
}
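The factory change above is Go's interface-embedding facade in miniature: the old ormFactory embedded seven orm interfaces plus *sqlx.DB, promoting all their methods onto a single OrmFactory value, while the rollup-v2 version keeps only an unexported db handle behind explicit accessors. A reduced sketch of the pre-change composition pattern (toy interfaces, not the real ORMs):

package example

// Two narrow interfaces...
type Reader interface{ Get(key string) (string, error) }
type Writer interface{ Set(key, value string) error }

// ...composed into one facade by embedding them in a larger interface.
type Store interface {
	Reader
	Writer
}

// store embeds the interfaces, so their methods are promoted:
// store.Get(...) and store.Set(...) delegate to the embedded values.
type store struct {
	Reader
	Writer
}

// NewStore wires concrete implementations into the facade, mirroring
// how NewOrmFactory assembled per-table ORMs around one shared DB handle.
func NewStore(r Reader, w Writer) Store {
	return &store{Reader: r, Writer: w}
}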

View File

@@ -1,537 +0,0 @@
package database_test
import (
"context"
"encoding/json"
"fmt"
"math/big"
"os"
"testing"
"time"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
abi "scroll-tech/bridge/abi"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
)
var (
templateL1Message = []*types.L1Message{
{
QueueIndex: 1,
MsgHash: "msg_hash1",
Height: 1,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "0x19ece",
GasLimit: 11529940,
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer1Hash: "hash0",
},
{
QueueIndex: 2,
MsgHash: "msg_hash2",
Height: 2,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "0x19ece",
GasLimit: 11529940,
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer1Hash: "hash1",
},
}
templateL2Message = []*types.L2Message{
{
Nonce: 1,
MsgHash: "msg_hash1",
Height: 1,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "0x19ece",
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer2Hash: "hash0",
},
{
Nonce: 2,
MsgHash: "msg_hash2",
Height: 2,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "0x19ece",
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer2Hash: "hash1",
},
}
proof1 = &message.AggProof{
Proof: []byte{1},
FinalPair: []byte{2},
}
subProofs = []*message.AggProof{proof1}
aggTask1 = &types.AggTask{ID: "test-agg-1"}
aggTask2 = &types.AggTask{ID: "test-agg-2"}
wrappedBlock *types.WrappedBlock
batchData1 *types.BatchData
batchData2 *types.BatchData
base *docker.App
ormBlock orm.BlockTraceOrm
ormLayer1 orm.L1MessageOrm
ormLayer2 orm.L2MessageOrm
ormBatch orm.BlockBatchOrm
ormSession orm.SessionInfoOrm
ormAggTask orm.AggTaskOrm
)
func setupEnv(t *testing.T) error {
// Start postgres docker container.
base.RunDBImage(t)
// Create db handler and reset db.
factory, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
db := factory.GetDB()
assert.NoError(t, migrate.ResetDB(db.DB))
// Init several orm handles.
ormBlock = orm.NewBlockTraceOrm(db)
ormLayer1 = orm.NewL1MessageOrm(db)
ormLayer2 = orm.NewL2MessageOrm(db)
ormBatch = orm.NewBlockBatchOrm(db)
ormSession = orm.NewSessionInfoOrm(db)
ormAggTask = orm.NewAggTaskOrm(db)
templateBlockTrace, err := os.ReadFile("../common/testdata/blockTrace_02.json")
if err != nil {
return err
}
// unmarshal blockTrace
wrappedBlock = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace, wrappedBlock); err != nil {
return err
}
parentBatch := &types.BlockBatch{
Index: 1,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData1 = types.NewBatchData(parentBatch, []*types.WrappedBlock{wrappedBlock}, nil)
aggTask1.StartBatchIndex = batchData1.Batch.BatchIndex
aggTask1.EndBatchIndex = batchData1.Batch.BatchIndex
aggTask1.StartBatchHash = batchData1.Hash().Hex()
aggTask1.EndBatchHash = batchData1.Hash().Hex()
templateBlockTrace, err = os.ReadFile("../common/testdata/blockTrace_03.json")
if err != nil {
return err
}
// unmarshal blockTrace
wrappedBlock2 := &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace, wrappedBlock2); err != nil {
return err
}
parentBatch2 := &types.BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
}
batchData2 = types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil)
// insert a fake empty block into batchData2
fakeBlockContext := abi.IScrollChainBlockContext{
BlockHash: common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000dead"),
ParentHash: batchData2.Batch.Blocks[0].BlockHash,
BlockNumber: batchData2.Batch.Blocks[0].BlockNumber + 1,
BaseFee: new(big.Int).SetUint64(0),
Timestamp: 123456789,
GasLimit: 10000000000000000,
NumTransactions: 0,
NumL1Messages: 0,
}
batchData2.Batch.Blocks = append(batchData2.Batch.Blocks, fakeBlockContext)
batchData2.Batch.NewStateRoot = common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000cafe")
fmt.Printf("batchhash1 = %x\n", batchData1.Hash())
fmt.Printf("batchhash2 = %x\n", batchData2.Hash())
return nil
}
// TestOrmFactory runs several test cases.
func TestOrmFactory(t *testing.T) {
base = docker.NewDockerApp()
defer func() {
base.Free()
}()
t.Log("test database DSN is ", base.DBConfig.DSN)
if err := setupEnv(t); err != nil {
t.Fatal(err)
}
t.Run("testOrmBlockTraces", testOrmBlockTraces)
t.Run("testOrmL1Message", testOrmL1Message)
t.Run("testOrmL2Message", testOrmL2Message)
t.Run("testOrmBlockBatch", testOrmBlockBatch)
t.Run("testOrmSessionInfo", testOrmSessionInfo)
t.Run("testOrmAggTask", testOrmAggTask)
}
func testOrmBlockTraces(t *testing.T) {
// Create db handler and reset db.
factory, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(factory.GetDB().DB))
res, err := ormBlock.GetL2WrappedBlocks(map[string]interface{}{})
assert.NoError(t, err)
assert.Equal(t, true, len(res) == 0)
exist, err := ormBlock.IsL2BlockExists(wrappedBlock.Header.Number.Uint64())
assert.NoError(t, err)
assert.Equal(t, false, exist)
// Insert into db
assert.NoError(t, ormBlock.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock}))
res2, err := ormBlock.GetUnbatchedL2Blocks(map[string]interface{}{})
assert.NoError(t, err)
assert.Equal(t, true, len(res2) == 1)
exist, err = ormBlock.IsL2BlockExists(wrappedBlock.Header.Number.Uint64())
assert.NoError(t, err)
assert.Equal(t, true, exist)
res, err = ormBlock.GetL2WrappedBlocks(map[string]interface{}{
"hash": wrappedBlock.Header.Hash().String(),
})
assert.NoError(t, err)
assert.Equal(t, true, len(res) == 1)
// Compare trace
data1, err := json.Marshal(res[0])
assert.NoError(t, err)
data2, err := json.Marshal(wrappedBlock)
assert.NoError(t, err)
// check trace
assert.Equal(t, true, string(data1) == string(data2))
}
func testOrmL1Message(t *testing.T) {
// Create db handler and reset db.
factory, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(factory.GetDB().DB))
expected := "expect hash"
// Insert into db
err = ormLayer1.SaveL1Messages(context.Background(), templateL1Message)
assert.NoError(t, err)
err = ormLayer1.UpdateLayer1Status(context.Background(), "msg_hash1", types.MsgConfirmed)
assert.NoError(t, err)
err = ormLayer1.UpdateLayer1Status(context.Background(), "msg_hash2", types.MsgSubmitted)
assert.NoError(t, err)
err = ormLayer1.UpdateLayer2Hash(context.Background(), "msg_hash2", expected)
assert.NoError(t, err)
result, err := ormLayer1.GetL1ProcessedQueueIndex()
assert.NoError(t, err)
assert.Equal(t, int64(1), result)
height, err := ormLayer1.GetLayer1LatestWatchedHeight()
assert.NoError(t, err)
assert.Equal(t, int64(2), height)
msg, err := ormLayer1.GetL1MessageByMsgHash("msg_hash2")
assert.NoError(t, err)
assert.Equal(t, types.MsgSubmitted, msg.Status)
}
func testOrmL2Message(t *testing.T) {
// Create db handler and reset db.
factory, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(factory.GetDB().DB))
expected := "expect hash"
// Insert into db
err = ormLayer2.SaveL2Messages(context.Background(), templateL2Message)
assert.NoError(t, err)
err = ormLayer2.UpdateLayer2Status(context.Background(), "msg_hash1", types.MsgConfirmed)
assert.NoError(t, err)
err = ormLayer2.UpdateLayer2Status(context.Background(), "msg_hash2", types.MsgSubmitted)
assert.NoError(t, err)
err = ormLayer2.UpdateLayer1Hash(context.Background(), "msg_hash2", expected)
assert.NoError(t, err)
result, err := ormLayer2.GetL2ProcessedNonce()
assert.NoError(t, err)
assert.Equal(t, int64(1), result)
height, err := ormLayer2.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
assert.Equal(t, int64(2), height)
msg, err := ormLayer2.GetL2MessageByMsgHash("msg_hash2")
assert.NoError(t, err)
assert.Equal(t, types.MsgSubmitted, msg.Status)
assert.Equal(t, msg.MsgHash, "msg_hash2")
}
// testOrmBlockBatch tests the rollup result table functions
func testOrmBlockBatch(t *testing.T) {
// Create db handler and reset db.
factory, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(factory.GetDB().DB))
dbTx, err := factory.Beginx()
assert.NoError(t, err)
err = ormBatch.NewBatchInDBTx(dbTx, batchData1)
assert.NoError(t, err)
batchHash1 := batchData1.Hash().Hex()
err = ormBlock.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData1.Batch.Blocks[0].BlockNumber}, batchHash1)
assert.NoError(t, err)
err = ormBatch.NewBatchInDBTx(dbTx, batchData2)
assert.NoError(t, err)
batchHash2 := batchData2.Hash().Hex()
err = ormBlock.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData2.Batch.Blocks[0].BlockNumber,
batchData2.Batch.Blocks[1].BlockNumber}, batchHash2)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
batches, err := ormBatch.GetBlockBatches(map[string]interface{}{})
assert.NoError(t, err)
assert.Equal(t, int(2), len(batches))
batcheHashes, err := ormBatch.GetPendingBatches(10)
assert.NoError(t, err)
assert.Equal(t, int(2), len(batcheHashes))
assert.Equal(t, batchHash1, batcheHashes[0])
assert.Equal(t, batchHash2, batcheHashes[1])
err = ormBatch.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash1, "commit_tx_1", types.RollupCommitted)
assert.NoError(t, err)
batcheHashes, err = ormBatch.GetPendingBatches(10)
assert.NoError(t, err)
assert.Equal(t, int(1), len(batcheHashes))
assert.Equal(t, batchHash2, batcheHashes[0])
provingStatus, err := ormBatch.GetProvingStatusByHash(batchHash1)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskUnassigned, provingStatus)
err = ormBatch.UpdateProofByHash(context.Background(), batchHash1, proof1, 1200)
assert.NoError(t, err)
err = ormBatch.UpdateProvingStatus(batchHash1, types.ProvingTaskVerified)
assert.NoError(t, err)
provingStatus, err = ormBatch.GetProvingStatusByHash(batchHash1)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskVerified, provingStatus)
rollupStatus, err := ormBatch.GetRollupStatus(batchHash1)
assert.NoError(t, err)
assert.Equal(t, types.RollupCommitted, rollupStatus)
err = ormBatch.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash1, "finalize_tx_1", types.RollupFinalized)
assert.NoError(t, err)
rollupStatus, err = ormBatch.GetRollupStatus(batchHash1)
assert.NoError(t, err)
assert.Equal(t, types.RollupFinalized, rollupStatus)
result, err := ormBatch.GetLatestFinalizedBatch()
assert.NoError(t, err)
assert.Equal(t, batchHash1, result.Hash)
status1, err := ormBatch.GetRollupStatus(batchHash1)
assert.NoError(t, err)
status2, err := ormBatch.GetRollupStatus(batchHash2)
assert.NoError(t, err)
assert.NotEqual(t, status1, status2)
statues, err := ormBatch.GetRollupStatusByHashList([]string{batchHash1, batchHash2, batchHash1, batchHash2})
assert.NoError(t, err)
assert.Equal(t, statues[0], status1)
assert.Equal(t, statues[1], status2)
assert.Equal(t, statues[2], status1)
assert.Equal(t, statues[3], status2)
statues, err = ormBatch.GetRollupStatusByHashList([]string{batchHash2, batchHash1, batchHash2, batchHash1})
assert.NoError(t, err)
assert.Equal(t, statues[0], status2)
assert.Equal(t, statues[1], status1)
assert.Equal(t, statues[2], status2)
assert.Equal(t, statues[3], status1)
}
// testOrmSessionInfo tests the session_info table functions
func testOrmSessionInfo(t *testing.T) {
// Create db handler and reset db.
factory, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(factory.GetDB().DB))
dbTx, err := factory.Beginx()
assert.NoError(t, err)
err = ormBatch.NewBatchInDBTx(dbTx, batchData1)
batchHash := batchData1.Hash().Hex()
assert.NoError(t, err)
assert.NoError(t, ormBlock.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData1.Batch.Blocks[0].BlockNumber}, batchHash))
assert.NoError(t, dbTx.Commit())
assert.NoError(t, ormBatch.UpdateProvingStatus(batchHash, types.ProvingTaskAssigned))
// no session infos recorded yet
hashes, err := ormBatch.GetAssignedBatchHashes()
assert.NoError(t, err)
assert.Equal(t, 1, len(hashes))
sessionInfos, err := ormSession.GetSessionInfosByHashes(hashes)
assert.NoError(t, err)
assert.Equal(t, 0, len(sessionInfos))
now := time.Now()
sessionInfo := types.SessionInfo{
TaskID: batchHash,
RollerName: "roller-0",
RollerPublicKey: "0",
ProvingStatus: int16(types.RollerAssigned),
CreatedAt: &now,
}
// insert
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
sessionInfos, err = ormSession.GetSessionInfosByHashes(hashes)
assert.NoError(t, err)
assert.Equal(t, 1, len(sessionInfos))
assert.Equal(t, sessionInfo.RollerName, sessionInfos[0].RollerName)
// update
sessionInfo.ProvingStatus = int16(types.RollerProofValid)
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
sessionInfos, err = ormSession.GetSessionInfosByHashes(hashes)
assert.NoError(t, err)
assert.Equal(t, 1, len(sessionInfos))
assert.Equal(t, sessionInfo.ProvingStatus, sessionInfos[0].ProvingStatus)
// delete
assert.NoError(t, ormBatch.UpdateProvingStatus(batchHash, types.ProvingTaskVerified))
hashes, err = ormBatch.GetAssignedBatchHashes()
assert.NoError(t, err)
assert.Equal(t, 0, len(hashes))
sessionInfos, err = ormSession.GetSessionInfosByHashes(hashes)
assert.NoError(t, err)
assert.Equal(t, 0, len(sessionInfos))
}
func testOrmAggTask(t *testing.T) {
// Create db handler and reset db.
factory, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(factory.GetDB().DB))
// update block_batch with proof bytes
dbTx, err := factory.Beginx()
assert.NoError(t, err)
err = ormBatch.NewBatchInDBTx(dbTx, batchData1)
assert.NoError(t, err)
batchHash1 := batchData1.Hash().Hex()
err = ormBlock.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData1.Batch.Blocks[0].BlockNumber}, batchHash1)
assert.NoError(t, err)
err = ormBatch.NewBatchInDBTx(dbTx, batchData2)
assert.NoError(t, err)
batchHash2 := batchData2.Hash().Hex()
err = ormBlock.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData2.Batch.Blocks[0].BlockNumber,
batchData2.Batch.Blocks[1].BlockNumber}, batchHash2)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
batches, err := ormBatch.GetBlockBatches(map[string]interface{}{})
assert.NoError(t, err)
assert.Equal(t, int(2), len(batches))
batcheHashes, err := ormBatch.GetPendingBatches(10)
assert.NoError(t, err)
assert.Equal(t, int(2), len(batcheHashes))
assert.Equal(t, batchHash1, batcheHashes[0])
assert.Equal(t, batchHash2, batcheHashes[1])
err = ormBatch.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash1, "commit_tx_1", types.RollupCommitted)
assert.NoError(t, err)
batcheHashes, err = ormBatch.GetPendingBatches(10)
assert.NoError(t, err)
assert.Equal(t, int(1), len(batcheHashes))
assert.Equal(t, batchHash2, batcheHashes[0])
provingStatus, err := ormBatch.GetProvingStatusByHash(batchHash1)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskUnassigned, provingStatus)
err = ormBatch.UpdateProofByHash(context.Background(), batchHash1, proof1, 1200)
assert.NoError(t, err)
err = ormBatch.UpdateProvingStatus(batchHash1, types.ProvingTaskVerified)
assert.NoError(t, err)
provingStatus, err = ormBatch.GetProvingStatusByHash(batchHash1)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskVerified, provingStatus)
// insert the agg tasks into db
err = ormAggTask.InsertAggTask(aggTask1.ID, aggTask1.StartBatchIndex, aggTask1.StartBatchHash, aggTask1.EndBatchIndex, aggTask1.EndBatchHash)
assert.NoError(t, err)
err = ormAggTask.InsertAggTask(aggTask2.ID, aggTask2.StartBatchIndex, aggTask2.StartBatchHash, aggTask2.EndBatchIndex, aggTask2.EndBatchHash)
assert.NoError(t, err)
// get subProofs by agg task ID
getsProofs, err := ormAggTask.GetSubProofsByAggTaskID(aggTask1.ID)
assert.NoError(t, err)
assert.Equal(t, subProofs, getsProofs)
// get unassigned agg tasks
tasks, err := ormAggTask.GetUnassignedAggTasks()
assert.NoError(t, err)
assert.Equal(t, tasks[0].ID, aggTask1.ID)
assert.Equal(t, tasks[1].ID, aggTask2.ID)
// update the agg task status in db
err = ormAggTask.UpdateAggTaskStatus(aggTask1.ID, types.ProvingTaskAssigned)
assert.NoError(t, err)
// check assigned agg task
assigns, err := ormAggTask.GetAssignedAggTasks()
assert.NoError(t, err)
assert.Equal(t, assigns[0].ID, aggTask1.ID)
// insert aggregator proof
err = ormAggTask.UpdateProofForAggTask(aggTask1.ID, &message.AggProof{})
assert.NoError(t, err)
// mark verified
err = ormAggTask.UpdateAggTaskStatus(aggTask1.ID, types.ProvingTaskVerified)
assert.NoError(t, err)
// get the remaining unassigned tasks
unassignTasks, err := ormAggTask.GetUnassignedAggTasks()
assert.NoError(t, err)
assert.Equal(t, unassignTasks[0].ID, aggTask2.ID)
}

View File

@@ -25,7 +25,7 @@ type Config struct {
type ProverConfig struct {
ParamsPath string `json:"params_path"`
SeedPath string `json:"seed_path"`
ProveType message.ProveType `json:"prove_type,omitempty"` // 0: basic roller (default type), 1: aggregator roller
ProofType message.ProofType `json:"prove_type,omitempty"` // 0: basic roller (default type), 1: aggregator roller
DumpDir string `json:"dump_dir,omitempty"`
}
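Note that only the Go field is renamed; the JSON tag stays prove_type, so existing config files keep decoding. A minimal sketch, assuming encoding/json and this config package are imported (paths and value illustrative):

	raw := []byte(`{"params_path": "./params", "seed_path": "./seed", "prove_type": 1}`)
	var pc config.ProverConfig
	if err := json.Unmarshal(raw, &pc); err != nil {
		return err
	}
	// pc.ProofType now holds the legacy "prove_type" value; 1 selects the aggregator/batch path.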

View File

@@ -53,13 +53,13 @@ func NewProver(cfg *config.ProverConfig) (*Prover, error) {
// Prove calls the Rust FFI to generate a proof; if the first attempt fails, it tries again.
func (p *Prover) Prove(taskID string, traces []*types.BlockTrace) (*message.AggProof, error) {
var proofByt []byte
if p.cfg.ProveType == message.BasicProve {
if p.cfg.ProofType == message.ProofTypeChunk {
tracesByt, err := json.Marshal(traces)
if err != nil {
return nil, err
}
proofByt = p.prove(tracesByt)
} else if p.cfg.ProveType == message.AggregatorProve {
} else if p.cfg.ProofType == message.ProofTypeBatch {
// TODO: aggregator prove
}
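A caller-side sketch of the chunk path above; the package name, cfg, and traces are assumptions, and trace loading is elided:

	p, err := prover.NewProver(cfg) // cfg: *config.ProverConfig with ProofType == message.ProofTypeChunk
	if err != nil {
		return err
	}
	aggProof, err := p.Prove("task-1", traces) // traces: []*types.BlockTrace
	if err != nil {
		return err
	}
	_ = aggProof // *message.AggProof, handed back to the coordinator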

View File

@@ -98,8 +98,8 @@ func NewRoller(cfg *config.Config) (*Roller, error) {
}
// Type returns the roller type.
func (r *Roller) Type() message.ProveType {
return r.cfg.Prover.ProveType
func (r *Roller) Type() message.ProofType {
return r.cfg.Prover.ProofType
}
// PublicKey translates the public key to hex and returns it.

View File

@@ -8,6 +8,7 @@ import (
"strconv"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
@@ -68,6 +69,8 @@ func TestMonitorMetrics(t *testing.T) {
svrPort := strconv.FormatInt(port.Int64()+52000, 10)
coordinatorApp.RunApp(t, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)
time.Sleep(time.Second)
// Get coordinator monitor metrics.
resp, err := http.Get("http://localhost:" + svrPort)
assert.NoError(t, err)