mirror of https://github.com/scroll-tech/scroll.git
synced 2026-01-12 07:28:08 -05:00

Compare commits — 11 commits (SHA1):

faec817d34
72ef2cc80e
8f0690be41
672c2dd49c
3d9fce26b6
95124ce70e
f8d4855f26
e303fafefc
f00c400993
bb6428848f
df97200a41
@@ -75,7 +75,7 @@ func (c *CrossMsgFetcher) Start() {
			return
		case <-tick.C:
			c.mu.Lock()
			c.forwardFetchAndSaveMissingEvents(0)
			c.forwardFetchAndSaveMissingEvents(1)
			c.mu.Unlock()
		}
	}
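The tick-driven loop above serializes both fetch passes behind one mutex, so nothing else holding the same lock (e.g. reorg handling) can interleave with them. A minimal, self-contained sketch of the same ticker-plus-mutex pattern — the fetcher type and fetchMissing helper are illustrative, not the repo's API:

package main

import (
	"context"
	"sync"
	"time"
)

type fetcher struct {
	mu sync.Mutex
}

// fetchMissing stands in for forwardFetchAndSaveMissingEvents; the
// argument mirrors the 0/1 values passed in the diff above.
func (f *fetcher) fetchMissing(confirmation uint64) {}

func (f *fetcher) start(ctx context.Context) {
	tick := time.NewTicker(2 * time.Second)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-tick.C:
			// the mutex keeps both passes atomic with respect to
			// any other goroutine sharing the lock
			f.mu.Lock()
			f.fetchMissing(0)
			f.fetchMissing(1)
			f.mu.Unlock()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	(&fetcher{}).start(ctx)
}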
@@ -8,7 +8,6 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/log"
-	"github.com/jmoiron/sqlx"

	backendabi "bridge-history-api/abi"
	"bridge-history-api/db"
@@ -99,7 +98,7 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
		log.Warn("Failed to get l1 event logs", "err", err)
		return err
	}
-	depositL1CrossMsgs, msgHashes, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
+	depositL1CrossMsgs, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
	if err != nil {
		log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
		return err
@@ -120,11 +119,6 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
		dbTx.Rollback()
		log.Crit("l1FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
	}
-	err = updateL1CrossMsgMsgHash(ctx, dbTx, database, msgHashes)
-	if err != nil {
-		dbTx.Rollback()
-		log.Crit("l1FetchAndSaveEvents: Failed to update msgHash in L1 cross msg", "err", err)
-	}
	err = dbTx.Commit()
	if err != nil {
		// if we cannot insert into the DB, something must be wrong; an on-call member needs to handle the database manually
@@ -157,11 +151,12 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
		log.Warn("Failed to get l2 event logs", "err", err)
		return err
	}
-	depositL2CrossMsgs, msgHashes, relayedMsg, l2sentMsgs, err := utils.ParseBackendL2EventLogs(logs)
+	depositL2CrossMsgs, relayedMsg, l2SentMsgs, err := utils.ParseBackendL2EventLogs(logs)
	if err != nil {
		log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
		return err
	}

	dbTx, err := database.Beginx()
	if err != nil {
		log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
@@ -179,13 +174,7 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
		log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
	}

-	err = updateL2CrossMsgMsgHash(ctx, dbTx, database, msgHashes)
-	if err != nil {
-		dbTx.Rollback()
-		log.Crit("l2FetchAndSaveEvents: Failed to update msgHash in L2 cross msg", "err", err)
-	}
-
-	err = database.BatchInsertL2SentMsgDBTx(dbTx, l2sentMsgs)
+	err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs)
	if err != nil {
		dbTx.Rollback()
		log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
@@ -240,25 +229,3 @@ func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, datab
		}
		return nil
	}
-
-func updateL1CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
-	for _, msgHash := range msgHashes {
-		err := database.UpdateL1CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
-		if err != nil {
-			log.Error("updateL1CrossMsgMsgHash: can not update layer1 cross msg MsgHash", "layer1 hash", msgHash.TxHash, "err", err)
-			continue
-		}
-	}
-	return nil
-}
-
-func updateL2CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
-	for _, msgHash := range msgHashes {
-		err := database.UpdateL2CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
-		if err != nil {
-			log.Error("updateL2CrossMsgMsgHash: can not update layer2 cross msg MsgHash", "layer2 hash", msgHash.TxHash, "err", err)
-			continue
-		}
-	}
-	return nil
-}
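Both helpers could be deleted because the parse functions now return CrossMsg rows with MsgHash already populated, so each event is written in one insert instead of insert-then-update. A hedged sketch of the new single-pass flow, using the field names from this PR's orm package (toNamedMaps is a hypothetical helper, not from the PR):

package main

import (
	"github.com/jmoiron/sqlx"

	"bridge-history-api/db/orm"
)

// saveL1Events writes parsed events in one statement; msg_hash is already
// set on each row, so no follow-up UPDATE pass is needed.
func saveL1Events(dbTx *sqlx.Tx, msgs []*orm.CrossMsg) error {
	_, err := dbTx.NamedExec(`insert into cross_message(height, sender, target, amount, asset, msg_hash, layer1_hash, layer1_token, layer2_token, token_ids, msg_type)
		values(:height, :sender, :target, :amount, :asset, :msg_hash, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :msg_type);`,
		toNamedMaps(msgs))
	return err
}

// toNamedMaps is a hypothetical helper building the named-parameter maps.
func toNamedMaps(msgs []*orm.CrossMsg) []map[string]interface{} {
	maps := make([]map[string]interface{}, len(msgs))
	for i, m := range msgs {
		maps[i] = map[string]interface{}{
			"height": m.Height, "sender": m.Sender, "target": m.Target,
			"amount": m.Amount, "asset": m.Asset, "msg_hash": m.MsgHash,
			"layer1_hash": m.Layer1Hash, "layer1_token": m.Layer1Token,
			"layer2_token": m.Layer2Token, "token_ids": m.TokenIDs,
			"msg_type": m.MsgType,
		}
	}
	return maps
}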
@@ -3,7 +3,7 @@
create table cross_message
(
	id BIGSERIAL PRIMARY KEY,
-	msg_hash VARCHAR NOT NULL DEFAULT '',
+	msg_hash VARCHAR NOT NULL,
	height BIGINT NOT NULL,
	sender VARCHAR NOT NULL,
	target VARCHAR NOT NULL,
@@ -12,30 +12,30 @@ create table cross_message
	layer2_hash VARCHAR NOT NULL DEFAULT '',
	layer1_token VARCHAR NOT NULL DEFAULT '',
	layer2_token VARCHAR NOT NULL DEFAULT '',
-	token_id BIGINT NOT NULL DEFAULT 0,
	asset SMALLINT NOT NULL,
	msg_type SMALLINT NOT NULL,
-	is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
+	token_ids TEXT NOT NULL DEFAULT '',
+	token_amounts TEXT NOT NULL DEFAULT '',
	block_timestamp TIMESTAMP(0) DEFAULT NULL,
	created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
	updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
	deleted_at TIMESTAMP(0) DEFAULT NULL
);

+create unique index uk_msg_hash_msg_type
+	on cross_message (msg_hash, msg_type) where deleted_at IS NULL;

comment
on column cross_message.asset is 'ETH, ERC20, ERC721, ERC1155';

comment
on column cross_message.msg_type is 'unknown, l1msg, l2msg';

-comment
-on column cross_message.is_deleted is 'NotDeleted false, Deleted true';
-CREATE INDEX valid_l1_msg_index ON cross_message (layer1_hash, is_deleted);
+CREATE INDEX idx_l1_msg_index ON cross_message (layer1_hash, deleted_at);

-CREATE INDEX valid_l2_msg_index ON cross_message (layer2_hash, is_deleted);
+CREATE INDEX idx_l2_msg_index ON cross_message (layer2_hash, deleted_at);

-CREATE INDEX valid_height_index ON cross_message (height, msg_type, is_deleted);
+CREATE INDEX idx_height_msg_type_index ON cross_message (height, msg_type, deleted_at);

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
@@ -49,22 +49,6 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
ON cross_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

-CREATE OR REPLACE FUNCTION deleted_at_trigger()
-RETURNS TRIGGER AS $$
-BEGIN
-	IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
-		UPDATE cross_message SET deleted_at = NOW() WHERE id = NEW.id;
-	END IF;
-	RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE TRIGGER deleted_at_trigger
-AFTER UPDATE ON cross_message
-FOR EACH ROW
-EXECUTE FUNCTION deleted_at_trigger();

-- +goose StatementEnd

-- +goose Down
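All four migrations in this compare follow the same shape: drop the is_deleted flag and its deleted_at_trigger, and let deleted_at alone carry soft-delete state, with unique indexes made partial over live rows. A small sketch of what that buys (DSN and heights are illustrative): a soft-deleted row no longer occupies its unique key, so the same msg_hash can be re-inserted after a reorg with no trigger machinery at all:

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/bridge_history?sslmode=disable") // hypothetical DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// uniqueness is enforced only while deleted_at IS NULL...
	if _, err = db.Exec(`create unique index if not exists uk_demo
		on cross_message (msg_hash, msg_type) where deleted_at IS NULL;`); err != nil {
		log.Fatal(err)
	}
	// ...so soft-deleting reorged rows frees their keys for re-insertion
	if _, err = db.Exec(`update cross_message set deleted_at = current_timestamp
		where height > $1 and msg_type = $2;`, 1_000_000, 2); err != nil {
		log.Fatal(err)
	}
}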
@@ -7,17 +7,17 @@ create table relayed_msg
	height BIGINT NOT NULL,
	layer1_hash VARCHAR NOT NULL DEFAULT '',
	layer2_hash VARCHAR NOT NULL DEFAULT '',
-	is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
	created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
	updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
	deleted_at TIMESTAMP(0) DEFAULT NULL
);

-comment
-on column relayed_msg.is_deleted is 'NotDeleted, Deleted';
-create unique index relayed_msg_hash_uindex
-	on relayed_msg (msg_hash);
+create unique index uk_msg_hash_l1_hash_l2_hash
+	on relayed_msg (msg_hash, layer1_hash, layer2_hash) where deleted_at IS NULL;

+CREATE INDEX idx_l1_msg_relayed_msg ON relayed_msg (layer1_hash, deleted_at);

+CREATE INDEX idx_l2_msg_relayed_msg ON relayed_msg (layer2_hash, deleted_at);

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
@@ -31,22 +31,6 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
ON relayed_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

-CREATE OR REPLACE FUNCTION deleted_at_trigger()
-RETURNS TRIGGER AS $$
-BEGIN
-	IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
-		UPDATE relayed_msg SET deleted_at = NOW() WHERE id = NEW.id;
-	END IF;
-	RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE TRIGGER deleted_at_trigger
-AFTER UPDATE ON relayed_msg
-FOR EACH ROW
-EXECUTE FUNCTION deleted_at_trigger();

-- +goose StatementEnd

-- +goose Down
@@ -3,6 +3,7 @@
create table l2_sent_msg
(
	id BIGSERIAL PRIMARY KEY,
+	original_sender VARCHAR NOT NULL DEFAULT '',
	sender VARCHAR NOT NULL,
	target VARCHAR NOT NULL,
	value VARCHAR NOT NULL,
@@ -12,14 +13,16 @@ create table l2_sent_msg
	batch_index BIGINT NOT NULL DEFAULT 0,
	msg_proof TEXT NOT NULL DEFAULT '',
	msg_data TEXT NOT NULL DEFAULT '',
-	is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
	created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
	updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
	deleted_at TIMESTAMP(0) DEFAULT NULL
);

-comment
-on column l2_sent_msg.is_deleted is 'NotDeleted, Deleted';
+create unique index uk_msg_hash
+	on l2_sent_msg (msg_hash) where deleted_at IS NULL;

+create unique index uk_nonce
+	on l2_sent_msg (nonce) where deleted_at IS NULL;

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
@@ -33,22 +36,6 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l2_sent_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

-CREATE OR REPLACE FUNCTION deleted_at_trigger()
-RETURNS TRIGGER AS $$
-BEGIN
-	IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
-		UPDATE l2_sent_msg SET deleted_at = NOW() WHERE id = NEW.id;
-	END IF;
-	RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE TRIGGER deleted_at_trigger
-AFTER UPDATE ON l2_sent_msg
-FOR EACH ROW
-EXECUTE FUNCTION deleted_at_trigger();

-- +goose StatementEnd

-- +goose Down
@@ -8,12 +8,17 @@ create table rollup_batch
	start_block_number BIGINT NOT NULL,
	end_block_number BIGINT NOT NULL,
	batch_hash VARCHAR NOT NULL,
-	is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
	created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
	updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
	deleted_at TIMESTAMP(0) DEFAULT NULL
);

+create unique index uk_batch_index
+	on rollup_batch (batch_index) where deleted_at IS NULL;

+create unique index uk_batch_hash
+	on rollup_batch (batch_hash) where deleted_at IS NULL;

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
@@ -26,21 +31,6 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
ON rollup_batch FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

-CREATE OR REPLACE FUNCTION deleted_at_trigger()
-RETURNS TRIGGER AS $$
-BEGIN
-	IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
-		UPDATE rollup_batch SET deleted_at = NOW() WHERE id = NEW.id;
-	END IF;
-	RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE TRIGGER deleted_at_trigger
-AFTER UPDATE ON rollup_batch
-FOR EACH ROW
-EXECUTE FUNCTION deleted_at_trigger();

-- +goose StatementEnd

-- +goose Down
@@ -2,7 +2,6 @@ package orm

import (
	"database/sql"
-	"fmt"

	"github.com/ethereum/go-ethereum/log"
	"github.com/jmoiron/sqlx"
@@ -40,14 +39,6 @@ func (b *rollupBatchOrm) BatchInsertRollupBatchDBTx(dbTx *sqlx.Tx, batches []*Ro
			"start_block_number": batch.StartBlockNumber,
			"end_block_number":   batch.EndBlockNumber,
		}
-		var exists bool
-		err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM rollup_batch WHERE batch_index = $1 AND NOT is_deleted)`, batch.BatchIndex).Scan(&exists)
-		if err != nil {
-			return err
-		}
-		if exists {
-			return fmt.Errorf("BatchInsertRollupBatchDBTx: batch index %v already exists at height %v", batch.BatchIndex, batch.CommitHeight)
-		}
	}
	_, err = dbTx.NamedExec(`insert into rollup_batch(commit_height, batch_index, batch_hash, start_block_number, end_block_number) values(:commit_height, :batch_index, :batch_hash, :start_block_number, :end_block_number);`, batchMaps)
	if err != nil {
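With uk_batch_index now enforcing uniqueness among live rows at the database level, the per-row EXISTS pre-check becomes redundant. One plausible way to surface duplicates under the new index — not necessarily what this PR does — is to map PostgreSQL's unique_violation (SQLSTATE 23505) to a domain error:

package main

import (
	"errors"
	"fmt"

	"github.com/jmoiron/sqlx"
	"github.com/lib/pq"
)

func insertBatches(dbTx *sqlx.Tx, batchMaps []map[string]interface{}) error {
	_, err := dbTx.NamedExec(`insert into rollup_batch(commit_height, batch_index, batch_hash, start_block_number, end_block_number)
		values(:commit_height, :batch_index, :batch_hash, :start_block_number, :end_block_number);`, batchMaps)
	var pqErr *pq.Error
	if errors.As(err, &pqErr) && pqErr.Code == "23505" { // unique_violation
		// the partial index rejected a duplicate live batch_index/batch_hash
		return fmt.Errorf("rollup batch already indexed: %w", err)
	}
	return err
}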
@@ -40,31 +40,24 @@ const (

// CrossMsg represents a cross message from layer 1 to layer 2
type CrossMsg struct {
-	ID          uint64     `json:"id" db:"id"`
-	MsgHash     string     `json:"msg_hash" db:"msg_hash"`
-	Height      uint64     `json:"height" db:"height"`
-	Sender      string     `json:"sender" db:"sender"`
-	Target      string     `json:"target" db:"target"`
-	Amount      string     `json:"amount" db:"amount"`
-	Layer1Hash  string     `json:"layer1_hash" db:"layer1_hash"`
-	Layer2Hash  string     `json:"layer2_hash" db:"layer2_hash"`
-	Layer1Token string     `json:"layer1_token" db:"layer1_token"`
-	Layer2Token string     `json:"layer2_token" db:"layer2_token"`
-	TokenID     uint64     `json:"token_id" db:"token_id"`
-	Asset       int        `json:"asset" db:"asset"`
-	MsgType     int        `json:"msg_type" db:"msg_type"`
-	IsDeleted   bool       `json:"is_deleted" db:"is_deleted"`
-	Timestamp   *time.Time `json:"timestamp" db:"block_timestamp"`
-	CreatedAt   *time.Time `json:"created_at" db:"created_at"`
-	UpdatedAt   *time.Time `json:"updated_at" db:"updated_at"`
-	DeletedAt   *time.Time `json:"deleted_at" db:"deleted_at"`
+	ID           uint64     `json:"id" db:"id"`
+	MsgHash      string     `json:"msg_hash" db:"msg_hash"`
+	Height       uint64     `json:"height" db:"height"`
+	Sender       string     `json:"sender" db:"sender"`
+	Target       string     `json:"target" db:"target"`
+	Amount       string     `json:"amount" db:"amount"`
+	Layer1Hash   string     `json:"layer1_hash" db:"layer1_hash"`
+	Layer2Hash   string     `json:"layer2_hash" db:"layer2_hash"`
+	Layer1Token  string     `json:"layer1_token" db:"layer1_token"`
+	Layer2Token  string     `json:"layer2_token" db:"layer2_token"`
+	TokenIDs     string     `json:"token_ids" db:"token_ids"`
+	TokenAmounts string     `json:"token_amounts" db:"token_amounts"`
+	Asset        int        `json:"asset" db:"asset"`
+	MsgType      int        `json:"msg_type" db:"msg_type"`
+	Timestamp    *time.Time `json:"timestamp" db:"block_timestamp"`
+	CreatedAt    *time.Time `json:"created_at" db:"created_at"`
+	UpdatedAt    *time.Time `json:"updated_at" db:"updated_at"`
+	DeletedAt    *time.Time `json:"deleted_at" db:"deleted_at"`
}

-type RelayedMsg struct {
-	MsgHash    string `json:"msg_hash" db:"msg_hash"`
-	Height     uint64 `json:"height" db:"height"`
-	Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
-	Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
-}
-
// L1CrossMsgOrm provides operations on l1_cross_message table
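Switching token_id BIGINT to token_ids/token_amounts TEXT leaves room for ERC1155 batch transfers, where one event carries several IDs and amounts. This PR itself only stores a single TokenID.String() per event; a comma-joined encoding like the following is one plausible convention for the multi-value case, not something the PR defines:

package main

import (
	"fmt"
	"math/big"
	"strings"
)

// joinBigInts serializes several token IDs or amounts into one TEXT column.
func joinBigInts(xs []*big.Int) string {
	parts := make([]string, len(xs))
	for i, x := range xs {
		parts[i] = x.String()
	}
	return strings.Join(parts, ",")
}

func main() {
	ids := []*big.Int{big.NewInt(1), big.NewInt(7)}
	amounts := []*big.Int{big.NewInt(100), big.NewInt(2)}
	fmt.Println(joinBigInts(ids), joinBigInts(amounts)) // "1,7" "100,2"
}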
@@ -4,7 +4,6 @@ import (
	"context"
	"database/sql"
	"errors"
-	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
|
||||
|
||||
func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, error) {
|
||||
result := &CrossMsg{}
|
||||
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer1_hash = $1 AND msg_type = $2 AND NOT is_deleted;`, l1Hash.String(), Layer1Msg)
|
||||
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer1_hash = $1 AND msg_type = $2 AND deleted_at IS NULL;`, l1Hash.String(), Layer1Msg)
|
||||
if err := row.StructScan(result); err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, nil
|
||||
@@ -37,7 +36,7 @@ func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, erro
// Warning: returns an empty slice if no data is found
func (l *l1CrossMsgOrm) GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error) {
	var results []*CrossMsg
-	rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = 1 AND NOT is_deleted;`, sender.String(), Layer1Msg)
+	rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = 1 AND deleted_at IS NULL;`, sender.String(), Layer1Msg)

	for rows.Next() {
		msg := &CrossMsg{}
@@ -66,22 +65,15 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
			"target":       msg.Target,
			"amount":       msg.Amount,
			"asset":        msg.Asset,
+			"msg_hash":     msg.MsgHash,
			"layer1_hash":  msg.Layer1Hash,
			"layer1_token": msg.Layer1Token,
			"layer2_token": msg.Layer2Token,
-			"token_id":     msg.TokenID,
+			"token_ids":    msg.TokenIDs,
			"msg_type":     Layer1Msg,
		}
-		var exists bool
-		err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM cross_message WHERE layer1_hash = $1 AND NOT is_deleted)`, msg.Layer1Hash).Scan(&exists)
-		if err != nil {
-			return err
-		}
-		if exists {
-			return fmt.Errorf("BatchInsertL1CrossMsgDBTx: l1 cross msg layer1Hash %v already exists at height %v", msg.Layer1Hash, msg.Height)
-		}
	}
-	_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_id, amount, msg_type) values(:height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type);`, messageMaps)
+	_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, amount, asset, msg_hash, layer1_hash, layer1_token, layer2_token, token_ids, msg_type) values(:height, :sender, :target, :amount, :asset, :msg_hash, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :msg_type);`, messageMaps)
	if err != nil {
		log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "err", err)
		return err
@@ -92,7 +84,7 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro

// UpdateL1CrossMsgHashDBTx updates the l1 cross msg hash in db; no need to check msg_type since layer1_hash won't be empty if it's a layer1 msg
func (l *l1CrossMsgOrm) UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l1Hash, msgHash common.Hash) error {
-	if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer1_hash = ? AND NOT is_deleted;"), msgHash.String(), l1Hash.String()); err != nil {
+	if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l1Hash.String()); err != nil {
		return err
	}
	return nil
@@ -100,7 +92,7 @@ func (l *l1CrossMsgOrm) UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx
}

func (l *l1CrossMsgOrm) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error {
-	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.l1_cross_message set msg_hash = ? where layer1_hash = ? AND NOT is_deleted;"), msgHash.String(), l1Hash.String()); err != nil {
+	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.l1_cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l1Hash.String()); err != nil {
		return err
	}
	return nil
@@ -108,7 +100,7 @@ func (l *l1CrossMsgOrm) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHas
}

func (l *l1CrossMsgOrm) GetLatestL1ProcessedHeight() (int64, error) {
-	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND NOT is_deleted ORDER BY id DESC LIMIT 1;`, Layer1Msg)
+	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND deleted_at IS NULL ORDER BY id DESC LIMIT 1;`, Layer1Msg)
	var result sql.NullInt64
	if err := row.Scan(&result); err != nil {
		if err == sql.ErrNoRows || !result.Valid {
@@ -123,21 +115,21 @@ func (l *l1CrossMsgOrm) GetLatestL1ProcessedHeight() (int64, error) {
}

func (l *l1CrossMsgOrm) DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
-	if _, err := l.db.Exec(`UPDATE cross_message SET is_deleted = true WHERE height > $1 AND msg_type = $2;`, height, Layer1Msg); err != nil {
+	if _, err := l.db.Exec(`UPDATE cross_message SET deleted_at = current_timestamp WHERE height > $1 AND msg_type = $2;`, height, Layer1Msg); err != nil {
		return err
	}
	return nil
}

func (l *l1CrossMsgOrm) UpdateL1BlockTimestamp(height uint64, timestamp time.Time) error {
-	if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND NOT is_deleted`, timestamp, height, Layer1Msg); err != nil {
+	if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND deleted_at IS NULL`, timestamp, height, Layer1Msg); err != nil {
		return err
	}
	return nil
}

func (l *l1CrossMsgOrm) GetL1EarliestNoBlockTimestampHeight() (uint64, error) {
-	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND NOT is_deleted ORDER BY height ASC LIMIT 1;`, Layer1Msg)
+	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND deleted_at IS NULL ORDER BY height ASC LIMIT 1;`, Layer1Msg)
	var result uint64
	if err := row.Scan(&result); err != nil {
		if err == sql.ErrNoRows {
@@ -4,7 +4,6 @@ import (
	"context"
	"database/sql"
	"errors"
-	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
@@ -23,7 +22,7 @@ func NewL2CrossMsgOrm(db *sqlx.DB) L2CrossMsgOrm {

func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error) {
	result := &CrossMsg{}
-	row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
+	row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND deleted_at IS NULL;`, l2Hash.String())
	if err := row.StructScan(result); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, nil
@@ -37,7 +36,7 @@ func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, erro
// Warning: returns an empty slice if no data is found
func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error) {
	var results []*CrossMsg
-	rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND NOT is_deleted;`, sender.String(), Layer2Msg)
+	rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND deleted_at IS NULL;`, sender.String(), Layer2Msg)

	for rows.Next() {
		msg := &CrossMsg{}
@@ -56,7 +55,7 @@ func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossM
}

func (l *l2CrossMsgOrm) DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error {
-	_, err := dbTx.Exec(`UPDATE cross_message SET is_deleted = true where height > $1 AND msg_type = $2 ;`, height, Layer2Msg)
+	_, err := dbTx.Exec(`UPDATE cross_message SET deleted_at = current_timestamp where height > $1 AND msg_type = $2 ;`, height, Layer2Msg)
	if err != nil {
		log.Error("DeleteL1CrossMsgAfterHeightDBTx: failed to delete", "height", height, "err", err)
		return err
@@ -72,29 +71,21 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
	var err error
	messageMaps := make([]map[string]interface{}, len(messages))
	for i, msg := range messages {

		messageMaps[i] = map[string]interface{}{
			"height":       msg.Height,
			"sender":       msg.Sender,
			"target":       msg.Target,
			"asset":        msg.Asset,
+			"msg_hash":     msg.MsgHash,
			"layer2_hash":  msg.Layer2Hash,
			"layer1_token": msg.Layer1Token,
			"layer2_token": msg.Layer2Token,
-			"token_id":     msg.TokenID,
+			"token_ids":    msg.TokenIDs,
			"amount":       msg.Amount,
			"msg_type":     Layer2Msg,
		}
-		var exists bool
-		err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted)`, msg.Layer2Hash).Scan(&exists)
-		if err != nil {
-			return err
-		}
-		if exists {
-			return fmt.Errorf("BatchInsertL2CrossMsgDBTx: l2 cross msg layer2Hash %v already exists at height %v", msg.Layer2Hash, msg.Height)
-		}
	}
-	_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_id, amount, msg_type) values(:height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type);`, messageMaps)
+	_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, msg_hash, layer2_hash, layer1_token, layer2_token, token_ids, amount, msg_type) values(:height, :sender, :target, :asset, :msg_hash, :layer2_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type);`, messageMaps)
	if err != nil {
		log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "err", err)
		return err
@@ -103,21 +94,21 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
}

func (l *l2CrossMsgOrm) UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error {
-	if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer2_hash = ? AND NOT is_deleted;"), msgHash.String(), l2Hash.String()); err != nil {
+	if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update cross_message set msg_hash = ? where layer2_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l2Hash.String()); err != nil {
		return err
	}
	return nil
}

func (l *l2CrossMsgOrm) UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error {
-	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer2_hash = ? AND NOT is_deleted;"), msgHash.String(), l2Hash.String()); err != nil {
+	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update cross_message set msg_hash = ? where layer2_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l2Hash.String()); err != nil {
		return err
	}
	return nil
}

func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
-	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND NOT is_deleted ORDER BY id DESC LIMIT 1;`, Layer2Msg)
+	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND deleted_at IS NULL ORDER BY id DESC LIMIT 1;`, Layer2Msg)
	var result sql.NullInt64
	if err := row.Scan(&result); err != nil {
		if err == sql.ErrNoRows || !result.Valid {
@@ -132,14 +123,14 @@ func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
}

func (l *l2CrossMsgOrm) UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error {
-	if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND NOT is_deleted`, timestamp, height, Layer2Msg); err != nil {
+	if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND deleted_at IS NULL`, timestamp, height, Layer2Msg); err != nil {
		return err
	}
	return nil
}

func (l *l2CrossMsgOrm) GetL2EarliestNoBlockTimestampHeight() (uint64, error) {
-	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND NOT is_deleted ORDER BY height ASC LIMIT 1;`, Layer2Msg)
+	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND deleted_at IS NULL ORDER BY height ASC LIMIT 1;`, Layer2Msg)
	var result uint64
	if err := row.Scan(&result); err != nil {
		if err == sql.ErrNoRows {
@@ -3,7 +3,6 @@ package orm
import (
	"context"
	"database/sql"
-	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/log"
@@ -11,20 +10,20 @@ import (
)

type L2SentMsg struct {
-	ID         uint64     `json:"id" db:"id"`
-	MsgHash    string     `json:"msg_hash" db:"msg_hash"`
-	Sender     string     `json:"sender" db:"sender"`
-	Target     string     `json:"target" db:"target"`
-	Value      string     `json:"value" db:"value"`
-	Height     uint64     `json:"height" db:"height"`
-	Nonce      uint64     `json:"nonce" db:"nonce"`
-	BatchIndex uint64     `json:"batch_index" db:"batch_index"`
-	MsgProof   string     `json:"msg_proof" db:"msg_proof"`
-	MsgData    string     `json:"msg_data" db:"msg_data"`
-	IsDeleted  bool       `json:"is_deleted" db:"is_deleted"`
-	CreatedAt  *time.Time `json:"created_at" db:"created_at"`
-	UpdatedAt  *time.Time `json:"updated_at" db:"updated_at"`
-	DeletedAt  *time.Time `json:"deleted_at" db:"deleted_at"`
+	ID             uint64     `json:"id" db:"id"`
+	OriginalSender string     `json:"original_sender" db:"original_sender"`
+	MsgHash        string     `json:"msg_hash" db:"msg_hash"`
+	Sender         string     `json:"sender" db:"sender"`
+	Target         string     `json:"target" db:"target"`
+	Value          string     `json:"value" db:"value"`
+	Height         uint64     `json:"height" db:"height"`
+	Nonce          uint64     `json:"nonce" db:"nonce"`
+	BatchIndex     uint64     `json:"batch_index" db:"batch_index"`
+	MsgProof       string     `json:"msg_proof" db:"msg_proof"`
+	MsgData        string     `json:"msg_data" db:"msg_data"`
+	CreatedAt      *time.Time `json:"created_at" db:"created_at"`
+	UpdatedAt      *time.Time `json:"updated_at" db:"updated_at"`
+	DeletedAt      *time.Time `json:"deleted_at" db:"deleted_at"`
}

type l2SentMsgOrm struct {
@@ -38,7 +37,7 @@ func NewL2SentMsgOrm(db *sqlx.DB) L2SentMsgOrm {

func (l *l2SentMsgOrm) GetL2SentMsgByHash(msgHash string) (*L2SentMsg, error) {
	result := &L2SentMsg{}
-	row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE msg_hash = $1 AND NOT is_deleted;`, msgHash)
+	row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msgHash)
	if err := row.StructScan(result); err != nil {
		return nil, err
	}
@@ -53,35 +52,28 @@ func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2Sen
	messageMaps := make([]map[string]interface{}, len(messages))
	for i, msg := range messages {
		messageMaps[i] = map[string]interface{}{
-			"sender":      msg.Sender,
-			"target":      msg.Target,
-			"value":       msg.Value,
-			"msg_hash":    msg.MsgHash,
-			"height":      msg.Height,
-			"nonce":       msg.Nonce,
-			"batch_index": msg.BatchIndex,
-			"msg_proof":   msg.MsgProof,
-			"msg_data":    msg.MsgData,
-		}
-		var exists bool
-		err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM l2_sent_msg WHERE (msg_hash = $1 OR nonce = $2) AND NOT is_deleted)`, msg.MsgHash, msg.Nonce).Scan(&exists)
-		if err != nil {
-			return err
-		}
-		if exists {
-			return fmt.Errorf("BatchInsertL2SentMsgDBTx: l2 sent msg_hash %v already exists at height %v", msg.MsgHash, msg.Height)
+			"original_sender": msg.OriginalSender,
+			"sender":          msg.Sender,
+			"target":          msg.Target,
+			"value":           msg.Value,
+			"msg_hash":        msg.MsgHash,
+			"height":          msg.Height,
+			"nonce":           msg.Nonce,
+			"batch_index":     msg.BatchIndex,
+			"msg_proof":       msg.MsgProof,
+			"msg_data":        msg.MsgData,
		}
	}
-	_, err = dbTx.NamedExec(`insert into l2_sent_msg(sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
+	_, err = dbTx.NamedExec(`insert into l2_sent_msg(original_sender, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:original_sender, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
	if err != nil {
-		log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "msg_Hash", "err", err)
+		log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err)
		return err
	}
	return err
}

func (l *l2SentMsgOrm) GetLatestSentMsgHeightOnL2() (int64, error) {
-	row := l.db.QueryRow(`SELECT height FROM l2_sent_msg WHERE NOT is_deleted ORDER BY nonce DESC LIMIT 1;`)
+	row := l.db.QueryRow(`SELECT height FROM l2_sent_msg WHERE deleted_at IS NULL ORDER BY nonce DESC LIMIT 1;`)
	var result sql.NullInt64
	if err := row.Scan(&result); err != nil {
		if err == sql.ErrNoRows || !result.Valid {
@@ -96,14 +88,14 @@ func (l *l2SentMsgOrm) GetLatestSentMsgHeightOnL2() (int64, error) {
}

func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error {
-	if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND NOT is_deleted;"), proof, batch_index, msgHash); err != nil {
+	if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND deleted_at IS NULL;"), proof, batch_index, msgHash); err != nil {
		return err
	}
	return nil
}

func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {
-	row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE msg_proof != '' AND NOT is_deleted ORDER BY batch_index DESC LIMIT 1;`)
+	row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE batch_index != 0 AND deleted_at IS NULL ORDER BY batch_index DESC LIMIT 1;`)
	var result sql.NullInt64
	if err := row.Scan(&result); err != nil {
		if err == sql.ErrNoRows || !result.Valid {
@@ -119,7 +111,7 @@ func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {

func (l *l2SentMsgOrm) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error) {
	var results []*L2SentMsg
-	rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE height >= $1 AND height <= $2 AND NOT is_deleted ORDER BY nonce ASC;`, startHeight, endHeight)
+	rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE height >= $1 AND height <= $2 AND deleted_at IS NULL ORDER BY nonce ASC;`, startHeight, endHeight)
	if err != nil {
		return nil, err
	}
@@ -135,7 +127,7 @@ func (l *l2SentMsgOrm) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight u

func (l *l2SentMsgOrm) GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error) {
	result := &L2SentMsg{}
-	row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE nonce = $1 AND NOT is_deleted;`, nonce)
+	row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE nonce = $1 AND deleted_at IS NULL;`, nonce)
	err := row.StructScan(result)
	if err != nil {
		return nil, err
@@ -145,7 +137,7 @@ func (l *l2SentMsgOrm) GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error)

func (l *l2SentMsgOrm) GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error) {
	result := &L2SentMsg{}
-	row := l.db.QueryRowx(`select * from l2_sent_msg where height <= $1 AND NOT is_deleted order by nonce desc limit 1`, endBlockNumber)
+	row := l.db.QueryRowx(`select * from l2_sent_msg where height <= $1 AND deleted_at IS NULL order by nonce desc limit 1`, endBlockNumber)
	err := row.StructScan(result)
	if err != nil {
		return nil, err
@@ -154,6 +146,6 @@ func (l *l2SentMsgOrm) GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2Sen
}

func (l *l2SentMsgOrm) DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
-	_, err := dbTx.Exec(`UPDATE l2_sent_msg SET is_deleted = true WHERE height > $1;`, height)
+	_, err := dbTx.Exec(`UPDATE l2_sent_msg SET deleted_at = current_timestamp WHERE height > $1;`, height)
	return err
}
@@ -8,6 +8,13 @@ import (
	"github.com/jmoiron/sqlx"
)

+type RelayedMsg struct {
+	MsgHash    string `json:"msg_hash" db:"msg_hash"`
+	Height     uint64 `json:"height" db:"height"`
+	Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
+	Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
+}
+
type relayedMsgOrm struct {
	db *sqlx.DB
}
@@ -33,7 +40,7 @@ func (l *relayedMsgOrm) BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*Rel
	}
	_, err = dbTx.NamedExec(`insert into relayed_msg(msg_hash, height, layer1_hash, layer2_hash) values(:msg_hash, :height, :layer1_hash, :layer2_hash);`, messageMaps)
	if err != nil {
-		log.Error("BatchInsertRelayedMsgDBTx: failed to insert l1 cross msgs", "msg_Hashe", "err", err)
+		log.Error("BatchInsertRelayedMsgDBTx: failed to insert relayed msgs", "err", err)
		return err
	}
	return nil
@@ -41,7 +48,7 @@ func (l *relayedMsgOrm) BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*Rel

func (l *relayedMsgOrm) GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error) {
	result := &RelayedMsg{}
-	row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND NOT is_deleted;`, msg_hash)
+	row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msg_hash)
	if err := row.StructScan(result); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, nil
@@ -52,7 +59,7 @@ func (l *relayedMsgOrm) GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error
}

func (l *relayedMsgOrm) GetLatestRelayedHeightOnL1() (int64, error) {
-	row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer1_hash != '' AND NOT is_deleted ORDER BY height DESC LIMIT 1;`)
+	row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer1_hash != '' AND deleted_at IS NULL ORDER BY height DESC LIMIT 1;`)
	var result sql.NullInt64
	if err := row.Scan(&result); err != nil {
		if err == sql.ErrNoRows || !result.Valid {
@@ -67,7 +74,7 @@ func (l *relayedMsgOrm) GetLatestRelayedHeightOnL1() (int64, error) {
}

func (l *relayedMsgOrm) GetLatestRelayedHeightOnL2() (int64, error) {
-	row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer2_hash != '' AND NOT is_deleted ORDER BY height DESC LIMIT 1;`)
+	row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer2_hash != '' AND deleted_at IS NULL ORDER BY height DESC LIMIT 1;`)
	var result sql.NullInt64
	if err := row.Scan(&result); err != nil {
		if err == sql.ErrNoRows || !result.Valid {
@@ -82,11 +89,11 @@ func (l *relayedMsgOrm) GetLatestRelayedHeightOnL2() (int64, error) {
}

func (l *relayedMsgOrm) DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
-	_, err := dbTx.Exec(`UPDATE relayed_msg SET is_deleted = true WHERE height > $1 AND layer1_hash != '';`, height)
+	_, err := dbTx.Exec(`UPDATE relayed_msg SET deleted_at = current_timestamp WHERE height > $1 AND layer1_hash != '';`, height)
	return err
}

func (l *relayedMsgOrm) DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
-	_, err := dbTx.Exec(`UPDATE relayed_msg SET is_deleted = true WHERE height > $1 AND layer2_hash != '';`, height)
+	_, err := dbTx.Exec(`UPDATE relayed_msg SET deleted_at = current_timestamp WHERE height > $1 AND layer2_hash != '';`, height)
	return err
}
@@ -68,7 +68,7 @@ func (o *ormFactory) Beginx() (*sqlx.Tx, error) {

func (o *ormFactory) GetTotalCrossMsgCountByAddress(sender string) (uint64, error) {
	var count uint64
-	row := o.DB.QueryRowx(`SELECT COUNT(*) FROM cross_message WHERE sender = $1 AND NOT is_deleted;`, sender)
+	row := o.DB.QueryRowx(`SELECT COUNT(*) FROM cross_message WHERE sender = $1 AND deleted_at IS NULL;`, sender)
	if err := row.Scan(&count); err != nil {
		return 0, err
	}
@@ -78,7 +78,7 @@ func (o *ormFactory) GetTotalCrossMsgCountByAddress(sender string) (uint64, erro
func (o *ormFactory) GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error) {
	para := sender
	var results []*orm.CrossMsg
-	rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND NOT is_deleted ORDER BY block_timestamp DESC NULLS FIRST, id DESC LIMIT $2 OFFSET $3;`, para, limit, offset)
+	rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND deleted_at IS NULL ORDER BY block_timestamp DESC NULLS FIRST, id DESC LIMIT $2 OFFSET $3;`, para, limit, offset)
	if err != nil || rows == nil {
		return nil, err
	}
@@ -77,7 +77,7 @@ func GetCrossTxClaimInfo(msgHash string, db db.OrmFactory) *UserClaimInfo {
		Value:      l2sentMsg.Value,
		Nonce:      strconv.FormatUint(l2sentMsg.Nonce, 10),
		Message:    l2sentMsg.MsgData,
-		Proof:      l2sentMsg.MsgProof,
+		Proof:      "0x" + l2sentMsg.MsgProof,
		BatchHash:  batch.BatchHash,
		BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
	}
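Prefixing the stored proof with "0x" matters to API consumers: go-ethereum's hexutil.Decode, for example, rejects hex strings without the prefix, so returning the raw column value would push that fix onto every client. A self-contained demonstration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// the bare column value fails to decode
	if _, err := hexutil.Decode("deadbeef"); err != nil {
		fmt.Println(err) // "hex string without 0x prefix"
	}
	// the prefixed form returned by GetCrossTxClaimInfo decodes cleanly
	b, _ := hexutil.Decode("0xdeadbeef")
	fmt.Printf("%x\n", b) // deadbeef
}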
@@ -3,7 +3,6 @@ package utils

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
@@ -13,11 +12,6 @@ import (
	"bridge-history-api/db/orm"
)

-type MsgHashWrapper struct {
-	MsgHash common.Hash
-	TxHash  common.Hash
-}
-
type CachedParsedTxCalldata struct {
	CallDataIndex uint64
	BatchIndices  []uint64
@@ -25,13 +19,13 @@ type CachedParsedTxCalldata struct {
	EndBlocks     []uint64
}

-func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrapper, []*orm.RelayedMsg, error) {
+func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, error) {
	// Need to use the contract ABI to parse the event logs
	// Can only be tested after we have our contracts set up

	var l1CrossMsg []*orm.CrossMsg
	var relayedMsgs []*orm.RelayedMsg
-	var msgHashes []MsgHashWrapper
+	var msgHash string
	for _, vlog := range logs {
		switch vlog.Topics[0] {
		case backendabi.L1DepositETHSig:
@@ -39,7 +33,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
			err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
			if err != nil {
				log.Warn("Failed to unpack DepositETH event", "err", err)
-				return l1CrossMsg, msgHashes, relayedMsgs, err
+				return l1CrossMsg, relayedMsgs, err
			}
			l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
				Height: vlog.BlockNumber,
@@ -48,13 +42,14 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
				Amount:     event.Amount.String(),
				Asset:      int(orm.ETH),
				Layer1Hash: vlog.TxHash.Hex(),
+				MsgHash:    msgHash,
			})
		case backendabi.L1DepositERC20Sig:
			event := backendabi.ERC20MessageEvent{}
			err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
			if err != nil {
				log.Warn("Failed to unpack DepositERC20 event", "err", err)
-				return l1CrossMsg, msgHashes, relayedMsgs, err
+				return l1CrossMsg, relayedMsgs, err
			}
			l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
				Height: vlog.BlockNumber,
@@ -65,13 +60,14 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
				Layer1Hash:  vlog.TxHash.Hex(),
				Layer1Token: event.L1Token.Hex(),
				Layer2Token: event.L2Token.Hex(),
+				MsgHash:     msgHash,
			})
		case backendabi.L1DepositERC721Sig:
			event := backendabi.ERC721MessageEvent{}
			err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
			if err != nil {
				log.Warn("Failed to unpack DepositERC721 event", "err", err)
-				return l1CrossMsg, msgHashes, relayedMsgs, err
+				return l1CrossMsg, relayedMsgs, err
			}
			l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
				Height: vlog.BlockNumber,
@@ -81,14 +77,15 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
				Layer1Hash:  vlog.TxHash.Hex(),
				Layer1Token: event.L1Token.Hex(),
				Layer2Token: event.L2Token.Hex(),
-				TokenID:     event.TokenID.Uint64(),
+				TokenIDs:    event.TokenID.String(),
+				MsgHash:     msgHash,
			})
		case backendabi.L1DepositERC1155Sig:
			event := backendabi.ERC1155MessageEvent{}
			err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
			if err != nil {
				log.Warn("Failed to unpack DepositERC1155 event", "err", err)
-				return l1CrossMsg, msgHashes, relayedMsgs, err
+				return l1CrossMsg, relayedMsgs, err
			}
			l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
				Height: vlog.BlockNumber,
@@ -98,26 +95,26 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
				Layer1Hash:  vlog.TxHash.Hex(),
				Layer1Token: event.L1Token.Hex(),
				Layer2Token: event.L2Token.Hex(),
-				TokenID:     event.TokenID.Uint64(),
+				TokenIDs:    event.TokenID.String(),
				Amount:      event.Amount.String(),
+				MsgHash:     msgHash,
			})
		case backendabi.L1SentMessageEventSignature:
			event := backendabi.L1SentMessageEvent{}
			err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
			if err != nil {
				log.Warn("Failed to unpack SentMessage event", "err", err)
-				return l1CrossMsg, msgHashes, relayedMsgs, err
+				return l1CrossMsg, relayedMsgs, err
			}
-			msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
-			msgHashes = append(msgHashes, MsgHashWrapper{
-				MsgHash: msgHash,
-				TxHash:  vlog.TxHash})
+			// since every deposit event is emitted after its SentMessage event, this msg_hash can be used as the next deposit event's msg_hash
+			msgHash = ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).Hex()

		case backendabi.L1RelayedMessageEventSignature:
			event := backendabi.L1RelayedMessageEvent{}
			err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
			if err != nil {
				log.Warn("Failed to unpack RelayedMessage event", "err", err)
-				return l1CrossMsg, msgHashes, relayedMsgs, err
+				return l1CrossMsg, relayedMsgs, err
			}
			relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
				MsgHash: event.MessageHash.String(),
@@ -128,18 +125,17 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
		}

	}
-	return l1CrossMsg, msgHashes, relayedMsgs, nil
+	return l1CrossMsg, relayedMsgs, nil
}

-func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrapper, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
+func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
	// Need to use the contract ABI to parse the event logs
	// Can only be tested after we have our contracts set up

	var l2CrossMsg []*orm.CrossMsg
	// this is used to confirm finalized l1 msgs
	var relayedMsgs []*orm.RelayedMsg
-	var l2SentMsg []*orm.L2SentMsg
-	var msgHashes []MsgHashWrapper
+	var l2SentMsgs []*orm.L2SentMsg
	for _, vlog := range logs {
		switch vlog.Topics[0] {
		case backendabi.L2WithdrawETHSig:
@@ -147,8 +143,9 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
			err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
			if err != nil {
				log.Warn("Failed to unpack WithdrawETH event", "err", err)
-				return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
+				return l2CrossMsg, relayedMsgs, l2SentMsgs, err
			}
+			l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
			l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
				Height: vlog.BlockNumber,
				Sender: event.From.String(),
@@ -156,14 +153,16 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
				Amount:     event.Amount.String(),
				Asset:      int(orm.ETH),
				Layer2Hash: vlog.TxHash.Hex(),
+				MsgHash:    l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
			})
		case backendabi.L2WithdrawERC20Sig:
			event := backendabi.ERC20MessageEvent{}
			err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
			if err != nil {
				log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
-				return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
+				return l2CrossMsg, relayedMsgs, l2SentMsgs, err
			}
+			l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
			l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
				Height: vlog.BlockNumber,
				Sender: event.From.String(),
@@ -179,8 +178,9 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
			err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
			if err != nil {
				log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
-				return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
+				return l2CrossMsg, relayedMsgs, l2SentMsgs, err
			}
+			l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
			l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
				Height: vlog.BlockNumber,
				Sender: event.From.String(),
@@ -189,15 +189,16 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
				Layer2Hash:  vlog.TxHash.Hex(),
				Layer1Token: event.L1Token.Hex(),
				Layer2Token: event.L2Token.Hex(),
-				TokenID:     event.TokenID.Uint64(),
+				TokenIDs:    event.TokenID.String(),
			})
		case backendabi.L2WithdrawERC1155Sig:
			event := backendabi.ERC1155MessageEvent{}
			err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
			if err != nil {
				log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
-				return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
+				return l2CrossMsg, relayedMsgs, l2SentMsgs, err
			}
+			l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
			l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
				Height: vlog.BlockNumber,
				Sender: event.From.String(),
@@ -206,7 +207,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
				Layer2Hash:  vlog.TxHash.Hex(),
				Layer1Token: event.L1Token.Hex(),
				Layer2Token: event.L2Token.Hex(),
-				TokenID:     event.TokenID.Uint64(),
+				TokenIDs:    event.TokenID.String(),
				Amount:      event.Amount.String(),
			})
		case backendabi.L2SentMessageEventSignature:
@@ -214,27 +215,26 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
			err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
			if err != nil {
				log.Warn("Failed to unpack SentMessage event", "err", err)
-				return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
+				return l2CrossMsg, relayedMsgs, l2SentMsgs, err
			}
+			// since every withdraw event is emitted after its SentMessage event, this msg_hash can be used as the next withdraw event's msg_hash
			msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
-			msgHashes = append(msgHashes, MsgHashWrapper{
-				MsgHash: msgHash,
-				TxHash:  vlog.TxHash})
-			l2SentMsg = append(l2SentMsg, &orm.L2SentMsg{
-				Sender:  event.Sender.Hex(),
-				Target:  event.Target.Hex(),
-				Value:   event.Value.String(),
-				MsgHash: msgHash.Hex(),
-				Height:  vlog.BlockNumber,
-				Nonce:   event.MessageNonce.Uint64(),
-				MsgData: hexutil.Encode(event.Message),
-			})
+			l2SentMsgs = append(l2SentMsgs,
+				&orm.L2SentMsg{
+					Sender:  event.Sender.Hex(),
+					Target:  event.Target.Hex(),
+					Value:   event.Value.String(),
+					MsgHash: msgHash.Hex(),
+					Height:  vlog.BlockNumber,
+					Nonce:   event.MessageNonce.Uint64(),
+					MsgData: hexutil.Encode(event.Message),
+				})
		case backendabi.L2RelayedMessageEventSignature:
			event := backendabi.L2RelayedMessageEvent{}
			err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
			if err != nil {
				log.Warn("Failed to unpack RelayedMessage event", "err", err)
-				return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
+				return l2CrossMsg, relayedMsgs, l2SentMsgs, err
			}
			relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
				MsgHash: event.MessageHash.String(),
@@ -244,7 +244,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe

		}
	}
-	return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, nil
+	return l2CrossMsg, relayedMsgs, l2SentMsgs, nil
}

func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
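The parsers lean on log ordering within a transaction: the messenger emits SentMessage before the gateway emits its deposit/withdraw event, so the most recently computed message hash (and, on L2, the most recently appended L2SentMsg) belongs to the gateway event that follows it. A toy sketch of that pairing logic, with illustrative types only:

package main

import "fmt"

type logEvent struct {
	kind    string // "sent" or "gateway"
	msgHash string // set on "sent" events
}

// attachHashes pairs each gateway event with the hash of the SentMessage
// that immediately preceded it in the log stream.
func attachHashes(logs []logEvent) []string {
	var out []string
	var lastHash string
	for _, ev := range logs {
		switch ev.kind {
		case "sent":
			lastHash = ev.msgHash // remember for the next gateway event
		case "gateway":
			out = append(out, lastHash)
		}
	}
	return out
}

func main() {
	logs := []logEvent{{"sent", "0xaa"}, {"gateway", ""}, {"sent", "0xbb"}, {"gateway", ""}}
	fmt.Println(attachHashes(logs)) // [0xaa 0xbb]
}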
@@ -4,11 +4,8 @@ go 1.19

require (
	github.com/agiledragon/gomonkey/v2 v2.9.0
-	github.com/jmoiron/sqlx v1.3.5
-	github.com/lib/pq v1.10.7
-	github.com/orcaman/concurrent-map v1.0.0
	github.com/orcaman/concurrent-map/v2 v2.0.1
	github.com/pressly/goose/v3 v3.7.0
	github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
	github.com/smartystreets/goconvey v1.8.0
	github.com/stretchr/testify v1.8.2
@@ -25,7 +22,6 @@ require (
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/deckarep/golang-set v1.8.0 // indirect
	github.com/go-ole/go-ole v1.2.6 // indirect
-	github.com/go-sql-driver/mysql v1.7.0 // indirect
	github.com/go-stack/stack v1.8.1 // indirect
	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
	github.com/google/uuid v1.3.0 // indirect
@@ -45,7 +41,7 @@ require (
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.18 // indirect
	github.com/mattn/go-runewidth v0.0.14 // indirect
	github.com/mattn/go-sqlite3 v1.14.14 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/rivo/uniseg v0.4.4 // indirect
	github.com/rjeczalik/notify v0.9.1 // indirect
@@ -64,7 +60,6 @@ require (
	golang.org/x/sys v0.9.0 // indirect
	golang.org/x/text v0.10.0 // indirect
	golang.org/x/time v0.3.0 // indirect
-	golang.org/x/tools v0.8.0 // indirect
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)
@@ -29,9 +29,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -69,12 +66,9 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -86,9 +80,6 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -96,10 +87,8 @@ github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp9
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -113,10 +102,7 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf8=
github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -175,7 +161,6 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -218,8 +203,6 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -240,13 +223,3 @@ gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64=
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
modernc.org/cc/v3 v3.36.1 h1:CICrjwr/1M4+6OQ4HJZ/AHxjcwe67r5vPUF518MkO8A=
modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs=
modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=

@@ -15,9 +15,10 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/utils"

"scroll-tech/database/migrate"

"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/orm/migrate"
bridgeUtils "scroll-tech/bridge/internal/utils"
)


@@ -22,7 +22,6 @@ import (
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
)

var (
@@ -44,11 +43,10 @@ type Layer2Relayer struct {

l2Client *ethclient.Client

db *gorm.DB
batchOrm *orm.Batch
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
l2MessageOrm *orm.L2Message
db *gorm.DB
batchOrm *orm.Batch
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block

cfg *config.RelayerConfig

@@ -120,10 +118,9 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
ctx: ctx,
db: db,

batchOrm: orm.NewBatch(db),
l2MessageOrm: orm.NewL2Message(db),
l2BlockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
l2BlockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),

l2Client: l2Client,

@@ -173,8 +170,8 @@ func (r *Layer2Relayer) initializeGenesis() error {

log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())

chunk := &bridgeTypes.Chunk{
Blocks: []*bridgeTypes.WrappedBlock{{
chunk := &types.Chunk{
Blocks: []*types.WrappedBlock{{
Header: genesis,
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
@@ -193,7 +190,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
}

var batch *orm.Batch
batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*bridgeTypes.Chunk{chunk}, dbTX)
batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk}, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
@@ -236,7 +233,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash)
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash.String(), "batchHash", batchHash)

// wait for confirmation
// we assume that no other transactions are sent before initializeGenesis completes
@@ -247,11 +244,11 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
select {
// print progress
case <-ticker.C:
log.Info("Waiting for confirmation, pending count: %d", r.rollupSender.PendingCount())
log.Info("Waiting for confirmation", "pending count", r.rollupSender.PendingCount())

// timeout
case <-time.After(5 * time.Minute):
return fmt.Errorf("import genesis timeout after 5 minutes, original txHash: %v", txHash)
return fmt.Errorf("import genesis timeout after 5 minutes, original txHash: %v", txHash.String())

// handle confirmation
case confirmation := <-r.rollupSender.ConfirmChan():
@@ -261,7 +258,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
if !confirmation.IsSuccessful {
return fmt.Errorf("import genesis batch tx failed")
}
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash)
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String())
return nil
}
}
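Two small fixes ride along in the confirmation loop above: txHash values are stringified explicitly for the structured logger, and a printf-style call (log.Info("Waiting for confirmation, pending count: %d", ...)) is replaced with key/value pairs, which is the form this logger actually expects. For readers new to the pattern, a condensed, runnable sketch of the wait loop itself; the bool channel is a stand-in for the sender's confirmation channel:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForConfirmation logs progress on a ticker, gives up after a timeout,
// and returns as soon as the confirmation channel fires.
func waitForConfirmation(confirmCh <-chan bool, timeout time.Duration) error {
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()
	deadline := time.After(timeout)
	for {
		select {
		case <-ticker.C:
			fmt.Println("waiting for confirmation")
		case <-deadline:
			return errors.New("confirmation timed out")
		case ok := <-confirmCh:
			if !ok {
				return errors.New("transaction failed on L1")
			}
			return nil
		}
	}
}

func main() {
	ch := make(chan bool, 1)
	ch <- true
	fmt.Println(waitForConfirmation(ch, 5*time.Minute)) // <nil>
}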
@@ -321,7 +318,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
}
for _, batch := range pendingBatches {
// get current header and parent header.
currentBatchHeader, err := bridgeTypes.DecodeBatchHeader(batch.BatchHeader)
currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
if err != nil {
log.Error("Failed to decode batch header", "index", batch.Index, "error", err)
return
@@ -348,7 +345,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {

encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks {
var wrappedBlocks []*bridgeTypes.WrappedBlock
var wrappedBlocks []*types.WrappedBlock
wrappedBlocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil {
log.Error("Failed to fetch wrapped blocks",
@@ -356,7 +353,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
"end number", c.EndBlockNumber, "error", err)
return
}
chunk := &bridgeTypes.Chunk{
chunk := &types.Chunk{
Blocks: wrappedBlocks,
}
var chunkBytes []byte
@@ -521,7 +518,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {

func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"

// check whether it is CommitBatches transaction
if batchHash, ok := r.processingCommitment.Load(confirmation.ID); ok {
transactionType = "BatchesCommitment"

@@ -16,10 +16,10 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/common/utils"

"scroll-tech/database/migrate"

"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
bridgeUtils "scroll-tech/bridge/internal/utils"
)

@@ -49,7 +49,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
assert.NoError(t, err)

l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
@@ -57,7 +57,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*bridgeTypes.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)

relayer.ProcessPendingBatches()
@@ -76,7 +76,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)

err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
@@ -118,7 +118,7 @@ func testL2RelayerSkipBatches(t *testing.T) {

batchOrm := orm.NewBatch(db)
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus) string {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)

err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, rollupStatus)
@@ -187,7 +187,7 @@ func testL2RelayerRollupConfirm(t *testing.T) {
batchOrm := orm.NewBatch(db)
batchHashes := make([]string, len(processingKeys))
for i := range batchHashes {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)
batchHashes[i] = batch.Hash
}
@@ -235,10 +235,10 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
defer bridgeUtils.CloseDB(db)

batchOrm := orm.NewBatch(db)
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
assert.NoError(t, err)

batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
assert.NoError(t, err)

// Create and set up the Layer2 Relayer.

@@ -10,9 +10,9 @@ import (
"github.com/stretchr/testify/assert"

"scroll-tech/common/docker"
"scroll-tech/common/types"

"scroll-tech/bridge/internal/config"
bridgeTypes "scroll-tech/bridge/internal/types"
)

var (
@@ -25,12 +25,12 @@ var (
l2Cli *ethclient.Client

// l2 block
wrappedBlock1 *bridgeTypes.WrappedBlock
wrappedBlock2 *bridgeTypes.WrappedBlock
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock

// chunk
chunk1 *bridgeTypes.Chunk
chunk2 *bridgeTypes.Chunk
chunk1 *types.Chunk
chunk2 *types.Chunk
chunkHash1 common.Hash
chunkHash2 common.Hash
)
@@ -58,19 +58,19 @@ func setupEnv(t *testing.T) {

templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock1 = &bridgeTypes.WrappedBlock{}
wrappedBlock1 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace1, wrappedBlock1)
assert.NoError(t, err)
chunk1 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1}}
chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}}
chunkHash1, err = chunk1.Hash(0)
assert.NoError(t, err)

templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock2 = &bridgeTypes.WrappedBlock{}
wrappedBlock2 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace2, wrappedBlock2)
assert.NoError(t, err)
chunk2 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock2}}
chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}}
chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
assert.NoError(t, err)
}

@@ -8,9 +8,10 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"

"scroll-tech/common/types"

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
)

// BatchProposer proposes batches based on available unbatched chunks.
@@ -154,8 +155,8 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
return dbChunks, nil
}

func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*bridgeTypes.Chunk, error) {
chunks := make([]*bridgeTypes.Chunk, len(dbChunks))
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
chunks := make([]*types.Chunk, len(dbChunks))
for i, c := range dbChunks {
wrappedBlocks, err := p.l2Block.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil {
@@ -163,7 +164,7 @@ func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*bridge
"start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return nil, err
}
chunks[i] = &bridgeTypes.Chunk{
chunks[i] = &types.Chunk{
Blocks: wrappedBlocks,
}
}

@@ -10,7 +10,6 @@ import (

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
)

@@ -20,7 +19,7 @@ func testBatchProposer(t *testing.T) {
defer utils.CloseDB(db)

l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{

@@ -8,9 +8,10 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"

"scroll-tech/common/types"

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
)

// ChunkProposer proposes chunks based on available unchunked blocks.
@@ -58,7 +59,7 @@ func (p *ChunkProposer) TryProposeChunk() {
}
}

func (p *ChunkProposer) updateChunkInfoInDB(chunk *bridgeTypes.Chunk) error {
func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
if chunk == nil {
log.Warn("proposed chunk is nil, cannot update in DB")
return nil
@@ -78,7 +79,7 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *bridgeTypes.Chunk) error {
return err
}

func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx)
if err != nil {
return nil, err
@@ -106,15 +107,6 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
)
}

if totalTxGasUsed > p.maxTxGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx gas limit; block number: %v, gas used: %v, max gas limit: %v",
firstBlock.Header.Number,
totalTxGasUsed,
p.maxTxGasPerChunk,
)
}

if totalL1CommitGas > p.maxL1CommitGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
@@ -133,6 +125,16 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
)
}

// Check if the first block breaks any soft limits.
if totalTxGasUsed > p.maxTxGasPerChunk {
log.Warn(
"The first block in chunk exceeds l2 tx gas limit",
"block number", firstBlock.Header.Number,
"gas used", totalTxGasUsed,
"max gas limit", p.maxTxGasPerChunk,
)
}

for i, block := range blocks[1:] {
totalTxGasUsed += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
@@ -165,5 +167,5 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
)
return nil, nil
}
return &bridgeTypes.Chunk{Blocks: blocks}, nil
return &types.Chunk{Blocks: blocks}, nil
}
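The proposeChunk hunks above demote the L2 tx gas check on the first block from a hard error to a soft limit: the chunk is still proposed and only a warning is logged, while the L1 commit gas check stays fatal. A self-contained sketch of that hard-versus-soft split, with illustrative names and thresholds (not the repository's):

package main

import (
	"fmt"
	"log"
)

// checkFirstBlock returns an error only for the hard limit; the soft limit
// merely logs and lets the chunk go through.
func checkFirstBlock(txGas, commitGas, maxTxGas, maxCommitGas uint64) error {
	if commitGas > maxCommitGas {
		// Hard limit: a block that can never be committed to L1 is fatal.
		return fmt.Errorf("first block exceeds l1 commit gas limit: %d > %d", commitGas, maxCommitGas)
	}
	if txGas > maxTxGas {
		// Soft limit: warn and continue proposing the chunk.
		log.Printf("first block exceeds l2 tx gas limit: %d > %d", txGas, maxTxGas)
	}
	return nil
}

func main() {
	// Only the soft limit trips here, so the result is nil.
	fmt.Println(checkFirstBlock(30_000_000, 1_000_000, 20_000_000, 5_000_000))
}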

@@ -6,9 +6,10 @@ import (

"github.com/stretchr/testify/assert"

"scroll-tech/common/types"

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
)

@@ -18,7 +19,7 @@ func testChunkProposer(t *testing.T) {
defer utils.CloseDB(db)

l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -31,8 +32,8 @@ func testChunkProposer(t *testing.T) {
}, db)
cp.TryProposeChunk()

expectedChunk := &bridgeTypes.Chunk{
Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2},
expectedChunk := &types.Chunk{
Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2},
}
expectedHash, err := expectedChunk.Hash(0)
assert.NoError(t, err)

@@ -24,10 +24,9 @@ import (
)

var (
bridgeL1MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
bridgeL1MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
bridgeL1MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
bridgeL1MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
)
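The deleted gauge line drops the relayed-events counter, matching the removal of relayed-message parsing from this watcher below. For context, a minimal sketch of the go-ethereum metrics helpers used here; the metric names are placeholders, passing a nil registry falls back to the library's default registry, and exact accessor names can vary across geth versions:

package main

import (
	"fmt"

	gethMetrics "github.com/ethereum/go-ethereum/metrics"
)

// Register a gauge and a counter once at package init, then update them
// from the fetch loop, as the watcher code above does.
var (
	syncHeightGauge = gethMetrics.NewRegisteredGauge("example/sync/height", nil)
	sentEventsCtr   = gethMetrics.NewRegisteredCounter("example/sent/events/total", nil)
)

func main() {
	syncHeightGauge.Update(1024) // latest synced block height
	sentEventsCtr.Inc(3)         // three SentMessage events in this batch
	fmt.Println("metrics recorded")
}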

type rollupEvent struct {
@@ -41,7 +40,6 @@ type L1WatcherClient struct {
ctx context.Context
client *ethclient.Client
l1MessageOrm *orm.L1Message
l2MessageOrm *orm.L2Message
l1BlockOrm *orm.L1Block
batchOrm *orm.Batch

@@ -91,7 +89,6 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
l1MessageOrm: l1MessageOrm,
l1BlockOrm: l1BlockOrm,
batchOrm: orm.NewBatch(db),
l2MessageOrm: orm.NewL2Message(db),
confirmations: confirmations,

messengerAddress: messengerAddress,
@@ -227,18 +224,16 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))

sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
sentMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("Failed to parse emitted events log", "err", err)
return err
}
sentMessageCount := int64(len(sentMessageEvents))
relayedMessageCount := int64(len(relayedMessageEvents))
rollupEventCount := int64(len(rollupEvents))
bridgeL1MsgsSentEventsTotalCounter.Inc(sentMessageCount)
bridgeL1MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
bridgeL1MsgsRollupEventsTotalCounter.Inc(rollupEventCount)
log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RelayedMessageCount", relayedMessageCount, "RollupEventCount", rollupEventCount)
log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RollupEventCount", rollupEventCount)

// use rollup event to update rollup results db status
var batchHashes []string
@@ -272,21 +267,6 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
}

// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
var msgStatus types.MsgStatus
if msg.isSuccessful {
msgStatus = types.MsgConfirmed
} else {
msgStatus = types.MsgFailed
}
if err = w.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err
}
}

if err = w.l1MessageOrm.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
return err
}
@@ -298,11 +278,10 @@ func (w *L1WatcherClient) FetchContractEvent() error {
return nil
}

func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1Message, []rollupEvent, error) {
// Need to use the contract ABI to parse the event logs.
// Can only be tested after we have our contracts set up.
var l1Messages []*orm.L1Message
var relayedMessages []relayedMessage
var rollupEvents []rollupEvent
for _, vLog := range logs {
switch vLog.Topics[0] {
@@ -311,7 +290,7 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
err := utils.UnpackLog(w.messageQueueABI, &event, "QueueTransaction", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 QueueTransaction event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
return l1Messages, rollupEvents, err
}

msgHash := common.BytesToHash(crypto.Keccak256(event.Data))
@@ -327,38 +306,12 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
GasLimit: event.GasLimit.Uint64(),
Layer1Hash: vLog.TxHash.Hex(),
})
case bridgeAbi.L1RelayedMessageEventSignature:
event := bridgeAbi.L1RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 RelayedMessage event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}

relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: true,
})
case bridgeAbi.L1FailedRelayedMessageEventSignature:
event := bridgeAbi.L1FailedRelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 FailedRelayedMessage event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}

relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: false,
})
case bridgeAbi.L1CommitBatchEventSignature:
event := bridgeAbi.L1CommitBatchEvent{}
err := utils.UnpackLog(w.scrollChainABI, &event, "CommitBatch", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 CommitBatch event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
return l1Messages, rollupEvents, err
}

rollupEvents = append(rollupEvents, rollupEvent{
@@ -371,7 +324,7 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
err := utils.UnpackLog(w.scrollChainABI, &event, "FinalizeBatch", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
return l1Messages, rollupEvents, err
}

rollupEvents = append(rollupEvents, rollupEvent{
@@ -384,5 +337,5 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
}
}

return l1Messages, relayedMessages, rollupEvents, nil
return l1Messages, rollupEvents, nil
}

@@ -159,14 +159,14 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {

convey.Convey("parse bridge event logs failure", t, func() {
targetErr := errors.New("parse log failure")
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
return nil, nil, nil, targetErr
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []rollupEvent, error) {
return nil, nil, targetErr
})
err := watcher.FetchContractEvent()
assert.EqualError(t, err, targetErr.Error())
})

patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []rollupEvent, error) {
rollupEvents := []rollupEvent{
{
batchHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
@@ -179,20 +179,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
status: commonTypes.RollupCommitted,
},
}

relayedMessageEvents := []relayedMessage{
{
msgHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
txHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
isSuccessful: true,
},
{
msgHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
txHash: common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"),
isSuccessful: false,
},
}
return nil, relayedMessageEvents, rollupEvents, nil
return nil, rollupEvents, nil
})

var batchOrm *orm.Batch
@@ -250,20 +237,6 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
return nil
})

var l2MessageOrm *orm.L2Message
convey.Convey("db update layer2 status and layer1 hash failure", t, func() {
targetErr := errors.New("UpdateLayer2StatusAndLayer1Hash failure")
patchGuard.ApplyMethodFunc(l2MessageOrm, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
return targetErr
})
err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error())
})

patchGuard.ApplyMethodFunc(l2MessageOrm, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
return nil
})

var l1MessageOrm *orm.L1Message
convey.Convey("db save l1 message failure", t, func() {
targetErr := errors.New("SaveL1Messages failure")
@@ -303,10 +276,9 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})

@@ -323,102 +295,14 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
assert.Len(t, l2Messages, 1)
assert.Equal(t, l2Messages[0].Value, big.NewInt(1000).String())
})
}

func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db)

logs := []types.Log{
{
Topics: []common.Hash{bridgeAbi.L1RelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}

convey.Convey("unpack RelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog RelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
return targetErr
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})

convey.Convey("L1RelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridgeAbi.L1RelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, rollupEvents)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}

func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db)
logs := []types.Log{
{
Topics: []common.Hash{bridgeAbi.L1FailedRelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}

convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog FailedRelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
return targetErr
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})

convey.Convey("L1FailedRelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridgeAbi.L1FailedRelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, rollupEvents)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}

func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db)
@@ -437,10 +321,9 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})

@@ -453,10 +336,9 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Len(t, rollupEvents, 1)
assert.Equal(t, rollupEvents[0].batchHash, msgHash)
assert.Equal(t, rollupEvents[0].status, commonTypes.RollupCommitted)
@@ -481,10 +363,9 @@ func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})

@@ -497,10 +378,9 @@ func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Len(t, rollupEvents, 1)
assert.Equal(t, rollupEvents[0].batchHash, msgHash)
assert.Equal(t, rollupEvents[0].status, commonTypes.RollupFinalized)

@@ -2,7 +2,6 @@ package watcher

import (
"context"
"errors"
"fmt"
"math/big"

@@ -23,7 +22,6 @@ import (

bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
)

@@ -32,8 +30,6 @@ var (
bridgeL2MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL2MsgsAppendEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry)
bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)

@@ -46,7 +42,6 @@ type L2WatcherClient struct {

l2BlockOrm *orm.L2Block
l1MessageOrm *orm.L1Message
l2MessageOrm *orm.L2Message

confirmations rpc.BlockNumber

@@ -65,11 +60,20 @@ type L2WatcherClient struct {

// NewL2WatcherClient takes an l2geth client and creates an L2WatcherClient instance.
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
l2MessageOrm := orm.NewL2Message(db)
savedHeight, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
if err != nil {
l1MessageOrm := orm.NewL1Message(db)
var savedHeight uint64
l1msg, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
if err != nil || l1msg == nil {
log.Warn("fetch height from db failed", "err", err)
savedHeight = 0
} else {
receipt, err := client.TransactionReceipt(ctx, common.HexToHash(l1msg.Layer2Hash))
if err != nil || receipt == nil {
log.Warn("get tx from l2 failed", "err", err)
savedHeight = 0
} else {
savedHeight = receipt.BlockNumber.Uint64()
}
}

w := L2WatcherClient{
@@ -78,8 +82,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat

l2BlockOrm: orm.NewL2Block(db),
l1MessageOrm: orm.NewL1Message(db),
l2MessageOrm: l2MessageOrm,
processedMsgHeight: uint64(savedHeight),
processedMsgHeight: savedHeight,
confirmations: confirmations,

messengerAddress: messengerAddress,
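The constructor above stops trusting a stored watermark and instead recomputes the resume height from the receipt of the last L1 message known to have an L2 hash, falling back to zero (a full rescan) on any failure. A condensed sketch of that fallback under assumed inputs, using the standard go-ethereum client API (the fork used here exposes the same calls):

package sketch

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

// recoverHeight resolves the L2 tx hash of the last relayed message to a
// receipt and resumes from its block number; any failure restarts from 0.
// lastL2TxHash is assumed to come from a query such as
// GetLayer1LatestMessageWithLayer2Hash.
func recoverHeight(ctx context.Context, client *ethclient.Client, lastL2TxHash string) uint64 {
	if lastL2TxHash == "" {
		return 0
	}
	receipt, err := client.TransactionReceipt(ctx, common.HexToHash(lastL2TxHash))
	if err != nil || receipt == nil {
		return 0
	}
	return receipt.BlockNumber.Uint64()
}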
@@ -126,10 +129,20 @@ func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
txsData := make([]*gethTypes.TransactionData, len(txs))
for i, tx := range txs {
v, r, s := tx.RawSignatureValues()

nonce := tx.Nonce()

// We need QueueIndex in `NewBatchHeader`. However, `TransactionData`
// does not have this field. Since `L1MessageTx` does not have a nonce,
// we reuse this field for storing the queue index.
if msg := tx.AsL1MessageTx(); msg != nil {
nonce = msg.QueueIndex
}

txsData[i] = &gethTypes.TransactionData{
Type: tx.Type(),
TxHash: tx.Hash().String(),
Nonce: tx.Nonce(),
Nonce: nonce,
ChainId: (*hexutil.Big)(tx.ChainId()),
Gas: tx.Gas(),
GasPrice: (*hexutil.Big)(tx.GasPrice()),
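The comment block in this hunk explains a deliberate field overload: TransactionData has no QueueIndex field and L1 message transactions carry no meaningful nonce, so one uint64 slot serves both purposes, discriminated by the transaction type. A toy illustration with stand-in types (the 0x7e type tag is an assumption, not taken from this diff):

package main

import "fmt"

// txData stands in for TransactionData: Nonce doubles as the L1 message
// queue index whenever Type marks an L1 message transaction.
type txData struct {
	Type  uint8
	Nonce uint64
}

const l1MessageTxType = 0x7e // assumed tag for L1 message transactions

func describe(d txData) string {
	if d.Type == l1MessageTxType {
		return fmt.Sprintf("L1 message, queue index %d", d.Nonce)
	}
	return fmt.Sprintf("regular tx, nonce %d", d.Nonce)
}

func main() {
	fmt.Println(describe(txData{Type: l1MessageTxType, Nonce: 42}))
	fmt.Println(describe(txData{Type: 2, Nonce: 7}))
}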
@@ -146,7 +159,7 @@ func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
}

func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var blocks []*bridgeTypes.WrappedBlock
var blocks []*types.WrappedBlock
for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number)
block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number)))
@@ -161,7 +174,7 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number)
}

blocks = append(blocks, &bridgeTypes.WrappedBlock{
blocks = append(blocks, &types.WrappedBlock{
Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()),
WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot),
@@ -227,17 +240,15 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))

sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return
}

sentMessageCount := int64(len(sentMessageEvents))
relayedMessageCount := int64(len(relayedMessageEvents))
bridgeL2MsgsSentEventsTotalCounter.Inc(sentMessageCount)
bridgeL2MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
log.Info("L2 events types", "SentMessageCount", sentMessageCount, "RelayedMessageCount", relayedMessageCount)
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)

// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
@@ -254,71 +265,24 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
}

if err = w.l2MessageOrm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
log.Error("failed to save l2 messages", "err", err)
return
}

w.processedMsgHeight = uint64(to)
bridgeL2MsgsSyncHeightGauge.Update(to)
}
}

func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]orm.L2Message, []relayedMessage, error) {
func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]relayedMessage, error) {
// Need to use the contract ABI to parse the event logs.
// Can only be tested after we have our contracts set up.

var l2Messages []orm.L2Message
var relayedMessages []relayedMessage
var lastAppendMsgHash common.Hash
var lastAppendMsgNonce uint64
for _, vLog := range logs {
switch vLog.Topics[0] {
case bridgeAbi.L2SentMessageEventSignature:
event := bridgeAbi.L2SentMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "SentMessage", vLog)
if err != nil {
log.Error("failed to unpack layer2 SentMessage event", "err", err)
return l2Messages, relayedMessages, err
}

computedMsgHash := utils.ComputeMessageHash(
event.Sender,
event.Target,
event.Value,
event.MessageNonce,
event.Message,
)

// The `AppendMessage` event is always emitted before the `SentMessage` event,
// so they should always match; this is just a double check.
if event.MessageNonce.Uint64() != lastAppendMsgNonce {
errMsg := fmt.Sprintf("l2 message nonces mismatch: AppendMessage.nonce=%v, SentMessage.nonce=%v, tx_hash=%v",
lastAppendMsgNonce, event.MessageNonce.Uint64(), vLog.TxHash.Hex())
return l2Messages, relayedMessages, errors.New(errMsg)
}
if computedMsgHash != lastAppendMsgHash {
errMsg := fmt.Sprintf("l2 message hashes mismatch: AppendMessage.msg_hash=%v, SentMessage.msg_hash=%v, tx_hash=%v",
lastAppendMsgHash.Hex(), computedMsgHash.Hex(), vLog.TxHash.Hex())
return l2Messages, relayedMessages, errors.New(errMsg)
}

l2Messages = append(l2Messages, orm.L2Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: computedMsgHash.String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
case bridgeAbi.L2RelayedMessageEventSignature:
event := bridgeAbi.L2RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err)
return l2Messages, relayedMessages, err
return relayedMessages, err
}

relayedMessages = append(relayedMessages, relayedMessage{
@@ -331,7 +295,7 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]orm.L2Me
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err)
return l2Messages, relayedMessages, err
return relayedMessages, err
}

relayedMessages = append(relayedMessages, relayedMessage{
@@ -339,21 +303,9 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]orm.L2Me
txHash: vLog.TxHash,
isSuccessful: false,
})
case bridgeAbi.L2AppendMessageEventSignature:
event := bridgeAbi.L2AppendMessageEvent{}
err := utils.UnpackLog(w.messageQueueABI, &event, "AppendMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 AppendMessage event", "err", err)
return l2Messages, relayedMessages, err
}

lastAppendMsgHash = event.MessageHash
lastAppendMsgNonce = event.Index.Uint64()
bridgeL2MsgsAppendEventsTotalCounter.Inc(1)
default:
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
}
}

return l2Messages, relayedMessages, nil
return relayedMessages, nil
}

@@ -21,7 +21,6 @@ import (
	"github.com/smartystreets/goconvey/convey"
	"github.com/stretchr/testify/assert"

	"scroll-tech/common/types"
	cutils "scroll-tech/common/utils"

	bridgeAbi "scroll-tech/bridge/abi"
@@ -67,137 +66,6 @@ func testCreateNewWatcherAndStop(t *testing.T) {
	assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
}

func testMonitorBridgeContract(t *testing.T) {
	wc, db := setupL2Watcher(t)
	subCtx, cancel := context.WithCancel(context.Background())
	defer func() {
		cancel()
		defer utils.CloseDB(db)
	}()

	loopToFetchEvent(subCtx, wc)

	previousHeight, err := l2Cli.BlockNumber(context.Background())
	assert.NoError(t, err)

	auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])

	// deploy mock bridge
	_, tx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
	assert.NoError(t, err)
	address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
	assert.NoError(t, err)

	rc := prepareWatcherClient(l2Cli, db, address)
	loopToFetchEvent(subCtx, rc)
	// Call mock_bridge instance sendMessage to trigger emit events
	toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
	message := []byte("testbridgecontract")
	fee := big.NewInt(0)
	gasLimit := big.NewInt(1)

	tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
	assert.NoError(t, err)
	receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
	if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
		t.Fatalf("Call failed")
	}

	// extra block mined
	toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
	message = []byte("testbridgecontract")
	tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
	assert.NoError(t, err)
	receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
	if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
		t.Fatalf("Call failed")
	}

	l2MessageOrm := orm.NewL2Message(db)
	// check if we successfully stored events
	assert.True(t, cutils.TryTimes(10, func() bool {
		height, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
		return err == nil && height > int64(previousHeight)
	}))

	// check l2 messages.
	assert.True(t, cutils.TryTimes(10, func() bool {
		msgs, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"status": types.MsgPending}, nil, 0)
		return err == nil && len(msgs) == 2
	}))
}

func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
	_, db := setupL2Watcher(t)
	subCtx, cancel := context.WithCancel(context.Background())
	defer func() {
		cancel()
		defer utils.CloseDB(db)
	}()

	previousHeight, err := l2Cli.BlockNumber(context.Background()) // shadow the global previousHeight
	assert.NoError(t, err)

	auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])

	_, trx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
	assert.NoError(t, err)
	address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
	assert.NoError(t, err)

	wc := prepareWatcherClient(l2Cli, db, address)
	loopToFetchEvent(subCtx, wc)

	// Call mock_bridge instance sendMessage to trigger emit events multiple times
	numTransactions := 4
	var tx *gethTypes.Transaction
	for i := 0; i < numTransactions; i++ {
		addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
		nonce, nonceErr := l2Cli.PendingNonceAt(context.Background(), addr)
		assert.NoError(t, nonceErr)
		auth.Nonce = big.NewInt(int64(nonce))
		toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
		message := []byte("testbridgecontract")
		fee := big.NewInt(0)
		gasLimit := big.NewInt(1)
		tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
		assert.NoError(t, err)
	}

	receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
	if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
		t.Fatalf("Call failed")
	}

	// extra block mined
	addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
	nonce, nonceErr := l2Cli.PendingNonceAt(context.Background(), addr)
	assert.NoError(t, nonceErr)
	auth.Nonce = big.NewInt(int64(nonce))
	toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
	message := []byte("testbridgecontract")
	fee := big.NewInt(0)
	gasLimit := big.NewInt(1)
	tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
	assert.NoError(t, err)
	receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
	if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
		t.Fatalf("Call failed")
	}

	l2MessageOrm := orm.NewL2Message(db)
	// check if we successfully stored events
	assert.True(t, cutils.TryTimes(10, func() bool {
		height, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
		return err == nil && height > int64(previousHeight)
	}))

	assert.True(t, cutils.TryTimes(10, func() bool {
		msgs, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"status": types.MsgPending}, nil, 0)
		return err == nil && len(msgs) == 5
	}))
}

func testFetchRunningMissingBlocks(t *testing.T) {
	_, db := setupL2Watcher(t)
	defer utils.CloseDB(db)
@@ -244,57 +112,6 @@ func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
	go cutils.Loop(subCtx, 2*time.Second, watcher.FetchContractEvent)
}

func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
	watcher, db := setupL2Watcher(t)
	defer utils.CloseDB(db)

	logs := []gethTypes.Log{
		{
			Topics: []common.Hash{
				bridgeAbi.L2SentMessageEventSignature,
			},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}

	convey.Convey("unpack SentMessage log failure", t, func() {
		targetErr := errors.New("UnpackLog SentMessage failure")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
			return targetErr
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, targetErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

	convey.Convey("L2SentMessageEventSignature success", t, func() {
		tmpSendAddr := common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
		tmpTargetAddr := common.HexToAddress("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		tmpValue := big.NewInt(1000)
		tmpMessageNonce := big.NewInt(100)
		tmpMessage := []byte("test for L2SentMessageEventSignature")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
			tmpOut := out.(*bridgeAbi.L2SentMessageEvent)
			tmpOut.Sender = tmpSendAddr
			tmpOut.Value = tmpValue
			tmpOut.Target = tmpTargetAddr
			tmpOut.MessageNonce = tmpMessageNonce
			tmpOut.Message = tmpMessage
			return nil
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.Error(t, err)
		assert.Empty(t, relayedMessages)
		assert.Empty(t, l2Messages)
	})
}

func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
	watcher, db := setupL2Watcher(t)
	defer utils.CloseDB(db)
@@ -314,9 +131,8 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, targetErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

@@ -329,9 +145,8 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Len(t, relayedMessages, 1)
		assert.Equal(t, relayedMessages[0].msgHash, msgHash)
	})
@@ -356,9 +171,8 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, targetErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

@@ -371,51 +185,9 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Len(t, relayedMessages, 1)
		assert.Equal(t, relayedMessages[0].msgHash, msgHash)
	})
}

func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
	watcher, db := setupL2Watcher(t)
	defer utils.CloseDB(db)
	logs := []gethTypes.Log{
		{
			Topics:      []common.Hash{bridgeAbi.L2AppendMessageEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}

	convey.Convey("unpack AppendMessage log failure", t, func() {
		targetErr := errors.New("UnpackLog AppendMessage failure")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
			return targetErr
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, targetErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

	convey.Convey("L2AppendMessageEventSignature success", t, func() {
		msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
			tmpOut := out.(*bridgeAbi.L2AppendMessageEvent)
			tmpOut.MessageHash = msgHash
			tmpOut.Index = big.NewInt(100)
			return nil
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})
}

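The parse tests above swap out utils.UnpackLog per test case with gomonkey. A small self-contained sketch of that pattern, assuming the github.com/agiledragon/gomonkey/v2 module; note that gomonkey generally needs inlining disabled (e.g. go test -gcflags=all=-l):

package watcher

import (
	"errors"
	"testing"

	"github.com/agiledragon/gomonkey/v2"
	"github.com/stretchr/testify/assert"
)

// unpack stands in for utils.UnpackLog; the real function also takes an ABI,
// an event name and a log, which are omitted here for brevity.
func unpack(out interface{}) error { return nil }

func parse() error { return unpack(nil) }

// TestPatchPattern sketches the gomonkey idiom used above: replace a
// package-level function for the duration of one case, then restore it.
func TestPatchPattern(t *testing.T) {
	targetErr := errors.New("unpack failure")
	patchGuard := gomonkey.ApplyFunc(unpack, func(out interface{}) error {
		return targetErr
	})
	defer patchGuard.Reset()

	assert.EqualError(t, parse(), targetErr.Error())
}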
@@ -10,10 +10,11 @@ import (
	"gorm.io/gorm"

	"scroll-tech/common/docker"
	"scroll-tech/common/types"

	"scroll-tech/database/migrate"

	"scroll-tech/bridge/internal/config"
	"scroll-tech/bridge/internal/orm/migrate"
	bridgeTypes "scroll-tech/bridge/internal/types"
	"scroll-tech/bridge/internal/utils"
)

@@ -27,8 +28,8 @@ var (
	l2Cli *ethclient.Client

	// block trace
	wrappedBlock1 *bridgeTypes.WrappedBlock
	wrappedBlock2 *bridgeTypes.WrappedBlock
	wrappedBlock1 *types.WrappedBlock
	wrappedBlock2 *types.WrappedBlock
)

func setupEnv(t *testing.T) (err error) {
@@ -56,7 +57,7 @@ func setupEnv(t *testing.T) (err error) {
		return err
	}
	// unmarshal blockTrace
	wrappedBlock1 = &bridgeTypes.WrappedBlock{}
	wrappedBlock1 = &types.WrappedBlock{}
	if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
		return err
	}
@@ -66,7 +67,7 @@ func setupEnv(t *testing.T) (err error) {
		return err
	}
	// unmarshal blockTrace
	wrappedBlock2 = &bridgeTypes.WrappedBlock{}
	wrappedBlock2 = &types.WrappedBlock{}
	if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
		return err
	}
@@ -100,20 +101,14 @@ func TestFunction(t *testing.T) {
	t.Run("TestL1WatcherClientFetchBlockHeader", testL1WatcherClientFetchBlockHeader)
	t.Run("TestL1WatcherClientFetchContractEvent", testL1WatcherClientFetchContractEvent)
	t.Run("TestParseBridgeEventLogsL1QueueTransactionEventSignature", testParseBridgeEventLogsL1QueueTransactionEventSignature)
	t.Run("TestParseBridgeEventLogsL1RelayedMessageEventSignature", testParseBridgeEventLogsL1RelayedMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL1FailedRelayedMessageEventSignature", testParseBridgeEventLogsL1FailedRelayedMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL1CommitBatchEventSignature", testParseBridgeEventLogsL1CommitBatchEventSignature)
	t.Run("TestParseBridgeEventLogsL1FinalizeBatchEventSignature", testParseBridgeEventLogsL1FinalizeBatchEventSignature)

	// Run l2 watcher test cases.
	t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
	t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
	t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
	t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)
	t.Run("TestParseBridgeEventLogsL2SentMessageEventSignature", testParseBridgeEventLogsL2SentMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL2RelayedMessageEventSignature", testParseBridgeEventLogsL2RelayedMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL2FailedRelayedMessageEventSignature", testParseBridgeEventLogsL2FailedRelayedMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL2AppendMessageEventSignature", testParseBridgeEventLogsL2AppendMessageEventSignature)

	// Run chunk-proposer test cases.
	t.Run("TestChunkProposer", testChunkProposer)

@@ -10,8 +10,6 @@ import (
	"scroll-tech/common/types"
	"scroll-tech/common/types/message"

	bridgeTypes "scroll-tech/bridge/internal/types"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"
@@ -35,6 +33,7 @@ type Batch struct {
	BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`

	// proof
	ChunkProofsReady int16      `json:"chunk_proofs_ready" gorm:"column:chunk_proofs_ready;default:0"`
	ProvingStatus    int16      `json:"proving_status" gorm:"column:proving_status;default:1"`
	Proof            []byte     `json:"proof" gorm:"column:proof;default:NULL"`
	ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
@@ -107,14 +106,16 @@ func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) {
// GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash.
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.AggProof, error) {
	var batch Batch
	err := o.db.WithContext(ctx).Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified).First(&batch).Error
	if err != nil {
	db := o.db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Select("proof")
	db = db.Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified)
	if err := db.Find(&batch).Error; err != nil {
		return nil, err
	}

	var proof message.AggProof
	err = json.Unmarshal(batch.Proof, &proof)
	if err != nil {
	if err := json.Unmarshal(batch.Proof, &proof); err != nil {
		return nil, err
	}

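The GetVerifiedProofByHash hunk above rebuilds a one-shot GORM call as a stepwise chain with an explicit Model and Select. A runnable sketch of the same pattern, using an in-memory sqlite driver purely to stay self-contained (the Batch fields are trimmed):

package main

import (
	"context"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

type Batch struct {
	Hash          string
	ProvingStatus int16
	Proof         []byte
}

// fetchProof sketches the chained style used above: each step reassigns the
// *gorm.DB so the final Find runs with Model, Select and Where all applied.
func fetchProof(ctx context.Context, gdb *gorm.DB, hash string, verified int16) ([]byte, error) {
	var batch Batch
	db := gdb.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Select("proof")
	db = db.Where("hash = ? AND proving_status = ?", hash, verified)
	if err := db.Find(&batch).Error; err != nil {
		return nil, err
	}
	return batch.Proof, nil
}

func main() {
	gdb, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	_ = gdb.AutoMigrate(&Batch{})
	_, _ = fetchProof(context.Background(), gdb, "0xabc", 4)
}

One behavioral note on the hunk itself: swapping First for Find also changes the not-found behavior, since GORM's Find reports no ErrRecordNotFound on an empty result while First does.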
@@ -134,12 +135,15 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
// GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes.
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) {
	if len(hashes) == 0 {
		return []types.RollupStatus{}, nil
		return nil, nil
	}

	var batches []Batch
	err := o.db.WithContext(ctx).Where("hash IN ?", hashes).Find(&batches).Error
	if err != nil {
	db := o.db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Select("hash, rollup_status")
	db = db.Where("hash IN ?", hashes)
	if err := db.Find(&batches).Error; err != nil {
		return nil, err
	}

@@ -189,7 +193,7 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
}

// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
	if len(chunks) == 0 {
		return nil, errors.New("invalid args")
	}
@@ -217,8 +221,8 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
		batchIndex = parentBatch.Index + 1
		parentBatchHash = common.HexToHash(parentBatch.Hash)

		var parentBatchHeader *bridgeTypes.BatchHeader
		parentBatchHeader, err = bridgeTypes.DecodeBatchHeader(parentBatch.BatchHeader)
		var parentBatchHeader *types.BatchHeader
		parentBatchHeader, err = types.DecodeBatchHeader(parentBatch.BatchHeader)
		if err != nil {
			log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
			return nil, err
@@ -228,7 +232,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
		version = parentBatchHeader.Version()
	}

	batchHeader, err := bridgeTypes.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks)
	batchHeader, err := types.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks)
	if err != nil {
		log.Error("failed to create batch header",
			"index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
@@ -240,17 +244,18 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
	lastChunkBlockNum := len(chunks[numChunks-1].Blocks)

	newBatch := Batch{
		Index:           batchIndex,
		Hash:            batchHeader.Hash().Hex(),
		StartChunkHash:  startChunkHash,
		StartChunkIndex: startChunkIndex,
		EndChunkHash:    endChunkHash,
		EndChunkIndex:   endChunkIndex,
		StateRoot:       chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
		WithdrawRoot:    chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
		BatchHeader:     batchHeader.Encode(),
		ProvingStatus:   int16(types.ProvingTaskUnassigned),
		RollupStatus:    int16(types.RollupPending),
		Index:            batchIndex,
		Hash:             batchHeader.Hash().Hex(),
		StartChunkHash:   startChunkHash,
		StartChunkIndex:  startChunkIndex,
		EndChunkHash:     endChunkHash,
		EndChunkIndex:    endChunkIndex,
		StateRoot:        chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
		WithdrawRoot:     chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
		BatchHeader:      batchHeader.Encode(),
		ProvingStatus:    int16(types.ProvingTaskUnassigned),
		RollupStatus:     int16(types.RollupPending),
		ChunkProofsReady: 0,
	}

	if err := db.WithContext(ctx).Create(&newBatch).Error; err != nil {
@@ -267,7 +272,7 @@ func (o *Batch) UpdateSkippedBatches(ctx context.Context) (uint64, error) {
		int(types.ProvingTaskSkipped),
		int(types.ProvingTaskFailed),
	}
	result := o.db.Model(&Batch{}).Where("rollup_status", int(types.RollupCommitted)).
	result := o.db.WithContext(ctx).Model(&Batch{}).Where("rollup_status", int(types.RollupCommitted)).
		Where("proving_status IN (?)", provingStatusList).Update("rollup_status", int(types.RollupFinalizationSkipped))
	if result.Error != nil {
		return 0, result.Error
@@ -303,10 +308,9 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
		updateFields["prover_assigned_at"] = nil
	case types.ProvingTaskProved, types.ProvingTaskVerified:
		updateFields["proved_at"] = time.Now()
	default:
	}

	if err := db.Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
	if err := db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
		return err
	}
	return nil
@@ -328,7 +332,7 @@ func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status type
	case types.RollupFinalized:
		updateFields["finalized_at"] = time.Now()
	}
	if err := db.Model(&Batch{}).WithContext(ctx).Where("hash", hash).Updates(updateFields).Error; err != nil {
	if err := db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
		return err
	}
	return nil
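Several hunks above move WithContext(ctx) to the head of the chain (and add the missing ctx in UpdateSkippedBatches). Both orderings compile in GORM, so this is a consistency convention: attach the request context first, then build the rest of the chain. A small self-contained sketch, with illustrative names:

package main

import (
	"context"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

type Batch struct {
	Hash         string
	RollupStatus int16
}

// updateRollupStatus sketches the convention adopted above:
// db.WithContext(ctx).Model(...) rather than db.Model(...).WithContext(ctx),
// so every clause in the chain is uniformly context-aware.
func updateRollupStatus(ctx context.Context, gdb *gorm.DB, hash string, status int16) error {
	return gdb.WithContext(ctx).
		Model(&Batch{}).
		Where("hash = ?", hash).
		Update("rollup_status", status).Error
}

func main() {
	gdb, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	_ = gdb.AutoMigrate(&Batch{})
	_ = updateRollupStatus(context.Background(), gdb, "0xabc", 4)
}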
@@ -362,7 +366,7 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
	return nil
}

// UpdateProofByHash updates the block batch proof by hash.
// UpdateProofByHash updates the batch proof by hash.
// for unit test.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
	proofBytes, err := json.Marshal(proof)

@@ -7,8 +7,6 @@ import (

	"scroll-tech/common/types"

	bridgeTypes "scroll-tech/bridge/internal/types"

	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"
)
@@ -33,7 +31,7 @@ type Chunk struct {
	Proof            []byte     `json:"proof" gorm:"column:proof;default:NULL"`
	ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
	ProvedAt         *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
	ProofTimeSec     int16      `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
	ProofTimeSec     int        `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`

	// batch
	BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
@@ -107,7 +105,7 @@ func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
}

// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
	if chunk == nil || len(chunk.Blocks) == 0 {
		return nil, errors.New("invalid args")
	}
@@ -193,7 +191,6 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
		updateFields["prover_assigned_at"] = nil
	case types.ProvingTaskProved, types.ProvingTaskVerified:
		updateFields["proved_at"] = time.Now()
	default:
	}

	if err := db.Model(&Chunk{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
@@ -211,8 +208,5 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
	}
	db = db.Model(&Chunk{}).Where("index >= ? AND index <= ?", startIndex, endIndex)

	if err := db.Update("batch_hash", batchHash).Error; err != nil {
		return err
	}
	return nil
	return db.Update("batch_hash", batchHash).Error
}

@@ -52,6 +52,16 @@ func (m *L1Message) GetLayer1LatestWatchedHeight() (int64, error) {
	return -1, nil
}

// GetLayer1LatestMessageWithLayer2Hash returns the latest l1 message that has a layer2 hash
func (m *L1Message) GetLayer1LatestMessageWithLayer2Hash() (*L1Message, error) {
	var msg *L1Message
	err := m.db.Where("layer2_hash IS NOT NULL").Order("queue_index DESC").First(&msg).Error
	if err != nil {
		return nil, err
	}
	return msg, nil
}

// GetL1MessagesByStatus fetches a list of unprocessed messages with the given msg status
func (m *L1Message) GetL1MessagesByStatus(status types.MsgStatus, limit uint64) ([]L1Message, error) {
	var msgs []L1Message

@@ -11,7 +11,7 @@ import (
	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/bridge/internal/types"
	"scroll-tech/common/types"
)

// L2Block represents an l2 block in the database.
@@ -56,10 +56,12 @@ func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (int64, error) {
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
	var l2Blocks []L2Block
	if err := o.db.WithContext(ctx).Select("header, transactions, withdraw_trie_root").
		Where("chunk_hash IS NULL").
		Order("number asc").
		Find(&l2Blocks).Error; err != nil {
	db := o.db.WithContext(ctx)
	db = db.Model(&L2Block{})
	db = db.Select("header, transactions, withdraw_trie_root")
	db = db.Where("chunk_hash IS NULL")
	db = db.Order("number ASC")
	if err := db.Find(&l2Blocks).Error; err != nil {
		return nil, err
	}

@@ -119,6 +121,8 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6

	var l2Blocks []L2Block
	db := o.db.WithContext(ctx)
	db = db.Model(&L2Block{})
	db = db.Select("header, transactions, withdraw_trie_root")
	db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
	db = db.Order("number ASC")

@@ -1,127 +0,0 @@
package orm

import (
	"context"

	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/types"
)

// L2Message is the structure of a stored layer2 bridge message
type L2Message struct {
	db *gorm.DB `gorm:"column:-"`

	Nonce      uint64 `json:"nonce" gorm:"column:nonce"`
	MsgHash    string `json:"msg_hash" gorm:"column:msg_hash"`
	Height     uint64 `json:"height" gorm:"column:height"`
	Sender     string `json:"sender" gorm:"column:sender"`
	Value      string `json:"value" gorm:"column:value"`
	Target     string `json:"target" gorm:"column:target"`
	Calldata   string `json:"calldata" gorm:"column:calldata"`
	Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash"`
	Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash;default:NULL"`
	Proof      string `json:"proof" gorm:"column:proof;default:NULL"`
	Status     int    `json:"status" gorm:"column:status;default:1"`
}

// NewL2Message creates an L2Message instance
func NewL2Message(db *gorm.DB) *L2Message {
	return &L2Message{db: db}
}

// TableName defines the L2Message table name
func (*L2Message) TableName() string {
	return "l2_message"
}

// GetL2Messages fetches a list of messages matching the given fields
func (m *L2Message) GetL2Messages(fields map[string]interface{}, orderByList []string, limit int) ([]L2Message, error) {
	var l2MsgList []L2Message
	db := m.db
	for key, value := range fields {
		db = db.Where(key, value)
	}

	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}

	if limit != 0 {
		db = db.Limit(limit)
	}

	if err := db.Find(&l2MsgList).Error; err != nil {
		return nil, err
	}
	return l2MsgList, nil
}

// GetLayer2LatestWatchedHeight returns the latest height stored in the table
func (m *L2Message) GetLayer2LatestWatchedHeight() (int64, error) {
	// @note It's not exact, since some blocks may contain no messages.
	// But it is only called at startup, so some redundancy is acceptable.
	result := m.db.Model(&L2Message{}).Select("COALESCE(MAX(height), -1)").Row()
	if result.Err() != nil {
		return -1, result.Err()
	}

	var maxNumber int64
	if err := result.Scan(&maxNumber); err != nil {
		return 0, err
	}
	return maxNumber, nil
}

// GetL2MessageByNonce fetches a message by nonce
// for unit test
func (m *L2Message) GetL2MessageByNonce(nonce uint64) (*L2Message, error) {
	var msg L2Message
	err := m.db.Where("nonce", nonce).First(&msg).Error
	if err != nil {
		return nil, err
	}
	return &msg, nil
}

// SaveL2Messages batch-saves a list of layer2 messages
func (m *L2Message) SaveL2Messages(ctx context.Context, messages []L2Message) error {
	if len(messages) == 0 {
		return nil
	}

	err := m.db.WithContext(ctx).Create(&messages).Error
	if err != nil {
		nonces := make([]uint64, 0, len(messages))
		heights := make([]uint64, 0, len(messages))
		for _, msg := range messages {
			nonces = append(nonces, msg.Nonce)
			heights = append(heights, msg.Height)
		}
		log.Error("failed to insert layer2Messages", "nonces", nonces, "heights", heights, "err", err)
	}
	return err
}

// UpdateLayer2Status updates message status, given message hash
func (m *L2Message) UpdateLayer2Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
	err := m.db.Model(&L2Message{}).WithContext(ctx).Where("msg_hash", msgHash).Update("status", int(status)).Error
	if err != nil {
		return err
	}
	return nil
}

// UpdateLayer2StatusAndLayer1Hash updates message status and layer1 transaction hash, given message hash
func (m *L2Message) UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer1Hash string) error {
	updateFields := map[string]interface{}{
		"status":      int(status),
		"layer1_hash": layer1Hash,
	}
	err := m.db.Model(&L2Message{}).WithContext(ctx).Where("msg_hash", msgHash).Updates(updateFields).Error
	if err != nil {
		return err
	}
	return nil
}
@@ -1,61 +0,0 @@
package migrate

import (
	"database/sql"
	"embed"
	"os"
	"strconv"

	"github.com/pressly/goose/v3"
)

//go:embed migrations/*.sql
var embedMigrations embed.FS

// MigrationsDir migration dir
const MigrationsDir string = "migrations"

func init() {
	goose.SetBaseFS(embedMigrations)
	goose.SetSequential(true)
	goose.SetTableName("scroll_migrations")

	verbose, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))
	goose.SetVerbose(verbose)
}

// Migrate migrates the db
func Migrate(db *sql.DB) error {
	return goose.Up(db, MigrationsDir, goose.WithAllowMissing())
}

// Rollback rolls back to the given version
func Rollback(db *sql.DB, version *int64) error {
	if version != nil {
		return goose.DownTo(db, MigrationsDir, *version)
	}
	return goose.Down(db, MigrationsDir)
}

// ResetDB cleans and migrates the db.
func ResetDB(db *sql.DB) error {
	if err := Rollback(db, new(int64)); err != nil {
		return err
	}
	return Migrate(db)
}

// Current gets the current version
func Current(db *sql.DB) (int64, error) {
	return goose.GetDBVersion(db)
}

// Status reports whether the migration state is normal
func Status(db *sql.DB) error {
	return goose.Version(db, MigrationsDir)
}

// Create creates a new migration of the given type
func Create(db *sql.DB, name, migrationType string) error {
	return goose.Create(db, MigrationsDir, name, migrationType)
}
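The deleted package above is a thin wrapper over pressly/goose. A hypothetical usage sketch; the DSN is a placeholder and the import path simply follows where the wrapper lives in this tree:

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"

	"scroll-tech/bridge/internal/orm/migrate"
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Applies all pending embedded migrations in order.
	if err := migrate.Migrate(db); err != nil {
		log.Fatal(err)
	}
	// Reports the current schema version tracked in scroll_migrations.
	v, err := migrate.Current(db)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("schema version:", v)
}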
@@ -1,86 +0,0 @@
package migrate

import (
	"testing"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
	"github.com/stretchr/testify/assert"

	"scroll-tech/common/docker"

	"scroll-tech/database"
)

var (
	base *docker.App
	pgDB *sqlx.DB
)

func initEnv(t *testing.T) error {
	// Start db container.
	base.RunDBImage(t)

	// Create db orm handler.
	factory, err := database.NewOrmFactory(base.DBConfig)
	if err != nil {
		return err
	}
	pgDB = factory.GetDB()
	return nil
}

func TestMigrate(t *testing.T) {
	base = docker.NewDockerApp()
	if err := initEnv(t); err != nil {
		t.Fatal(err)
	}

	t.Run("testCurrent", testCurrent)
	t.Run("testStatus", testStatus)
	t.Run("testResetDB", testResetDB)
	t.Run("testMigrate", testMigrate)
	t.Run("testRollback", testRollback)

	t.Cleanup(func() {
		base.Free()
	})
}

func testCurrent(t *testing.T) {
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, 0, int(cur))
}

func testStatus(t *testing.T) {
	status := Status(pgDB.DB)
	assert.NoError(t, status)
}

func testResetDB(t *testing.T) {
	assert.NoError(t, ResetDB(pgDB.DB))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	// total number of tables.
	assert.Equal(t, 6, int(cur))
}

func testMigrate(t *testing.T) {
	assert.NoError(t, Migrate(pgDB.DB))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, cur > 0)
}

func testRollback(t *testing.T) {
	version, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, version > 0)

	assert.NoError(t, Rollback(pgDB.DB, nil))

	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, cur+1 == version)
}
@@ -1,37 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table l2_message
(
    nonce       BIGINT  NOT NULL,
    msg_hash    VARCHAR NOT NULL,
    height      BIGINT  NOT NULL,
    sender      VARCHAR NOT NULL,
    target      VARCHAR NOT NULL,
    value       VARCHAR NOT NULL,
    calldata    TEXT    NOT NULL,
    layer2_hash VARCHAR NOT NULL,
    layer1_hash VARCHAR DEFAULT NULL,
    proof       TEXT    DEFAULT NULL,
    status      INTEGER DEFAULT 1,
    created_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l2_message_hash_uindex
    on l2_message (msg_hash);

create unique index l2_message_nonce_uindex
    on l2_message (nonce);

create index l2_message_height_index
    on l2_message (height);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l2_message;
-- +goose StatementEnd
@@ -13,9 +13,9 @@ import (
	"scroll-tech/common/docker"
	"scroll-tech/common/types"

	"scroll-tech/database/migrate"

	"scroll-tech/bridge/internal/config"
	"scroll-tech/bridge/internal/orm/migrate"
	bridgeTypes "scroll-tech/bridge/internal/types"
	"scroll-tech/bridge/internal/utils"
)

@@ -27,10 +27,10 @@ var (
	chunkOrm *Chunk
	batchOrm *Batch

	wrappedBlock1 *bridgeTypes.WrappedBlock
	wrappedBlock2 *bridgeTypes.WrappedBlock
	chunk1        *bridgeTypes.Chunk
	chunk2        *bridgeTypes.Chunk
	wrappedBlock1 *types.WrappedBlock
	wrappedBlock2 *types.WrappedBlock
	chunk1        *types.Chunk
	chunk2        *types.Chunk
	chunkHash1    common.Hash
	chunkHash2    common.Hash
)
@@ -64,28 +64,22 @@ func setupEnv(t *testing.T) {
	l2BlockOrm = NewL2Block(db)

	templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
	if err != nil {
		t.Fatalf("failed to read file: %v", err)
	}
	wrappedBlock1 = &bridgeTypes.WrappedBlock{}
	if err = json.Unmarshal(templateBlockTrace, wrappedBlock1); err != nil {
		t.Fatalf("failed to unmarshal block trace: %v", err)
	}
	assert.NoError(t, err)
	wrappedBlock1 = &types.WrappedBlock{}
	err = json.Unmarshal(templateBlockTrace, wrappedBlock1)
	assert.NoError(t, err)

	templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json")
	if err != nil {
		t.Fatalf("failed to read file: %v", err)
	}
	wrappedBlock2 = &bridgeTypes.WrappedBlock{}
	if err = json.Unmarshal(templateBlockTrace, wrappedBlock2); err != nil {
		t.Fatalf("failed to unmarshal block trace: %v", err)
	}
	assert.NoError(t, err)
	wrappedBlock2 = &types.WrappedBlock{}
	err = json.Unmarshal(templateBlockTrace, wrappedBlock2)
	assert.NoError(t, err)

	chunk1 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1}}
	chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}}
	chunkHash1, err = chunk1.Hash(0)
	assert.NoError(t, err)

	chunk2 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock2}}
	chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}}
	chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
	assert.NoError(t, err)
}
@@ -102,7 +96,7 @@ func TestL2BlockOrm(t *testing.T) {
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))

	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
	assert.NoError(t, err)

	height, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
@@ -135,9 +129,6 @@ func TestChunkOrm(t *testing.T) {
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))

	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
	assert.NoError(t, err)

	dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
	assert.NoError(t, err)
	assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
@@ -177,35 +168,24 @@ func TestBatchOrm(t *testing.T) {
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))

	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
	assert.NoError(t, err)

	dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
	assert.NoError(t, err)
	assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())

	dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
	assert.NoError(t, err)
	assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())

	batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
	batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
	assert.NoError(t, err)
	hash1 := batch1.Hash

	batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
	assert.NoError(t, err)
	batchHeader1, err := bridgeTypes.DecodeBatchHeader(batch1.BatchHeader)
	batchHeader1, err := types.DecodeBatchHeader(batch1.BatchHeader)
	assert.NoError(t, err)
	batchHash1 := batchHeader1.Hash().Hex()
	assert.Equal(t, hash1, batchHash1)

	batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
	batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
	assert.NoError(t, err)
	hash2 := batch2.Hash

	batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
	assert.NoError(t, err)
	batchHeader2, err := bridgeTypes.DecodeBatchHeader(batch2.BatchHeader)
	batchHeader2, err := types.DecodeBatchHeader(batch2.BatchHeader)
	assert.NoError(t, err)
	batchHash2 := batchHeader2.Hash().Hex()
	assert.Equal(t, hash2, batchHash2)

@@ -1,136 +0,0 @@
package types

import (
	"encoding/binary"
	"errors"
	"math"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/core/types"
)

const nonZeroByteGas uint64 = 16
const zeroByteGas uint64 = 4

// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
	Header *types.Header `json:"header"`
	// Transactions is only used to recover types.Transactions; the From field of types.TransactionData is otherwise missing.
	Transactions     []*types.TransactionData `json:"transactions"`
	WithdrawTrieRoot common.Hash              `json:"withdraw_trie_root,omitempty"`
}

// NumL1Messages returns the number of L1 messages in this block.
// This number is the sum of included and skipped L1 messages.
func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
	var lastQueueIndex *uint64
	for _, txData := range w.Transactions {
		if txData.Type == types.L1MessageTxType {
			lastQueueIndex = &txData.Nonce
		}
	}
	if lastQueueIndex == nil {
		return 0
	}
	// note: last queue index included before this block is totalL1MessagePoppedBefore - 1
	// TODO: cache results
	return *lastQueueIndex - totalL1MessagePoppedBefore + 1
}

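A worked example of the queue-index arithmetic in NumL1Messages, with invented numbers: if 5 messages were popped before this block, the last consumed queue index was 4; if the block's last L1 message has queue index 9, the block covers indices 5 through 9, i.e. 9 - 5 + 1 = 5 messages (included or skipped).

package main

import "fmt"

// numL1Messages reproduces the final expression from the function above.
func numL1Messages(lastQueueIndex, totalPoppedBefore uint64) uint64 {
	return lastQueueIndex - totalPoppedBefore + 1
}

func main() {
	fmt.Println(numL1Messages(9, 5)) // prints 5
}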
// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
	bytes := make([]byte, 60)

	if !w.Header.Number.IsUint64() {
		return nil, errors.New("block number is not uint64")
	}
	if len(w.Transactions) > math.MaxUint16 {
		return nil, errors.New("number of transactions exceeds max uint16")
	}

	numL1Messages := w.NumL1Messages(totalL1MessagePoppedBefore)
	if numL1Messages > math.MaxUint16 {
		return nil, errors.New("number of L1 messages exceeds max uint16")
	}

	binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
	binary.BigEndian.PutUint64(bytes[8:], w.Header.Time)
	// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
	binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
	binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions)))
	binary.BigEndian.PutUint16(bytes[58:], uint16(numL1Messages))

	return bytes, nil
}

// EstimateL1CommitCalldataSize calculates the calldata size in l1 commit approximately.
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
// This needs to be adjusted in the future.
func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
	var size uint64
	for _, txData := range w.Transactions {
		if txData.Type == types.L1MessageTxType {
			continue
		}
		size += uint64(len(txData.Data))
	}
	return size
}

// EstimateL1CommitGas calculates the calldata gas in l1 commit approximately.
// TODO: This will need to be adjusted.
// The part added here is only the calldata cost,
// but we have execution cost for verifying blocks / chunks / batches and storing the batch hash.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
	var total uint64
	for _, txData := range w.Transactions {
		if txData.Type == types.L1MessageTxType {
			continue
		}
		data, _ := hexutil.Decode(txData.Data)
		tx := types.NewTx(&types.LegacyTx{
			Nonce:    txData.Nonce,
			To:       txData.To,
			Value:    txData.Value.ToInt(),
			Gas:      txData.Gas,
			GasPrice: txData.GasPrice.ToInt(),
			Data:     data,
			V:        txData.V.ToInt(),
			R:        txData.R.ToInt(),
			S:        txData.S.ToInt(),
		})
		rlpTxData, _ := tx.MarshalBinary()

		for _, b := range rlpTxData {
			if b == 0 {
				total += zeroByteGas
			} else {
				total += nonZeroByteGas
			}
		}

		var txLen [4]byte
		binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))

		for _, b := range txLen {
			if b == 0 {
				total += zeroByteGas
			} else {
				total += nonZeroByteGas
			}
		}
	}
	return total
}

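The zeroByteGas/nonZeroByteGas constants above match Ethereum's calldata pricing since EIP-2028: 4 gas per zero byte and 16 gas per non-zero byte. A tiny worked example (payload invented):

package main

import "fmt"

// calldataGas sums per-byte calldata costs exactly as the loop above does.
func calldataGas(data []byte) uint64 {
	var total uint64
	for _, b := range data {
		if b == 0 {
			total += 4
		} else {
			total += 16
		}
	}
	return total
}

func main() {
	payload := []byte{0x00, 0x00, 0xab, 0xcd} // 2 zero + 2 non-zero bytes
	fmt.Println(calldataGas(payload))         // 2*4 + 2*16 = 40
}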
// L2TxsNum calculates the number of l2 txs.
func (w *WrappedBlock) L2TxsNum() uint64 {
	var count uint64
	for _, txData := range w.Transactions {
		if txData.Type != types.L1MessageTxType {
			count++
		}
	}
	return count
}
@@ -11,7 +11,7 @@ import (
// InitDB initializes the db handler
func InitDB(config *config.DBConfig) (*gorm.DB, error) {
	db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Info),
		Logger: logger.Default.LogMode(logger.Warn),
	})
	if err != nil {
		return nil, err

@@ -13,9 +13,10 @@ import (

	"scroll-tech/common/docker"

	"scroll-tech/database/migrate"

	bcmd "scroll-tech/bridge/cmd"
	"scroll-tech/bridge/internal/config"
	"scroll-tech/bridge/internal/orm/migrate"
	"scroll-tech/bridge/internal/utils"
	"scroll-tech/bridge/mock_bridge"
)

@@ -14,7 +14,6 @@ import (
	"scroll-tech/bridge/internal/controller/relayer"
	"scroll-tech/bridge/internal/controller/watcher"
	"scroll-tech/bridge/internal/orm"
	bridgeTypes "scroll-tech/bridge/internal/types"
	"scroll-tech/bridge/internal/utils"
)

@@ -72,8 +71,8 @@ func testImportL2GasPrice(t *testing.T) {
	assert.NoError(t, err)

	// add fake chunk
	chunk := &bridgeTypes.Chunk{
		Blocks: []*bridgeTypes.WrappedBlock{
	chunk := &types.Chunk{
		Blocks: []*types.WrappedBlock{
			{
				Header: &gethTypes.Header{
					Number: big.NewInt(1),
@@ -90,7 +89,7 @@ func testImportL2GasPrice(t *testing.T) {
	assert.NoError(t, err)

	batchOrm := orm.NewBatch(db)
	_, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*bridgeTypes.Chunk{chunk})
	_, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*types.Chunk{chunk})
	assert.NoError(t, err)

	// check db status

@@ -17,7 +17,6 @@ import (
	"scroll-tech/bridge/internal/controller/relayer"
	"scroll-tech/bridge/internal/controller/watcher"
	"scroll-tech/bridge/internal/orm"
	bridgeTypes "scroll-tech/bridge/internal/types"
	"scroll-tech/bridge/internal/utils"
)

@@ -37,7 +36,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)

	// add some blocks to db
	var wrappedBlocks []*bridgeTypes.WrappedBlock
	var wrappedBlocks []*types.WrappedBlock
	for i := 0; i < 10; i++ {
		header := gethTypes.Header{
			Number: big.NewInt(int64(i)),
@@ -45,7 +44,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
			Difficulty: big.NewInt(0),
			BaseFee:    big.NewInt(0),
		}
		wrappedBlocks = append(wrappedBlocks, &bridgeTypes.WrappedBlock{
		wrappedBlocks = append(wrappedBlocks, &types.WrappedBlock{
			Header:           &header,
			Transactions:     nil,
			WithdrawTrieRoot: common.Hash{},

@@ -1,236 +0,0 @@
package types

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/crypto"

	abi "scroll-tech/bridge/abi"
)

// PublicInputHashConfig is the configuration of how to compute the public input hash.
type PublicInputHashConfig struct {
	MaxTxNum      int         `json:"max_tx_num"`
	PaddingTxHash common.Hash `json:"padding_tx_hash"`
}

const defaultMaxTxNum = 44

var defaultPaddingTxHash = [32]byte{}

// BatchData contains info of batch to be committed.
type BatchData struct {
	Batch        abi.IScrollChainBatch
	TxHashes     []common.Hash
	TotalTxNum   uint64
	TotalL1TxNum uint64
	TotalL2Gas   uint64

	// cache for the BatchHash
	hash *common.Hash
	// The config to compute the public input hash, or the block hash.
	// If it is nil, the hash calculation will use `defaultMaxTxNum` and `defaultPaddingTxHash`.
	piCfg *PublicInputHashConfig
}

// Timestamp returns the timestamp of the first block in the BatchData.
func (b *BatchData) Timestamp() uint64 {
	if len(b.Batch.Blocks) == 0 {
		return 0
	}
	return b.Batch.Blocks[0].Timestamp
}

// Hash calculates the hash of this batch.
func (b *BatchData) Hash() *common.Hash {
	if b.hash != nil {
		return b.hash
	}

	buf := make([]byte, 8)
	hasher := crypto.NewKeccakState()

	// 1. hash PrevStateRoot, NewStateRoot, WithdrawTrieRoot
	// @todo: panic on error here.
	_, _ = hasher.Write(b.Batch.PrevStateRoot[:])
	_, _ = hasher.Write(b.Batch.NewStateRoot[:])
	_, _ = hasher.Write(b.Batch.WithdrawTrieRoot[:])

	// 2. hash all block contexts
	for _, block := range b.Batch.Blocks {
		// write BlockHash & ParentHash
		_, _ = hasher.Write(block.BlockHash[:])
		_, _ = hasher.Write(block.ParentHash[:])
		// write BlockNumber
		binary.BigEndian.PutUint64(buf, block.BlockNumber)
		_, _ = hasher.Write(buf)
		// write Timestamp
		binary.BigEndian.PutUint64(buf, block.Timestamp)
		_, _ = hasher.Write(buf)
		// write BaseFee
		var baseFee [32]byte
		if block.BaseFee != nil {
			baseFee = newByte32FromBytes(block.BaseFee.Bytes())
		}
		_, _ = hasher.Write(baseFee[:])
		// write GasLimit
		binary.BigEndian.PutUint64(buf, block.GasLimit)
		_, _ = hasher.Write(buf)
		// write NumTransactions
		binary.BigEndian.PutUint16(buf[:2], block.NumTransactions)
		_, _ = hasher.Write(buf[:2])
		// write NumL1Messages
		binary.BigEndian.PutUint16(buf[:2], block.NumL1Messages)
		_, _ = hasher.Write(buf[:2])
	}

	// 3. add all tx hashes
	for _, txHash := range b.TxHashes {
		_, _ = hasher.Write(txHash[:])
	}

	// 4. append empty tx hash up to MaxTxNum
	maxTxNum := defaultMaxTxNum
	paddingTxHash := common.Hash(defaultPaddingTxHash)
	if b.piCfg != nil {
		maxTxNum = b.piCfg.MaxTxNum
		paddingTxHash = b.piCfg.PaddingTxHash
	}
	for i := len(b.TxHashes); i < maxTxNum; i++ {
		_, _ = hasher.Write(paddingTxHash[:])
	}

	b.hash = new(common.Hash)
	_, _ = hasher.Read(b.hash[:])

	return b.hash
}

// NewBatchData creates a BatchData given the parent batch information and the traces of the blocks
// included in this batch
func NewBatchData(parentBatch *BlockBatch, blocks []*WrappedBlock, piCfg *PublicInputHashConfig) *BatchData {
	batchData := new(BatchData)
	batch := &batchData.Batch

	// set BatchIndex, ParentBatchHash
	batch.BatchIndex = parentBatch.Index + 1
	batch.ParentBatchHash = common.HexToHash(parentBatch.Hash)
	batch.Blocks = make([]abi.IScrollChainBlockContext, len(blocks))

	var batchTxDataBuf bytes.Buffer
	batchTxDataWriter := bufio.NewWriter(&batchTxDataBuf)

	for i, block := range blocks {
		batchData.TotalTxNum += uint64(len(block.Transactions))
		batchData.TotalL2Gas += block.Header.GasUsed

		// set baseFee to 0 when it's nil in the block header
		baseFee := block.Header.BaseFee
		if baseFee == nil {
			baseFee = big.NewInt(0)
		}

		batch.Blocks[i] = abi.IScrollChainBlockContext{
			BlockHash:       block.Header.Hash(),
			ParentHash:      block.Header.ParentHash,
			BlockNumber:     block.Header.Number.Uint64(),
			Timestamp:       block.Header.Time,
			BaseFee:         baseFee,
			GasLimit:        block.Header.GasLimit,
			NumTransactions: uint16(len(block.Transactions)),
			NumL1Messages:   0, // TODO: currently use 0, will re-enable after we use l2geth to include L1 messages
		}

		// fill in RLP-encoded transactions
		for _, txData := range block.Transactions {
			data, _ := hexutil.Decode(txData.Data)
			// right now we only support legacy tx
			tx := types.NewTx(&types.LegacyTx{
				Nonce:    txData.Nonce,
				To:       txData.To,
				Value:    txData.Value.ToInt(),
				Gas:      txData.Gas,
				GasPrice: txData.GasPrice.ToInt(),
				Data:     data,
				V:        txData.V.ToInt(),
				R:        txData.R.ToInt(),
				S:        txData.S.ToInt(),
			})
			rlpTxData, _ := tx.MarshalBinary()
			var txLen [4]byte
			binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
			_, _ = batchTxDataWriter.Write(txLen[:])
			_, _ = batchTxDataWriter.Write(rlpTxData)
			batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
		}

		if i == 0 {
			batch.PrevStateRoot = common.HexToHash(parentBatch.StateRoot)
		}

		// set NewStateRoot & WithdrawTrieRoot from the last block
		if i == len(blocks)-1 {
			batch.NewStateRoot = block.Header.Root
			batch.WithdrawTrieRoot = block.WithdrawTrieRoot
		}
	}

	if err := batchTxDataWriter.Flush(); err != nil {
		panic("Buffered I/O flush failed")
	}

	batch.L2Transactions = batchTxDataBuf.Bytes()
	batchData.piCfg = piCfg

	return batchData
}

// NewGenesisBatchData generates the batch that contains the genesis block.
func NewGenesisBatchData(genesisBlockTrace *WrappedBlock) *BatchData {
	header := genesisBlockTrace.Header
	if header.Number.Uint64() != 0 {
		panic("invalid genesis block trace: block number is not 0")
	}

	batchData := new(BatchData)
	batch := &batchData.Batch

	// fill in batch information
	batch.BatchIndex = 0
	batch.Blocks = make([]abi.IScrollChainBlockContext, 1)
	batch.NewStateRoot = header.Root
	// PrevStateRoot, WithdrawTrieRoot, ParentBatchHash should all be 0
	// L2Transactions should be empty

	// fill in block context
	batch.Blocks[0] = abi.IScrollChainBlockContext{
		BlockHash:       header.Hash(),
		ParentHash:      header.ParentHash,
		BlockNumber:     header.Number.Uint64(),
		Timestamp:       header.Time,
		BaseFee:         header.BaseFee,
		GasLimit:        header.GasLimit,
		NumTransactions: 0,
		NumL1Messages:   0,
	}

	return batchData
}

// newByte32FromBytes converts the bytes in big-endian encoding to 32 bytes in big-endian encoding
func newByte32FromBytes(b []byte) [32]byte {
	var byte32 [32]byte

	if len(b) > 32 {
		b = b[len(b)-32:]
	}

	copy(byte32[32-len(b):], b)
	return byte32
}

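The padding helper above left-pads short inputs with zeros and keeps only the trailing (least-significant) 32 bytes of long ones. A runnable check with an invented input:

package main

import "fmt"

// newByte32FromBytes is copied verbatim from the deleted file above.
func newByte32FromBytes(b []byte) [32]byte {
	var byte32 [32]byte
	if len(b) > 32 {
		b = b[len(b)-32:]
	}
	copy(byte32[32-len(b):], b)
	return byte32
}

func main() {
	out := newByte32FromBytes([]byte{0x12, 0x34})
	fmt.Printf("%x\n", out) // 0000...1234: the value lands in the last two bytes
}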
@@ -36,7 +36,7 @@ func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64
	// the next queue index that we need to process
	nextIndex := totalL1MessagePoppedBefore

	for _, chunk := range chunks {
	for chunkID, chunk := range chunks {
		// build data hash
		totalL1MessagePoppedBeforeChunk := nextIndex
		chunkHash, err := chunk.Hash(totalL1MessagePoppedBeforeChunk)
@@ -46,7 +46,7 @@ func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64
		dataBytes = append(dataBytes, chunkHash.Bytes()...)

		// build skip bitmap
		for _, block := range chunk.Blocks {
		for blockID, block := range chunk.Blocks {
			for _, tx := range block.Transactions {
				if tx.Type != types.L1MessageTxType {
					continue
@@ -54,7 +54,7 @@ func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64
				currentIndex := tx.Nonce

				if currentIndex < nextIndex {
					return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d", nextIndex, currentIndex)
					return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d. Batch index: %d, chunk index in batch: %d, block index in chunk: %d, block hash: %v, transaction hash: %v", nextIndex, currentIndex, batchIndex, chunkID, blockID, block.Header.Hash(), tx.TxHash)
				}

				// mark skipped messages
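For context on the skip bitmap built in the hunk above: every queue index between nextIndex and the next included L1 message is marked skipped. A minimal sketch of the idea (illustrative names only; it assumes one bit per queue index starting at totalL1MessagePoppedBefore and LSB-first packing, which may differ from the real encoding):

// buildSkipBitmap marks every queue index that was jumped over as skipped.
func buildSkipBitmap(totalL1MessagePoppedBefore uint64, includedQueueIndices []uint64) []byte {
	next := totalL1MessagePoppedBefore
	var bits []bool
	for _, q := range includedQueueIndices {
		// every index between next and q was skipped
		for ; next < q; next++ {
			bits = append(bits, true)
		}
		bits = append(bits, false) // q itself is included
		next = q + 1
	}
	// pack bits into bytes, LSB-first within each byte (packing order is an assumption)
	bitmap := make([]byte, (len(bits)+7)/8)
	for i, b := range bits {
		if b {
			bitmap[i/8] |= 1 << (i % 8)
		}
	}
	return bitmap
}

// e.g. buildSkipBitmap(4, []uint64{5, 7}) sets bits for indices 4 and 6,
// since those messages were skipped before including 5 and 7.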
@@ -11,7 +11,7 @@ import (

func TestNewBatchHeader(t *testing.T) {
	// Without L1 Msg
	templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)

	wrappedBlock := &WrappedBlock{}
@@ -36,7 +36,7 @@ func TestNewBatchHeader(t *testing.T) {
	assert.Equal(t, 0, len(batchHeader.skippedL1MessageBitmap))

	// 1 L1 Msg in 1 bitmap
	templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
	templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
	assert.NoError(t, err)

	wrappedBlock2 := &WrappedBlock{}
@@ -54,7 +54,7 @@ func TestNewBatchHeader(t *testing.T) {
	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))

	// many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs
	templateBlockTrace3, err := os.ReadFile("../../../common/testdata/blockTrace_05.json")
	templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_05.json")
	assert.NoError(t, err)

	wrappedBlock3 := &WrappedBlock{}
@@ -87,7 +87,7 @@ func TestNewBatchHeader(t *testing.T) {
	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))

	// many sparse L1 Msgs in 1 bitmap
	templateBlockTrace4, err := os.ReadFile("../../../common/testdata/blockTrace_06.json")
	templateBlockTrace4, err := os.ReadFile("../testdata/blockTrace_06.json")
	assert.NoError(t, err)

	wrappedBlock4 := &WrappedBlock{}
@@ -106,7 +106,7 @@ func TestNewBatchHeader(t *testing.T) {
	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))

	// many L1 Msgs in each of 2 bitmaps
	templateBlockTrace5, err := os.ReadFile("../../../common/testdata/blockTrace_07.json")
	templateBlockTrace5, err := os.ReadFile("../testdata/blockTrace_07.json")
	assert.NoError(t, err)

	wrappedBlock5 := &WrappedBlock{}
@@ -127,7 +127,7 @@ func TestNewBatchHeader(t *testing.T) {

func TestBatchHeaderEncode(t *testing.T) {
	// Without L1 Msg
	templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)

	wrappedBlock := &WrappedBlock{}
@@ -154,7 +154,7 @@ func TestBatchHeaderEncode(t *testing.T) {
	assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes))

	// With L1 Msg
	templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
	templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
	assert.NoError(t, err)

	wrappedBlock2 := &WrappedBlock{}
@@ -174,7 +174,7 @@ func TestBatchHeaderEncode(t *testing.T) {

func TestBatchHeaderHash(t *testing.T) {
	// Without L1 Msg
	templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)

	wrappedBlock := &WrappedBlock{}
@@ -199,7 +199,7 @@ func TestBatchHeaderHash(t *testing.T) {
	hash := batchHeader.Hash()
	assert.Equal(t, "d69da4357da0073f4093c76e49f077e21bb52f48f57ee3e1fbd9c38a2881af81", common.Bytes2Hex(hash.Bytes()))

	templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json")
	templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
	assert.NoError(t, err)

	wrappedBlock2 := &WrappedBlock{}
@@ -216,7 +216,7 @@ func TestBatchHeaderHash(t *testing.T) {
	assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes()))

	// With L1 Msg
	templateBlockTrace3, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
	templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_04.json")
	assert.NoError(t, err)

	wrappedBlock3 := &WrappedBlock{}
@@ -1,143 +0,0 @@
package types

import (
	"encoding/json"
	"math/big"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/scroll-tech/go-ethereum/common"
	geth_types "github.com/scroll-tech/go-ethereum/core/types"

	abi "scroll-tech/bridge/abi"
)

func TestBatchHash(t *testing.T) {
	txBytes := common.Hex2Bytes("02f8710582fd14808506e38dccc9825208944d496ccc28058b1d74b7a19541663e21154f9c848801561db11e24a43380c080a0d890606d7a35b2ab0f9b866d62c092d5b163f3e6a55537ae1485aac08c3f8ff7a023997be2d32f53e146b160fff0ba81e81dbb4491c865ab174d15c5b3d28c41ae")
	tx := new(geth_types.Transaction)
	if err := tx.UnmarshalBinary(txBytes); err != nil {
		t.Fatalf("invalid tx hex string: %s", err)
	}

	batchData := new(BatchData)
	batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
	batchData.piCfg = &PublicInputHashConfig{
		MaxTxNum:      4,
		PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
	}

	batch := &batchData.Batch
	batch.PrevStateRoot = common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000cafe")

	block := abi.IScrollChainBlockContext{
		BlockNumber:     51966,
		Timestamp:       123456789,
		BaseFee:         new(big.Int).SetUint64(0),
		GasLimit:        10000000000000000,
		NumTransactions: 1,
		NumL1Messages:   0,
	}
	batch.Blocks = append(batch.Blocks, block)

	hash := batchData.Hash()
	assert.Equal(t, *hash, common.HexToHash("0xa9f2ca3175794f91226a410ba1e60fff07a405c957562675c4149b77e659d805"))

	// use a different tx hash
	txBytes = common.Hex2Bytes("f8628001830f424094000000000000000000000000000000000000bbbb8080820a97a064e07cd8f939e2117724bdcbadc80dda421381cbc2a1f4e0d093d9cc5c5cf68ea03e264227f80852d88743cd9e43998f2746b619180366a87e4531debf9c3fa5dc")
	tx = new(geth_types.Transaction)
	if err := tx.UnmarshalBinary(txBytes); err != nil {
		t.Fatalf("invalid tx hex string: %s", err)
	}
	batchData.TxHashes[0] = tx.Hash()

	batchData.hash = nil // clear the cache
	assert.Equal(t, *batchData.Hash(), common.HexToHash("0x398cb22bbfa1665c1b342b813267538a4c933d7f92d8bd9184aba0dd1122987b"))
}

func TestNewGenesisBatch(t *testing.T) {
	genesisBlock := &geth_types.Header{
		UncleHash:   common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		Root:        common.HexToHash("0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5"),
		TxHash:      common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
		ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
		Difficulty:  big.NewInt(1),
		Number:      big.NewInt(0),
		GasLimit:    940000000,
		GasUsed:     0,
		Time:        1639724192,
		Extra:       common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000214f8d488aa9ebf83e30bad45fb8f9c8ee2509f5511caff794753d07e9dfb218cfc233bb62d2c57022783094e1a7edb6f069f8424bb68496a0926b130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
		BaseFee:     big.NewInt(1000000000),
	}
	assert.Equal(
		t,
		genesisBlock.Hash().Hex(),
		"0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
		"wrong genesis block header",
	)

	blockTrace := &WrappedBlock{genesisBlock, nil, common.Hash{}}
	batchData := NewGenesisBatchData(blockTrace)
	t.Log(batchData.Batch.Blocks[0])
	batchData.piCfg = &PublicInputHashConfig{
		MaxTxNum:      25,
		PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
	}
	assert.Equal(
		t,
		batchData.Hash().Hex(),
		"0x65cf210e30f75cf8fd198df124255f73bc08d6324759e828a784fa938e7ac43d",
		"wrong genesis batch hash",
	)
}

func TestNewBatchData(t *testing.T) {
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)

	wrappedBlock := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))

	parentBatch := &BlockBatch{
		Index:     1,
		Hash:      "0x0000000000000000000000000000000000000000",
		StateRoot: "0x0000000000000000000000000000000000000000",
	}
	batchData1 := NewBatchData(parentBatch, []*WrappedBlock{wrappedBlock}, nil)
	assert.NotNil(t, batchData1)
	assert.NotNil(t, batchData1.Batch)
	assert.Equal(t, "0xac4487c0d8f429dafda3c68cbb8983ac08af83c03c83c365d7df02864f80af37", batchData1.Hash().Hex())

	templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
	assert.NoError(t, err)

	wrappedBlock2 := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock2))

	parentBatch2 := &BlockBatch{
		Index:     batchData1.Batch.BatchIndex,
		Hash:      batchData1.Hash().Hex(),
		StateRoot: batchData1.Batch.NewStateRoot.Hex(),
	}
	batchData2 := NewBatchData(parentBatch2, []*WrappedBlock{wrappedBlock2}, nil)
	assert.NotNil(t, batchData2)
	assert.NotNil(t, batchData2.Batch)
	assert.Equal(t, "0x8f1447573740b3e75b979879866b8ad02eecf88e1946275eb8cf14ab95876efc", batchData2.Hash().Hex())
}

func TestBatchDataTimestamp(t *testing.T) {
	// Test case 1: when the batch data contains no blocks.
	assert.Equal(t, uint64(0), (&BatchData{}).Timestamp())

	// Test case 2: when the batch data contains blocks.
	batchData := &BatchData{
		Batch: abi.IScrollChainBatch{
			Blocks: []abi.IScrollChainBlockContext{
				{Timestamp: 123456789},
				{Timestamp: 234567891},
			},
		},
	}
	assert.Equal(t, uint64(123456789), batchData.Timestamp())
}
@@ -6,9 +6,13 @@ import (
	"math"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/core/types"
)

const nonZeroByteGas uint64 = 16
const zeroByteGas uint64 = 4

// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
	Header *types.Header `json:"header"`
@@ -22,7 +26,7 @@ type WrappedBlock struct {
func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
	var lastQueueIndex *uint64
	for _, txData := range w.Transactions {
		if txData.Type == 0x7E {
		if txData.Type == types.L1MessageTxType {
			lastQueueIndex = &txData.Nonce
		}
	}
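The hunk above only shows the scan for the last L1 message queue index; the counting step is truncated. A minimal sketch of the idea, assuming L1 message queue indices within a block are dense (an illustrative helper, not the verbatim continuation of the function):

// Illustrative only: given the last queue index seen in the block, the count
// of new L1 messages is the span of consumed indices.
func numL1Messages(lastQueueIndex *uint64, totalL1MessagePoppedBefore uint64) uint64 {
	if lastQueueIndex == nil {
		return 0 // block contains no L1 messages
	}
	// indices totalL1MessagePoppedBefore .. *lastQueueIndex are consumed
	return *lastQueueIndex - totalL1MessagePoppedBefore + 1
}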
@@ -59,3 +63,74 @@ func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error)

	return bytes, nil
}

// EstimateL1CommitCalldataSize approximates the calldata size of the l1 commit.
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
// This needs to be adjusted in the future.
func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
	var size uint64
	for _, txData := range w.Transactions {
		if txData.Type == types.L1MessageTxType {
			continue
		}
		size += uint64(len(txData.Data))
	}
	return size
}

// EstimateL1CommitGas approximates the calldata gas of the l1 commit.
// TODO: This will need to be adjusted.
// The part added here is only the calldata cost, but there is also execution
// cost for verifying blocks / chunks / batches and storing the batch hash.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
	var total uint64
	for _, txData := range w.Transactions {
		if txData.Type == types.L1MessageTxType {
			continue
		}
		data, _ := hexutil.Decode(txData.Data)
		tx := types.NewTx(&types.LegacyTx{
			Nonce:    txData.Nonce,
			To:       txData.To,
			Value:    txData.Value.ToInt(),
			Gas:      txData.Gas,
			GasPrice: txData.GasPrice.ToInt(),
			Data:     data,
			V:        txData.V.ToInt(),
			R:        txData.R.ToInt(),
			S:        txData.S.ToInt(),
		})
		rlpTxData, _ := tx.MarshalBinary()

		for _, b := range rlpTxData {
			if b == 0 {
				total += zeroByteGas
			} else {
				total += nonZeroByteGas
			}
		}

		var txLen [4]byte
		binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))

		for _, b := range txLen {
			if b == 0 {
				total += zeroByteGas
			} else {
				total += nonZeroByteGas
			}
		}
	}
	return total
}
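A quick worked application of the per-byte rule used above, matching the zeroByteGas/nonZeroByteGas constants declared at the top of this file (an illustrative helper, not part of the codebase):

// calldataGas applies the same per-byte costs as EstimateL1CommitGas:
// 4 gas for each zero byte, 16 gas for each non-zero byte.
func calldataGas(data []byte) uint64 {
	var total uint64
	for _, b := range data {
		if b == 0 {
			total += zeroByteGas // 4
		} else {
			total += nonZeroByteGas // 16
		}
	}
	return total
}

// e.g. calldataGas([]byte{0x00, 0x01, 0xff}) == 4 + 16 + 16 == 36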
// L2TxsNum calculates the number of l2 txs.
func (w *WrappedBlock) L2TxsNum() uint64 {
	var count uint64
	for _, txData := range w.Transactions {
		if txData.Type != types.L1MessageTxType {
			count++
		}
	}
	return count
}
@@ -32,7 +32,7 @@ func TestChunkEncode(t *testing.T) {
	assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte")

	// Test case 3: when the chunk contains one block.
	templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)

	wrappedBlock := &WrappedBlock{}
@@ -50,7 +50,7 @@ func TestChunkEncode(t *testing.T) {
	assert.Equal(t, "0100000000000000020000000063807b2a0000000000000000000000000000000000000000000000000000000000000000000355418d1e81840002000000000073f87180843b9aec2e8307a12094c0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca28a152d02c7e14af60000008083019ecea0ab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514a034cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b00000073f87101843b9aec2e8307a1209401bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed8a152d02c7e14af60000008083019ecea0f039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316a05a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1", hexString)

	// Test case 4: when the chunk contains one block with 1 L1MsgTx
	templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
	templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
	assert.NoError(t, err)

	wrappedBlock2 := &WrappedBlock{}
@@ -92,7 +92,7 @@ func TestChunkHash(t *testing.T) {
	assert.Contains(t, err.Error(), "number of blocks is 0")

	// Test case 2: successfully hashing a chunk on one block
	templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)
	wrappedBlock := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
@@ -106,7 +106,7 @@ func TestChunkHash(t *testing.T) {
	assert.Equal(t, "0x78c839dfc494396c16b40946f32b3f4c3e8c2d4bfd04aefcf235edec474482f8", hash.Hex())

	// Test case 3: successfully hashing a chunk on two blocks
	templateBlockTrace1, err := os.ReadFile("../../../common/testdata/blockTrace_03.json")
	templateBlockTrace1, err := os.ReadFile("../testdata/blockTrace_03.json")
	assert.NoError(t, err)
	wrappedBlock1 := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace1, wrappedBlock1))
@@ -121,7 +121,7 @@ func TestChunkHash(t *testing.T) {
	assert.Equal(t, "0xaa9e494f72bc6965857856f0fae6916f27b2a6591c714a573b2fab46df03b8ae", hash.Hex())

	// Test case 4: successfully hashing a chunk on two blocks each with L1 and L2 txs
	templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
	templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
	assert.NoError(t, err)
	wrappedBlock2 := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
@@ -4,9 +4,6 @@ package types
import (
	"database/sql"
	"fmt"
	"time"

	"scroll-tech/common/types/message"
)

// L1BlockStatus represents current l1 block processing status
@@ -159,15 +156,6 @@ type RollerStatus struct {
	Status RollerProveStatus `json:"status"`
}

// SessionInfo is assigned rollers info of a block batch (session)
type SessionInfo struct {
	ID             string                   `json:"id"`
	Rollers        map[string]*RollerStatus `json:"rollers"`
	StartTimestamp int64                    `json:"start_timestamp"`
	Attempts       uint8                    `json:"attempts,omitempty"`
	ProveType      message.ProveType        `json:"prove_type,omitempty"`
}

// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
type ProvingStatus int

@@ -230,44 +218,3 @@ const (
	// RollupFinalizeFailed : rollup finalize transaction is confirmed but failed
	RollupFinalizeFailed
)

// BlockBatch is the structure of a stored block_batch
type BlockBatch struct {
	Hash             string          `json:"hash" db:"hash"`
	Index            uint64          `json:"index" db:"index"`
	ParentHash       string          `json:"parent_hash" db:"parent_hash"`
	StartBlockNumber uint64          `json:"start_block_number" db:"start_block_number"`
	StartBlockHash   string          `json:"start_block_hash" db:"start_block_hash"`
	EndBlockNumber   uint64          `json:"end_block_number" db:"end_block_number"`
	EndBlockHash     string          `json:"end_block_hash" db:"end_block_hash"`
	StateRoot        string          `json:"state_root" db:"state_root"`
	TotalTxNum       uint64          `json:"total_tx_num" db:"total_tx_num"`
	TotalL1TxNum     uint64          `json:"total_l1_tx_num" db:"total_l1_tx_num"`
	TotalL2Gas       uint64          `json:"total_l2_gas" db:"total_l2_gas"`
	ProvingStatus    ProvingStatus   `json:"proving_status" db:"proving_status"`
	Proof            []byte          `json:"proof" db:"proof"`
	ProofTimeSec     uint64          `json:"proof_time_sec" db:"proof_time_sec"`
	RollupStatus     RollupStatus    `json:"rollup_status" db:"rollup_status"`
	OracleStatus     GasOracleStatus `json:"oracle_status" db:"oracle_status"`
	CommitTxHash     sql.NullString  `json:"commit_tx_hash" db:"commit_tx_hash"`
	FinalizeTxHash   sql.NullString  `json:"finalize_tx_hash" db:"finalize_tx_hash"`
	OracleTxHash     sql.NullString  `json:"oracle_tx_hash" db:"oracle_tx_hash"`
	CreatedAt        *time.Time      `json:"created_at" db:"created_at"`
	ProverAssignedAt *time.Time      `json:"prover_assigned_at" db:"prover_assigned_at"`
	ProvedAt         *time.Time      `json:"proved_at" db:"proved_at"`
	CommittedAt      *time.Time      `json:"committed_at" db:"committed_at"`
	FinalizedAt      *time.Time      `json:"finalized_at" db:"finalized_at"`
}

// AggTask is a wrapper type around db AggProveTask type.
type AggTask struct {
	ID              string        `json:"id" db:"id"`
	StartBatchIndex uint64        `json:"start_batch_index" db:"start_batch_index"`
	StartBatchHash  string        `json:"start_batch_hash" db:"start_batch_hash"`
	EndBatchIndex   uint64        `json:"end_batch_index" db:"end_batch_index"`
	EndBatchHash    string        `json:"end_batch_hash" db:"end_batch_hash"`
	ProvingStatus   ProvingStatus `json:"proving_status" db:"proving_status"`
	Proof           []byte        `json:"proof" db:"proof"`
	CreatedAt       *time.Time    `json:"created_at" db:"created_at"`
	UpdatedAt       *time.Time    `json:"updated_at" db:"updated_at"`
}
@@ -23,25 +23,25 @@ const (
	StatusProofError
)

// ProveType represents the type of roller.
type ProveType uint8
// ProofType represents the type of roller.
type ProofType uint8

func (r ProveType) String() string {
func (r ProofType) String() string {
	switch r {
	case BasicProve:
		return "Basic Prove"
	case AggregatorProve:
		return "Aggregator Prove"
	case ProofTypeChunk:
		return "proof type chunk"
	case ProofTypeBatch:
		return "proof type batch"
	default:
		return "Illegal Prove type"
		return "illegal proof type"
	}
}

const (
	// BasicProve is default roller, it only generates zk proof from traces.
	BasicProve ProveType = iota
	// AggregatorProve generates zk proof from other zk proofs and aggregate them into one proof.
	AggregatorProve
	// ProofTypeChunk is the default roller type; it only generates zk proofs from traces.
	ProofTypeChunk ProofType = iota
	// ProofTypeBatch generates a zk proof from other zk proofs, aggregating them into one proof.
	ProofTypeBatch
)

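A hypothetical snippet showing the renamed enum in use (TaskMsg and the ProofType constants are the ones from this diff; describeTask itself is illustrative and not part of the codebase):

// describeTask demonstrates switching on the renamed ProofType values.
func describeTask(task TaskMsg) string {
	switch task.Type {
	case ProofTypeChunk:
		return "chunk proof task " + task.ID
	case ProofTypeBatch:
		return "batch proof task " + task.ID
	default:
		return "unknown task type: " + task.Type.String() // "illegal proof type"
	}
}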
// AuthMsg is the first message exchanged from the Roller to the Sequencer.
@@ -59,7 +59,7 @@ type Identity struct {
	// Roller name
	Name string `json:"name"`
	// Roller RollerType
	RollerType ProveType `json:"roller_type,omitempty"`
	RollerType ProofType `json:"roller_type,omitempty"`
	// Unverified Unix timestamp of message creation
	Timestamp uint32 `json:"timestamp"`
	// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version.
@@ -203,7 +203,7 @@ func (a *ProofMsg) PublicKey() (string, error) {
// TaskMsg is a wrapper type around db ProveTask type.
type TaskMsg struct {
	ID   string    `json:"id"`
	Type ProveType `json:"type,omitempty"`
	Type ProofType `json:"type,omitempty"`
	// For decentralization, basic rollers will get block hashes from the coordinator, so that they can refer to the block hashes and fetch traces locally. Only applicable for basic rollers.
	BlockHashes []common.Hash `json:"block_hashes,omitempty"`
	// Only applicable for aggregator rollers.
@@ -214,7 +214,7 @@ type TaskMsg struct {
// the proof generation succeeded, and an error message if proof generation failed.
type ProofDetail struct {
	ID     string     `json:"id"`
	Type   ProveType  `json:"type,omitempty"`
	Type   ProofType  `json:"type,omitempty"`
	Status RespStatus `json:"status"`
	Proof  *AggProof  `json:"proof"`
	Error  string     `json:"error,omitempty"`

@@ -49,7 +49,7 @@ func TestGenerateToken(t *testing.T) {
func TestIdentityHash(t *testing.T) {
	identity := &Identity{
		Name:       "testName",
		RollerType: BasicProve,
		RollerType: ProofTypeChunk,
		Timestamp:  uint32(1622428800),
		Version:    "testVersion",
		Token:      "testToken",
@@ -68,7 +68,7 @@ func TestProofMessageSignVerifyPublicKey(t *testing.T) {
	proofMsg := &ProofMsg{
		ProofDetail: &ProofDetail{
			ID:     "testID",
			Type:   BasicProve,
			Type:   ProofTypeChunk,
			Status: StatusOk,
			Proof: &AggProof{
				Proof: []byte("testProof"),
@@ -96,7 +96,7 @@ func TestProofMessageSignVerifyPublicKey(t *testing.T) {
func TestProofDetailHash(t *testing.T) {
	proofDetail := &ProofDetail{
		ID:     "testID",
		Type:   BasicProve,
		Type:   ProofTypeChunk,
		Status: StatusOk,
		Proof: &AggProof{
			Proof: []byte("testProof"),
@@ -114,14 +114,14 @@ func TestProofDetailHash(t *testing.T) {
}

func TestProveTypeString(t *testing.T) {
	basicProve := ProveType(0)
	assert.Equal(t, "Basic Prove", basicProve.String())
	proofTypeChunk := ProofType(0)
	assert.Equal(t, "proof type chunk", proofTypeChunk.String())

	aggregatorProve := ProveType(1)
	assert.Equal(t, "Aggregator Prove", aggregatorProve.String())
	proofTypeBatch := ProofType(1)
	assert.Equal(t, "proof type batch", proofTypeBatch.String())

	illegalProve := ProveType(3)
	assert.Equal(t, "Illegal Prove type", illegalProve.String())
	illegalProof := ProofType(3)
	assert.Equal(t, "illegal proof type", illegalProof.String())
}

func TestProofMsgPublicKey(t *testing.T) {
@@ -131,7 +131,7 @@ func TestProofMsgPublicKey(t *testing.T) {
	proofMsg := &ProofMsg{
		ProofDetail: &ProofDetail{
			ID:     "testID",
			Type:   BasicProve,
			Type:   ProofTypeChunk,
			Status: StatusOk,
			Proof: &AggProof{
				Proof: []byte("testProof"),
@@ -5,7 +5,7 @@ import (
	"runtime/debug"
)

var tag = "v4.0.2"
var tag = "v4.0.9"

var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
@@ -105,7 +105,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
        address _to,
        uint256 _tokenId,
        uint256 _amount
    ) external override onlyCallByCounterpart nonReentrant {
    ) external virtual onlyCallByCounterpart nonReentrant {
        require(_l2Token != address(0), "token address cannot be 0");
        require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");

@@ -122,7 +122,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
        address _to,
        uint256[] calldata _tokenIds,
        uint256[] calldata _amounts
    ) external override onlyCallByCounterpart nonReentrant {
    ) external virtual onlyCallByCounterpart nonReentrant {
        require(_l2Token != address(0), "token address cannot be 0");
        require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");

@@ -162,7 +162,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
        uint256 _tokenId,
        uint256 _amount,
        uint256 _gasLimit
    ) internal nonReentrant {
    ) internal virtual nonReentrant {
        require(_amount > 0, "deposit zero amount");

        address _l2Token = tokenMapping[_token];
@@ -200,7 +200,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
        uint256[] calldata _tokenIds,
        uint256[] calldata _amounts,
        uint256 _gasLimit
    ) internal nonReentrant {
    ) internal virtual nonReentrant {
        require(_tokenIds.length > 0, "no token to deposit");
        require(_tokenIds.length == _amounts.length, "length mismatch");

@@ -99,7 +99,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
        address _from,
        address _to,
        uint256 _tokenId
    ) external override onlyCallByCounterpart nonReentrant {
    ) external virtual onlyCallByCounterpart nonReentrant {
        require(_l2Token != address(0), "token address cannot be 0");
        require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");

@@ -115,7 +115,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
        address _from,
        address _to,
        uint256[] calldata _tokenIds
    ) external override onlyCallByCounterpart nonReentrant {
    ) external virtual onlyCallByCounterpart nonReentrant {
        require(_l2Token != address(0), "token address cannot be 0");
        require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");

@@ -155,7 +155,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
        address _to,
        uint256 _tokenId,
        uint256 _gasLimit
    ) internal nonReentrant {
    ) internal virtual nonReentrant {
        address _l2Token = tokenMapping[_token];
        require(_l2Token != address(0), "no corresponding l2 token");

@@ -188,7 +188,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
        address _to,
        uint256[] calldata _tokenIds,
        uint256 _gasLimit
    ) internal nonReentrant {
    ) internal virtual nonReentrant {
        require(_tokenIds.length > 0, "no token to deposit");

        address _l2Token = tokenMapping[_token];

@@ -95,7 +95,7 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
        uint256 _amount,
        bytes memory _data,
        uint256 _gasLimit
    ) internal nonReentrant {
    ) internal virtual nonReentrant {
        require(_amount > 0, "deposit zero eth");

        // 1. Extract real sender if this call is from L1GatewayRouter.

@@ -103,7 +103,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
        address _to,
        uint256 _tokenId,
        uint256 _amount
    ) external override onlyCallByCounterpart nonReentrant {
    ) external virtual onlyCallByCounterpart nonReentrant {
        require(_l1Token != address(0), "token address cannot be 0");
        require(_l1Token == tokenMapping[_l2Token], "l2 token mismatch");

@@ -120,7 +120,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
        address _to,
        uint256[] calldata _tokenIds,
        uint256[] calldata _amounts
    ) external override onlyCallByCounterpart nonReentrant {
    ) external virtual onlyCallByCounterpart nonReentrant {
        require(_l1Token != address(0), "token address cannot be 0");
        require(_l1Token == tokenMapping[_l2Token], "l2 token mismatch");

@@ -160,7 +160,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
        uint256 _tokenId,
        uint256 _amount,
        uint256 _gasLimit
    ) internal nonReentrant {
    ) internal virtual nonReentrant {
        require(_amount > 0, "withdraw zero amount");

        address _l1Token = tokenMapping[_token];
@@ -198,7 +198,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
        uint256[] calldata _tokenIds,
        uint256[] calldata _amounts,
        uint256 _gasLimit
    ) internal nonReentrant {
    ) internal virtual nonReentrant {
        require(_tokenIds.length > 0, "no token to withdraw");
        require(_tokenIds.length == _amounts.length, "length mismatch");

@@ -97,7 +97,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
        address _from,
        address _to,
        uint256 _tokenId
    ) external override onlyCallByCounterpart nonReentrant {
    ) external virtual onlyCallByCounterpart nonReentrant {
        require(_l1Token != address(0), "token address cannot be 0");
        require(_l1Token == tokenMapping[_l2Token], "l2 token mismatch");

@@ -113,7 +113,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
        address _from,
        address _to,
        uint256[] calldata _tokenIds
    ) external override onlyCallByCounterpart nonReentrant {
    ) external virtual onlyCallByCounterpart nonReentrant {
        require(_l1Token != address(0), "token address cannot be 0");
        require(_l1Token == tokenMapping[_l2Token], "l2 token mismatch");

@@ -153,7 +153,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
        address _to,
        uint256 _tokenId,
        uint256 _gasLimit
    ) internal nonReentrant {
    ) internal virtual nonReentrant {
        address _l1Token = tokenMapping[_token];
        require(_l1Token != address(0), "no corresponding l1 token");

@@ -188,7 +188,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
        address _to,
        uint256[] calldata _tokenIds,
        uint256 _gasLimit
    ) internal nonReentrant {
    ) internal virtual nonReentrant {
        require(_tokenIds.length > 0, "no token to withdraw");

        address _l1Token = tokenMapping[_token];

@@ -88,7 +88,7 @@ contract L2ETHGateway is Initializable, ScrollGatewayBase, IL2ETHGateway {
        uint256 _amount,
        bytes memory _data,
        uint256 _gasLimit
    ) internal nonReentrant {
    ) internal virtual nonReentrant {
        require(msg.value > 0, "withdraw zero eth");

        // 1. Extract real sender if this call is from L1GatewayRouter.
@@ -1,7 +1,7 @@
{
  "name": "@scroll-tech/contracts",
  "description": "A library for interacting with Scroll contracts.",
  "version": "0.0.3",
  "version": "0.0.4",
  "repository": {
    "type": "git",
    "url": "https://github.com/scroll-tech/scroll.git"
@@ -51,10 +51,12 @@ func (m *Manager) ListRollers() ([]*RollerInfo, error) {
			PublicKey: pk,
		}
		for id, sess := range m.sessions {
			if _, ok := sess.info.Rollers[pk]; ok {
				info.ActiveSessionStartTime = time.Unix(sess.info.StartTimestamp, 0)
				info.ActiveSession = id
				break
			for _, proverTask := range sess.proverTasks {
				if proverTask.ProverPublicKey == pk {
					info.ActiveSessionStartTime = proverTask.CreatedAt
					info.ActiveSession = id
					break
				}
			}
		}
		res = append(res, info)
@@ -66,14 +68,14 @@ func (m *Manager) ListRollers() ([]*RollerInfo, error) {
func newSessionInfo(sess *session, status types.ProvingStatus, errMsg string, finished bool) *SessionInfo {
	now := time.Now()
	var nameList []string
	for pk := range sess.info.Rollers {
		nameList = append(nameList, sess.info.Rollers[pk].Name)
	for _, proverTask := range sess.proverTasks {
		nameList = append(nameList, proverTask.ProverName)
	}
	info := SessionInfo{
		ID:              sess.info.ID,
		ID:              sess.taskID,
		Status:          status.String(),
		AssignedRollers: nameList,
		StartTime:       time.Unix(sess.info.StartTimestamp, 0),
		StartTime:       sess.proverTasks[0].CreatedAt,
		Error:           errMsg,
	}
	if finished {

@@ -15,7 +15,7 @@ import (

	"scroll-tech/common/types/message"

	"scroll-tech/coordinator/config"
	"scroll-tech/coordinator/internal/config"
)

func geneAuthMsg(t *testing.T) *message.AuthMsg {
@@ -36,7 +36,7 @@ var rollerManager *Manager
func init() {
	rmConfig := config.RollerManagerConfig{}
	rmConfig.Verifier = &config.VerifierConfig{MockMode: true}
	rollerManager, _ = New(context.Background(), &rmConfig, nil, nil)
	rollerManager, _ = New(context.Background(), &rmConfig, nil)
}

func TestManager_RequestToken(t *testing.T) {
@@ -6,78 +6,69 @@ import (
	"os"
	"os/signal"

	"github.com/scroll-tech/go-ethereum/ethclient"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/database"

	"scroll-tech/common/metrics"
	"scroll-tech/common/utils"
	cutils "scroll-tech/common/utils"
	"scroll-tech/common/version"

	"scroll-tech/coordinator"
	"scroll-tech/coordinator/config"
	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/utils"
)

var (
	// Set up Coordinator app info.
	app *cli.App
)
var app *cli.App

func init() {
	// Set up coordinator app info.
	app = cli.NewApp()
	app.Action = action
	app.Name = "coordinator"
	app.Usage = "The Scroll L2 Coordinator"
	app.Version = version.Version
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Flags = append(app.Flags, cutils.CommonFlags...)
	app.Flags = append(app.Flags, apiFlags...)

	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
		return cutils.LogSetup(ctx)
	}

	// Register `coordinator-test` app for integration-test.
	utils.RegisterSimulation(app, utils.CoordinatorApp)
	cutils.RegisterSimulation(app, cutils.CoordinatorApp)
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}

	// Start metrics server.
	metrics.Serve(context.Background(), ctx)

	// init db connection
	var ormFactory database.OrmFactory
	if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
	db, err := utils.InitDB(cfg.DBConfig)
	if err != nil {
		log.Crit("failed to init db connection", "err", err)
	}

	client, err := ethclient.Dial(cfg.L2Config.Endpoint)
	if err != nil {
		return err
	}

	// Initialize all coordinator modules.
	rollerManager, err := coordinator.New(ctx.Context, cfg.RollerManagerConfig, ormFactory, client)
	if err != nil {
		return err
	}
	defer func() {
		rollerManager.Stop()
		err = ormFactory.Close()
		if err != nil {
		if err = utils.CloseDB(db); err != nil {
			log.Error("can not close ormFactory", "error", err)
		}
	}()

	subCtx, cancel := context.WithCancel(ctx.Context)
	// Initialize all coordinator modules.
	rollerManager, err := coordinator.New(subCtx, cfg.RollerManagerConfig, db)
	defer func() {
		cancel()
		rollerManager.Stop()
	}()

	if err != nil {
		return err
	}
	// Start metrics server.
	metrics.Serve(subCtx, ctx)

	// Start all modules.
	if err = rollerManager.Start(); err != nil {
		log.Crit("couldn't start roller manager", "error", err)
@@ -86,7 +77,7 @@ func action(ctx *cli.Context) error {
	apis := rollerManager.APIs()
	// Register api and start rpc service.
	if ctx.Bool(httpEnabledFlag.Name) {
		handler, addr, err := utils.StartHTTPEndpoint(
		handler, addr, err := cutils.StartHTTPEndpoint(
			fmt.Sprintf(
				"%s:%d",
				ctx.String(httpListenAddrFlag.Name),
@@ -103,7 +94,7 @@ func action(ctx *cli.Context) error {
	}
	// Register api and start ws service.
	if ctx.Bool(wsEnabledFlag.Name) {
		handler, addr, err := utils.StartWSEndpoint(
		handler, addr, err := cutils.StartWSEndpoint(
			fmt.Sprintf(
				"%s:%d",
				ctx.String(wsListenAddrFlag.Name),
@@ -10,7 +10,7 @@ import (
	"testing"
	"time"

	coordinatorConfig "scroll-tech/coordinator/config"
	coordinatorConfig "scroll-tech/coordinator/internal/config"

	"scroll-tech/common/cmd"
	"scroll-tech/common/docker"
@@ -11,24 +11,23 @@ require (
	github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
	golang.org/x/exp v0.0.0-20230206171751-46f607a40771
	golang.org/x/sync v0.1.0
	gorm.io/driver/postgres v1.5.0
	gorm.io/gorm v1.25.1
)

require (
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/holiman/uint256 v1.2.2 // indirect
	github.com/huin/goupnp v1.0.3 // indirect
	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
	github.com/jackc/pgpassfile v1.0.0 // indirect
	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
	github.com/jackc/pgx/v5 v5.3.0 // indirect
	github.com/jinzhu/inflection v1.0.0 // indirect
	github.com/jinzhu/now v1.1.5 // indirect
	github.com/kr/pretty v0.3.1 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.18 // indirect
	github.com/mattn/go-runewidth v0.0.14 // indirect
	github.com/rivo/uniseg v0.4.4 // indirect
	github.com/rogpeppe/go-internal v1.10.0 // indirect
	github.com/status-im/keycard-go v0.2.0 // indirect
	github.com/tyler-smith/go-bip39 v1.1.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	golang.org/x/text v0.10.0 // indirect
)

require (
@@ -42,7 +41,6 @@ require (
	github.com/gorilla/websocket v1.5.0 // indirect
	github.com/iden3/go-iden3-crypto v0.0.15 // indirect
	github.com/jtolds/gls v4.20.0+incompatible // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/scroll-tech/zktrie v0.5.3 // indirect
@@ -55,7 +53,6 @@ require (
	github.com/yusufpapurcu/wmi v1.2.2 // indirect
	golang.org/x/crypto v0.10.0 // indirect
	golang.org/x/sys v0.9.0 // indirect
	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)
@@ -22,10 +22,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
@@ -33,32 +30,34 @@ github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -66,16 +65,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -92,11 +83,10 @@ github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI=
@@ -111,13 +101,14 @@ github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3h
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -125,51 +116,73 @@ github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+Kd
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
|
||||
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
|
||||
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
|
||||
gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64=
|
||||
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
|
||||
|
||||
@@ -6,8 +6,6 @@ import (
    "os"
    "path/filepath"
    "strings"

    db_config "scroll-tech/database"
)

const (
@@ -44,7 +42,7 @@ type L2Config struct {
// Config load configuration items.
type Config struct {
    RollerManagerConfig *RollerManagerConfig `json:"roller_manager_config"`
    DBConfig            *db_config.DBConfig  `json:"db_config"`
    DBConfig            *DBConfig            `json:"db_config"`
    L2Config            *L2Config            `json:"l2_config"`
}

@@ -55,6 +53,16 @@ type VerifierConfig struct {
    AggVkPath string `json:"agg_vk_path"`
}

// DBConfig db config
type DBConfig struct {
    // data source name
    DSN        string `json:"dsn"`
    DriverName string `json:"driver_name"`

    MaxOpenNum int `json:"maxOpenNum"`
    MaxIdleNum int `json:"maxIdleNum"`
}

// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
    buf, err := os.ReadFile(filepath.Clean(file))
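For reference, the new DBConfig deliberately mixes snake_case ("dsn", "driver_name") and camelCase ("maxOpenNum", "maxIdleNum") JSON tags, so a config file must match them exactly. A minimal, self-contained sketch of what a matching "db_config" section might look like (the DSN value and pool numbers are placeholders, and the struct is redeclared locally so the snippet runs standalone):

package main

import (
    "encoding/json"
    "fmt"
)

// DBConfig mirrors the struct added in the diff above.
type DBConfig struct {
    DSN        string `json:"dsn"`
    DriverName string `json:"driver_name"`
    MaxOpenNum int    `json:"maxOpenNum"`
    MaxIdleNum int    `json:"maxIdleNum"`
}

func main() {
    // Hypothetical "db_config" section of the coordinator's JSON config.
    raw := `{"dsn": "postgres://postgres:postgres@localhost:5432/scroll?sslmode=disable",
        "driver_name": "postgres", "maxOpenNum": 200, "maxIdleNum": 20}`
    var cfg DBConfig
    if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", cfg)
}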
254 coordinator/internal/orm/batch.go Normal file
@@ -0,0 +1,254 @@
package orm

import (
    "context"
    "encoding/json"
    "errors"
    "time"

    "scroll-tech/common/types"
    "scroll-tech/common/types/message"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/log"
    "gorm.io/gorm"
)

const defaultBatchHeaderVersion = 0

// Batch represents a batch of chunks.
type Batch struct {
    db *gorm.DB `gorm:"column:-"`

    // batch
    Index           uint64 `json:"index" gorm:"column:index"`
    Hash            string `json:"hash" gorm:"column:hash"`
    StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
    StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
    EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
    EndChunkHash    string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
    StateRoot       string `json:"state_root" gorm:"column:state_root"`
    WithdrawRoot    string `json:"withdraw_root" gorm:"column:withdraw_root"`
    BatchHeader     []byte `json:"batch_header" gorm:"column:batch_header"`

    // proof
    ChunkProofsReady int16      `json:"chunk_proofs_ready" gorm:"column:chunk_proofs_ready;default:0"`
    ProvingStatus    int16      `json:"proving_status" gorm:"column:proving_status;default:1"`
    Proof            []byte     `json:"proof" gorm:"column:proof;default:NULL"`
    ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
    ProvedAt         *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
    ProofTimeSec     int        `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`

    // rollup
    RollupStatus   int16      `json:"rollup_status" gorm:"column:rollup_status;default:1"`
    CommitTxHash   string     `json:"commit_tx_hash" gorm:"column:commit_tx_hash;default:NULL"`
    CommittedAt    *time.Time `json:"committed_at" gorm:"column:committed_at;default:NULL"`
    FinalizeTxHash string     `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
    FinalizedAt    *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`

    // gas oracle
    OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
    OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

    // metadata
    CreatedAt time.Time      `json:"created_at" gorm:"column:created_at"`
    UpdatedAt time.Time      `json:"updated_at" gorm:"column:updated_at"`
    DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewBatch creates a new Batch database instance.
func NewBatch(db *gorm.DB) *Batch {
    return &Batch{db: db}
}

// TableName returns the table name for the Batch model.
func (*Batch) TableName() string {
    return "batch"
}

// GetUnassignedBatches retrieves unassigned batches based on the specified limit.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetUnassignedBatches(ctx context.Context, limit int) ([]*Batch, error) {
    if limit < 0 {
        return nil, errors.New("limit must not be smaller than zero")
    }
    if limit == 0 {
        return nil, nil
    }

    var batches []*Batch
    db := o.db.WithContext(ctx)
    db = db.Where("proving_status = ? AND chunk_proofs_ready = ?", types.ProvingTaskUnassigned, 1)
    db = db.Order("index ASC")
    db = db.Limit(limit)

    if err := db.Find(&batches).Error; err != nil {
        return nil, err
    }
    return batches, nil
}

// GetAssignedBatches retrieves all batches whose proving_status is either types.ProvingTaskAssigned or types.ProvingTaskProved.
func (o *Batch) GetAssignedBatches(ctx context.Context) ([]*Batch, error) {
    var assignedBatches []*Batch
    err := o.db.WithContext(ctx).
        Where("proving_status IN (?)", []int{int(types.ProvingTaskAssigned), int(types.ProvingTaskProved)}).
        Find(&assignedBatches).Error
    if err != nil {
        return nil, err
    }
    return assignedBatches, nil
}

// GetProvingStatusByHash retrieves the proving status of a batch given its hash.
func (o *Batch) GetProvingStatusByHash(ctx context.Context, hash string) (types.ProvingStatus, error) {
    var batch Batch
    db := o.db.WithContext(ctx)
    db = db.Model(&Batch{})
    db = db.Select("proving_status")
    db = db.Where("hash = ?", hash)
    if err := db.Find(&batch).Error; err != nil {
        return types.ProvingStatusUndefined, err
    }
    return types.ProvingStatus(batch.ProvingStatus), nil
}

// GetLatestBatch retrieves the latest batch from the database.
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
    var latestBatch Batch
    err := o.db.WithContext(ctx).Order("index desc").First(&latestBatch).Error
    if err != nil {
        return nil, err
    }
    return &latestBatch, nil
}

// InsertBatch inserts a new batch into the database.
// for unit test
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
    if len(chunks) == 0 {
        return nil, errors.New("invalid args")
    }

    db := o.db
    if len(dbTX) > 0 && dbTX[0] != nil {
        db = dbTX[0]
    }

    parentBatch, err := o.GetLatestBatch(ctx)
    if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
        log.Error("failed to get the latest batch", "err", err)
        return nil, err
    }

    var batchIndex uint64
    var parentBatchHash common.Hash
    var totalL1MessagePoppedBefore uint64
    var version uint8 = defaultBatchHeaderVersion

    // If parentBatch==nil then err==gorm.ErrRecordNotFound, which means there is
    // no batch record in the db; we then use default empty values for the new batch.
    // If parentBatch!=nil then err==nil, and we fill the parentBatch-related data into the new batch.
    if parentBatch != nil {
        batchIndex = parentBatch.Index + 1
        parentBatchHash = common.HexToHash(parentBatch.Hash)

        var parentBatchHeader *types.BatchHeader
        parentBatchHeader, err = types.DecodeBatchHeader(parentBatch.BatchHeader)
        if err != nil {
            log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
            return nil, err
        }

        totalL1MessagePoppedBefore = parentBatchHeader.TotalL1MessagePopped()
        version = parentBatchHeader.Version()
    }

    batchHeader, err := types.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks)
    if err != nil {
        log.Error("failed to create batch header",
            "index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
            "parent hash", parentBatchHash, "number of chunks", len(chunks), "err", err)
        return nil, err
    }

    numChunks := len(chunks)
    lastChunkBlockNum := len(chunks[numChunks-1].Blocks)

    newBatch := Batch{
        Index:            batchIndex,
        Hash:             batchHeader.Hash().Hex(),
        StartChunkHash:   startChunkHash,
        StartChunkIndex:  startChunkIndex,
        EndChunkHash:     endChunkHash,
        EndChunkIndex:    endChunkIndex,
        StateRoot:        chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
        WithdrawRoot:     chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
        BatchHeader:      batchHeader.Encode(),
        ProvingStatus:    int16(types.ProvingTaskUnassigned),
        RollupStatus:     int16(types.RollupPending),
        ChunkProofsReady: 0,
    }

    if err := db.WithContext(ctx).Create(&newBatch).Error; err != nil {
        log.Error("failed to insert batch", "batch", newBatch, "err", err)
        return nil, err
    }

    return &newBatch, nil
}

// UpdateChunkProofsStatusByBatchHash updates the status of chunk_proofs_ready field for a given batch hash.
// The function will set the chunk_proofs_ready to the status provided.
func (o *Chunk) UpdateChunkProofsStatusByBatchHash(ctx context.Context, batchHash string, isReady bool) error {
    var chunkProofsStatus int16
    if isReady {
        chunkProofsStatus = 1
    } else {
        chunkProofsStatus = 0
    }

    db := o.db.WithContext(ctx)
    db = db.Model(&Batch{})
    db = db.Where("hash = ?", batchHash)
    return db.Update("chunk_proofs_ready", chunkProofsStatus).Error
}

// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
    db := o.db
    if len(dbTX) > 0 && dbTX[0] != nil {
        db = dbTX[0]
    }

    updateFields := make(map[string]interface{})
    updateFields["proving_status"] = int(status)

    switch status {
    case types.ProvingTaskAssigned:
        updateFields["prover_assigned_at"] = time.Now()
    case types.ProvingTaskUnassigned:
        updateFields["prover_assigned_at"] = nil
    case types.ProvingTaskProved, types.ProvingTaskVerified:
        updateFields["proved_at"] = time.Now()
    }

    if err := db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
        return err
    }
    return nil
}

// UpdateProofByHash updates the batch proof by hash.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
    proofBytes, err := json.Marshal(proof)
    if err != nil {
        return err
    }

    updateFields := make(map[string]interface{})
    updateFields["proof"] = proofBytes
    updateFields["proof_time_sec"] = proofTimeSec
    err = o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error
    return err
}
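A minimal caller-side sketch of the Batch ORM added above, assuming an initialized *gorm.DB against a migrated database; the function name and wiring are illustrative, not part of this commit:

import (
    "context"

    "gorm.io/gorm"

    "scroll-tech/common/types"
    "scroll-tech/coordinator/internal/orm"
)

// assignBatches fetches up to limit unassigned batches (whose chunk proofs are
// all ready, per the WHERE clause in GetUnassignedBatches) and marks them assigned.
func assignBatches(ctx context.Context, db *gorm.DB, limit int) error {
    batchOrm := orm.NewBatch(db)
    batches, err := batchOrm.GetUnassignedBatches(ctx, limit)
    if err != nil {
        return err
    }
    for _, b := range batches {
        if err := batchOrm.UpdateProvingStatus(ctx, b.Hash, types.ProvingTaskAssigned); err != nil {
            return err
        }
    }
    return nil
}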
300 coordinator/internal/orm/chunk.go Normal file
@@ -0,0 +1,300 @@
package orm

import (
    "context"
    "encoding/json"
    "errors"
    "time"

    "scroll-tech/common/types"
    "scroll-tech/common/types/message"

    "github.com/scroll-tech/go-ethereum/log"
    "gorm.io/gorm"
)

// Chunk represents a chunk of blocks in the database.
type Chunk struct {
    db *gorm.DB `gorm:"-"`

    // chunk
    Index                        uint64 `json:"index" gorm:"column:index"`
    Hash                         string `json:"hash" gorm:"column:hash"`
    StartBlockNumber             uint64 `json:"start_block_number" gorm:"column:start_block_number"`
    StartBlockHash               string `json:"start_block_hash" gorm:"column:start_block_hash"`
    EndBlockNumber               uint64 `json:"end_block_number" gorm:"column:end_block_number"`
    EndBlockHash                 string `json:"end_block_hash" gorm:"column:end_block_hash"`
    StartBlockTime               uint64 `json:"start_block_time" gorm:"column:start_block_time"`
    TotalL1MessagesPoppedBefore  uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
    TotalL1MessagesPoppedInChunk uint64 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`

    // proof
    ProvingStatus    int16      `json:"proving_status" gorm:"column:proving_status;default:1"`
    Proof            []byte     `json:"proof" gorm:"column:proof;default:NULL"`
    ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
    ProvedAt         *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
    ProofTimeSec     int        `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`

    // batch
    BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

    // metadata
    TotalL2TxGas              uint64         `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
    TotalL2TxNum              uint64         `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
    TotalL1CommitCalldataSize uint64         `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"`
    TotalL1CommitGas          uint64         `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"`
    CreatedAt                 time.Time      `json:"created_at" gorm:"column:created_at"`
    UpdatedAt                 time.Time      `json:"updated_at" gorm:"column:updated_at"`
    DeletedAt                 gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewChunk creates a new Chunk database instance.
func NewChunk(db *gorm.DB) *Chunk {
    return &Chunk{db: db}
}

// TableName returns the table name for the chunk model.
func (*Chunk) TableName() string {
    return "chunk"
}

// GetUnassignedChunks retrieves unassigned chunks based on the specified limit.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetUnassignedChunks(ctx context.Context, limit int) ([]*Chunk, error) {
    if limit < 0 {
        return nil, errors.New("limit must not be smaller than zero")
    }
    if limit == 0 {
        return nil, nil
    }

    var chunks []*Chunk
    db := o.db.WithContext(ctx)
    db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
    db = db.Order("index ASC")
    db = db.Limit(limit)

    if err := db.Find(&chunks).Error; err != nil {
        return nil, err
    }
    return chunks, nil
}

// GetProofsByBatchHash retrieves the proofs associated with a specific batch hash.
// It returns a slice of decoded proofs (message.AggProof) obtained from the database.
// The returned proofs are sorted in ascending order by their associated chunk index.
func (o *Chunk) GetProofsByBatchHash(ctx context.Context, batchHash string) ([]*message.AggProof, error) {
    var chunks []*Chunk
    db := o.db.WithContext(ctx)
    db = db.Where("batch_hash", batchHash)
    db = db.Order("index ASC")

    if err := db.Find(&chunks).Error; err != nil {
        return nil, err
    }

    var proofs []*message.AggProof
    for _, chunk := range chunks {
        var proof message.AggProof
        if err := json.Unmarshal(chunk.Proof, &proof); err != nil {
            return nil, err
        }

        proofs = append(proofs, &proof)
    }

    return proofs, nil
}

// GetLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
    var latestChunk Chunk
    err := o.db.WithContext(ctx).
        Order("index desc").
        First(&latestChunk).Error
    if err != nil {
        return nil, err
    }
    return &latestChunk, nil
}

// GetProvingStatusByHash retrieves the proving status of a chunk given its hash.
func (o *Chunk) GetProvingStatusByHash(ctx context.Context, hash string) (types.ProvingStatus, error) {
    var chunk Chunk
    db := o.db.WithContext(ctx)
    db = db.Model(&Chunk{})
    db = db.Select("proving_status")
    db = db.Where("hash = ?", hash)
    if err := db.Find(&chunk).Error; err != nil {
        return types.ProvingStatusUndefined, err
    }
    return types.ProvingStatus(chunk.ProvingStatus), nil
}

// GetAssignedChunks retrieves all chunks whose proving_status is either types.ProvingTaskAssigned or types.ProvingTaskProved.
func (o *Chunk) GetAssignedChunks(ctx context.Context) ([]*Chunk, error) {
    var chunks []*Chunk

    err := o.db.WithContext(ctx).Where("proving_status IN (?)", []int{int(types.ProvingTaskAssigned), int(types.ProvingTaskProved)}).
        Find(&chunks).Error
    if err != nil {
        return nil, err
    }

    return chunks, nil
}

// CheckIfBatchChunkProofsAreReady checks if all proofs for all chunks of a given batchHash are collected.
func (o *Chunk) CheckIfBatchChunkProofsAreReady(ctx context.Context, batchHash string) (bool, error) {
    var count int64
    db := o.db.WithContext(ctx)
    db = db.Model(&Chunk{})
    db = db.Where("batch_hash = ? AND proving_status != ?", batchHash, types.ProvingTaskVerified)
    err := db.Count(&count).Error
    if err != nil {
        return false, err
    }

    return count == 0, nil
}

// GetChunkBatchHash retrieves the batchHash of a given chunk.
func (o *Chunk) GetChunkBatchHash(ctx context.Context, chunkHash string) (string, error) {
    var chunk Chunk
    db := o.db.WithContext(ctx)
    db = db.Where("hash = ?", chunkHash)
    db = db.Select("batch_hash")
    if err := db.First(&chunk).Error; err != nil {
        return "", err
    }

    return chunk.BatchHash, nil
}

// InsertChunk inserts a new chunk into the database.
// for unit test
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
    if chunk == nil || len(chunk.Blocks) == 0 {
        return nil, errors.New("invalid args")
    }

    db := o.db
    if len(dbTX) > 0 && dbTX[0] != nil {
        db = dbTX[0]
    }

    var chunkIndex uint64
    var totalL1MessagePoppedBefore uint64
    parentChunk, err := o.GetLatestChunk(ctx)
    if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
        log.Error("failed to get latest chunk", "err", err)
        return nil, err
    }

    // If parentChunk==nil then err==gorm.ErrRecordNotFound, which means there is
    // no chunk record in the db; we then use default empty values for the new chunk.
    // If parentChunk!=nil then err==nil, and we fill the parentChunk-related data into the new chunk.
    if parentChunk != nil {
        chunkIndex = parentChunk.Index + 1
        totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + parentChunk.TotalL1MessagesPoppedInChunk
    }

    hash, err := chunk.Hash(totalL1MessagePoppedBefore)
    if err != nil {
        log.Error("failed to get chunk hash", "err", err)
        return nil, err
    }

    var totalL2TxGas uint64
    var totalL2TxNum uint64
    var totalL1CommitCalldataSize uint64
    var totalL1CommitGas uint64
    for _, block := range chunk.Blocks {
        totalL2TxGas += block.Header.GasUsed
        totalL2TxNum += block.L2TxsNum()
        totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
        totalL1CommitGas += block.EstimateL1CommitGas()
    }

    numBlocks := len(chunk.Blocks)
    newChunk := Chunk{
        Index:                        chunkIndex,
        Hash:                         hash.Hex(),
        StartBlockNumber:             chunk.Blocks[0].Header.Number.Uint64(),
        StartBlockHash:               chunk.Blocks[0].Header.Hash().Hex(),
        EndBlockNumber:               chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
        EndBlockHash:                 chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
        TotalL2TxGas:                 totalL2TxGas,
        TotalL2TxNum:                 totalL2TxNum,
        TotalL1CommitCalldataSize:    totalL1CommitCalldataSize,
        TotalL1CommitGas:             totalL1CommitGas,
        StartBlockTime:               chunk.Blocks[0].Header.Time,
        TotalL1MessagesPoppedBefore:  totalL1MessagePoppedBefore,
        TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),
        ProvingStatus:                int16(types.ProvingTaskUnassigned),
    }

    if err := db.WithContext(ctx).Create(&newChunk).Error; err != nil {
        log.Error("failed to insert chunk", "hash", hash, "err", err)
        return nil, err
    }

    return &newChunk, nil
}

// UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
    db := o.db
    if len(dbTX) > 0 && dbTX[0] != nil {
        db = dbTX[0]
    }

    updateFields := make(map[string]interface{})
    updateFields["proving_status"] = int(status)

    switch status {
    case types.ProvingTaskAssigned:
        updateFields["prover_assigned_at"] = time.Now()
    case types.ProvingTaskUnassigned:
        updateFields["prover_assigned_at"] = nil
    case types.ProvingTaskProved, types.ProvingTaskVerified:
        updateFields["proved_at"] = time.Now()
    }

    db = db.WithContext(ctx)
    db = db.Model(&Chunk{})
    db = db.Where("hash", hash)
    return db.Updates(updateFields).Error
}

// UpdateProofByHash updates the chunk proof by hash.
func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
    proofBytes, err := json.Marshal(proof)
    if err != nil {
        return err
    }

    updateFields := make(map[string]interface{})
    updateFields["proof"] = proofBytes
    updateFields["proof_time_sec"] = proofTimeSec
    db := o.db.WithContext(ctx)
    db = db.Model(&Chunk{})
    db = db.Where("hash", hash)
    return db.Updates(updateFields).Error
}

// UpdateBatchHashInRange updates the batch_hash for chunks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end indices.
// for unit test
func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string, dbTX ...*gorm.DB) error {
    db := o.db
    if len(dbTX) > 0 && dbTX[0] != nil {
        db = dbTX[0]
    }
    db = db.Model(&Chunk{}).Where("index >= ? AND index <= ?", startIndex, endIndex)

    if err := db.Update("batch_hash", batchHash).Error; err != nil {
        return err
    }
    return nil
}
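The chunk APIs above are designed to compose into a proofs-ready check; the sketch below mirrors the checkAreAllChunkProofsReady flow in the manager changes later in this diff (wiring and function name are illustrative; imports as in the earlier sketch):

// onChunkVerified marks a chunk verified, then flips the owning batch's
// chunk_proofs_ready flag once no sibling chunk remains unverified.
func onChunkVerified(ctx context.Context, chunkOrm *orm.Chunk, chunkHash string) error {
    if err := chunkOrm.UpdateProvingStatus(ctx, chunkHash, types.ProvingTaskVerified); err != nil {
        return err
    }
    batchHash, err := chunkOrm.GetChunkBatchHash(ctx, chunkHash)
    if err != nil {
        return err
    }
    ready, err := chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batchHash)
    if err != nil {
        return err
    }
    if ready {
        return chunkOrm.UpdateChunkProofsStatusByBatchHash(ctx, batchHash, true)
    }
    return nil
}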
109 coordinator/internal/orm/l2_block.go Normal file
@@ -0,0 +1,109 @@
package orm

import (
    "context"
    "encoding/json"

    "github.com/scroll-tech/go-ethereum/common"
    gethTypes "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/log"
    "gorm.io/gorm"

    "scroll-tech/common/types"
)

// L2Block represents an L2 block in the database.
type L2Block struct {
    db *gorm.DB `gorm:"column:-"`

    Number           uint64 `json:"number" gorm:"number"`
    Hash             string `json:"hash" gorm:"hash"`
    ParentHash       string `json:"parent_hash" gorm:"parent_hash"`
    Header           string `json:"header" gorm:"header"`
    Transactions     string `json:"transactions" gorm:"transactions"`
    WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"`
    TxNum            uint64 `json:"tx_num" gorm:"tx_num"`
    GasUsed          uint64 `json:"gas_used" gorm:"gas_used"`
    BlockTimestamp   uint64 `json:"block_timestamp" gorm:"block_timestamp"`
    ChunkHash        string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
}

// NewL2Block creates a new L2Block instance.
func NewL2Block(db *gorm.DB) *L2Block {
    return &L2Block{db: db}
}

// TableName returns the name of the "l2_block" table.
func (*L2Block) TableName() string {
    return "l2_block"
}

// GetL2BlocksByChunkHash retrieves the L2 blocks associated with the specified chunk hash.
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2BlocksByChunkHash(ctx context.Context, chunkHash string) ([]*types.WrappedBlock, error) {
    var l2Blocks []L2Block
    db := o.db.WithContext(ctx)
    db = db.Model(&L2Block{})
    db = db.Select("header, transactions, withdraw_trie_root")
    db = db.Where("chunk_hash = ?", chunkHash)
    db = db.Order("number ASC")
    if err := db.Find(&l2Blocks).Error; err != nil {
        return nil, err
    }

    var wrappedBlocks []*types.WrappedBlock
    for _, v := range l2Blocks {
        var wrappedBlock types.WrappedBlock

        if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
            return nil, err
        }

        wrappedBlock.Header = &gethTypes.Header{}
        if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
            return nil, err
        }

        wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
        wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
    }

    return wrappedBlocks, nil
}

// InsertL2Blocks inserts l2 blocks into the "l2_block" table.
func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlock) error {
    var l2Blocks []L2Block
    for _, block := range blocks {
        header, err := json.Marshal(block.Header)
        if err != nil {
            log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err)
            return err
        }

        txs, err := json.Marshal(block.Transactions)
        if err != nil {
            log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err)
            return err
        }

        l2Block := L2Block{
            Number:           block.Header.Number.Uint64(),
            Hash:             block.Header.Hash().String(),
            ParentHash:       block.Header.ParentHash.String(),
            Transactions:     string(txs),
            WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(),
            TxNum:            uint64(len(block.Transactions)),
            GasUsed:          block.Header.GasUsed,
            BlockTimestamp:   block.Header.Time,
            Header:           string(header),
        }
        l2Blocks = append(l2Blocks, l2Block)
    }

    if err := o.db.WithContext(ctx).Create(&l2Blocks).Error; err != nil {
        log.Error("failed to insert l2Blocks", "err", err)
        return err
    }
    return nil
}
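A round-trip sketch for the L2Block ORM above (hypothetical wiring; note that InsertL2Blocks stores rows without a chunk_hash, so GetL2BlocksByChunkHash only returns them after some other component has stamped that column):

func storeAndReload(ctx context.Context, db *gorm.DB, blocks []*types.WrappedBlock, chunkHash string) ([]*types.WrappedBlock, error) {
    blockOrm := orm.NewL2Block(db)
    if err := blockOrm.InsertL2Blocks(ctx, blocks); err != nil {
        return nil, err
    }
    // Returns nothing until chunk_hash has been set on the inserted rows.
    return blockOrm.GetL2BlocksByChunkHash(ctx, chunkHash)
}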
86 coordinator/internal/orm/orm_test.go Normal file
@@ -0,0 +1,86 @@
package orm

import (
    "context"
    "testing"

    "github.com/stretchr/testify/assert"
    "gorm.io/gorm"

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/utils"

    "scroll-tech/common/docker"
    "scroll-tech/common/types"

    "scroll-tech/database/migrate"
)

var (
    base *docker.App

    db            *gorm.DB
    proverTaskOrm *ProverTask
)

func TestMain(m *testing.M) {
    t := &testing.T{}
    setupEnv(t)
    defer tearDownEnv(t)
    m.Run()
}

func setupEnv(t *testing.T) {
    base = docker.NewDockerApp()
    base.RunDBImage(t)
    var err error
    db, err = utils.InitDB(
        &config.DBConfig{
            DSN:        base.DBConfig.DSN,
            DriverName: base.DBConfig.DriverName,
            MaxOpenNum: base.DBConfig.MaxOpenNum,
            MaxIdleNum: base.DBConfig.MaxIdleNum,
        },
    )
    assert.NoError(t, err)
    sqlDB, err := db.DB()
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(sqlDB))

    proverTaskOrm = NewProverTask(db)
}

func tearDownEnv(t *testing.T) {
    sqlDB, err := db.DB()
    assert.NoError(t, err)
    sqlDB.Close()
    base.Free()
}

func TestProverTaskOrm(t *testing.T) {
    sqlDB, err := db.DB()
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(sqlDB))

    proverTask := ProverTask{
        TaskID:          "test-hash",
        ProverName:      "roller-0",
        ProverPublicKey: "0",
        ProvingStatus:   int16(types.RollerAssigned),
    }

    err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
    assert.NoError(t, err)
    proverTasks, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{"test-hash"})
    assert.NoError(t, err)
    assert.Equal(t, 1, len(proverTasks))
    assert.Equal(t, proverTask.ProverName, proverTasks[0].ProverName)

    proverTask.ProvingStatus = int16(types.RollerProofValid)
    err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
    assert.NoError(t, err)
    proverTasks, err = proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{"test-hash"})
    assert.NoError(t, err)
    assert.Equal(t, 1, len(proverTasks))
    assert.Equal(t, proverTask.ProvingStatus, proverTasks[0].ProvingStatus)
}
76 coordinator/internal/orm/prover_task.go Normal file
@@ -0,0 +1,76 @@
package orm

import (
    "context"
    "time"

    "gorm.io/gorm"
    "gorm.io/gorm/clause"

    "scroll-tech/common/types"
    "scroll-tech/common/types/message"
)

// ProverTask holds the assigned-roller info for a chunk/batch proving task.
type ProverTask struct {
    db *gorm.DB `gorm:"column:-"`

    ID              int64          `json:"id" gorm:"column:id"`
    TaskID          string         `json:"task_id" gorm:"column:task_id"`
    ProverPublicKey string         `json:"prover_public_key" gorm:"column:prover_public_key"`
    ProverName      string         `json:"prover_name" gorm:"column:prover_name"`
    TaskType        int16          `json:"task_type" gorm:"column:task_type;default:0"`
    ProvingStatus   int16          `json:"proving_status" gorm:"column:proving_status;default:0"`
    FailureType     int16          `json:"failure_type" gorm:"column:failure_type;default:0"`
    Reward          uint64         `json:"reward" gorm:"column:reward;default:0"`
    Proof           []byte         `json:"proof" gorm:"column:proof;default:NULL"`
    CreatedAt       time.Time      `json:"created_at" gorm:"column:created_at"`
    UpdatedAt       time.Time      `json:"updated_at" gorm:"column:updated_at"`
    DeletedAt       gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at"`
}

// NewProverTask creates a new ProverTask instance.
func NewProverTask(db *gorm.DB) *ProverTask {
    return &ProverTask{db: db}
}

// TableName returns the name of the "prover_task" table.
func (*ProverTask) TableName() string {
    return "prover_task"
}

// GetProverTasksByHashes retrieves the ProverTask records associated with the specified hashes.
// The returned prover task objects are sorted in ascending order by their ids.
func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string) ([]*ProverTask, error) {
    if len(hashes) == 0 {
        return nil, nil
    }
    var proverTasks []*ProverTask
    db := o.db.WithContext(ctx)
    db = db.Where("task_id IN ?", hashes)
    db = db.Order("id asc")

    if err := db.Find(&proverTasks).Error; err != nil {
        return nil, err
    }
    return proverTasks, nil
}

// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, sessionInfo *ProverTask) error {
    db := o.db.WithContext(ctx)
    db = db.Clauses(clause.OnConflict{
        Columns:   []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
        DoUpdates: clause.AssignmentColumns([]string{"proving_status"}),
    })
    return db.Create(&sessionInfo).Error
}

// UpdateProverTaskProvingStatus updates the proving_status of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus) error {
    db := o.db.WithContext(ctx)
    db = db.Model(&ProverTask{})
    db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", proofType, taskID, pk)

    return db.Update("proving_status", status).Error
}
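SetProverTask above is effectively an upsert: the ON CONFLICT clause treats (task_type, task_id, prover_public_key) as the logical key and refreshes only proving_status on conflict. This presumes a matching unique index in the migrations, which are not part of this diff. A sketch mirroring TestProverTaskOrm above (db and ctx assumed to be initialized):

func demoUpsert(ctx context.Context, db *gorm.DB) error {
    pt := orm.NewProverTask(db)
    task := &orm.ProverTask{
        TaskID:          "task-1",
        ProverPublicKey: "pk-1",
        ProverName:      "roller-1",
        ProvingStatus:   int16(types.RollerAssigned),
    }
    // First call inserts the row.
    if err := pt.SetProverTask(ctx, task); err != nil {
        return err
    }
    // Same key again: only proving_status is updated, no duplicate row.
    task.ProvingStatus = int16(types.RollerProofValid)
    return pt.SetProverTask(ctx, task)
}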
43 coordinator/internal/utils/db.go Normal file
@@ -0,0 +1,43 @@
package utils

import (
    "gorm.io/driver/postgres"
    "gorm.io/gorm"
    "gorm.io/gorm/logger"

    "scroll-tech/coordinator/internal/config"
)

// InitDB initializes the db handler
func InitDB(config *config.DBConfig) (*gorm.DB, error) {
    db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
        Logger: logger.Default.LogMode(logger.Warn),
    })
    if err != nil {
        return nil, err
    }
    sqlDB, err := db.DB()
    if err != nil {
        return nil, err
    }

    sqlDB.SetMaxOpenConns(config.MaxOpenNum)
    sqlDB.SetMaxIdleConns(config.MaxIdleNum)

    if err = sqlDB.Ping(); err != nil {
        return nil, err
    }
    return db, nil
}

// CloseDB closes the db handler. Note: the db handler should only be closed when the program exits.
func CloseDB(db *gorm.DB) error {
    sqlDB, err := db.DB()
    if err != nil {
        return err
    }
    if err := sqlDB.Close(); err != nil {
        return err
    }
    return nil
}
@@ -11,20 +11,19 @@ import (
    cmap "github.com/orcaman/concurrent-map"
    "github.com/patrickmn/go-cache"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"
    geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
    "github.com/scroll-tech/go-ethereum/rpc"
    "golang.org/x/exp/rand"
    "gorm.io/gorm"

    "scroll-tech/common/metrics"
    "scroll-tech/common/types"
    "scroll-tech/common/types/message"
    "scroll-tech/common/utils/workerpool"

    "scroll-tech/database"

    "scroll-tech/coordinator/config"
    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/orm"
    "scroll-tech/coordinator/verifier"
)

@@ -50,14 +49,15 @@ const (

type rollerProofStatus struct {
    id     string
    typ    message.ProveType
    typ    message.ProofType
    pk     string
    status types.RollerProveStatus
}

// Contains all the information on an ongoing proof generation session.
type session struct {
    info *types.SessionInfo
    taskID      string
    proverTasks []*orm.ProverTask
    // finish channel is used to pass the public key of the rollers who finished the proving process.
    finishChan chan rollerProofStatus
}
@@ -90,11 +90,11 @@ type Manager struct {
    // incoming proofs.
    verifier *verifier.Verifier

    // db interface
    orm database.OrmFactory

    // l2geth client
    *ethclient.Client
    // orm interface
    l2BlockOrm    *orm.L2Block
    chunkOrm      *orm.Chunk
    batchOrm      *orm.Batch
    proverTaskOrm *orm.ProverTask

    // Token cache
    tokenCache *cache.Cache
@@ -107,7 +107,7 @@ type Manager struct {

// New returns a new instance of Manager. The instance will not be fully prepared,
// and still needs to be finalized and run by calling `manager.Start`.
func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmFactory, client *ethclient.Client) (*Manager, error) {
func New(ctx context.Context, cfg *config.RollerManagerConfig, db *gorm.DB) (*Manager, error) {
    v, err := verifier.NewVerifier(cfg.Verifier)
    if err != nil {
        return nil, err
@@ -121,8 +121,10 @@ func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmF
        sessions:           make(map[string]*session),
        failedSessionInfos: make(map[string]*SessionInfo),
        verifier:           v,
        orm:                orm,
        Client:             client,
        l2BlockOrm:         orm.NewL2Block(db),
        chunkOrm:           orm.NewChunk(db),
        batchOrm:           orm.NewBatch(db),
        proverTaskOrm:      orm.NewProverTask(db),
        tokenCache:         cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
        verifierWorkerPool: workerpool.NewWorkerPool(cfg.MaxVerifierWorkers),
    }, nil
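With this constructor change, callers supply a *gorm.DB rather than an OrmFactory plus eth client. A hedged sketch of the new call site (the package names, config fields, and error handling here are illustrative, not part of this diff):

    db, err := utils.InitDB(cfg.DBConfig)
    if err != nil {
        log.Crit("failed to init db connection", "err", err)
    }
    defer utils.CloseDB(db)

    rollerManager, err := coordinator.New(ctx, cfg.RollerManagerConfig, db)
    if err != nil {
        log.Crit("failed to create roller manager", "err", err)
    }
    rollerManager.Start()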
@@ -161,48 +163,42 @@ func (m *Manager) isRunning() bool {
// Loop keeps the manager running.
func (m *Manager) Loop() {
    var (
        tick     = time.NewTicker(time.Second * 2)
        tasks    []*types.BlockBatch
        aggTasks []*types.AggTask
        tick       = time.NewTicker(time.Second * 2)
        chunkTasks []*orm.Chunk
        batchTasks []*orm.Batch
    )
    defer tick.Stop()

    for {
        select {
        case <-tick.C:
            // load and send aggregator tasks
            if len(aggTasks) == 0 && m.orm != nil {
            // load and send batch tasks
            if len(batchTasks) == 0 {
                var err error
                aggTasks, err = m.orm.GetUnassignedAggTasks()
                batchTasks, err = m.batchOrm.GetUnassignedBatches(m.ctx, m.GetNumberOfIdleRollers(message.ProofTypeBatch))
                if err != nil {
                    log.Error("failed to get unassigned aggregator proving tasks", "error", err)
                    log.Error("failed to get unassigned batch proving tasks", "error", err)
                    continue
                }
            }
            // Select aggregator type roller and send message
            for len(aggTasks) > 0 && m.StartAggProofGenerationSession(aggTasks[0], nil) {
                aggTasks = aggTasks[1:]
            // Select batch type roller and send message
            for len(batchTasks) > 0 && m.StartBatchProofGenerationSession(batchTasks[0], nil) {
                batchTasks = batchTasks[1:]
            }

            // load and send basic tasks
            if len(tasks) == 0 && m.orm != nil {
                var err error
            // load and send chunk tasks
            if len(chunkTasks) == 0 {
                // TODO: add cache
                if tasks, err = m.orm.GetBlockBatches(
                    map[string]interface{}{"proving_status": types.ProvingTaskUnassigned},
                    fmt.Sprintf(
                        "ORDER BY index %s LIMIT %d;",
                        m.cfg.OrderSession,
                        m.GetNumberOfIdleRollers(message.BasicProve),
                    ),
                ); err != nil {
                    log.Error("failed to get unassigned basic proving tasks", "error", err)
                var err error
                chunkTasks, err = m.chunkOrm.GetUnassignedChunks(m.ctx, m.GetNumberOfIdleRollers(message.ProofTypeChunk))
                if err != nil {
                    log.Error("failed to get unassigned chunk proving tasks", "error", err)
                    continue
                }
            }
            // Select basic type roller and send message
            for len(tasks) > 0 && m.StartBasicProofGenerationSession(tasks[0], nil) {
                tasks = tasks[1:]
            // Select chunk type roller and send message
            for len(chunkTasks) > 0 && m.StartChunkProofGenerationSession(chunkTasks[0], nil) {
                chunkTasks = chunkTasks[1:]
            }
        case <-m.ctx.Done():
            if m.ctx.Err() != nil {
@@ -217,58 +213,50 @@ func (m *Manager) Loop() {
}

func (m *Manager) restorePrevSessions() {
    // m.orm may be nil in scroll tests
    if m.orm == nil {
        return
    }

    m.mu.Lock()
    defer m.mu.Unlock()

    var hashes []string
    // load assigned aggregator tasks from db
    aggTasks, err := m.orm.GetAssignedAggTasks()
    // load assigned batch tasks from db
    batchTasks, err := m.batchOrm.GetAssignedBatches(m.ctx)
    if err != nil {
        log.Error("failed to load assigned aggregator tasks from db", "error", err)
        log.Error("failed to load assigned batch tasks from db", "error", err)
        return
    }
    for _, aggTask := range aggTasks {
        hashes = append(hashes, aggTask.ID)
    for _, batchTask := range batchTasks {
        hashes = append(hashes, batchTask.Hash)
    }
    // load assigned basic tasks from db
    batchHashes, err := m.orm.GetAssignedBatchHashes()
    // load assigned chunk tasks from db
    chunkTasks, err := m.chunkOrm.GetAssignedChunks(m.ctx)
    if err != nil {
        log.Error("failed to get assigned batch batchHashes from db", "error", err)
        return
    }
    hashes = append(hashes, batchHashes...)

    prevSessions, err := m.orm.GetSessionInfosByHashes(hashes)
    for _, chunkTask := range chunkTasks {
        hashes = append(hashes, chunkTask.Hash)
    }
    prevSessions, err := m.proverTaskOrm.GetProverTasksByHashes(m.ctx, hashes)
    if err != nil {
        log.Error("failed to recover roller session info from db", "error", err)
        return
    }

    proverTasksMaps := make(map[string][]*orm.ProverTask)
    for _, v := range prevSessions {
        sess := &session{
            info:       v,
            finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
        }
        m.sessions[sess.info.ID] = sess

        log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
        for _, roller := range sess.info.Rollers {
            log.Info(
                "restore roller info for session",
                "session id", sess.info.ID,
                "roller name", roller.Name,
                "prove type", sess.info.ProveType,
                "public key", roller.PublicKey,
                "proof status", roller.Status)
        }

        go m.CollectProofs(sess)
        log.Info("restore roller info for session", "session start time", v.CreatedAt, "session id", v.TaskID, "roller name",
            v.ProverName, "proof type", v.TaskType, "public key", v.ProverPublicKey, "proof status", v.ProvingStatus)
        proverTasksMaps[v.TaskID] = append(proverTasksMaps[v.TaskID], v)
    }

    for taskID, proverTasks := range proverTasksMaps {
        sess := &session{
            taskID:      taskID,
            proverTasks: proverTasks,
            finishChan:  make(chan rollerProofStatus, proofAndPkBufferSize),
        }
        m.sessions[taskID] = sess
        go m.CollectProofs(sess)
    }
}

// HandleZkProof handles a ZkProof submitted from a roller.
@@ -287,49 +275,53 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
    if !ok {
        return fmt.Errorf("proof generation session for id %v does not exist", msg.ID)
    }
    proofTime := time.Since(time.Unix(sess.info.StartTimestamp, 0))

    var proverTask *orm.ProverTask
    for _, si := range sess.proverTasks {
        // find the prover task that sent this proof msg
        if si.TaskID == msg.ID && si.ProverPublicKey == pk {
            proverTask = si
        }
    }

    if proverTask == nil {
        return fmt.Errorf("proof generation session for id %v pk:%s does not exist", msg.ID, pk)
    }

    proofTime := time.Since(proverTask.CreatedAt)
    proofTimeSec := uint64(proofTime.Seconds())

    // Ensure this roller is eligible to participate in the session.
    roller, ok := sess.info.Rollers[pk]
    if !ok {
        return fmt.Errorf("roller %s %s (%s) is not eligible to partake in proof session %v", roller.Name, sess.info.ProveType, roller.PublicKey, msg.ID)
    }
    if roller.Status == types.RollerProofValid {
    // Ensure this roller is eligible to participate in the prover task.
    if types.RollerProveStatus(proverTask.ProvingStatus) == types.RollerProofValid {
        // In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
        // TODO: Defend invalid proof resubmissions by one of the following two methods:
        // (i) slash the roller for each submission of invalid proof
        // (ii) set the maximum failure retry times
        log.Warn(
            "roller has already submitted valid proof in proof session",
            "roller name", roller.Name,
            "roller pk", roller.PublicKey,
            "prove type", sess.info.ProveType,
            "roller name", proverTask.ProverName,
            "roller pk", proverTask.ProverPublicKey,
            "proof type", proverTask.TaskType,
            "proof id", msg.ID,
        )
        return nil
    }
    log.Info(
        "handling zk proof",
        "proof id", msg.ID,
        "roller name", roller.Name,
        "roller pk", roller.PublicKey,
        "prove type", sess.info.ProveType,
        "proof time", proofTimeSec,
    )

    log.Info("handling zk proof", "proof id", msg.ID, "roller name", proverTask.ProverName, "roller pk",
        proverTask.ProverPublicKey, "proof type", proverTask.TaskType, "proof time", proofTimeSec)

    defer func() {
        // TODO: maybe we should use db tx for the whole process?
        // Roll back current proof's status.
        if dbErr != nil {
            if msg.Type == message.BasicProve {
                if err := m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskUnassigned); err != nil {
                    log.Error("fail to reset basic task status as Unassigned", "msg.ID", msg.ID)
            if msg.Type == message.ProofTypeChunk {
                if err := m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskUnassigned); err != nil {
                    log.Error("fail to reset chunk task status as Unassigned", "msg.ID", msg.ID)
                }
            }
            if msg.Type == message.AggregatorProve {
                if err := m.orm.UpdateAggTaskStatus(msg.ID, types.ProvingTaskUnassigned); err != nil {
                    log.Error("fail to reset aggregator task status as Unassigned", "msg.ID", msg.ID)
            if msg.Type == message.ProofTypeBatch {
                if err := m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskUnassigned); err != nil {
                    log.Error("fail to reset batch task status as Unassigned", "msg.ID", msg.ID)
                }
            }
        }
@@ -344,13 +336,13 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {

    if msg.Status != message.StatusOk {
        coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
        m.updateMetricRollerProofsGeneratedFailedTimeTimer(roller.PublicKey, proofTime)
        m.updateMetricRollerProofsGeneratedFailedTimeTimer(proverTask.ProverPublicKey, proofTime)
        log.Info(
            "proof generated by roller failed",
            "proof id", msg.ID,
            "roller name", roller.Name,
            "roller pk", roller.PublicKey,
            "prove type", msg.Type,
            "roller name", proverTask.ProverName,
            "roller pk", proverTask.ProverPublicKey,
            "proof type", msg.Type,
            "proof time", proofTimeSec,
            "error", msg.Error,
        )
@@ -358,19 +350,23 @@
    }

    // store proof content
    if msg.Type == message.BasicProve {
        if dbErr = m.orm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
            log.Error("failed to store basic proof into db", "error", dbErr)
    if msg.Type == message.ProofTypeChunk {
        if dbErr = m.chunkOrm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
            log.Error("failed to store chunk proof into db", "error", dbErr)
            return dbErr
        }
        if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskProved); dbErr != nil {
            log.Error("failed to update basic task status as proved", "error", dbErr)
        if dbErr = m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskProved); dbErr != nil {
            log.Error("failed to update chunk task status as proved", "error", dbErr)
            return dbErr
        }
    }
    if msg.Type == message.AggregatorProve {
        if dbErr = m.orm.UpdateProofForAggTask(msg.ID, msg.Proof); dbErr != nil {
            log.Error("failed to store aggregator proof into db", "error", dbErr)
    if msg.Type == message.ProofTypeBatch {
        if dbErr = m.batchOrm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
            log.Error("failed to store batch proof into db", "error", dbErr)
            return dbErr
        }
        if dbErr = m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskProved); dbErr != nil {
            log.Error("failed to update batch task status as proved", "error", dbErr)
            return dbErr
        }
    }
@@ -378,31 +374,35 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
    coordinatorProofsReceivedTotalCounter.Inc(1)

    var verifyErr error
    // TODO: wrap both basic verifier and aggregator verifier
    // TODO: wrap both chunk verifier and batch verifier
    success, verifyErr = m.verifyProof(msg.Proof)
    if verifyErr != nil {
        // TODO: this is only a temp workaround for testnet, we should return err in real cases
        success = false
        log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", roller.Name,
            "roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
        log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", proverTask.ProverName,
            "roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
        // TODO: Roller needs to be slashed if proof is invalid.
    }

    if success {
        if msg.Type == message.AggregatorProve {
            if dbErr = m.orm.UpdateAggTaskStatus(msg.ID, types.ProvingTaskVerified); dbErr != nil {
        if msg.Type == message.ProofTypeChunk {
            if dbErr = m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskVerified); dbErr != nil {
                log.Error(
                    "failed to update aggregator proving_status",
                    "failed to update chunk proving_status",
                    "msg.ID", msg.ID,
                    "status", types.ProvingTaskVerified,
                    "error", dbErr)
                return dbErr
            }
            if err := m.checkAreAllChunkProofsReady(msg.ID); err != nil {
                log.Error("failed to check are all chunk proofs ready", "error", err)
                return err
            }
        }
        if msg.Type == message.BasicProve {
            if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskVerified); dbErr != nil {
        if msg.Type == message.ProofTypeBatch {
            if dbErr = m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskVerified); dbErr != nil {
                log.Error(
                    "failed to update basic proving_status",
                    "failed to update batch proving_status",
                    "msg.ID", msg.ID,
                    "status", types.ProvingTaskVerified,
                    "error", dbErr)
@@ -411,18 +411,51 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
}
|
||||
|
||||
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(roller.PublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec)
|
||||
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(proverTask.ProverPublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", proverTask.ProverName,
|
||||
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec)
|
||||
} else {
|
||||
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsVerifiedFailedTimeTimer(roller.PublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
m.updateMetricRollerProofsVerifiedFailedTimeTimer(proverTask.ProverPublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", proverTask.ProverName,
|
||||
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Manager) checkAreAllChunkProofsReady(chunkHash string) error {
|
||||
batchHash, err := m.chunkOrm.GetChunkBatchHash(m.ctx, chunkHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(m.ctx, batchHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if allReady {
|
||||
err := m.chunkOrm.UpdateChunkProofsStatusByBatchHash(m.ctx, batchHash, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
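
// A self-contained sketch of the readiness check above: when the last chunk
// of a batch is verified, the batch's chunk-proofs-ready flag is flipped so
// the batch task can be dispatched. The in-memory store stands in for
// chunkOrm; field names here are assumptions, not the repository's schema.
package chunkready

type chunkStore struct {
	batchOf          map[string]string   // chunk hash -> batch hash
	chunksOf         map[string][]string // batch hash -> chunk hashes
	verified         map[string]bool     // chunk hash -> proof verified
	chunkProofsReady map[string]bool     // batch hash -> all chunk proofs ready
}

func (s *chunkStore) onChunkVerified(chunkHash string) {
	s.verified[chunkHash] = true
	batchHash := s.batchOf[chunkHash]
	for _, c := range s.chunksOf[batchHash] {
		if !s.verified[c] {
			return // some chunks of this batch are still proving
		}
	}
	s.chunkProofsReady[batchHash] = true // batch task may now be dispatched
}
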
// checkAttemptsExceeded uses the count of prover task records to check whether the attempt limit has been reached
func (m *Manager) checkAttemptsExceeded(hash string) bool {
proverTasks, err := m.proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{hash})
if err != nil {
log.Error("get session info error", "hash id", hash, "error", err)
return true
}

if len(proverTasks) >= int(m.cfg.SessionAttempts) {
return true
}
return false
}

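
// A sketch of the retry budget enforced above: each dispatch writes prover
// task rows, so counting the rows stored for a task hash approximates how
// many attempts the task has consumed. Interface and field names are
// assumptions for illustration only.
package attempts

type taskStore interface {
	CountProverTasks(taskID string) (int, error)
}

// exceeded reports whether taskID has used up its session attempts. On a
// store error it returns true, matching the conservative behavior in the
// diff (fail closed rather than dispatch again).
func exceeded(store taskStore, taskID string, maxAttempts int) bool {
	n, err := store.CountProverTasks(taskID)
	if err != nil {
		return true
	}
	return n >= maxAttempts
}
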
// CollectProofs collects proofs corresponding to a proof generation session.
func (m *Manager) CollectProofs(sess *session) {
coordinatorSessionsActiveNumberGauge.Inc(1)
@@ -432,48 +465,47 @@ func (m *Manager) CollectProofs(sess *session) {
select {
// Executed after the timeout set in config.json; consider all rollers failed.
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
// Check if session can be replayed
if sess.info.Attempts < m.cfg.SessionAttempts {
if !m.checkAttemptsExceeded(sess.taskID) {
var success bool
if sess.info.ProveType == message.AggregatorProve {
success = m.StartAggProofGenerationSession(nil, sess)
} else if sess.info.ProveType == message.BasicProve {
success = m.StartBasicProofGenerationSession(nil, sess)
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeBatch {
success = m.StartBatchProofGenerationSession(nil, sess)
} else if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeChunk {
success = m.StartChunkProofGenerationSession(nil, sess)
}
if success {
m.mu.Lock()
for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
for _, v := range sess.proverTasks {
m.freeTaskIDForRoller(v.ProverPublicKey, v.TaskID)
}
m.mu.Unlock()
log.Info("Retrying session", "session id:", sess.info.ID)
log.Info("Retrying session", "session id:", sess.taskID)
return
}
}
// record failed session.
errMsg := "proof generation session ended without receiving any valid proofs"
m.addFailedSession(sess, errMsg)
log.Warn(errMsg, "session id", sess.info.ID)
log.Warn(errMsg, "session id", sess.taskID)
// Set status as skipped.
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
if sess.info.ProveType == message.BasicProve {
if err := m.orm.UpdateProvingStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset basic task_status as Unassigned", "id", sess.info.ID, "err", err)
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeChunk {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, sess.taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset chunk task_status as Unassigned", "task id", sess.taskID, "err", err)
}
}
if sess.info.ProveType == message.AggregatorProve {
if err := m.orm.UpdateAggTaskStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset aggregator task_status as Unassigned", "id", sess.info.ID, "err", err)
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeBatch {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, sess.taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset batch task_status as Unassigned", "task id", sess.taskID, "err", err)
}
}

m.mu.Lock()
for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
for _, v := range sess.proverTasks {
m.freeTaskIDForRoller(v.ProverPublicKey, v.TaskID)
}
delete(m.sessions, sess.info.ID)
delete(m.sessions, sess.taskID)
m.mu.Unlock()
coordinatorSessionsTimeoutTotalCounter.Inc(1)
return
@@ -481,24 +513,31 @@ func (m *Manager) CollectProofs(sess *session) {
// Executed after one of the rollers finishes sending its proof; return early if all rollers have sent results.
case ret := <-sess.finishChan:
m.mu.Lock()
sess.info.Rollers[ret.pk].Status = ret.status
if sess.isSessionFailed() {
if ret.typ == message.BasicProve {
if err := m.orm.UpdateProvingStatus(ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update basic proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
if ret.typ == message.AggregatorProve {
if err := m.orm.UpdateAggTaskStatus(ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update aggregator proving_status as failed", "msg.ID", ret.id, "error", err)
}
for idx := range sess.proverTasks {
if sess.proverTasks[idx].ProverPublicKey == ret.pk {
sess.proverTasks[idx].ProvingStatus = int16(ret.status)
}
}

if sess.isSessionFailed() {
if ret.typ == message.ProofTypeChunk {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
if ret.typ == message.ProofTypeBatch {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}
if err := m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "pk", ret.pk, "error", err)

if err := m.proverTaskOrm.UpdateProverTaskProvingStatus(m.ctx, ret.typ, ret.id, ret.pk, ret.status); err != nil {
log.Error("failed to update session info proving status",
"proof type", ret.typ, "task id", ret.id, "pk", ret.pk, "status", ret.status, "error", err)
}

// Check if all rollers have finished their tasks; rollers with valid results are indexed by public key.
finished, validRollers := sess.isRollersFinished()

@@ -508,11 +547,10 @@ func (m *Manager) CollectProofs(sess *session) {
randIndex := rand.Int63n(int64(len(validRollers)))
_ = validRollers[randIndex]
// TODO: reward winner

for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
for _, proverTask := range sess.proverTasks {
m.freeTaskIDForRoller(proverTask.ProverPublicKey, proverTask.TaskID)
delete(m.sessions, proverTask.TaskID)
}
delete(m.sessions, sess.info.ID)
m.mu.Unlock()

coordinatorSessionsSuccessTotalCounter.Inc(1)
@@ -528,14 +566,16 @@ func (m *Manager) CollectProofs(sess *session) {
// validRollers records the public keys of rollers who have finished their tasks correctly.
func (s *session) isRollersFinished() (bool, []string) {
var validRollers []string
for pk, roller := range s.info.Rollers {
if roller.Status == types.RollerProofValid {
validRollers = append(validRollers, pk)
for _, sessionInfo := range s.proverTasks {
if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofValid {
validRollers = append(validRollers, sessionInfo.ProverPublicKey)
continue
}
if roller.Status == types.RollerProofInvalid {

if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofInvalid {
continue
}

// Some rollers are still proving.
return false, nil
}
@@ -543,8 +583,8 @@ func (s *session) isRollersFinished() (bool, []string) {
}

func (s *session) isSessionFailed() bool {
for _, roller := range s.info.Rollers {
if roller.Status != types.RollerProofInvalid {
for _, sessionInfo := range s.proverTasks {
if types.RollerProveStatus(sessionInfo.ProvingStatus) != types.RollerProofInvalid {
return false
}
}
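
// A compact sketch of the aggregation rules implemented by
// isRollersFinished / isSessionFailed above, expressed over a plain status
// slice. The status values are stand-ins for types.RollerProveStatus.
package sessionstatus

type status int

const (
	assigned status = iota
	proofValid
	proofInvalid
)

// finished reports whether every roller has a terminal status, and returns
// the indexes (public keys in the real code) that produced valid proofs.
func finished(statuses []status) (bool, []int) {
	var valid []int
	for i, s := range statuses {
		switch s {
		case proofValid:
			valid = append(valid, i)
		case proofInvalid:
			// terminal, but not counted as valid
		default:
			return false, nil // some rollers are still proving
		}
	}
	return true, valid
}

// failed reports whether every roller ended with an invalid proof.
func failed(statuses []status) bool {
	for _, s := range statuses {
		if s != proofInvalid {
			return false
		}
	}
	return true
}
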
@@ -567,108 +607,100 @@ func (m *Manager) APIs() []rpc.API {
}
}

// StartBasicProofGenerationSession starts a basic proof generation session
func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevSession *session) (success bool) {
// StartChunkProofGenerationSession starts a chunk proof generation session
func (m *Manager) StartChunkProofGenerationSession(task *orm.Chunk, prevSession *session) (success bool) {
var taskID string
if task != nil {
taskID = task.Hash
} else {
taskID = prevSession.info.ID
taskID = prevSession.taskID
}
if m.GetNumberOfIdleRollers(message.BasicProve) == 0 {
log.Warn("no idle basic roller when starting proof generation session", "id", taskID)
if m.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
log.Warn("no idle chunk roller when starting proof generation session", "id", taskID)
return false
}

log.Info("start basic proof generation session", "id", taskID)
log.Info("start chunk proof generation session", "id", taskID)

defer func() {
if !success {
if task != nil {
if err := m.orm.UpdateProvingStatus(taskID, types.ProvingTaskUnassigned); err != nil {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", taskID, "err", err)
}
} else {
if err := m.orm.UpdateProvingStatus(taskID, types.ProvingTaskFailed); err != nil {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Failed", "id", taskID, "err", err)
}
}
}
}()

// Get block traces.
blockInfos, err := m.orm.GetL2BlockInfos(map[string]interface{}{"batch_hash": taskID})
// Get block hashes.
wrappedBlocks, err := m.l2BlockOrm.GetL2BlocksByChunkHash(m.ctx, taskID)
if err != nil {
log.Error(
"could not GetBlockInfos",
"batch_hash", taskID,
"Failed to fetch wrapped blocks",
"batch hash", taskID,
"error", err,
)
return false
}
blockHashes := make([]common.Hash, len(blockInfos))
for i, blockInfo := range blockInfos {
blockHashes[i] = common.HexToHash(blockInfo.Hash)
blockHashes := make([]common.Hash, len(wrappedBlocks))
for i, wrappedBlock := range wrappedBlocks {
blockHashes[i] = wrappedBlock.Header.Hash()
}

// Dispatch task to basic rollers.
rollers := make(map[string]*types.RollerStatus)
// Dispatch task to chunk rollers.
var proverTasks []*orm.ProverTask
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.BasicProve)
roller := m.selectRoller(message.ProofTypeChunk)
if roller == nil {
log.Info("selectRoller returns nil")
break
}
log.Info("roller is picked", "session id", taskID, "name", roller.Name, "public key", roller.PublicKey)
// send trace to roller
if !roller.sendTask(&message.TaskMsg{ID: taskID, Type: message.BasicProve, BlockHashes: blockHashes}) {
if !roller.sendTask(&message.TaskMsg{ID: taskID, Type: message.ProofTypeChunk, BlockHashes: blockHashes}) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
continue
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
proverTask := orm.ProverTask{
TaskID: taskID,
ProverPublicKey: roller.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: roller.Name,
ProvingStatus: int16(types.RollerAssigned),
CreatedAt: time.Now(), // Used in proverTasks, should be explicitly assigned here.
}
// Store prover task info.
if err = m.proverTaskOrm.SetProverTask(m.ctx, &proverTask); err != nil {
log.Error("db set session info fail", "session id", taskID, "error", err)
return false
}
proverTasks = append(proverTasks, &proverTask)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.ProofTypeChunk, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", proverTask.ProvingStatus)

}
// No roller assigned.
if len(rollers) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle basic rollers", m.GetNumberOfIdleRollers(message.BasicProve))
if len(proverTasks) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle chunk rollers", m.GetNumberOfIdleRollers(message.ProofTypeChunk))
return false
}

// Update session proving status as assigned.
if err = m.orm.UpdateProvingStatus(taskID, types.ProvingTaskAssigned); err != nil {
if err = m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", taskID, "err", err)
return false
}

// Create a proof generation session.
sess := &session{
info: &types.SessionInfo{
ID: taskID,
Rollers: rollers,
ProveType: message.BasicProve,
StartTimestamp: time.Now().Unix(),
Attempts: 1,
},
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
if prevSession != nil {
sess.info.Attempts += prevSession.info.Attempts
}

for _, roller := range sess.info.Rollers {
log.Info(
"assigned proof to roller",
"session id", sess.info.ID,
"session type", sess.info.ProveType,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof status", roller.Status)
}

// Store session info.
if err = m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "session id", sess.info.ID, "error", err)
return false
taskID: taskID,
proverTasks: proverTasks,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}

m.mu.Lock()
@@ -679,27 +711,27 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
return true
}

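
// A distilled sketch of the assign-or-revert pattern shared by both
// Start*ProofGenerationSession functions above: the deferred handler rolls
// the task back to Unassigned (fresh task) or Failed (exhausted retry) when
// dispatch does not succeed. Store and status names are stand-ins.
package dispatch

type statusStore interface {
	UpdateProvingStatus(taskID, status string) error
}

func startSession(store statusStore, taskID string, isFreshTask bool, assign func() bool) (success bool) {
	defer func() {
		if success {
			return
		}
		// Revert on failure so the task can be re-dispatched later.
		status := "failed"
		if isFreshTask {
			status = "unassigned"
		}
		_ = store.UpdateProvingStatus(taskID, status)
	}()
	return assign() // roller selection and task delivery happen here
}
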
// StartAggProofGenerationSession starts an aggregator proof generation.
func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSession *session) (success bool) {
// StartBatchProofGenerationSession starts a batch proof generation session.
func (m *Manager) StartBatchProofGenerationSession(task *orm.Batch, prevSession *session) (success bool) {
var taskID string
if task != nil {
taskID = task.ID
taskID = task.Hash
} else {
taskID = prevSession.info.ID
taskID = prevSession.taskID
}
if m.GetNumberOfIdleRollers(message.AggregatorProve) == 0 {
if m.GetNumberOfIdleRollers(message.ProofTypeBatch) == 0 {
log.Warn("no idle common roller when starting proof generation session", "id", taskID)
return false
}

log.Info("start aggregator proof generation session", "id", taskID)
log.Info("start batch proof generation session", "id", taskID)

defer func() {
if !success {
if task != nil {
if err := m.orm.UpdateAggTaskStatus(taskID, types.ProvingTaskUnassigned); err != nil {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", taskID, "err", err)
} else if err := m.orm.UpdateAggTaskStatus(taskID, types.ProvingTaskFailed); err != nil {
} else if err := m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Failed", "id", taskID, "err", err)
}
}
@@ -707,17 +739,17 @@ func (m *Manager) StartAggProofGenerationSessio

}()

// get agg task from db
subProofs, err := m.orm.GetSubProofsByAggTaskID(taskID)
// get chunk proofs from db
chunkProofs, err := m.chunkOrm.GetProofsByBatchHash(m.ctx, taskID)
if err != nil {
log.Error("failed to get sub proofs for aggregator task", "id", taskID, "error", err)
log.Error("failed to get chunk proofs for batch task", "session id", taskID, "error", err)
return false
}

// Dispatch task to basic rollers.
rollers := make(map[string]*types.RollerStatus)
// Dispatch task to batch rollers.
var proverTasks []*orm.ProverTask
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.AggregatorProve)
roller := m.selectRoller(message.ProofTypeBatch)
if roller == nil {
log.Info("selectRoller returns nil")
break
@@ -726,56 +758,49 @@ func (m *Manager) StartAggProofGenerationSessio
// send trace to roller
if !roller.sendTask(&message.TaskMsg{
ID: taskID,
Type: message.AggregatorProve,
SubProofs: subProofs,
Type: message.ProofTypeBatch,
SubProofs: chunkProofs,
}) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
continue
}

proverTask := orm.ProverTask{
TaskID: taskID,
ProverPublicKey: roller.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: roller.Name,
ProvingStatus: int16(types.RollerAssigned),
CreatedAt: time.Now(), // Used in proverTasks, should be explicitly assigned here.
}
// Store session info.
if err = m.proverTaskOrm.SetProverTask(context.Background(), &proverTask); err != nil {
log.Error("db set session info fail", "session id", taskID, "error", err)
return false
}

m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
proverTasks = append(proverTasks, &proverTask)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.ProofTypeBatch, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", proverTask.ProvingStatus)
}
// No roller assigned.
if len(rollers) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle aggregator rollers", m.GetNumberOfIdleRollers(message.AggregatorProve))
if len(proverTasks) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle batch rollers", m.GetNumberOfIdleRollers(message.ProofTypeBatch))
return false
}

// Update session proving status as assigned.
if err = m.orm.UpdateAggTaskStatus(taskID, types.ProvingTaskAssigned); err != nil {
if err = m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", taskID, "err", err)
return false
}

// Create a proof generation session.
sess := &session{
info: &types.SessionInfo{
ID: taskID,
Rollers: rollers,
ProveType: message.AggregatorProve,
StartTimestamp: time.Now().Unix(),
Attempts: 1,
},
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
if prevSession != nil {
sess.info.Attempts += prevSession.info.Attempts
}

for _, roller := range sess.info.Rollers {
log.Info(
"assigned proof to roller",
"session id", sess.info.ID,
"session type", sess.info.ProveType,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof status", roller.Status)
}

// Store session info.
if err = m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "session id", sess.info.ID, "error", err)
return false
taskID: taskID,
proverTasks: proverTasks,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}

m.mu.Lock()
@@ -789,7 +814,7 @@ func (m *Manager) StartAggProofGenerationSessio
func (m *Manager) addFailedSession(sess *session, errMsg string) {
m.mu.Lock()
defer m.mu.Unlock()
m.failedSessionInfos[sess.info.ID] = newSessionInfo(sess, types.ProvingTaskFailed, errMsg, true)
m.failedSessionInfos[sess.taskID] = newSessionInfo(sess, types.ProvingTaskFailed, errMsg, true)
}

// VerifyToken verifies the pubkey for the token and its expiration time

@@ -22,24 +22,36 @@ import (
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
"gorm.io/gorm"

"scroll-tech/database"
"scroll-tech/database/migrate"

"scroll-tech/coordinator"
client2 "scroll-tech/coordinator/client"
coordinator_config "scroll-tech/coordinator/config"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/internal/utils"
"scroll-tech/coordinator/verifier"

"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
cutils "scroll-tech/common/utils"
)

var (
base *docker.App
batchData *types.BatchData
dbCfg *config.DBConfig

base *docker.App

db *gorm.DB
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch

wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
chunk *types.Chunk
)

func TestMain(m *testing.M) {
@@ -53,31 +65,48 @@ func randomURL() string {
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}

func setEnv(t *testing.T) (err error) {
func setEnv(t *testing.T) {
base = docker.NewDockerApp()
base.RunDBImage(t)

dbCfg = &config.DBConfig{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}

var err error
db, err = utils.InitDB(dbCfg)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

batchOrm = orm.NewBatch(db)
chunkOrm = orm.NewChunk(db)
l2BlockOrm = orm.NewL2Block(db)

templateBlockTrace, err := os.ReadFile("../common/testdata/blockTrace_02.json")
if err != nil {
return err
}
// unmarshal blockTrace
wrappedBlock := &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace, wrappedBlock); err != nil {
return err
}
assert.NoError(t, err)
wrappedBlock1 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock1)
assert.NoError(t, err)

parentBatch := &types.BlockBatch{
Index: 1,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData = types.NewBatchData(parentBatch, []*types.WrappedBlock{wrappedBlock}, nil)
templateBlockTrace, err = os.ReadFile("../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock2 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock2)
assert.NoError(t, err)

return
chunk = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2}}
assert.NoError(t, err)
}

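
// A generic sketch of the fixture pattern setEnv uses above: read a JSON
// block trace from testdata and unmarshal it into a typed wrapper before
// building chunks. The local wrappedBlock type is a stand-in for the real
// types.WrappedBlock.
package fixtures

import (
	"encoding/json"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
)

type wrappedBlock struct {
	// Fields elided; the real types.WrappedBlock carries a header and txs.
}

func loadWrappedBlock(t *testing.T, path string) *wrappedBlock {
	raw, err := os.ReadFile(path)
	assert.NoError(t, err)
	wb := &wrappedBlock{}
	assert.NoError(t, json.Unmarshal(raw, wb))
	return wb
}
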
func TestApis(t *testing.T) {
// Set up the test environment.
base = docker.NewDockerApp()
assert.True(t, assert.NoError(t, setEnv(t)), "failed to setup the test environment.")
setEnv(t)

t.Run("TestHandshake", testHandshake)
t.Run("TestFailedHandshake", testFailedHandshake)
@@ -97,30 +126,28 @@ func TestApis(t *testing.T) {
}

func testHandshake(t *testing.T) {
// Reset db.
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()

roller := newMockRoller(t, "roller_test", wsURL)
defer roller.close()
roller1 := newMockRoller(t, "roller_test", wsURL, message.ProofTypeChunk)
defer roller1.close()

assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
roller2 := newMockRoller(t, "roller_test", wsURL, message.ProofTypeBatch)
defer roller2.close()

assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
}

func testFailedHandshake(t *testing.T) {
// Reset db.
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -174,37 +201,36 @@ func testFailedHandshake(t *testing.T) {
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
assert.Error(t, err)

assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
}

func testSeveralConnections(t *testing.T) {
// Reset db.
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()

var (
batch = 100
batch = 200
eg = errgroup.Group{}
rollers = make([]*mockRoller, batch)
)
for i := 0; i < batch; i++ {
for i := 0; i < batch; i += 2 {
idx := i
eg.Go(func() error {
rollers[idx] = newMockRoller(t, "roller_test_"+strconv.Itoa(idx), wsURL)
rollers[idx] = newMockRoller(t, "roller_test_"+strconv.Itoa(idx), wsURL, message.ProofTypeChunk)
rollers[idx+1] = newMockRoller(t, "roller_test_"+strconv.Itoa(idx+1), wsURL, message.ProofTypeBatch)
return nil
})
}
assert.NoError(t, eg.Wait())

// check roller's idle connections
assert.Equal(t, batch, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, batch/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, batch/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))

// close connection
for _, roller := range rollers {
@@ -218,7 +244,7 @@ func testSeveralConnections(t *testing.T) {
for {
select {
case <-tick:
if rollerManager.GetNumberOfIdleRollers(message.BasicProve) == 0 {
if rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
return
}
case <-tickStop:
@@ -229,60 +255,65 @@ func testSeveralConnections(t *testing.T) {
}

func testValidProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 3, wsURL)
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()

// create mock rollers.
rollers := make([]*mockRoller, 3)
rollers := make([]*mockRoller, 6)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
// only roller 0 submits a valid proof.
proofStatus := verifiedSuccess
if i > 0 {
proofStatus = generatedFailed
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL, proofType)

// only rollers 0 & 1 submit valid proofs.
proofStatus := generatedFailed
if i <= 1 {
proofStatus = verifiedSuccess
}
rollers[i].waitTaskAndSendProof(t, time.Second, false, proofStatus)
}

defer func() {
// close connection
for _, roller := range rollers {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))

var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
assert.NoError(t, err)
for i := range hashes {
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
hashes[i] = batchData.Hash().Hex()
}
assert.NoError(t, dbTx.Commit())

// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(hashes) > 0 {
for {
select {
case <-tick:
status, err := l2db.GetProvingStatusByHash(hashes[0])
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
if status == types.ProvingTaskVerified {
hashes = hashes[1:]
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskVerified {
return
}
case <-tickStop:
t.Error("failed to check proof status")
@@ -292,24 +323,24 @@ func testValidProof(t *testing.T) {
}

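
// The tests above and below all share one polling idiom: tick until both
// the chunk and batch rows reach the expected proving status, or fail on a
// deadline. A reusable sketch of that idiom (an assumed helper, not a
// function present in the repository):
package polltest

import (
	"testing"
	"time"
)

// waitForStatus polls check() every 500ms and fails the test after deadline.
func waitForStatus(t *testing.T, deadline time.Duration, check func() bool) {
	tick := time.NewTicker(500 * time.Millisecond)
	defer tick.Stop()
	stop := time.After(deadline)
	for {
		select {
		case <-tick.C:
			if check() {
				return
			}
		case <-stop:
			t.Error("failed to check proof status")
			return
		}
	}
}
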
func testInvalidProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 3, wsURL)
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()

// create mock rollers.
rollers := make([]*mockRoller, 3)
rollers := make([]*mockRoller, 6)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL, proofType)
rollers[i].waitTaskAndSendProof(t, time.Second, false, verifiedFailed)
}
defer func() {
@@ -318,29 +349,32 @@ func testInvalidProof(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))

var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, true)
assert.NoError(t, err)
for i := range hashes {
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
hashes[i] = batchData.Hash().Hex()
}
assert.NoError(t, dbTx.Commit())

// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(hashes) > 0 {
for {
select {
case <-tick:
status, err := l2db.GetProvingStatusByHash(hashes[0])
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
if status == types.ProvingTaskFailed {
hashes = hashes[1:]
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskFailed && batchProofStatus == types.ProvingTaskFailed {
return
}
case <-tickStop:
t.Error("failed to check proof status")
@@ -350,24 +384,24 @@ func testInvalidProof(t *testing.T) {
}

func testProofGeneratedFailed(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 3, wsURL)
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()

// create mock rollers.
rollers := make([]*mockRoller, 3)
rollers := make([]*mockRoller, 6)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL, proofType)
rollers[i].waitTaskAndSendProof(t, time.Second, false, generatedFailed)
}
defer func() {
@@ -376,29 +410,32 @@ func testProofGeneratedFailed(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))

var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, true)
assert.NoError(t, err)
for i := range hashes {
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
hashes[i] = batchData.Hash().Hex()
}
assert.NoError(t, dbTx.Commit())

// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(hashes) > 0 {
for {
select {
case <-tick:
status, err := l2db.GetProvingStatusByHash(hashes[0])
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
if status == types.ProvingTaskFailed {
hashes = hashes[1:]
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskFailed && batchProofStatus == types.ProvingTaskFailed {
return
}
case <-tickStop:
t.Error("failed to check proof status")
@@ -408,87 +445,80 @@ func testProofGeneratedFailed(t *testing.T) {
}

func testTimedoutProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()

// create the first mock roller, which will not send any proof.
roller1 := newMockRoller(t, "roller_test"+strconv.Itoa(0), wsURL)
// create the first chunk & batch mock rollers, which will not send any proof.
chunkRoller1 := newMockRoller(t, "roller_test"+strconv.Itoa(0), wsURL, message.ProofTypeChunk)
batchRoller1 := newMockRoller(t, "roller_test"+strconv.Itoa(1), wsURL, message.ProofTypeBatch)
defer func() {
// close connection
roller1.close()
chunkRoller1.close()
batchRoller1.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))

var (
hashesAssigned = make([]string, 1)
hashesVerified = make([]string, 1)
)
dbTx, err := l2db.Beginx()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, true)
assert.NoError(t, err)
for i := range hashesAssigned {
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
hashesAssigned[i] = batchData.Hash().Hex()
hashesVerified[i] = batchData.Hash().Hex()
}
assert.NoError(t, dbTx.Commit())

// verify proof status; it should be assigned, because the roller didn't send any proof
ok := utils.TryTimes(30, func() bool {
status, err := l2db.GetProvingStatusByHash(hashesAssigned[0])
ok := cutils.TryTimes(30, func() bool {
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
if err != nil {
return false
}
if status == types.ProvingTaskAssigned {
hashesAssigned = hashesAssigned[1:]
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
if err != nil {
return false
}
return len(hashesAssigned) == 0
return chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned
})
assert.Falsef(t, !ok, "failed to check proof status")

// create the second mock roller, which will send a valid proof.
roller2 := newMockRoller(t, "roller_test"+strconv.Itoa(1), wsURL)
roller2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
chunkRoller2 := newMockRoller(t, "roller_test"+strconv.Itoa(2), wsURL, message.ProofTypeChunk)
chunkRoller2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
batchRoller2 := newMockRoller(t, "roller_test"+strconv.Itoa(3), wsURL, message.ProofTypeBatch)
batchRoller2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
defer func() {
// close connection
roller2.close()
chunkRoller2.close()
batchRoller2.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))

// verify proof status; it should be verified now, because the second roller sent a valid proof
ok = utils.TryTimes(200, func() bool {
status, err := l2db.GetProvingStatusByHash(hashesVerified[0])
ok = cutils.TryTimes(200, func() bool {
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
if err != nil {
return false
}
if status == types.ProvingTaskVerified {
hashesVerified = hashesVerified[1:]
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
if err != nil {
return false
}
return len(hashesVerified) == 0
return chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskVerified
})
assert.Falsef(t, !ok, "failed to check proof status")
}

func testIdleRollerSelection(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
l2db, err := database.NewOrmFactory(base.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
|
||||
defer l2db.Close()
|
||||
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
|
||||
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
|
||||
defer func() {
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
@@ -497,7 +527,13 @@ func testIdleRollerSelection(t *testing.T) {
|
||||
// create mock rollers.
|
||||
rollers := make([]*mockRoller, 20)
|
||||
for i := 0; i < len(rollers); i++ {
|
||||
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
|
||||
var proofType message.ProofType
|
||||
if i%2 == 0 {
|
||||
proofType = message.ProofTypeChunk
|
||||
} else {
|
||||
proofType = message.ProofTypeBatch
|
||||
}
|
||||
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL, proofType)
|
||||
rollers[i].waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
|
||||
}
|
||||
defer func() {
|
||||
@@ -507,29 +543,32 @@ func testIdleRollerSelection(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
|
||||
assert.Equal(t, len(rollers), rollerManager.GetNumberOfIdleRollers(message.BasicProve))
|
||||
assert.Equal(t, len(rollers)/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, len(rollers)/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
|
||||
var hashes = make([]string, 1)
|
||||
dbTx, err := l2db.Beginx()
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
|
||||
assert.NoError(t, err)
|
||||
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
|
||||
assert.NoError(t, err)
|
||||
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
|
||||
assert.NoError(t, err)
|
||||
for i := range hashes {
|
||||
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
|
||||
hashes[i] = batchData.Hash().Hex()
|
||||
}
|
||||
assert.NoError(t, dbTx.Commit())
|
||||
|
||||
// verify proof status
|
||||
var (
|
||||
tick = time.Tick(500 * time.Millisecond)
|
||||
tickStop = time.Tick(10 * time.Second)
|
||||
)
|
||||
for len(hashes) > 0 {
|
||||
for {
|
||||
select {
|
||||
case <-tick:
|
||||
status, err := l2db.GetProvingStatusByHash(hashes[0])
|
||||
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
|
||||
assert.NoError(t, err)
|
||||
if status == types.ProvingTaskVerified {
|
||||
hashes = hashes[1:]
|
||||
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
|
||||
assert.NoError(t, err)
|
||||
if chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskVerified {
|
||||
return
|
||||
}
|
||||
case <-tickStop:
|
||||
t.Error("failed to check proof status")
|
||||
@@ -539,77 +578,84 @@ func testIdleRollerSelection(t *testing.T) {
|
||||
}
|
||||
|
||||
func testGracefulRestart(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
l2db, err := database.NewOrmFactory(base.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
|
||||
defer l2db.Close()
|
||||
|
||||
var hashes = make([]string, 1)
|
||||
dbTx, err := l2db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
for i := range hashes {
|
||||
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
|
||||
hashes[i] = batchData.Hash().Hex()
|
||||
}
|
||||
assert.NoError(t, dbTx.Commit())
|
||||
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
|
||||
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
|
||||
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
|
||||
assert.NoError(t, err)
|
||||
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
|
||||
assert.NoError(t, err)
|
||||
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// create mock roller
|
||||
roller := newMockRoller(t, "roller_test", wsURL)
|
||||
chunkRoller := newMockRoller(t, "roller_test", wsURL, message.ProofTypeChunk)
|
||||
batchRoller := newMockRoller(t, "roller_test", wsURL, message.ProofTypeBatch)
|
||||
// wait 10 seconds, coordinator restarts before roller submits proof
|
||||
roller.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)
|
||||
chunkRoller.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)
|
||||
batchRoller.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)
|
||||
|
||||
// wait for coordinator to dispatch task
|
||||
<-time.After(5 * time.Second)
|
||||
// the coordinator will delete the roller if the subscription is closed.
|
||||
roller.close()
|
||||
chunkRoller.close()
|
||||
batchRoller.close()
|
||||
|
||||
info, err := rollerManager.GetSessionInfo(dbChunk.Hash)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
|
||||
|
||||
// Close rollerManager and ws handler.
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
|
||||
// Setup new coordinator and ws server.
|
||||
newRollerManager, newHandler := setupCoordinator(t, base.DBConfig, 1, wsURL)
|
||||
newRollerManager, newHandler := setupCoordinator(t, 1, wsURL, false)
|
||||
defer func() {
|
||||
newHandler.Shutdown(context.Background())
|
||||
newRollerManager.Stop()
|
||||
}()
|
||||
|
||||
for i := range hashes {
|
||||
info, err := newRollerManager.GetSessionInfo(hashes[i])
|
||||
assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
|
||||
assert.NoError(t, err)
|
||||
info, err = newRollerManager.GetSessionInfo(dbChunk.Hash)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
|
||||
|
||||
// at this point, roller haven't submitted
|
||||
status, err := l2db.GetProvingStatusByHash(hashes[i])
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.ProvingTaskAssigned, status)
|
||||
}
|
||||
// at this point, roller haven't submitted
|
||||
status, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.ProvingTaskAssigned, status)
|
||||
status, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, status) // chunk proofs not ready yet
|
||||
|
||||
// will overwrite the roller client for `SubmitProof`
|
||||
roller.waitTaskAndSendProof(t, time.Millisecond*500, true, verifiedSuccess)
|
||||
defer roller.close()
|
||||
chunkRoller.waitTaskAndSendProof(t, time.Second, true, verifiedSuccess)
|
||||
batchRoller.waitTaskAndSendProof(t, time.Second, true, verifiedSuccess)
|
||||
defer func() {
|
||||
chunkRoller.close()
|
||||
batchRoller.close()
|
||||
}()
|
||||
|
||||
// verify proof status
|
||||
var (
|
||||
tick = time.Tick(500 * time.Millisecond)
|
||||
tickStop = time.Tick(15 * time.Second)
|
||||
)
|
||||
for len(hashes) > 0 {
|
||||
for {
|
||||
select {
|
||||
case <-tick:
|
||||
// this proves that the roller submits to the new coordinator,
|
||||
// because the roller client for `submitProof` has been overwritten
|
||||
status, err := l2db.GetProvingStatusByHash(hashes[0])
|
||||
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
|
||||
assert.NoError(t, err)
|
||||
if status == types.ProvingTaskVerified {
|
||||
hashes = hashes[1:]
|
||||
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
|
||||
assert.NoError(t, err)
|
||||
if chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskVerified {
|
||||
return
|
||||
}
|
||||
|
||||
case <-tickStop:
|
||||
t.Error("failed to check proof status")
|
||||
return
|
||||
@@ -618,12 +664,9 @@ func testGracefulRestart(t *testing.T) {
}

func testListRollers(t *testing.T) {
// Create db handler and reset db.
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -633,11 +676,13 @@ func testListRollers(t *testing.T) {
"roller_test_1",
"roller_test_2",
"roller_test_3",
"roller_test_4",
}

roller1 := newMockRoller(t, names[0], wsURL)
roller2 := newMockRoller(t, names[1], wsURL)
roller3 := newMockRoller(t, names[2], wsURL)
roller1 := newMockRoller(t, names[0], wsURL, message.ProofTypeChunk)
roller2 := newMockRoller(t, names[1], wsURL, message.ProofTypeBatch)
roller3 := newMockRoller(t, names[2], wsURL, message.ProofTypeChunk)
roller4 := newMockRoller(t, names[3], wsURL, message.ProofTypeBatch)
defer func() {
roller1.close()
roller2.close()
@@ -653,8 +698,9 @@ func testListRollers(t *testing.T) {
sort.Strings(rollersName)
assert.True(t, reflect.DeepEqual(names, rollersName))

// test ListRollers if one roller closed.
// test ListRollers if two rollers closed.
roller3.close()
roller4.close()
// wait coordinator free completely
time.Sleep(time.Second * 5)

@@ -668,24 +714,28 @@ func testListRollers(t *testing.T) {
assert.True(t, reflect.DeepEqual(names[:2], newRollersName))
}

func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, rollersPerSession uint8, wsURL string) (rollerManager *coordinator.Manager, handler *http.Server) {
// Get db handler.
db, err := database.NewOrmFactory(dbCfg)
assert.True(t, assert.NoError(t, err), "failed to get db handler.")
func setupCoordinator(t *testing.T, rollersPerSession uint8, wsURL string, resetDB bool) (rollerManager *coordinator.Manager, handler *http.Server) {
db, err := utils.InitDB(dbCfg)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
if resetDB {
assert.NoError(t, migrate.ResetDB(sqlDB))
}

rollerManager, err = coordinator.New(context.Background(), &coordinator_config.RollerManagerConfig{
rollerManager, err = coordinator.New(context.Background(), &config.RollerManagerConfig{
RollersPerSession: rollersPerSession,
Verifier: &coordinator_config.VerifierConfig{MockMode: true},
Verifier: &config.VerifierConfig{MockMode: true},
CollectionTime: 1,
TokenTimeToLive: 5,
MaxVerifierWorkers: 10,
SessionAttempts: 2,
}, db, nil)
}, db)
assert.NoError(t, err)
assert.NoError(t, rollerManager.Start())

// start ws service
handler, _, err = utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs(), flate.NoCompression)
handler, _, err = cutils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs(), flate.NoCompression)
assert.NoError(t, err)

return rollerManager, handler
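The reworked setupCoordinator takes a resetDB flag and opens the database through `utils.InitDB`; judging by the subsequent `db.DB()` call, it now returns a *gorm.DB whose underlying *sql.DB is unwrapped for the goose migrations. A hedged sketch of that unwrap step, assuming gorm v2 and a caller-supplied reset function (names here are illustrative, not from this diff):

package example

import (
	"database/sql"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

// openDB opens a gorm handle, unwraps the raw *sql.DB, and optionally
// resets the schema, mirroring the setupCoordinator flow above.
func openDB(dsn string, resetFn func(*sql.DB) error) (*gorm.DB, error) {
	db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
	if err != nil {
		return nil, err
	}
	sqlDB, err := db.DB() // gorm v2 exposes the underlying *sql.DB
	if err != nil {
		return nil, err
	}
	if resetFn != nil {
		if err := resetFn(sqlDB); err != nil {
			return nil, err
		}
	}
	return db, nil
}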
@@ -694,6 +744,7 @@ func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, rollersPerSession
type mockRoller struct {
rollerName string
privKey *ecdsa.PrivateKey
proofType message.ProofType

wsURL string
client *client2.Client
@@ -705,13 +756,14 @@ type mockRoller struct {
stopCh chan struct{}
}

func newMockRoller(t *testing.T, rollerName string, wsURL string) *mockRoller {
func newMockRoller(t *testing.T, rollerName string, wsURL string, proofType message.ProofType) *mockRoller {
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)

roller := &mockRoller{
rollerName: rollerName,
privKey: privKey,
proofType: proofType,
wsURL: wsURL,
taskCh: make(chan *message.TaskMsg, 4),
stopCh: make(chan struct{}),
@@ -733,8 +785,9 @@ func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscript
// create a new ws connection
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.rollerName,
Timestamp: uint32(time.Now().Unix()),
Name: r.rollerName,
Timestamp: uint32(time.Now().Unix()),
RollerType: r.proofType,
},
}
_ = authMsg.SignWithKey(r.privKey)
@@ -804,6 +857,7 @@ func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.D
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: task.ID,
Type: r.proofType,
Status: message.StatusOk,
Proof: &message.AggProof{},
},
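With the new proofType parameter, a test can stand up one mock roller per proof type, and each authenticates with RollerType set so the coordinator can route chunk tasks and batch tasks to the matching prover. A short usage fragment built from the test code above (the roller names are hypothetical):

// inside a test function, with wsURL already set up
chunkRoller := newMockRoller(t, "roller_test_chunk", wsURL, message.ProofTypeChunk)
batchRoller := newMockRoller(t, "roller_test_batch", wsURL, message.ProofTypeBatch)
defer chunkRoller.close()
defer batchRoller.close()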
@@ -16,7 +16,7 @@ type rollerMetrics struct {

func (m *Manager) updateMetricRollerProofsLastFinishedTimestampGauge(pk string) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
}
@@ -25,7 +25,7 @@ func (m *Manager) updateMetricRollerProofsLastFinishedTimestampGauge(pk string)

func (m *Manager) updateMetricRollerProofsLastAssignedTimestampGauge(pk string) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
}
@@ -34,7 +34,7 @@ func (m *Manager) updateMetricRollerProofsLastAssignedTimestampGauge(pk string)

func (m *Manager) updateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedSuccessTimeTimer.Update(d)
}
@@ -43,7 +43,7 @@ func (m *Manager) updateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d

func (m *Manager) updateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedFailedTimeTimer.Update(d)
}
@@ -52,7 +52,7 @@ func (m *Manager) updateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d t

func (m *Manager) updateMetricRollerProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsGeneratedFailedTimeTimer.Update(d)
}
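All five updaters repeat the same pool lookup, type assertion, and nil check. A hypothetical helper (not part of this diff) could factor that boilerplate out, reusing the Manager, rollerNode, and rollerMetrics types from this file:

// withRollerMetrics runs fn against a roller's metrics if the roller is
// registered and its metrics struct is non-nil; a drop-in refactor sketch.
func (m *Manager) withRollerMetrics(pk string, fn func(*rollerMetrics)) {
	if node, ok := m.rollerPool.Get(pk); ok {
		if rMs := node.(*rollerNode).metrics; rMs != nil {
			fn(rMs)
		}
	}
}

// usage sketch:
// m.withRollerMetrics(pk, func(rMs *rollerMetrics) {
// 	rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
// })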
@@ -20,7 +20,7 @@ type rollerNode struct {
// Roller name
Name string
// Roller type
Type message.ProveType
Type message.ProofType
// Roller public key
PublicKey string
// Roller version
@@ -34,7 +34,7 @@ type rollerNode struct {
// Time of message creation
registerTime time.Time

*rollerMetrics
metrics *rollerMetrics
}

func (r *rollerNode) sendTask(msg *message.TaskMsg) bool {
@@ -53,8 +53,8 @@ func (m *Manager) reloadRollerAssignedTasks(pubkey string) *cmap.ConcurrentMap {
defer m.mu.RUnlock()
taskIDs := cmap.New()
for id, sess := range m.sessions {
for pk, roller := range sess.info.Rollers {
if pk == pubkey && roller.Status == types.RollerAssigned {
for _, proverTask := range sess.proverTasks {
if proverTask.ProverPublicKey == pubkey && proverTask.ProvingStatus == int16(types.RollerAssigned) {
taskIDs.Set(id, struct{}{})
}
}
@@ -74,20 +74,20 @@ func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *m
rollerProofsLastFinishedTimestampGauge: geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/finished/timestamp/%s", pubkey), metrics.ScrollRegistry),
}
node = &rollerNode{
Name: identity.Name,
Type: identity.RollerType,
Version: identity.Version,
PublicKey: pubkey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
rollerMetrics: rMs,
Name: identity.Name,
Type: identity.RollerType,
Version: identity.Version,
PublicKey: pubkey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
metrics: rMs,
}
m.rollerPool.Set(pubkey, node)
}
roller := node.(*rollerNode)
// avoid reconnection too frequently.
if time.Since(roller.registerTime) < 60 {
log.Warn("roller reconnect too frequently", "roller_name", identity.Name, "roller_type", identity.RollerType, "public key", pubkey)
log.Warn("roller reconnect too frequently", "prover_name", identity.Name, "roller_type", identity.RollerType, "public key", pubkey)
return nil, fmt.Errorf("roller reconnect too frequently")
}
// update register time and status
@@ -116,7 +116,7 @@ func (m *Manager) freeTaskIDForRoller(pk string, id string) {
}

// GetNumberOfIdleRollers return the count of idle rollers.
func (m *Manager) GetNumberOfIdleRollers(rollerType message.ProveType) (count int) {
func (m *Manager) GetNumberOfIdleRollers(rollerType message.ProofType) (count int) {
for _, pk := range m.rollerPool.Keys() {
if val, ok := m.rollerPool.Get(pk); ok {
r := val.(*rollerNode)
@@ -128,7 +128,7 @@ func (m *Manager) GetNumberOfIdleRollers(rollerType message.ProveType) (count in
return count
}

func (m *Manager) selectRoller(rollerType message.ProveType) *rollerNode {
func (m *Manager) selectRoller(rollerType message.ProofType) *rollerNode {
pubkeys := m.rollerPool.Keys()
for len(pubkeys) > 0 {
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
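One thing worth flagging in the register path above: `time.Since(roller.registerTime) < 60` compares a time.Duration against the untyped constant 60, which Go reads as 60 nanoseconds, so the reconnect throttle is effectively always false after the first instant. A corrected sketch with an explicit unit:

// compare against a Duration with an explicit unit, not a bare constant
if time.Since(roller.registerTime) < 60*time.Second {
	return nil, fmt.Errorf("roller reconnect too frequently")
}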
@@ -5,7 +5,7 @@ package verifier
import (
"scroll-tech/common/types/message"

"scroll-tech/coordinator/config"
"scroll-tech/coordinator/internal/config"
)

const InvalidTestProof = "this is a invalid proof"

@@ -16,7 +16,7 @@ import (

"github.com/scroll-tech/go-ethereum/log"

"scroll-tech/coordinator/config"
"scroll-tech/coordinator/internal/config"

"scroll-tech/common/types/message"
)

@@ -11,7 +11,7 @@ import (

"scroll-tech/common/types/message"

"scroll-tech/coordinator/config"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/verifier"

"github.com/stretchr/testify/assert"
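These three hunks all retarget the config import to the internal/ tree. In Go, a package under internal/ can only be imported by code rooted at the parent of the internal/ directory, so this move fences the coordinator's configuration off from other modules in the repo:

// allowed: any package under scroll-tech/coordinator/...
import "scroll-tech/coordinator/internal/config"

// disallowed: a package outside scroll-tech/coordinator (for example,
// scroll-tech/bridge) will fail to compile with "use of internal package".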
@@ -5,7 +5,6 @@ go 1.19
require (
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.7
github.com/mattn/go-sqlite3 v1.14.14
github.com/pressly/goose/v3 v3.7.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/stretchr/testify v1.8.2
@@ -19,11 +18,11 @@ require (
github.com/go-stack/stack v1.8.1 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
golang.org/x/crypto v0.10.0 // indirect
golang.org/x/sys v0.9.0 // indirect
golang.org/x/tools v0.8.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect

@@ -52,8 +52,6 @@ github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bC
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
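The go.mod hunk drops github.com/jmoiron/sqlx from the direct requirements and demotes github.com/mattn/go-sqlite3 to an indirect dependency, which lines up with this diff deleting the sqlx-based aggTaskOrm further down and moving coordinator database access to gorm.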
@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, 7, int(cur))
assert.Equal(t, 6, int(cur))
}

func testMigrate(t *testing.T) {
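Worth noting: despite the "total number of tables" comment, `Current(pgDB.DB)` reports the latest applied goose migration version, and it lands on 6 here because the newest migration file after this commit is 00006_prover_task.sql (shown below).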
@@ -1,38 +0,0 @@
-- +goose Up
-- +goose StatementBegin

-- TODO: use foreign key for batch_id?
-- TODO: why tx_num is bigint?
create table block_trace
(
number BIGINT NOT NULL,
hash VARCHAR NOT NULL,
parent_hash VARCHAR NOT NULL,
trace JSON NOT NULL,
batch_hash VARCHAR DEFAULT NULL,
tx_num INTEGER NOT NULL,
gas_used BIGINT NOT NULL,
block_timestamp NUMERIC NOT NULL
);

create unique index block_trace_hash_uindex
on block_trace (hash);

create unique index block_trace_number_uindex
on block_trace (number);

create unique index block_trace_parent_uindex
on block_trace (number, parent_hash);

create unique index block_trace_parent_hash_uindex
on block_trace (hash, parent_hash);

create index block_trace_batch_hash_index
on block_trace (batch_hash);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists block_trace;
-- +goose StatementEnd
@@ -12,7 +12,7 @@ create table l1_message
calldata TEXT NOT NULL,
layer1_hash VARCHAR NOT NULL,
layer2_hash VARCHAR DEFAULT NULL,
status INTEGER DEFAULT 1,
status INTEGER NOT NULL DEFAULT 1,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);
@@ -30,4 +30,4 @@ on l1_block (number);
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_block;
-- +goose StatementEnd
-- +goose StatementEnd
@@ -1,50 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table l1_message
(
queue_index BIGINT NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
gas_limit BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
calldata TEXT NOT NULL,
layer1_hash VARCHAR NOT NULL,
layer2_hash VARCHAR DEFAULT NULL,
status INTEGER DEFAULT 1,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l1_message_hash_uindex
on l1_message (msg_hash);

create unique index l1_message_nonce_uindex
on l1_message (queue_index);

create index l1_message_height_index
on l1_message (height);

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l1_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();


-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l1_message;
-- +goose StatementEnd
@@ -1,50 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table l2_message
(
nonce BIGINT NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
calldata TEXT NOT NULL,
layer2_hash VARCHAR NOT NULL,
layer1_hash VARCHAR DEFAULT NULL,
proof TEXT DEFAULT NULL,
status INTEGER DEFAULT 1,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l2_message_hash_uindex
on l2_message (msg_hash);

create unique index l2_message_nonce_uindex
on l2_message (nonce);

create index l2_message_height_index
on l2_message (height);

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l2_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();


-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l2_message;
-- +goose StatementEnd
@@ -1,49 +0,0 @@
-- +goose Up
-- +goose StatementBegin

create table block_batch
(
hash VARCHAR NOT NULL,
index BIGINT NOT NULL,
start_block_number BIGINT NOT NULL,
start_block_hash VARCHAR NOT NULL,
end_block_number BIGINT NOT NULL,
end_block_hash VARCHAR NOT NULL,
parent_hash VARCHAR NOT NULL,
state_root VARCHAR NOT NULL,
total_tx_num BIGINT NOT NULL,
total_l1_tx_num BIGINT NOT NULL,
total_l2_gas BIGINT NOT NULL,
proving_status INTEGER DEFAULT 1,
proof BYTEA DEFAULT NULL,
proof_time_sec INTEGER DEFAULT 0,
rollup_status INTEGER DEFAULT 1,
commit_tx_hash VARCHAR DEFAULT NULL,
finalize_tx_hash VARCHAR DEFAULT NULL,
oracle_status INTEGER DEFAULT 1,
oracle_tx_hash VARCHAR DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
prover_assigned_at TIMESTAMP(0) DEFAULT NULL,
proved_at TIMESTAMP(0) DEFAULT NULL,
committed_at TIMESTAMP(0) DEFAULT NULL,
finalized_at TIMESTAMP(0) DEFAULT NULL
);

comment
on column block_batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';
comment
on column block_batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped, commit_failed, finalize_failed';
comment
on column block_batch.oracle_status is 'undefined, pending, importing, imported, failed';

create unique index block_batch_hash_uindex
on block_batch (hash);
create unique index block_batch_index_uindex
on block_batch (index);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists block_batch;
-- +goose StatementEnd
@@ -15,6 +15,7 @@ create table batch
batch_header BYTEA NOT NULL,

-- proof
chunk_proofs_ready SMALLINT NOT NULL DEFAULT 0,
proving_status SMALLINT NOT NULL DEFAULT 1,
proof BYTEA DEFAULT NULL,
prover_assigned_at TIMESTAMP(0) DEFAULT NULL,
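The new chunk_proofs_ready flag is what the coordinator test at the top of this diff observes: a batch stays ProvingTaskUnassigned until its chunk proofs have all landed. A hypothetical gorm v2 update for the moment the last chunk proof is verified (the Batch model and batchHash variable are illustrative, not from this diff):

// flip the flag once every chunk proof for the batch is verified
err := db.Model(&orm.Batch{}).
	Where("hash = ?", batchHash).
	Update("chunk_proofs_ready", 1).Error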
@@ -1,18 +0,0 @@
-- +goose Up
-- +goose StatementBegin

create table session_info
(
hash VARCHAR NOT NULL,
rollers_info BYTEA NOT NULL
);

create unique index session_info_hash_uindex
on session_info (hash);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists session_info;
-- +goose StatementEnd
@@ -1,33 +0,0 @@
-- +goose Up
-- +goose StatementBegin

create table l1_block
(
number BIGINT NOT NULL,
hash VARCHAR NOT NULL,
header_rlp TEXT NOT NULL,
base_fee BIGINT NOT NULL,
block_status INTEGER DEFAULT 1,
import_tx_hash VARCHAR DEFAULT NULL,
oracle_status INTEGER DEFAULT 1,
oracle_tx_hash VARCHAR DEFAULT NULL
);

comment
on column l1_block.block_status is 'undefined, pending, importing, imported, failed';

comment
on column l1_block.oracle_status is 'undefined, pending, importing, imported, failed';

create unique index l1_block_hash_uindex
on l1_block (hash);

create unique index l1_block_number_uindex
on l1_block (number);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l1_block;
-- +goose StatementEnd
database/migrate/migrations/00006_prover_task.sql (new file, 30 lines)
@@ -0,0 +1,30 @@
-- +goose Up
-- +goose StatementBegin

create table prover_task
(
id BIGSERIAL PRIMARY KEY,
task_id VARCHAR NOT NULL,
prover_public_key VARCHAR NOT NULL,
prover_name VARCHAR NOT NULL,
task_type SMALLINT NOT NULL DEFAULT 0,
proving_status SMALLINT NOT NULL DEFAULT 0,
failure_type SMALLINT NOT NULL DEFAULT 0,
reward BIGINT NOT NULL DEFAULT 0,
proof BYTEA DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL,

CONSTRAINT uk_tasktype_taskid_publickey UNIQUE (task_type, task_id, prover_public_key)
);

comment
on column batch.proving_status is 'roller assigned, roller proof valid, roller proof invalid';

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists prover_task;
-- +goose StatementEnd
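Two observations on the new migration. First, the `comment on column batch.proving_status ...` line annotates the batch table from inside the prover_task migration, which looks like a copy-paste slip; presumably `prover_task.proving_status` was intended. Second, the soft-delete `deleted_at` column and the `ProverPublicKey`/`ProvingStatus` fields referenced in the coordinator code above suggest a gorm-style model. A hypothetical sketch of what such a model might look like (not part of this diff):

import (
	"time"

	"gorm.io/gorm"
)

// ProverTask is an illustrative gorm model matching the prover_task schema above.
type ProverTask struct {
	ID              int64          `gorm:"column:id;primaryKey"`
	TaskID          string         `gorm:"column:task_id"`
	ProverPublicKey string         `gorm:"column:prover_public_key"`
	ProverName      string         `gorm:"column:prover_name"`
	TaskType        int16          `gorm:"column:task_type;default:0"`
	ProvingStatus   int16          `gorm:"column:proving_status;default:0"`
	FailureType     int16          `gorm:"column:failure_type;default:0"`
	Reward          int64          `gorm:"column:reward;default:0"`
	Proof           []byte         `gorm:"column:proof"`
	CreatedAt       time.Time      `gorm:"column:created_at"`
	UpdatedAt       time.Time      `gorm:"column:updated_at"`
	DeletedAt       gorm.DeletedAt `gorm:"column:deleted_at"` // enables gorm soft deletes
}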
@@ -1,38 +0,0 @@
-- +goose Up
-- +goose StatementBegin

create table agg_task
(
id VARCHAR NOT NULL,
start_batch_index BIGINT NOT NULL,
start_batch_hash VARCHAR NOT NULL,
end_batch_index BIGINT NOT NULL,
end_batch_hash VARCHAR NOT NULL,
proving_status SMALLINT DEFAULT 1,
proof BYTEA DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP
);

create unique index agg_task_hash_uindex
on agg_task (id);


CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
ON agg_task FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists agg_task;
-- +goose StatementEnd
@@ -1,104 +0,0 @@
package orm

import (
"encoding/json"

"github.com/jmoiron/sqlx"

"scroll-tech/common/types"
"scroll-tech/common/types/message"
)

type aggTaskOrm struct {
db *sqlx.DB
}

var _ AggTaskOrm = (*aggTaskOrm)(nil)

// NewAggTaskOrm creates an AggTaskOrm instance
func NewAggTaskOrm(db *sqlx.DB) AggTaskOrm {
return &aggTaskOrm{db: db}
}

func (a *aggTaskOrm) GetSubProofsByAggTaskID(id string) ([]*message.AggProof, error) {
var (
startIdx uint64
endIdx uint64
)
row := a.db.QueryRow("SELECT start_batch_index, end_batch_index FROM agg_task where id = $1", id)
err := row.Scan(&startIdx, &endIdx)
if err != nil {
return nil, err
}
rows, err := a.db.Queryx("SELECT proof FROM block_batch WHERE index>=$1 AND index<=$2 and proving_status = $3", startIdx, endIdx, types.ProvingTaskVerified)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()

var subProofs []*message.AggProof
for rows.Next() {
var proofByt []byte
err = rows.Scan(&proofByt)
if err != nil {
return nil, err
}

var proof message.AggProof
if err := json.Unmarshal(proofByt, &proof); err != nil {
return nil, err
}

subProofs = append(subProofs, &proof)
}
return subProofs, nil
}

func (a *aggTaskOrm) GetUnassignedAggTasks() ([]*types.AggTask, error) {
rows, err := a.db.Queryx("SELECT * FROM agg_task where proving_status = 1;")
if err != nil {
return nil, err
}
return a.rowsToAggTask(rows)
}

func (a *aggTaskOrm) GetAssignedAggTasks() ([]*types.AggTask, error) {
rows, err := a.db.Queryx(`SELECT * FROM agg_task WHERE proving_status IN ($1, $2)`, types.ProvingTaskAssigned, types.ProvingTaskProved)
if err != nil {
return nil, err
}
return a.rowsToAggTask(rows)
}

func (a *aggTaskOrm) InsertAggTask(id string, startBatchIndex uint64, startBatchHash string, endBatchIndex uint64, endBatchHash string) error {
sqlStr := "INSERT INTO agg_task (id, start_batch_index, start_batch_hash, end_batch_index, end_batch_hash) VALUES ($1, $2, $3, $4, $5)"
_, err := a.db.Exec(sqlStr, id, startBatchIndex, startBatchHash, endBatchIndex, endBatchHash)
return err
}

func (a *aggTaskOrm) UpdateAggTaskStatus(aggTaskID string, status types.ProvingStatus) error {
_, err := a.db.Exec(a.db.Rebind("update agg_task set proving_status = ? where id = ?;"), status, aggTaskID)
return err
}

func (a *aggTaskOrm) UpdateProofForAggTask(aggTaskID string, proof *message.AggProof) error {
proofByt, err := json.Marshal(proof)
if err != nil {
return err
}
_, err = a.db.Exec(a.db.Rebind("update agg_task set proving_status = ?, proof = ? where id = ?;"), types.ProvingTaskProved, proofByt, aggTaskID)
return err
}

func (a *aggTaskOrm) rowsToAggTask(rows *sqlx.Rows) ([]*types.AggTask, error) {
var tasks []*types.AggTask
for rows.Next() {
task := new(types.AggTask)
err := rows.StructScan(task)
if err != nil {
return nil, err
}
tasks = append(tasks, task)
}
return tasks, nil
}
Some files were not shown because too many files have changed in this diff.