Compare commits

...

22 Commits

Author SHA1 Message Date
vyzo
95f2f7da0f fix(bridge): adjust gas fee cap in resumbitTransaction for rising basefee (#625)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-07-13 17:29:17 +08:00
Xi Lin
d2a1459768 fix(contracts): fix dropping message with nonce 0 (#640)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-13 16:48:45 +08:00
colin
f38dda8e02 feat(relayer): remove 'skipped' proving/rollup status and related code/tests (#642) 2023-07-13 16:39:02 +08:00
HAOYUatHZ
189ef09938 ci(github): only trigger build when merging (#641) 2023-07-13 09:07:26 +08:00
Péter Garamvölgyi
b79832566c feat(coordinator): Remove timestamp from roller protocol (#236) 2023-07-11 20:35:28 -07:00
Xi Lin
6841ef264c feat(contracts): add refund for skipped messages (#561)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-07-11 09:16:37 -07:00
ChuhanJin
425f74e763 feat(bridge-history-api): add new api to fetch all claimable txs under one address (#607)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-07-11 23:10:06 +08:00
georgehao
d59b2b4c41 refactor(coordinator): refactor task_prover's reward to decimal (#633)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2023-07-11 19:25:57 +08:00
Xi Lin
2323dd0daa fix(contracts): OZ-L1-H05 Users Can Lose Refund by Default (#605)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-07-10 10:02:58 -07:00
Péter Garamvölgyi
207d13c453 refactor(db): revise types in DB migrations (#631) 2023-07-10 14:39:25 +02:00
Péter Garamvölgyi
357173848a refactor(orm): set status fields explicitly during DB operations (#628)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-07-10 10:57:33 +02:00
Xi Lin
535ec91141 fix(contracts): OZ-L1-H07 L2 Standard ERC-20 Token Metadata Can Be Set Arbitrarily (#606)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-10 15:31:11 +08:00
Péter Garamvölgyi
8a0b526391 refactor(orm): Make ORM usage consistent (#627) 2023-07-10 09:18:08 +02:00
Steven
1b62c064ad fix(libzkp): replace tag with rev for halo2curves in libzkp (#629)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-10 12:41:27 +08:00
HAOYUatHZ
81ae4d3e7b refactor(libzkp): suppress compilation warnings (#575) 2023-07-10 12:28:51 +08:00
georgehao
a83e035845 feat(gorm): adapt the gorm logger with geth logger (#609)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-07-10 12:09:46 +08:00
Xi Lin
96452ee32b feat(contracts): add a simple usdc gateway (#587)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-07-08 11:39:37 +08:00
Xi Lin
811db8bcb9 feat(contracts): request ERC20 through gateway router (#566) 2023-07-07 12:24:44 -07:00
Péter Garamvölgyi
fbd50f3d82 refactor(orm): change chunk_proofs_ready to chunk_proofs_status (#626) 2023-07-07 19:22:24 +02:00
colin
faec817d34 feat(coordinator): upgrade coordinator to rollup v2 (#610)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-07-07 16:23:26 +02:00
ChuhanJin
72ef2cc80e fix(bridge-history-api): fix insert string slice and db type (#614)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2023-07-06 10:48:36 +08:00
Ahmed Castro
8f0690be41 refactor: turn L1ERC721Gateway and L1ERC1155Gateway internal functions virtual (#552)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Xi Lin <zimpha@gmail.com>
2023-07-05 09:48:36 +02:00
182 changed files with 5497 additions and 5125 deletions

View File

@@ -29,6 +29,25 @@ jobs:
if: github.event.pull_request.draft == false if: github.event.pull_request.draft == false
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: roller
compile:
if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
with: with:
toolchain: nightly-2022-12-10 toolchain: nightly-2022-12-10
@@ -47,13 +66,6 @@ jobs:
- name: Test - name: Test
run: | run: |
make roller make roller
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: roller
check: check:
if: github.event.pull_request.draft == false if: github.event.pull_request.draft == false
runs-on: ubuntu-latest runs-on: ubuntu-latest

View File

@@ -38,6 +38,17 @@ func setupQueryByAddressHandler(backend_app *mvc.Application) {
backend_app.Handle(new(controller.QueryAddressController)) backend_app.Handle(new(controller.QueryAddressController))
} }
func setupQueryClaimableHandler(backend_app *mvc.Application) {
// Register Dependencies.
backend_app.Register(
database,
service.NewHistoryService,
)
// Register Controllers.
backend_app.Handle(new(controller.QueryClaimableController))
}
func setupQueryByHashHandler(backend_app *mvc.Application) { func setupQueryByHashHandler(backend_app *mvc.Application) {
backend_app.Register( backend_app.Register(
database, database,
@@ -83,6 +94,7 @@ func action(ctx *cli.Context) error {
mvc.Configure(bridgeApp.Party("/api/txs"), setupQueryByAddressHandler) mvc.Configure(bridgeApp.Party("/api/txs"), setupQueryByAddressHandler)
mvc.Configure(bridgeApp.Party("/api/txsbyhashes"), setupQueryByHashHandler) mvc.Configure(bridgeApp.Party("/api/txsbyhashes"), setupQueryByHashHandler)
mvc.Configure(bridgeApp.Party("/api/claimable"), setupQueryClaimableHandler)
// TODO: make debug mode configurable // TODO: make debug mode configurable
err = bridgeApp.Listen(cfg.Server.HostPort, iris.WithLogLevel("debug")) err = bridgeApp.Listen(cfg.Server.HostPort, iris.WithLogLevel("debug"))

View File

@@ -15,6 +15,23 @@ type QueryHashController struct {
Service service.HistoryService Service service.HistoryService
} }
type QueryClaimableController struct {
Service service.HistoryService
}
func (c *QueryClaimableController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
txs, total, err := c.Service.GetClaimableTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
if err != nil {
return &model.QueryByAddressResponse{Message: "500", Data: &model.Data{}}, err
}
return &model.QueryByAddressResponse{Message: "ok",
Data: &model.Data{
Result: txs,
Total: total,
}}, nil
}
func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) { func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
message, total, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit)) message, total, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
if err != nil { if err != nil {

View File

@@ -8,11 +8,9 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
backendabi "bridge-history-api/abi" backendabi "bridge-history-api/abi"
"bridge-history-api/db" "bridge-history-api/db"
"bridge-history-api/db/orm"
"bridge-history-api/utils" "bridge-history-api/utils"
) )
@@ -100,19 +98,11 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
log.Warn("Failed to get l1 event logs", "err", err) log.Warn("Failed to get l1 event logs", "err", err)
return err return err
} }
depositL1CrossMsgs, msgHashes, relayedMsg, err := utils.ParseBackendL1EventLogs(logs) depositL1CrossMsgs, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
if err != nil { if err != nil {
log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err) log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err return err
} }
for i := range depositL1CrossMsgs {
for _, msgHash := range msgHashes {
if depositL1CrossMsgs[i].Layer1Hash == msgHash.TxHash.Hex() {
depositL1CrossMsgs[i].MsgHash = msgHash.MsgHash.Hex()
break
}
}
}
dbTx, err := database.Beginx() dbTx, err := database.Beginx()
if err != nil { if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err) log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
@@ -161,22 +151,12 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
log.Warn("Failed to get l2 event logs", "err", err) log.Warn("Failed to get l2 event logs", "err", err)
return err return err
} }
depositL2CrossMsgs, relayedMsg, L2SentMsgWrappers, err := utils.ParseBackendL2EventLogs(logs) depositL2CrossMsgs, relayedMsg, l2SentMsgs, err := utils.ParseBackendL2EventLogs(logs)
if err != nil { if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err) log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err return err
} }
var l2SentMsgs []*orm.L2SentMsg
for i := range depositL2CrossMsgs {
for _, l2SentMsgWrapper := range L2SentMsgWrappers {
if depositL2CrossMsgs[i].Layer2Hash == l2SentMsgWrapper.TxHash.Hex() {
depositL2CrossMsgs[i].MsgHash = l2SentMsgWrapper.L2SentMsg.MsgHash
l2SentMsgWrapper.L2SentMsg.TxSender = depositL2CrossMsgs[i].Sender
l2SentMsgs = append(l2SentMsgs, l2SentMsgWrapper.L2SentMsg)
break
}
}
}
dbTx, err := database.Beginx() dbTx, err := database.Beginx()
if err != nil { if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err) log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
@@ -194,12 +174,10 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err) log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
} }
if len(l2SentMsgs) > 0 { err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs)
err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs) if err != nil {
if err != nil { dbTx.Rollback()
dbTx.Rollback() log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
}
} }
err = dbTx.Commit() err = dbTx.Commit()
@@ -251,25 +229,3 @@ func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, datab
} }
return nil return nil
} }
func updateL1CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
for _, msgHash := range msgHashes {
err := database.UpdateL1CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
if err != nil {
log.Error("updateL1CrossMsgMsgHash: can not update layer1 cross msg MsgHash", "layer1 hash", msgHash.TxHash, "err", err)
continue
}
}
return nil
}
func updateL2CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
for _, msgHash := range msgHashes {
err := database.UpdateL2CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
if err != nil {
log.Error("updateL2CrossMsgMsgHash: can not update layer2 cross msg MsgHash", "layer2 hash", msgHash.TxHash, "err", err)
continue
}
}
return nil
}

View File

@@ -3,7 +3,7 @@
create table cross_message create table cross_message
( (
id BIGSERIAL PRIMARY KEY, id BIGSERIAL PRIMARY KEY,
msg_hash VARCHAR NOT NULL DEFAULT '', msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL, height BIGINT NOT NULL,
sender VARCHAR NOT NULL, sender VARCHAR NOT NULL,
target VARCHAR NOT NULL, target VARCHAR NOT NULL,
@@ -14,10 +14,8 @@ create table cross_message
layer2_token VARCHAR NOT NULL DEFAULT '', layer2_token VARCHAR NOT NULL DEFAULT '',
asset SMALLINT NOT NULL, asset SMALLINT NOT NULL,
msg_type SMALLINT NOT NULL, msg_type SMALLINT NOT NULL,
-- use array to support nft bridge token_ids TEXT NOT NULL DEFAULT '',
token_ids VARCHAR[] NOT NULL DEFAULT '{}', token_amounts TEXT NOT NULL DEFAULT '',
-- use array to support nft bridge
token_amounts VARCHAR[] NOT NULL DEFAULT '{}',
block_timestamp TIMESTAMP(0) DEFAULT NULL, block_timestamp TIMESTAMP(0) DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,

View File

@@ -19,6 +19,8 @@ CREATE INDEX idx_l1_msg_relayed_msg ON relayed_msg (layer1_hash, deleted_at);
CREATE INDEX idx_l2_msg_relayed_msg ON relayed_msg (layer2_hash, deleted_at); CREATE INDEX idx_l2_msg_relayed_msg ON relayed_msg (layer2_hash, deleted_at);
CREATE INDEX idx_msg_hash_deleted_at_relayed_msg on relayed_msg (msg_hash, deleted_at);
CREATE OR REPLACE FUNCTION update_timestamp() CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$ RETURNS TRIGGER AS $$
BEGIN BEGIN

View File

@@ -3,7 +3,8 @@
create table l2_sent_msg create table l2_sent_msg
( (
id BIGSERIAL PRIMARY KEY, id BIGSERIAL PRIMARY KEY,
tx_sender VARCHAR NOT NULL, original_sender VARCHAR NOT NULL DEFAULT '',
tx_hash VARCHAR NOT NULL,
sender VARCHAR NOT NULL, sender VARCHAR NOT NULL,
target VARCHAR NOT NULL, target VARCHAR NOT NULL,
value VARCHAR NOT NULL, value VARCHAR NOT NULL,
@@ -24,6 +25,8 @@ on l2_sent_msg (msg_hash) where deleted_at IS NULL;
create unique index uk_nonce create unique index uk_nonce
on l2_sent_msg (nonce) where deleted_at IS NULL; on l2_sent_msg (nonce) where deleted_at IS NULL;
CREATE INDEX idx_msg_hash_deleted_at_l2_sent_msg on l2_sent_msg (msg_hash, deleted_at);
CREATE OR REPLACE FUNCTION update_timestamp() CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$ RETURNS TRIGGER AS $$
BEGIN BEGIN

View File

@@ -50,8 +50,8 @@ type CrossMsg struct {
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"` Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Layer1Token string `json:"layer1_token" db:"layer1_token"` Layer1Token string `json:"layer1_token" db:"layer1_token"`
Layer2Token string `json:"layer2_token" db:"layer2_token"` Layer2Token string `json:"layer2_token" db:"layer2_token"`
TokenIDs []string `json:"token_ids" db:"token_ids"` TokenIDs string `json:"token_ids" db:"token_ids"`
TokenAmounts []string `json:"token_amounts" db:"token_amounts"` TokenAmounts string `json:"token_amounts" db:"token_amounts"`
Asset int `json:"asset" db:"asset"` Asset int `json:"asset" db:"asset"`
MsgType int `json:"msg_type" db:"msg_type"` MsgType int `json:"msg_type" db:"msg_type"`
Timestamp *time.Time `json:"timestamp" db:"block_timestamp"` Timestamp *time.Time `json:"timestamp" db:"block_timestamp"`
@@ -86,6 +86,7 @@ type L2CrossMsgOrm interface {
DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error
UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error
GetL2EarliestNoBlockTimestampHeight() (uint64, error) GetL2EarliestNoBlockTimestampHeight() (uint64, error)
GetL2CrossMsgByMsgHashList(msgHashList []string) ([]*CrossMsg, error)
} }
type RelayedMsgOrm interface { type RelayedMsgOrm interface {
@@ -106,6 +107,8 @@ type L2SentMsgOrm interface {
GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error)
UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error
GetLatestL2SentMsgBatchIndex() (int64, error) GetLatestL2SentMsgBatchIndex() (int64, error)
GetClaimableL2SentMsgByAddressWithOffset(address string, offset int64, limit int64) ([]*L2SentMsg, error)
GetClaimableL2SentMsgByAddressTotalNum(address string) (uint64, error)
DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
} }

View File

@@ -65,6 +65,7 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
"target": msg.Target, "target": msg.Target,
"amount": msg.Amount, "amount": msg.Amount,
"asset": msg.Asset, "asset": msg.Asset,
"msg_hash": msg.MsgHash,
"layer1_hash": msg.Layer1Hash, "layer1_hash": msg.Layer1Hash,
"layer1_token": msg.Layer1Token, "layer1_token": msg.Layer1Token,
"layer2_token": msg.Layer2Token, "layer2_token": msg.Layer2Token,
@@ -72,7 +73,7 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
"msg_type": Layer1Msg, "msg_type": Layer1Msg,
} }
} }
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_ids, amount, msg_type) values(:height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type);`, messageMaps) _, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, amount, asset, msg_hash, layer1_hash, layer1_token, layer2_token, token_ids, msg_type) values(:height, :sender, :target, :amount, :asset, :msg_hash, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :msg_type);`, messageMaps)
if err != nil { if err != nil {
log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "err", err) log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "err", err)
return err return err

View File

@@ -71,12 +71,12 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
var err error var err error
messageMaps := make([]map[string]interface{}, len(messages)) messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages { for i, msg := range messages {
messageMaps[i] = map[string]interface{}{ messageMaps[i] = map[string]interface{}{
"height": msg.Height, "height": msg.Height,
"sender": msg.Sender, "sender": msg.Sender,
"target": msg.Target, "target": msg.Target,
"asset": msg.Asset, "asset": msg.Asset,
"msg_hash": msg.MsgHash,
"layer2_hash": msg.Layer2Hash, "layer2_hash": msg.Layer2Hash,
"layer1_token": msg.Layer1Token, "layer1_token": msg.Layer1Token,
"layer2_token": msg.Layer2Token, "layer2_token": msg.Layer2Token,
@@ -85,7 +85,7 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
"msg_type": Layer2Msg, "msg_type": Layer2Msg,
} }
} }
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_ids, amount, msg_type) values(:height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type);`, messageMaps) _, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, msg_hash, layer2_hash, layer1_token, layer2_token, token_ids, amount, msg_type) values(:height, :sender, :target, :asset, :msg_hash, :layer2_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type);`, messageMaps)
if err != nil { if err != nil {
log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "err", err) log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "err", err)
return err return err
@@ -140,3 +140,22 @@ func (l *l2CrossMsgOrm) GetL2EarliestNoBlockTimestampHeight() (uint64, error) {
} }
return result, nil return result, nil
} }
func (l *l2CrossMsgOrm) GetL2CrossMsgByMsgHashList(msgHashList []string) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE msg_hash in ($1) AND msg_type = $2 AND deleted_at IS NULL;`, msgHashList, Layer2Msg)
for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, err
}
if len(results) == 0 {
log.Debug("no L2CrossMsg under given msg hashes", "msg hash list", msgHashList)
}
return results, nil
}

View File

@@ -10,20 +10,21 @@ import (
) )
type L2SentMsg struct { type L2SentMsg struct {
ID uint64 `json:"id" db:"id"` ID uint64 `json:"id" db:"id"`
TxSender string `json:"tx_sender" db:"tx_sender"` OriginalSender string `json:"original_sender" db:"original_sender"`
MsgHash string `json:"msg_hash" db:"msg_hash"` TxHash string `json:"tx_hash" db:"tx_hash"`
Sender string `json:"sender" db:"sender"` MsgHash string `json:"msg_hash" db:"msg_hash"`
Target string `json:"target" db:"target"` Sender string `json:"sender" db:"sender"`
Value string `json:"value" db:"value"` Target string `json:"target" db:"target"`
Height uint64 `json:"height" db:"height"` Value string `json:"value" db:"value"`
Nonce uint64 `json:"nonce" db:"nonce"` Height uint64 `json:"height" db:"height"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"` Nonce uint64 `json:"nonce" db:"nonce"`
MsgProof string `json:"msg_proof" db:"msg_proof"` BatchIndex uint64 `json:"batch_index" db:"batch_index"`
MsgData string `json:"msg_data" db:"msg_data"` MsgProof string `json:"msg_proof" db:"msg_proof"`
CreatedAt *time.Time `json:"created_at" db:"created_at"` MsgData string `json:"msg_data" db:"msg_data"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"` CreatedAt *time.Time `json:"created_at" db:"created_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"` UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
} }
type l2SentMsgOrm struct { type l2SentMsgOrm struct {
@@ -52,19 +53,20 @@ func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2Sen
messageMaps := make([]map[string]interface{}, len(messages)) messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages { for i, msg := range messages {
messageMaps[i] = map[string]interface{}{ messageMaps[i] = map[string]interface{}{
"tx_sender": msg.TxSender, "original_sender": msg.OriginalSender,
"sender": msg.Sender, "tx_hash": msg.TxHash,
"target": msg.Target, "sender": msg.Sender,
"value": msg.Value, "target": msg.Target,
"msg_hash": msg.MsgHash, "value": msg.Value,
"height": msg.Height, "msg_hash": msg.MsgHash,
"nonce": msg.Nonce, "height": msg.Height,
"batch_index": msg.BatchIndex, "nonce": msg.Nonce,
"msg_proof": msg.MsgProof, "batch_index": msg.BatchIndex,
"msg_data": msg.MsgData, "msg_proof": msg.MsgProof,
"msg_data": msg.MsgData,
} }
} }
_, err = dbTx.NamedExec(`insert into l2_sent_msg(tx_sender, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:tx_sender, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps) _, err = dbTx.NamedExec(`insert into l2_sent_msg(original_sender, tx_hash, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:original_sender, :tx_hash, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
if err != nil { if err != nil {
log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err) log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err)
return err return err
@@ -95,7 +97,7 @@ func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sql
} }
func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) { func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {
row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE msg_proof != '' AND deleted_at IS NULL ORDER BY batch_index DESC LIMIT 1;`) row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE batch_index != 0 AND deleted_at IS NULL ORDER BY batch_index DESC LIMIT 1;`)
var result sql.NullInt64 var result sql.NullInt64
if err := row.Scan(&result); err != nil { if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid { if err == sql.ErrNoRows || !result.Valid {
@@ -149,3 +151,28 @@ func (l *l2SentMsgOrm) DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int6
_, err := dbTx.Exec(`UPDATE l2_sent_msg SET deleted_at = current_timestamp WHERE height > $1;`, height) _, err := dbTx.Exec(`UPDATE l2_sent_msg SET deleted_at = current_timestamp WHERE height > $1;`, height)
return err return err
} }
func (l *l2SentMsgOrm) GetClaimableL2SentMsgByAddressWithOffset(address string, offset int64, limit int64) ([]*L2SentMsg, error) {
var results []*L2SentMsg
rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) ORDER BY id DESC LIMIT $2 OFFSET $3;`, address, limit, offset)
if err != nil {
return nil, err
}
for rows.Next() {
msg := &L2SentMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
return results, err
}
func (l *l2SentMsgOrm) GetClaimableL2SentMsgByAddressTotalNum(address string) (uint64, error) {
var count uint64
row := l.db.QueryRowx(`SELECT COUNT(*) FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1);`, address)
if err := row.Scan(&count); err != nil {
return 0, err
}
return count, nil
}

View File

@@ -47,6 +47,7 @@ type TxHistoryInfo struct {
type HistoryService interface { type HistoryService interface {
GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error)
GetClaimableTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
} }
// NewHistoryService returns a service backed with a "db" // NewHistoryService returns a service backed with a "db"
@@ -106,6 +107,47 @@ func updateCrossTxHash(msgHash string, txInfo *TxHistoryInfo, db db.OrmFactory)
} }
func (h *historyBackend) GetClaimableTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
var txHistories []*TxHistoryInfo
total, err := h.db.GetClaimableL2SentMsgByAddressTotalNum(address.Hex())
if err != nil || total == 0 {
return txHistories, 0, err
}
results, err := h.db.GetClaimableL2SentMsgByAddressWithOffset(address.Hex(), offset, limit)
if err != nil || len(results) == 0 {
return txHistories, 0, err
}
var msgHashList []string
for _, result := range results {
msgHashList = append(msgHashList, result.MsgHash)
}
crossMsgs, err := h.db.GetL2CrossMsgByMsgHashList(msgHashList)
if err != nil || len(crossMsgs) == 0 {
return txHistories, 0, err
}
crossMsgMap := make(map[string]*orm.CrossMsg)
for _, crossMsg := range crossMsgs {
crossMsgMap[crossMsg.MsgHash] = crossMsg
}
for _, result := range results {
txInfo := &TxHistoryInfo{
Hash: result.TxHash,
IsL1: false,
BlockNumber: result.Height,
FinalizeTx: &Finalized{},
ClaimInfo: GetCrossTxClaimInfo(result.MsgHash, h.db),
}
if crossMsg, exist := crossMsgMap[result.MsgHash]; exist {
txInfo.Amount = crossMsg.Amount
txInfo.To = crossMsg.Target
txInfo.BlockTimestamp = crossMsg.Timestamp
txInfo.CreatedAt = crossMsg.CreatedAt
}
txHistories = append(txHistories, txInfo)
}
return txHistories, total, err
}
func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) { func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
var txHistories []*TxHistoryInfo var txHistories []*TxHistoryInfo
total, err := h.db.GetTotalCrossMsgCountByAddress(address.String()) total, err := h.db.GetTotalCrossMsgCountByAddress(address.String())

View File

@@ -3,7 +3,6 @@ package utils
import ( import (
"context" "context"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
@@ -13,16 +12,6 @@ import (
"bridge-history-api/db/orm" "bridge-history-api/db/orm"
) )
type MsgHashWrapper struct {
MsgHash common.Hash
TxHash common.Hash
}
type L2SentMsgWrapper struct {
L2SentMsg *orm.L2SentMsg
TxHash common.Hash
}
type CachedParsedTxCalldata struct { type CachedParsedTxCalldata struct {
CallDataIndex uint64 CallDataIndex uint64
BatchIndices []uint64 BatchIndices []uint64
@@ -30,13 +19,13 @@ type CachedParsedTxCalldata struct {
EndBlocks []uint64 EndBlocks []uint64
} }
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrapper, []*orm.RelayedMsg, error) { func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, error) {
// Need use contract abi to parse event Log // Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up // Can only be tested after we have our contracts set up
var l1CrossMsg []*orm.CrossMsg var l1CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg var relayedMsgs []*orm.RelayedMsg
var msgHashes []MsgHashWrapper var msgHash string
for _, vlog := range logs { for _, vlog := range logs {
switch vlog.Topics[0] { switch vlog.Topics[0] {
case backendabi.L1DepositETHSig: case backendabi.L1DepositETHSig:
@@ -44,7 +33,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog) err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack DepositETH event", "err", err) log.Warn("Failed to unpack DepositETH event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err return l1CrossMsg, relayedMsgs, err
} }
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{ l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
@@ -53,13 +42,14 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
Amount: event.Amount.String(), Amount: event.Amount.String(),
Asset: int(orm.ETH), Asset: int(orm.ETH),
Layer1Hash: vlog.TxHash.Hex(), Layer1Hash: vlog.TxHash.Hex(),
MsgHash: msgHash,
}) })
case backendabi.L1DepositERC20Sig: case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{} event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog) err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack DepositERC20 event", "err", err) log.Warn("Failed to unpack DepositERC20 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err return l1CrossMsg, relayedMsgs, err
} }
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{ l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
@@ -70,13 +60,14 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
Layer1Hash: vlog.TxHash.Hex(), Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
MsgHash: msgHash,
}) })
case backendabi.L1DepositERC721Sig: case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{} event := backendabi.ERC721MessageEvent{}
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog) err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack DepositERC721 event", "err", err) log.Warn("Failed to unpack DepositERC721 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err return l1CrossMsg, relayedMsgs, err
} }
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{ l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
@@ -86,14 +77,15 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
Layer1Hash: vlog.TxHash.Hex(), Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
TokenIDs: []string{event.TokenID.String()}, TokenIDs: event.TokenID.String(),
MsgHash: msgHash,
}) })
case backendabi.L1DepositERC1155Sig: case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{} event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog) err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack DepositERC1155 event", "err", err) log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err return l1CrossMsg, relayedMsgs, err
} }
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{ l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
@@ -103,26 +95,26 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
Layer1Hash: vlog.TxHash.Hex(), Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
TokenIDs: []string{event.TokenID.String()}, TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(), Amount: event.Amount.String(),
MsgHash: msgHash,
}) })
case backendabi.L1SentMessageEventSignature: case backendabi.L1SentMessageEventSignature:
event := backendabi.L1SentMessageEvent{} event := backendabi.L1SentMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog) err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err) log.Warn("Failed to unpack SentMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err return l1CrossMsg, relayedMsgs, err
} }
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message) // since every deposit event will emit after a sent event, so can use this msg_hash as next withdraw event's msg_hash
msgHashes = append(msgHashes, MsgHashWrapper{ msgHash = ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).Hex()
MsgHash: msgHash,
TxHash: vlog.TxHash})
case backendabi.L1RelayedMessageEventSignature: case backendabi.L1RelayedMessageEventSignature:
event := backendabi.L1RelayedMessageEvent{} event := backendabi.L1RelayedMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog) err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err) log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err return l1CrossMsg, relayedMsgs, err
} }
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{ relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(), MsgHash: event.MessageHash.String(),
@@ -133,17 +125,17 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
} }
} }
return l1CrossMsg, msgHashes, relayedMsgs, nil return l1CrossMsg, relayedMsgs, nil
} }
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []L2SentMsgWrapper, error) { func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
// Need use contract abi to parse event Log // Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up // Can only be tested after we have our contracts set up
var l2CrossMsg []*orm.CrossMsg var l2CrossMsg []*orm.CrossMsg
// this is use to confirm finalized l1 msg // this is use to confirm finalized l1 msg
var relayedMsgs []*orm.RelayedMsg var relayedMsgs []*orm.RelayedMsg
var l2SentMsg []L2SentMsgWrapper var l2SentMsgs []*orm.L2SentMsg
for _, vlog := range logs { for _, vlog := range logs {
switch vlog.Topics[0] { switch vlog.Topics[0] {
case backendabi.L2WithdrawETHSig: case backendabi.L2WithdrawETHSig:
@@ -151,8 +143,9 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog) err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack WithdrawETH event", "err", err) log.Warn("Failed to unpack WithdrawETH event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err return l2CrossMsg, relayedMsgs, l2SentMsgs, err
} }
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{ l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
Sender: event.From.String(), Sender: event.From.String(),
@@ -160,14 +153,16 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Amount: event.Amount.String(), Amount: event.Amount.String(),
Asset: int(orm.ETH), Asset: int(orm.ETH),
Layer2Hash: vlog.TxHash.Hex(), Layer2Hash: vlog.TxHash.Hex(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
}) })
case backendabi.L2WithdrawERC20Sig: case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{} event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog) err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack WithdrawERC20 event", "err", err) log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err return l2CrossMsg, relayedMsgs, l2SentMsgs, err
} }
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{ l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
Sender: event.From.String(), Sender: event.From.String(),
@@ -183,8 +178,9 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog) err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack WithdrawERC721 event", "err", err) log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err return l2CrossMsg, relayedMsgs, l2SentMsgs, err
} }
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{ l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
Sender: event.From.String(), Sender: event.From.String(),
@@ -193,15 +189,16 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Hash: vlog.TxHash.Hex(), Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
TokenIDs: []string{event.TokenID.String()}, TokenIDs: event.TokenID.String(),
}) })
case backendabi.L2WithdrawERC1155Sig: case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{} event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog) err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err) log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err return l2CrossMsg, relayedMsgs, l2SentMsgs, err
} }
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{ l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
Sender: event.From.String(), Sender: event.From.String(),
@@ -210,7 +207,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Hash: vlog.TxHash.Hex(), Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
TokenIDs: []string{event.TokenID.String()}, TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(), Amount: event.Amount.String(),
}) })
case backendabi.L2SentMessageEventSignature: case backendabi.L2SentMessageEventSignature:
@@ -218,28 +215,27 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog) err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err) log.Warn("Failed to unpack SentMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err return l2CrossMsg, relayedMsgs, l2SentMsgs, err
} }
// since every withdraw event will emit after a sent event, so can use this msg_hash as next withdraw event's msg_hash
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message) msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
l2SentMsg = append(l2SentMsg, l2SentMsgs = append(l2SentMsgs,
L2SentMsgWrapper{ &orm.L2SentMsg{
TxHash: vlog.TxHash, Sender: event.Sender.Hex(),
L2SentMsg: &orm.L2SentMsg{ TxHash: vlog.TxHash.Hex(),
Sender: event.Sender.Hex(), Target: event.Target.Hex(),
Target: event.Target.Hex(), Value: event.Value.String(),
Value: event.Value.String(), MsgHash: msgHash.Hex(),
MsgHash: msgHash.Hex(), Height: vlog.BlockNumber,
Height: vlog.BlockNumber, Nonce: event.MessageNonce.Uint64(),
Nonce: event.MessageNonce.Uint64(), MsgData: hexutil.Encode(event.Message),
MsgData: hexutil.Encode(event.Message),
},
}) })
case backendabi.L2RelayedMessageEventSignature: case backendabi.L2RelayedMessageEventSignature:
event := backendabi.L2RelayedMessageEvent{} event := backendabi.L2RelayedMessageEvent{}
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog) err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err) log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err return l2CrossMsg, relayedMsgs, l2SentMsgs, err
} }
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{ relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(), MsgHash: event.MessageHash.String(),
@@ -249,7 +245,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
} }
} }
return l2CrossMsg, relayedMsgs, l2SentMsg, nil return l2CrossMsg, relayedMsgs, l2SentMsgs, nil
} }
func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) { func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {

View File

@@ -11,13 +11,13 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
cutils "scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/utils"
) )
var app *cli.App var app *cli.App
@@ -29,18 +29,18 @@ func init() {
app.Name = "event-watcher" app.Name = "event-watcher"
app.Usage = "The Scroll Event Watcher" app.Usage = "The Scroll Event Watcher"
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...) app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx) return utils.LogSetup(ctx)
} }
// Register `event-watcher-test` app for integration-test. // Register `event-watcher-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.EventWatcherApp) utils.RegisterSimulation(app, utils.EventWatcherApp)
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
// Load config file. // Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name) cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile) cfg, err := config.NewConfig(cfgFile)
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -48,13 +48,13 @@ func action(ctx *cli.Context) error {
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
db, err := utils.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
if err = utils.CloseDB(db); err != nil { if err = database.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -75,14 +75,14 @@ func action(ctx *cli.Context) error {
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db) l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db) l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
go cutils.Loop(subCtx, 10*time.Second, func() { go utils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil { if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
log.Error("Failed to fetch bridge contract", "err", loopErr) log.Error("Failed to fetch bridge contract", "err", loopErr)
} }
}) })
// Start l2 watcher process // Start l2 watcher process
go cutils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent) go utils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finish start all l2 functions // Finish start all l2 functions
log.Info("Start event-watcher successfully") log.Info("Start event-watcher successfully")

View File

@@ -11,14 +11,15 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
cutils "scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/utils" butils "scroll-tech/bridge/internal/utils"
) )
var app *cli.App var app *cli.App
@@ -31,31 +32,31 @@ func init() {
app.Usage = "The Scroll Gas Oracle" app.Usage = "The Scroll Gas Oracle"
app.Description = "Scroll Gas Oracle." app.Description = "Scroll Gas Oracle."
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...) app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx) return utils.LogSetup(ctx)
} }
// Register `gas-oracle-test` app for integration-test. // Register `gas-oracle-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.GasOracleApp) utils.RegisterSimulation(app, utils.GasOracleApp)
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
// Load config file. // Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name) cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile) cfg, err := config.NewConfig(cfgFile)
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
} }
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
db, err := utils.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
if err = utils.CloseDB(db); err != nil { if err = database.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -76,7 +77,8 @@ func action(ctx *cli.Context) error {
return err return err
} }
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db) l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig) l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil { if err != nil {
@@ -89,8 +91,8 @@ func action(ctx *cli.Context) error {
return err return err
} }
// Start l1 watcher process // Start l1 watcher process
go cutils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) { go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations) number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
if loopErr != nil { if loopErr != nil {
log.Error("failed to get block number", "err", loopErr) log.Error("failed to get block number", "err", loopErr)
return return
@@ -102,8 +104,8 @@ func action(ctx *cli.Context) error {
}) })
// Start l1relayer process // Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle) go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle) go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finish start all message relayer functions // Finish start all message relayer functions
log.Info("Start gas-oracle successfully") log.Info("Start gas-oracle successfully")

View File

@@ -10,13 +10,13 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
cutils "scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/utils"
) )
var app *cli.App var app *cli.App
@@ -29,18 +29,18 @@ func init() {
app.Usage = "The Scroll Message Relayer" app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1." app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...) app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx) return utils.LogSetup(ctx)
} }
// Register `message-relayer-test` app for integration-test. // Register `message-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.MessageRelayerApp) utils.RegisterSimulation(app, utils.MessageRelayerApp)
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
// Load config file. // Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name) cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile) cfg, err := config.NewConfig(cfgFile)
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -48,13 +48,13 @@ func action(ctx *cli.Context) error {
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
db, err := utils.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
if err = utils.CloseDB(db); err != nil { if err = database.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -69,7 +69,7 @@ func action(ctx *cli.Context) error {
} }
// Start l1relayer process // Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents) go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)
// Finish start all message relayer functions // Finish start all message relayer functions
log.Info("Start message-relayer successfully") log.Info("Start message-relayer successfully")

View File

@@ -11,14 +11,15 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
cutils "scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/utils" butils "scroll-tech/bridge/internal/utils"
) )
var app *cli.App var app *cli.App
@@ -30,19 +31,19 @@ func init() {
app.Name = "rollup-relayer" app.Name = "rollup-relayer"
app.Usage = "The Scroll Rollup Relayer" app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...) app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, cutils.RollupRelayerFlags...) app.Flags = append(app.Flags, utils.RollupRelayerFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx) return utils.LogSetup(ctx)
} }
// Register `rollup-relayer-test` app for integration-test. // Register `rollup-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.RollupRelayerApp) utils.RegisterSimulation(app, utils.RollupRelayerApp)
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
// Load config file. // Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name) cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile) cfg, err := config.NewConfig(cfgFile)
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -50,13 +51,13 @@ func action(ctx *cli.Context) error {
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
db, err := utils.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
if err = utils.CloseDB(db); err != nil { if err = database.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -71,7 +72,7 @@ func action(ctx *cli.Context) error {
return err return err
} }
initGenesis := ctx.Bool(cutils.ImportGenesisFlag.Name) initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis) l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
if err != nil { if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err) log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
@@ -90,11 +91,12 @@ func action(ctx *cli.Context) error {
return err return err
} }
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db) l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
// Watcher loop to fetch missing blocks // Watcher loop to fetch missing blocks
go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) { go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations) number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
if loopErr != nil { if loopErr != nil {
log.Error("failed to get block number", "err", loopErr) log.Error("failed to get block number", "err", loopErr)
return return
@@ -102,13 +104,13 @@ func action(ctx *cli.Context) error {
l2watcher.TryFetchRunningMissingBlocks(number) l2watcher.TryFetchRunningMissingBlocks(number)
}) })
go cutils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk) go utils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk)
go cutils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch) go utils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches) go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches) go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions. // Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully") log.Info("Start rollup-relayer successfully")

View File

@@ -4,17 +4,13 @@ go 1.19
require ( require (
github.com/agiledragon/gomonkey/v2 v2.9.0 github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.7
github.com/orcaman/concurrent-map v1.0.0 github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1 github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/pressly/goose/v3 v3.7.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/smartystreets/goconvey v1.8.0 github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.2 github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
golang.org/x/sync v0.1.0 golang.org/x/sync v0.1.0
gorm.io/driver/postgres v1.5.0
gorm.io/gorm v1.25.1 gorm.io/gorm v1.25.1
) )
@@ -25,7 +21,6 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect github.com/google/uuid v1.3.0 // indirect
@@ -34,9 +29,6 @@ require (
github.com/holiman/uint256 v1.2.2 // indirect github.com/holiman/uint256 v1.2.2 // indirect
github.com/huin/goupnp v1.0.3 // indirect github.com/huin/goupnp v1.0.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect github.com/jinzhu/now v1.1.5 // indirect
@@ -45,7 +37,7 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect github.com/rjeczalik/notify v0.9.1 // indirect
@@ -62,9 +54,8 @@ require (
github.com/yusufpapurcu/wmi v1.2.2 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.10.0 // indirect golang.org/x/crypto v0.10.0 // indirect
golang.org/x/sys v0.9.0 // indirect golang.org/x/sys v0.9.0 // indirect
golang.org/x/text v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.8.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
) )

View File

@@ -29,9 +29,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -55,13 +52,6 @@ github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixH
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -69,16 +59,11 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -86,9 +71,6 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -96,10 +78,8 @@ github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp9
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -113,16 +93,12 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf8=
github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
@@ -146,11 +122,8 @@ github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -164,68 +137,37 @@ github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bC
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
@@ -235,18 +177,5 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64= gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64=
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
modernc.org/cc/v3 v3.36.1 h1:CICrjwr/1M4+6OQ4HJZ/AHxjcwe67r5vPUF518MkO8A=
modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs=
modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=

View File

@@ -4,13 +4,15 @@ import (
"encoding/json" "encoding/json"
"os" "os"
"path/filepath" "path/filepath"
"scroll-tech/common/database"
) )
// Config load configuration items. // Config load configuration items.
type Config struct { type Config struct {
L1Config *L1Config `json:"l1_config"` L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"` L2Config *L2Config `json:"l2_config"`
DBConfig *DBConfig `json:"db_config"` DBConfig *database.Config `json:"db_config"`
} }
// NewConfig returns a new instance of Config. // NewConfig returns a new instance of Config.

View File

@@ -40,7 +40,7 @@ type ChunkProposerConfig struct {
type BatchProposerConfig struct { type BatchProposerConfig struct {
MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"` MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"` MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint64 `json:"max_l1_commit_calldata_size_per_batch"` MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"` MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"` BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
} }

View File

@@ -159,13 +159,13 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
// ProcessGasPriceOracle imports gas price to layer2 // ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() { func (r *Layer1Relayer) ProcessGasPriceOracle() {
latestBlockHeight, err := r.l1Block.GetLatestL1BlockHeight() latestBlockHeight, err := r.l1Block.GetLatestL1BlockHeight(r.ctx)
if err != nil { if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err) log.Warn("Failed to fetch latest L1 block height from db", "err", err)
return return
} }
blocks, err := r.l1Block.GetL1Blocks(map[string]interface{}{ blocks, err := r.l1Block.GetL1Blocks(r.ctx, map[string]interface{}{
"number": latestBlockHeight, "number": latestBlockHeight,
}) })
if err != nil { if err != nil {

View File

@@ -12,13 +12,14 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/controller/sender" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/orm/migrate"
bridgeUtils "scroll-tech/bridge/internal/utils"
) )
var ( var (
@@ -49,7 +50,7 @@ var (
) )
func setupL1RelayerDB(t *testing.T) *gorm.DB { func setupL1RelayerDB(t *testing.T) *gorm.DB {
db, err := bridgeUtils.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
assert.NoError(t, err) assert.NoError(t, err)
sqlDB, err := db.DB() sqlDB, err := db.DB()
assert.NoError(t, err) assert.NoError(t, err)
@@ -60,7 +61,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
// testCreateNewRelayer test create new relayer instance and stop // testCreateNewRelayer test create new relayer instance and stop
func testCreateNewL1Relayer(t *testing.T) { func testCreateNewL1Relayer(t *testing.T) {
db := setupL1RelayerDB(t) db := setupL1RelayerDB(t)
defer bridgeUtils.CloseDB(db) defer database.CloseDB(db)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig) relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, relayer) assert.NotNil(t, relayer)
@@ -68,7 +69,7 @@ func testCreateNewL1Relayer(t *testing.T) {
func testL1RelayerProcessSaveEvents(t *testing.T) { func testL1RelayerProcessSaveEvents(t *testing.T) {
db := setupL1RelayerDB(t) db := setupL1RelayerDB(t)
defer bridgeUtils.CloseDB(db) defer database.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db) l1MessageOrm := orm.NewL1Message(db)
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig) relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
@@ -86,7 +87,7 @@ func testL1RelayerProcessSaveEvents(t *testing.T) {
func testL1RelayerMsgConfirm(t *testing.T) { func testL1RelayerMsgConfirm(t *testing.T) {
db := setupL1RelayerDB(t) db := setupL1RelayerDB(t)
defer bridgeUtils.CloseDB(db) defer database.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db) l1MessageOrm := orm.NewL1Message(db)
l1Messages := []*orm.L1Message{ l1Messages := []*orm.L1Message{
{MsgHash: "msg-1", QueueIndex: 0}, {MsgHash: "msg-1", QueueIndex: 0},
@@ -123,12 +124,12 @@ func testL1RelayerMsgConfirm(t *testing.T) {
func testL1RelayerGasOracleConfirm(t *testing.T) { func testL1RelayerGasOracleConfirm(t *testing.T) {
db := setupL1RelayerDB(t) db := setupL1RelayerDB(t)
defer bridgeUtils.CloseDB(db) defer database.CloseDB(db)
l1BlockOrm := orm.NewL1Block(db) l1BlockOrm := orm.NewL1Block(db)
l1Block := []orm.L1Block{ l1Block := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0}, {Hash: "gas-oracle-1", Number: 0, GasOracleStatus: int16(types.GasOraclePending), BlockStatus: int16(types.L1BlockPending)},
{Hash: "gas-oracle-2", Number: 1}, {Hash: "gas-oracle-2", Number: 1, GasOracleStatus: int16(types.GasOraclePending), BlockStatus: int16(types.L1BlockPending)},
} }
// Insert test data. // Insert test data.
assert.NoError(t, l1BlockOrm.InsertL1Blocks(context.Background(), l1Block)) assert.NoError(t, l1BlockOrm.InsertL1Blocks(context.Background(), l1Block))
@@ -152,8 +153,8 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
// Check the database for the updated status using TryTimes. // Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool { ok := utils.TryTimes(5, func() bool {
msg1, err1 := l1BlockOrm.GetL1Blocks(map[string]interface{}{"hash": "gas-oracle-1"}) msg1, err1 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-1"})
msg2, err2 := l1BlockOrm.GetL1Blocks(map[string]interface{}{"hash": "gas-oracle-2"}) msg2, err2 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-2"})
return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported && return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported &&
err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleFailed err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleFailed
}) })
@@ -162,7 +163,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
func testL1RelayerProcessGasPriceOracle(t *testing.T) { func testL1RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL1RelayerDB(t) db := setupL1RelayerDB(t)
defer bridgeUtils.CloseDB(db) defer database.CloseDB(db)
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@@ -174,28 +175,28 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
var l1BlockOrm *orm.L1Block var l1BlockOrm *orm.L1Block
convey.Convey("GetLatestL1BlockHeight failure", t, func() { convey.Convey("GetLatestL1BlockHeight failure", t, func() {
targetErr := errors.New("GetLatestL1BlockHeight error") targetErr := errors.New("GetLatestL1BlockHeight error")
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func() (uint64, error) { patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) {
return 0, targetErr return 0, targetErr
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
}) })
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func() (uint64, error) { patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) {
return 100, nil return 100, nil
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
convey.Convey("GetL1Blocks failure", t, func() { convey.Convey("GetL1Blocks failure", t, func() {
targetErr := errors.New("GetL1Blocks error") targetErr := errors.New("GetL1Blocks error")
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) { patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
return nil, targetErr return nil, targetErr
}) })
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
}) })
convey.Convey("Block not exist", t, func() { convey.Convey("Block not exist", t, func() {
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) { patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
tmpInfo := []orm.L1Block{ tmpInfo := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0}, {Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1}, {Hash: "gas-oracle-2", Number: 1},
@@ -205,12 +206,12 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
}) })
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) { patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
tmpInfo := []orm.L1Block{ tmpInfo := []orm.L1Block{
{ {
Hash: "gas-oracle-1", Hash: "gas-oracle-1",
Number: 0, Number: 0,
GasOracleStatus: int(types.GasOraclePending), GasOracleStatus: int16(types.GasOraclePending),
}, },
} }
return tmpInfo, nil return tmpInfo, nil

View File

@@ -22,7 +22,6 @@ import (
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/sender" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
) )
var ( var (
@@ -30,7 +29,6 @@ var (
bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry) bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry) bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry) bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesSkippedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
) )
// Layer2Relayer is responsible for // Layer2Relayer is responsible for
@@ -171,8 +169,8 @@ func (r *Layer2Relayer) initializeGenesis() error {
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String()) log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
chunk := &bridgeTypes.Chunk{ chunk := &types.Chunk{
Blocks: []*bridgeTypes.WrappedBlock{{ Blocks: []*types.WrappedBlock{{
Header: genesis, Header: genesis,
Transactions: nil, Transactions: nil,
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
@@ -191,7 +189,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
} }
var batch *orm.Batch var batch *orm.Batch
batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*bridgeTypes.Chunk{chunk}, dbTX) batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk}, dbTX)
if err != nil { if err != nil {
return fmt.Errorf("failed to insert batch: %v", err) return fmt.Errorf("failed to insert batch: %v", err)
} }
@@ -319,7 +317,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
} }
for _, batch := range pendingBatches { for _, batch := range pendingBatches {
// get current header and parent header. // get current header and parent header.
currentBatchHeader, err := bridgeTypes.DecodeBatchHeader(batch.BatchHeader) currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
if err != nil { if err != nil {
log.Error("Failed to decode batch header", "index", batch.Index, "error", err) log.Error("Failed to decode batch header", "index", batch.Index, "error", err)
return return
@@ -346,7 +344,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
encodedChunks := make([][]byte, len(dbChunks)) encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks { for i, c := range dbChunks {
var wrappedBlocks []*bridgeTypes.WrappedBlock var wrappedBlocks []*types.WrappedBlock
wrappedBlocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber) wrappedBlocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil { if err != nil {
log.Error("Failed to fetch wrapped blocks", log.Error("Failed to fetch wrapped blocks",
@@ -354,7 +352,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
"end number", c.EndBlockNumber, "error", err) "end number", c.EndBlockNumber, "error", err)
return return
} }
chunk := &bridgeTypes.Chunk{ chunk := &types.Chunk{
Blocks: wrappedBlocks, Blocks: wrappedBlocks,
} }
var chunkBytes []byte var chunkBytes []byte
@@ -395,15 +393,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// ProcessCommittedBatches submit proof to layer 1 rollup contract // ProcessCommittedBatches submit proof to layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() { func (r *Layer2Relayer) ProcessCommittedBatches() {
// set skipped batches in a single db operation
if count, err := r.batchOrm.UpdateSkippedBatches(r.ctx); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
} else if count > 0 {
bridgeL2BatchesSkippedTotalCounter.Inc(int64(count))
log.Info("Skipping batches", "count", count)
}
// retrieves the earliest batch whose rollup status is 'committed' // retrieves the earliest batch whose rollup status is 'committed'
fields := map[string]interface{}{ fields := map[string]interface{}{
"rollup_status": types.RollupCommitted, "rollup_status": types.RollupCommitted,
@@ -431,11 +420,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// It's an intermediate state. The roller manager received the proof but has not verified // It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified. // the proof yet. We don't roll up the proof until it's verified.
return return
case types.ProvingTaskFailed, types.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
case types.ProvingTaskVerified: case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash) log.Info("Start to roll up zk proof", "hash", hash)
success := false success := false
@@ -455,8 +439,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
defer func() { defer func() {
// TODO: need to revisit this and have a more fine-grained error handling // TODO: need to revisit this and have a more fine-grained error handling
if !success { if !success {
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash) log.Info("Failed to upload the proof, change rollup status to RollupFinalizeFailed", "hash", hash)
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil { if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizeFailed); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err) log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
} }
} }

View File

@@ -12,19 +12,19 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message" "scroll-tech/common/types/message"
"scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/controller/sender" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
bridgeUtils "scroll-tech/bridge/internal/utils"
) )
func setupL2RelayerDB(t *testing.T) *gorm.DB { func setupL2RelayerDB(t *testing.T) *gorm.DB {
db, err := bridgeUtils.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
assert.NoError(t, err) assert.NoError(t, err)
sqlDB, err := db.DB() sqlDB, err := db.DB()
assert.NoError(t, err) assert.NoError(t, err)
@@ -34,7 +34,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewRelayer(t *testing.T) { func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t) db := setupL2RelayerDB(t)
defer bridgeUtils.CloseDB(db) defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, relayer) assert.NotNil(t, relayer)
@@ -42,14 +42,14 @@ func testCreateNewRelayer(t *testing.T) {
func testL2RelayerProcessPendingBatches(t *testing.T) { func testL2RelayerProcessPendingBatches(t *testing.T) {
db := setupL2RelayerDB(t) db := setupL2RelayerDB(t)
defer bridgeUtils.CloseDB(db) defer database.CloseDB(db)
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err) assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db) l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}) err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err) assert.NoError(t, err)
chunkOrm := orm.NewChunk(db) chunkOrm := orm.NewChunk(db)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1) dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
@@ -57,7 +57,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2) dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err) assert.NoError(t, err)
batchOrm := orm.NewBatch(db) batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*bridgeTypes.Chunk{chunk1, chunk2}) batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err) assert.NoError(t, err)
relayer.ProcessPendingBatches() relayer.ProcessPendingBatches()
@@ -70,13 +70,13 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
func testL2RelayerProcessCommittedBatches(t *testing.T) { func testL2RelayerProcessCommittedBatches(t *testing.T) {
db := setupL2RelayerDB(t) db := setupL2RelayerDB(t)
defer bridgeUtils.CloseDB(db) defer database.CloseDB(db)
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err) assert.NoError(t, err)
batchOrm := orm.NewBatch(db) batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2}) batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err) assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted) err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
@@ -90,7 +90,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 1, len(statuses)) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizationSkipped, statuses[0]) assert.Equal(t, types.RollupFinalizeFailed, statuses[0])
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted) err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err) assert.NoError(t, err)
@@ -108,70 +108,9 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
assert.Equal(t, types.RollupFinalizing, statuses[0]) assert.Equal(t, types.RollupFinalizing, statuses[0])
} }
func testL2RelayerSkipBatches(t *testing.T) {
db := setupL2RelayerDB(t)
defer bridgeUtils.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus) string {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, rollupStatus)
assert.NoError(t, err)
proof := &message.AggProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, provingStatus)
assert.NoError(t, err)
return batch.Hash
}
skipped := []string{
createBatch(types.RollupCommitted, types.ProvingTaskSkipped),
createBatch(types.RollupCommitted, types.ProvingTaskFailed),
}
notSkipped := []string{
createBatch(types.RollupPending, types.ProvingTaskSkipped),
createBatch(types.RollupCommitting, types.ProvingTaskSkipped),
createBatch(types.RollupFinalizing, types.ProvingTaskSkipped),
createBatch(types.RollupFinalized, types.ProvingTaskSkipped),
createBatch(types.RollupPending, types.ProvingTaskFailed),
createBatch(types.RollupCommitting, types.ProvingTaskFailed),
createBatch(types.RollupFinalizing, types.ProvingTaskFailed),
createBatch(types.RollupFinalized, types.ProvingTaskFailed),
createBatch(types.RollupCommitted, types.ProvingTaskVerified),
}
relayer.ProcessCommittedBatches()
for _, id := range skipped {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{id})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
}
for _, id := range notSkipped {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{id})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.NotEqual(t, types.RollupFinalizationSkipped, statuses[0])
}
}
func testL2RelayerRollupConfirm(t *testing.T) { func testL2RelayerRollupConfirm(t *testing.T) {
db := setupL2RelayerDB(t) db := setupL2RelayerDB(t)
defer bridgeUtils.CloseDB(db) defer database.CloseDB(db)
// Create and set up the Layer2 Relayer. // Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
@@ -187,7 +126,7 @@ func testL2RelayerRollupConfirm(t *testing.T) {
batchOrm := orm.NewBatch(db) batchOrm := orm.NewBatch(db)
batchHashes := make([]string, len(processingKeys)) batchHashes := make([]string, len(processingKeys))
for i := range batchHashes { for i := range batchHashes {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2}) batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err) assert.NoError(t, err)
batchHashes[i] = batch.Hash batchHashes[i] = batch.Hash
} }
@@ -232,13 +171,13 @@ func testL2RelayerRollupConfirm(t *testing.T) {
func testL2RelayerGasOracleConfirm(t *testing.T) { func testL2RelayerGasOracleConfirm(t *testing.T) {
db := setupL2RelayerDB(t) db := setupL2RelayerDB(t)
defer bridgeUtils.CloseDB(db) defer database.CloseDB(db)
batchOrm := orm.NewBatch(db) batchOrm := orm.NewBatch(db)
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1}) batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
assert.NoError(t, err) assert.NoError(t, err)
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2}) batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
assert.NoError(t, err) assert.NoError(t, err)
// Create and set up the Layer2 Relayer. // Create and set up the Layer2 Relayer.
@@ -281,7 +220,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
func testLayer2RelayerProcessGasPriceOracle(t *testing.T) { func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t) db := setupL2RelayerDB(t)
defer bridgeUtils.CloseDB(db) defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -9,10 +9,11 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
bridgeTypes "scroll-tech/bridge/internal/types"
) )
var ( var (
@@ -25,12 +26,12 @@ var (
l2Cli *ethclient.Client l2Cli *ethclient.Client
// l2 block // l2 block
wrappedBlock1 *bridgeTypes.WrappedBlock wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *bridgeTypes.WrappedBlock wrappedBlock2 *types.WrappedBlock
// chunk // chunk
chunk1 *bridgeTypes.Chunk chunk1 *types.Chunk
chunk2 *bridgeTypes.Chunk chunk2 *types.Chunk
chunkHash1 common.Hash chunkHash1 common.Hash
chunkHash2 common.Hash chunkHash2 common.Hash
) )
@@ -45,7 +46,7 @@ func setupEnv(t *testing.T) {
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint() cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint() cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = &config.DBConfig{ cfg.DBConfig = &database.Config{
DSN: base.DBConfig.DSN, DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName, DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum, MaxOpenNum: base.DBConfig.MaxOpenNum,
@@ -58,19 +59,19 @@ func setupEnv(t *testing.T) {
templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json") templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock1 = &bridgeTypes.WrappedBlock{} wrappedBlock1 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace1, wrappedBlock1) err = json.Unmarshal(templateBlockTrace1, wrappedBlock1)
assert.NoError(t, err) assert.NoError(t, err)
chunk1 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1}} chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}}
chunkHash1, err = chunk1.Hash(0) chunkHash1, err = chunk1.Hash(0)
assert.NoError(t, err) assert.NoError(t, err)
templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json") templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock2 = &bridgeTypes.WrappedBlock{} wrappedBlock2 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace2, wrappedBlock2) err = json.Unmarshal(templateBlockTrace2, wrappedBlock2)
assert.NoError(t, err) assert.NoError(t, err)
chunk2 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock2}} chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}}
chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0)) chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
assert.NoError(t, err) assert.NoError(t, err)
} }
@@ -96,7 +97,6 @@ func TestFunctions(t *testing.T) {
t.Run("TestCreateNewRelayer", testCreateNewRelayer) t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches) t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches) t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm) t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm) t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle) t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)

View File

@@ -343,6 +343,20 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
if gasTipCap.Cmp(feeData.gasTipCap) < 0 { if gasTipCap.Cmp(feeData.gasTipCap) < 0 {
gasTipCap = feeData.gasTipCap gasTipCap = feeData.gasTipCap
} }
// adjust for rising basefee
adjBaseFee := big.NewInt(0)
if feeGas := atomic.LoadUint64(&s.baseFeePerGas); feeGas != 0 {
adjBaseFee.SetUint64(feeGas)
}
adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
currentGasFeeCap := new(big.Int).Add(gasTipCap, adjBaseFee)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}
// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 { if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice gasFeeCap = maxGasPrice
} }

View File

@@ -65,6 +65,7 @@ func TestSender(t *testing.T) {
t.Run("test min gas limit", testMinGasLimit) t.Run("test min gas limit", testMinGasLimit)
t.Run("test resubmit transaction", testResubmitTransaction) t.Run("test resubmit transaction", testResubmitTransaction)
t.Run("test resubmit transaction with rising base fee", testResubmitTransactionWithRisingBaseFee)
t.Run("test check pending transaction", testCheckPendingTransaction) t.Run("test check pending transaction", testCheckPendingTransaction)
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) }) t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
@@ -154,6 +155,43 @@ func testResubmitTransaction(t *testing.T) {
} }
} }
func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
txType := "DynamicFeeTx"
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
auth := s.auths.getAccount()
tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
s.baseFeePerGas = 1000
feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
assert.NoError(t, err)
// bump the basefee by 10x
s.baseFeePerGas *= 10
// resubmit and check that the gas fee has been adjusted accordingly
newTx, err := s.resubmitTransaction(feeData, auth, tx)
assert.NoError(t, err)
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
adjBaseFee := new(big.Int)
adjBaseFee.SetUint64(s.baseFeePerGas)
adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
expectedGasFeeCap := new(big.Int).Add(feeData.gasTipCap, adjBaseFee)
if expectedGasFeeCap.Cmp(maxGasPrice) > 0 {
expectedGasFeeCap = maxGasPrice
}
assert.Equal(t, expectedGasFeeCap.Int64(), newTx.GasFeeCap().Int64())
s.Stop()
}
func testCheckPendingTransaction(t *testing.T) { func testCheckPendingTransaction(t *testing.T) {
for _, txType := range txTypes { for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig

View File

@@ -8,9 +8,10 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
) )
// BatchProposer proposes batches based on available unbatched chunks. // BatchProposer proposes batches based on available unbatched chunks.
@@ -24,7 +25,7 @@ type BatchProposer struct {
maxChunkNumPerBatch uint64 maxChunkNumPerBatch uint64
maxL1CommitGasPerBatch uint64 maxL1CommitGasPerBatch uint64
maxL1CommitCalldataSizePerBatch uint64 maxL1CommitCalldataSizePerBatch uint32
minChunkNumPerBatch uint64 minChunkNumPerBatch uint64
batchTimeoutSec uint64 batchTimeoutSec uint64
} }
@@ -154,8 +155,8 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
return dbChunks, nil return dbChunks, nil
} }
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*bridgeTypes.Chunk, error) { func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
chunks := make([]*bridgeTypes.Chunk, len(dbChunks)) chunks := make([]*types.Chunk, len(dbChunks))
for i, c := range dbChunks { for i, c := range dbChunks {
wrappedBlocks, err := p.l2Block.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber) wrappedBlocks, err := p.l2Block.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil { if err != nil {
@@ -163,7 +164,7 @@ func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*bridge
"start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err) "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return nil, err return nil, err
} }
chunks[i] = &bridgeTypes.Chunk{ chunks[i] = &types.Chunk{
Blocks: wrappedBlocks, Blocks: wrappedBlocks,
} }
} }

View File

@@ -6,21 +6,20 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
// TODO: Add unit tests that the limits are enforced correctly. // TODO: Add unit tests that the limits are enforced correctly.
func testBatchProposer(t *testing.T) { func testBatchProposer(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db) l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}) err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err) assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{

View File

@@ -8,9 +8,10 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
) )
// ChunkProposer proposes chunks based on available unchunked blocks. // ChunkProposer proposes chunks based on available unchunked blocks.
@@ -58,7 +59,7 @@ func (p *ChunkProposer) TryProposeChunk() {
} }
} }
func (p *ChunkProposer) updateChunkInfoInDB(chunk *bridgeTypes.Chunk) error { func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
if chunk == nil { if chunk == nil {
log.Warn("proposed chunk is nil, cannot update in DB") log.Warn("proposed chunk is nil, cannot update in DB")
return nil return nil
@@ -78,7 +79,7 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *bridgeTypes.Chunk) error {
return err return err
} }
func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) { func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx) blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -166,5 +167,5 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
) )
return nil, nil return nil, nil
} }
return &bridgeTypes.Chunk{Blocks: blocks}, nil return &types.Chunk{Blocks: blocks}, nil
} }

View File

@@ -6,19 +6,20 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
// TODO: Add unit tests that the limits are enforced correctly. // TODO: Add unit tests that the limits are enforced correctly.
func testChunkProposer(t *testing.T) { func testChunkProposer(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db) l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}) err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err) assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -31,8 +32,8 @@ func testChunkProposer(t *testing.T) {
}, db) }, db)
cp.TryProposeChunk() cp.TryProposeChunk()
expectedChunk := &bridgeTypes.Chunk{ expectedChunk := &types.Chunk{
Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}, Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2},
} }
expectedHash, err := expectedChunk.Hash(0) expectedHash, err := expectedChunk.Hash(0)
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -74,7 +74,7 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
} }
l1BlockOrm := orm.NewL1Block(db) l1BlockOrm := orm.NewL1Block(db)
savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight() savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(ctx)
if err != nil { if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err) log.Warn("Failed to fetch latest L1 block height from db", "err", err)
savedL1BlockHeight = 0 savedL1BlockHeight = 0
@@ -149,9 +149,11 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
baseFee = block.BaseFee.Uint64() baseFee = block.BaseFee.Uint64()
} }
blocks = append(blocks, orm.L1Block{ blocks = append(blocks, orm.L1Block{
Number: uint64(height), Number: uint64(height),
Hash: block.Hash().String(), Hash: block.Hash().String(),
BaseFee: baseFee, BaseFee: baseFee,
GasOracleStatus: int16(types.GasOraclePending),
BlockStatus: int16(types.L1BlockPending),
}) })
} }

View File

@@ -17,6 +17,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
commonTypes "scroll-tech/common/types" commonTypes "scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi" bridgeAbi "scroll-tech/bridge/abi"
@@ -36,13 +37,13 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
func testFetchContractEvent(t *testing.T) { func testFetchContractEvent(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
assert.NoError(t, watcher.FetchContractEvent()) assert.NoError(t, watcher.FetchContractEvent())
} }
func testL1WatcherClientFetchBlockHeader(t *testing.T) { func testL1WatcherClientFetchBlockHeader(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
convey.Convey("test toBlock < fromBlock", t, func() { convey.Convey("test toBlock < fromBlock", t, func() {
var blockHeight uint64 var blockHeight uint64
if watcher.ProcessedBlockHeight() <= 0 { if watcher.ProcessedBlockHeight() <= 0 {
@@ -114,7 +115,7 @@ func testL1WatcherClientFetchBlockHeader(t *testing.T) {
func testL1WatcherClientFetchContractEvent(t *testing.T) { func testL1WatcherClientFetchContractEvent(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
watcher.SetConfirmations(rpc.SafeBlockNumber) watcher.SetConfirmations(rpc.SafeBlockNumber)
convey.Convey("get latest confirmed block number failure", t, func() { convey.Convey("get latest confirmed block number failure", t, func() {
@@ -259,7 +260,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) { func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
logs := []types.Log{ logs := []types.Log{
{ {
@@ -305,7 +306,7 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) { func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
logs := []types.Log{ logs := []types.Log{
{ {
Topics: []common.Hash{bridgeAbi.L1CommitBatchEventSignature}, Topics: []common.Hash{bridgeAbi.L1CommitBatchEventSignature},
@@ -347,7 +348,7 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) { func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
logs := []types.Log{ logs := []types.Log{
{ {
Topics: []common.Hash{bridgeAbi.L1FinalizeBatchEventSignature}, Topics: []common.Hash{bridgeAbi.L1FinalizeBatchEventSignature},

View File

@@ -22,7 +22,6 @@ import (
bridgeAbi "scroll-tech/bridge/abi" bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils" "scroll-tech/bridge/internal/utils"
) )
@@ -110,7 +109,7 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
} }
// Fetch and store block traces for missing blocks // Fetch and store block traces for missing blocks
for from := uint64(heightInDB) + 1; from <= blockHeight; from += blockTracesFetchLimit { for from := heightInDB + 1; from <= blockHeight; from += blockTracesFetchLimit {
to := from + blockTracesFetchLimit - 1 to := from + blockTracesFetchLimit - 1
if to > blockHeight { if to > blockHeight {
@@ -160,7 +159,7 @@ func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
} }
func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error { func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var blocks []*bridgeTypes.WrappedBlock var blocks []*types.WrappedBlock
for number := from; number <= to; number++ { for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number) log.Debug("retrieving block", "height", number)
block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number))) block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number)))
@@ -175,7 +174,7 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number) return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number)
} }
blocks = append(blocks, &bridgeTypes.WrappedBlock{ blocks = append(blocks, &types.WrappedBlock{
Header: block.Header(), Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()), Transactions: txsToTxsData(block.Transactions()),
WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot), WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot),

View File

@@ -21,6 +21,7 @@ import (
"github.com/smartystreets/goconvey/convey" "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
cutils "scroll-tech/common/utils" cutils "scroll-tech/common/utils"
bridgeAbi "scroll-tech/bridge/abi" bridgeAbi "scroll-tech/bridge/abi"
@@ -42,7 +43,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
subCtx, cancel := context.WithCancel(context.Background()) subCtx, cancel := context.WithCancel(context.Background())
defer func() { defer func() {
cancel() cancel()
defer utils.CloseDB(db) defer database.CloseDB(db)
}() }()
loopToFetchEvent(subCtx, wc) loopToFetchEvent(subCtx, wc)
@@ -68,7 +69,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
func testFetchRunningMissingBlocks(t *testing.T) { func testFetchRunningMissingBlocks(t *testing.T) {
_, db := setupL2Watcher(t) _, db := setupL2Watcher(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0]) auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
@@ -87,7 +88,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
wc := prepareWatcherClient(l2Cli, db, address) wc := prepareWatcherClient(l2Cli, db, address)
wc.TryFetchRunningMissingBlocks(latestHeight) wc.TryFetchRunningMissingBlocks(latestHeight)
fetchedHeight, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background()) fetchedHeight, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
return err == nil && uint64(fetchedHeight) == latestHeight return err == nil && fetchedHeight == latestHeight
}) })
assert.True(t, ok) assert.True(t, ok)
} }
@@ -114,7 +115,7 @@ func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
logs := []gethTypes.Log{ logs := []gethTypes.Log{
{ {
@@ -154,7 +155,7 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
logs := []gethTypes.Log{ logs := []gethTypes.Log{
{ {

View File

@@ -9,12 +9,13 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
var ( var (
@@ -27,8 +28,8 @@ var (
l2Cli *ethclient.Client l2Cli *ethclient.Client
// block trace // block trace
wrappedBlock1 *bridgeTypes.WrappedBlock wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *bridgeTypes.WrappedBlock wrappedBlock2 *types.WrappedBlock
) )
func setupEnv(t *testing.T) (err error) { func setupEnv(t *testing.T) (err error) {
@@ -40,7 +41,7 @@ func setupEnv(t *testing.T) (err error) {
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint() cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint() cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = &config.DBConfig{ cfg.DBConfig = &database.Config{
DSN: base.DBConfig.DSN, DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName, DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum, MaxOpenNum: base.DBConfig.MaxOpenNum,
@@ -56,7 +57,7 @@ func setupEnv(t *testing.T) (err error) {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock1 = &bridgeTypes.WrappedBlock{} wrappedBlock1 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil { if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err return err
} }
@@ -66,7 +67,7 @@ func setupEnv(t *testing.T) (err error) {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock2 = &bridgeTypes.WrappedBlock{} wrappedBlock2 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil { if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err return err
} }
@@ -74,7 +75,7 @@ func setupEnv(t *testing.T) (err error) {
} }
func setupDB(t *testing.T) *gorm.DB { func setupDB(t *testing.T) *gorm.DB {
db, err := utils.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
assert.NoError(t, err) assert.NoError(t, err)
sqlDB, err := db.DB() sqlDB, err := db.DB()
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -10,8 +10,6 @@ import (
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message" "scroll-tech/common/types/message"
bridgeTypes "scroll-tech/bridge/internal/types"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
@@ -35,11 +33,12 @@ type Batch struct {
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"` BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
// proof // proof
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"` ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"` ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"` Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"` ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProofTimeSec int `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"` ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// rollup // rollup
RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"` RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"`
@@ -72,6 +71,7 @@ func (*Batch) TableName() string {
// The returned batches are sorted in ascending order by their index. // The returned batches are sorted in ascending order by their index.
func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Batch, error) { func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Batch, error) {
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
for key, value := range fields { for key, value := range fields {
db = db.Where(key, value) db = db.Where(key, value)
@@ -89,44 +89,51 @@ func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, o
var batches []*Batch var batches []*Batch
if err := db.Find(&batches).Error; err != nil { if err := db.Find(&batches).Error; err != nil {
return nil, err return nil, fmt.Errorf("Batch.GetBatches error: %w, fields: %v, orderByList: %v", err, fields, orderByList)
} }
return batches, nil return batches, nil
} }
// GetBatchCount retrieves the total number of batches in the database. // GetBatchCount retrieves the total number of batches in the database.
func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) { func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
var count int64 var count int64
err := o.db.WithContext(ctx).Model(&Batch{}).Count(&count).Error if err := db.Count(&count).Error; err != nil {
if err != nil { return 0, fmt.Errorf("Batch.GetBatchCount error: %w", err)
return 0, err
} }
return uint64(count), nil return uint64(count), nil
} }
// GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash. // GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash.
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.AggProof, error) { func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.AggProof, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("proof")
db = db.Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified)
var batch Batch var batch Batch
err := o.db.WithContext(ctx).Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified).First(&batch).Error if err := db.Find(&batch).Error; err != nil {
if err != nil { return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
return nil, err
} }
var proof message.AggProof var proof message.AggProof
err = json.Unmarshal(batch.Proof, &proof) if err := json.Unmarshal(batch.Proof, &proof); err != nil {
if err != nil { return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
return nil, err
} }
return &proof, nil return &proof, nil
} }
// GetLatestBatch retrieves the latest batch from the database. // GetLatestBatch retrieves the latest batch from the database.
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) { func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Order("index desc")
var latestBatch Batch var latestBatch Batch
err := o.db.WithContext(ctx).Order("index desc").First(&latestBatch).Error if err := db.First(&latestBatch).Error; err != nil {
if err != nil { return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
return nil, err
} }
return &latestBatch, nil return &latestBatch, nil
} }
@@ -134,13 +141,17 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
// GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes. // GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes.
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) { func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) {
if len(hashes) == 0 { if len(hashes) == 0 {
return []types.RollupStatus{}, nil return nil, nil
} }
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("hash, rollup_status")
db = db.Where("hash IN ?", hashes)
var batches []Batch var batches []Batch
err := o.db.WithContext(ctx).Where("hash IN ?", hashes).Find(&batches).Error if err := db.Find(&batches).Error; err != nil {
if err != nil { return nil, fmt.Errorf("Batch.GetRollupStatusByHashList error: %w, hashes: %v", err, hashes)
return nil, err
} }
hashToStatusMap := make(map[string]types.RollupStatus) hashToStatusMap := make(map[string]types.RollupStatus)
@@ -152,7 +163,7 @@ func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string)
for _, hash := range hashes { for _, hash := range hashes {
status, ok := hashToStatusMap[hash] status, ok := hashToStatusMap[hash]
if !ok { if !ok {
return nil, fmt.Errorf("hash not found in database: %s", hash) return nil, fmt.Errorf("Batch.GetRollupStatusByHashList: hash not found in database: %s", hash)
} }
statuses = append(statuses, status) statuses = append(statuses, status)
} }
@@ -167,40 +178,40 @@ func (o *Batch) GetPendingBatches(ctx context.Context, limit int) ([]*Batch, err
return nil, errors.New("limit must be greater than zero") return nil, errors.New("limit must be greater than zero")
} }
var batches []*Batch
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("rollup_status = ?", types.RollupPending)
db = db.Order("index ASC")
db = db.Limit(limit)
db = db.Where("rollup_status = ?", types.RollupPending).Order("index ASC").Limit(limit) var batches []*Batch
if err := db.Find(&batches).Error; err != nil { if err := db.Find(&batches).Error; err != nil {
return nil, err return nil, fmt.Errorf("Batch.GetPendingBatches error: %w", err)
} }
return batches, nil return batches, nil
} }
// GetBatchByIndex retrieves the batch by the given index. // GetBatchByIndex retrieves the batch by the given index.
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) { func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("index = ?", index)
var batch Batch var batch Batch
err := o.db.WithContext(ctx).Where("index = ?", index).First(&batch).Error if err := db.First(&batch).Error; err != nil {
if err != nil { return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index)
return nil, err
} }
return &batch, nil return &batch, nil
} }
// InsertBatch inserts a new batch into the database. // InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Batch, error) { func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
if len(chunks) == 0 { if len(chunks) == 0 {
return nil, errors.New("invalid args") return nil, errors.New("invalid args")
} }
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
parentBatch, err := o.GetLatestBatch(ctx) parentBatch, err := o.GetLatestBatch(ctx)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
log.Error("failed to get the latest batch", "err", err) log.Error("failed to get the latest batch", "err", err)
return nil, err return nil, err
} }
@@ -217,8 +228,8 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
batchIndex = parentBatch.Index + 1 batchIndex = parentBatch.Index + 1
parentBatchHash = common.HexToHash(parentBatch.Hash) parentBatchHash = common.HexToHash(parentBatch.Hash)
var parentBatchHeader *bridgeTypes.BatchHeader var parentBatchHeader *types.BatchHeader
parentBatchHeader, err = bridgeTypes.DecodeBatchHeader(parentBatch.BatchHeader) parentBatchHeader, err = types.DecodeBatchHeader(parentBatch.BatchHeader)
if err != nil { if err != nil {
log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err) log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
return nil, err return nil, err
@@ -228,7 +239,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
version = parentBatchHeader.Version() version = parentBatchHeader.Version()
} }
batchHeader, err := bridgeTypes.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks) batchHeader, err := types.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks)
if err != nil { if err != nil {
log.Error("failed to create batch header", log.Error("failed to create batch header",
"index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore, "index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
@@ -240,59 +251,53 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
lastChunkBlockNum := len(chunks[numChunks-1].Blocks) lastChunkBlockNum := len(chunks[numChunks-1].Blocks)
newBatch := Batch{ newBatch := Batch{
Index: batchIndex, Index: batchIndex,
Hash: batchHeader.Hash().Hex(), Hash: batchHeader.Hash().Hex(),
StartChunkHash: startChunkHash, StartChunkHash: startChunkHash,
StartChunkIndex: startChunkIndex, StartChunkIndex: startChunkIndex,
EndChunkHash: endChunkHash, EndChunkHash: endChunkHash,
EndChunkIndex: endChunkIndex, EndChunkIndex: endChunkIndex,
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(), StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(), WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
BatchHeader: batchHeader.Encode(), BatchHeader: batchHeader.Encode(),
ProvingStatus: int16(types.ProvingTaskUnassigned), ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
RollupStatus: int16(types.RollupPending), ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
} }
if err := db.WithContext(ctx).Create(&newBatch).Error; err != nil { db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db.WithContext(ctx)
db = db.Model(&Batch{})
if err := db.Create(&newBatch).Error; err != nil {
log.Error("failed to insert batch", "batch", newBatch, "err", err) log.Error("failed to insert batch", "batch", newBatch, "err", err)
return nil, err return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
} }
return &newBatch, nil return &newBatch, nil
} }
// UpdateSkippedBatches updates the skipped batches in the database.
func (o *Batch) UpdateSkippedBatches(ctx context.Context) (uint64, error) {
provingStatusList := []interface{}{
int(types.ProvingTaskSkipped),
int(types.ProvingTaskFailed),
}
result := o.db.Model(&Batch{}).Where("rollup_status", int(types.RollupCommitted)).
Where("proving_status IN (?)", provingStatusList).Update("rollup_status", int(types.RollupFinalizationSkipped))
if result.Error != nil {
return 0, result.Error
}
return uint64(result.RowsAffected), nil
}
// UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch. // UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch.
func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error { func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["oracle_status"] = int(status) updateFields["oracle_status"] = int(status)
updateFields["oracle_tx_hash"] = txHash updateFields["oracle_tx_hash"] = txHash
if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateL2GasOracleStatusAndOracleTxHash error: %w, batch hash: %v, status: %v, txHash: %v", err, hash, status.String(), txHash)
} }
return nil return nil
} }
// UpdateProvingStatus updates the proving status of a batch. // UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status) updateFields["proving_status"] = int(status)
@@ -303,22 +308,24 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = nil updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified: case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now() updateFields["proved_at"] = time.Now()
default:
} }
if err := db.Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil { db := o.db
return err if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
} }
return nil return nil
} }
// UpdateRollupStatus updates the rollup status of a batch. // UpdateRollupStatus updates the rollup status of a batch.
func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error { func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["rollup_status"] = int(status) updateFields["rollup_status"] = int(status)
@@ -328,8 +335,17 @@ func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status type
case types.RollupFinalized: case types.RollupFinalized:
updateFields["finalized_at"] = time.Now() updateFields["finalized_at"] = time.Now()
} }
if err := db.Model(&Batch{}).WithContext(ctx).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateRollupStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
} }
return nil return nil
} }
@@ -342,8 +358,13 @@ func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash stri
if status == types.RollupCommitted { if status == types.RollupCommitted {
updateFields["committed_at"] = time.Now() updateFields["committed_at"] = time.Now()
} }
if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateCommitTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), commitTxHash)
} }
return nil return nil
} }
@@ -356,23 +377,35 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
if status == types.RollupFinalized { if status == types.RollupFinalized {
updateFields["finalized_at"] = time.Now() updateFields["finalized_at"] = time.Now()
} }
if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), finalizeTxHash)
} }
return nil return nil
} }
// UpdateProofByHash updates the block batch proof by hash. // UpdateProofByHash updates the batch proof by hash.
// for unit test. // for unit test.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error { func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
proofBytes, err := json.Marshal(proof) proofBytes, err := json.Marshal(proof)
if err != nil { if err != nil {
return err return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
} }
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["proof"] = proofBytes updateFields["proof"] = proofBytes
updateFields["proof_time_sec"] = proofTimeSec updateFields["proof_time_sec"] = proofTimeSec
err = o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error
return err db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err = db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
}
return nil
} }

View File

@@ -3,12 +3,11 @@ package orm
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"time" "time"
"scroll-tech/common/types" "scroll-tech/common/types"
bridgeTypes "scroll-tech/bridge/internal/types"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
) )
@@ -26,22 +25,22 @@ type Chunk struct {
EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"` EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"`
StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"` StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"`
TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"` TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
TotalL1MessagesPoppedInChunk uint64 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"` TotalL1MessagesPoppedInChunk uint32 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
// proof // proof
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"` ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"` Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"` ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"` ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int16 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"` ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// batch // batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"` BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// metadata // metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"` TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"` TotalL2TxNum uint32 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"` TotalL1CommitCalldataSize uint32 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"`
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"` TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"` CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"` UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -63,19 +62,22 @@ func (*Chunk) TableName() string {
// The returned chunks are sorted in ascending order by their index. // The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endIndex uint64) ([]*Chunk, error) { func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endIndex uint64) ([]*Chunk, error) {
if startIndex > endIndex { if startIndex > endIndex {
return nil, errors.New("start index should be less than or equal to end index") return nil, fmt.Errorf("Chunk.GetChunksInRange: start index should be less than or equal to end index, start index: %v, end index: %v", startIndex, endIndex)
} }
var chunks []*Chunk db := o.db.WithContext(ctx)
db := o.db.WithContext(ctx).Where("index >= ? AND index <= ?", startIndex, endIndex) db = db.Model(&Chunk{})
db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
db = db.Order("index ASC") db = db.Order("index ASC")
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil { if err := db.Find(&chunks).Error; err != nil {
return nil, err return nil, fmt.Errorf("Chunk.GetChunksInRange error: %w, start index: %v, end index: %v", err, startIndex, endIndex)
} }
if startIndex+uint64(len(chunks)) != endIndex+1 { // sanity check
return nil, errors.New("number of chunks not expected in the specified range") if uint64(len(chunks)) != endIndex-startIndex+1 {
return nil, fmt.Errorf("Chunk.GetChunksInRange: incorrect number of chunks, expected: %v, got: %v, start index: %v, end index: %v", endIndex-startIndex+1, len(chunks), startIndex, endIndex)
} }
return chunks, nil return chunks, nil
@@ -83,46 +85,43 @@ func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endInde
// GetUnbatchedChunks retrieves unbatched chunks from the database. // GetUnbatchedChunks retrieves unbatched chunks from the database.
func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) { func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash IS NULL")
db = db.Order("index asc")
var chunks []*Chunk var chunks []*Chunk
err := o.db.WithContext(ctx). if err := db.Find(&chunks).Error; err != nil {
Where("batch_hash IS NULL"). return nil, fmt.Errorf("Chunk.GetUnbatchedChunks error: %w", err)
Order("index asc").
Find(&chunks).Error
if err != nil {
return nil, err
} }
return chunks, nil return chunks, nil
} }
// GetLatestChunk retrieves the latest chunk from the database. // GetLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) { func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Order("index desc")
var latestChunk Chunk var latestChunk Chunk
err := o.db.WithContext(ctx). if err := db.First(&latestChunk).Error; err != nil {
Order("index desc"). return nil, fmt.Errorf("Chunk.GetLatestChunk error: %w", err)
First(&latestChunk).Error
if err != nil {
return nil, err
} }
return &latestChunk, nil return &latestChunk, nil
} }
// InsertChunk inserts a new chunk into the database. // InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Chunk, error) { func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 { if chunk == nil || len(chunk.Blocks) == 0 {
return nil, errors.New("invalid args") return nil, errors.New("invalid args")
} }
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
var chunkIndex uint64 var chunkIndex uint64
var totalL1MessagePoppedBefore uint64 var totalL1MessagePoppedBefore uint64
parentChunk, err := o.GetLatestChunk(ctx) parentChunk, err := o.GetLatestChunk(ctx)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
log.Error("failed to get latest chunk", "err", err) log.Error("failed to get latest chunk", "err", err)
return nil, err return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
} }
// if parentChunk==nil then err==gorm.ErrRecordNotFound, which means there's // if parentChunk==nil then err==gorm.ErrRecordNotFound, which means there's
@@ -130,13 +129,13 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *bridgeTypes.Chunk, dbTX
// if parentChunk!=nil then err=nil, then we fill the parentChunk-related data into the creating chunk // if parentChunk!=nil then err=nil, then we fill the parentChunk-related data into the creating chunk
if parentChunk != nil { if parentChunk != nil {
chunkIndex = parentChunk.Index + 1 chunkIndex = parentChunk.Index + 1
totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + parentChunk.TotalL1MessagesPoppedInChunk totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + uint64(parentChunk.TotalL1MessagesPoppedInChunk)
} }
hash, err := chunk.Hash(totalL1MessagePoppedBefore) hash, err := chunk.Hash(totalL1MessagePoppedBefore)
if err != nil { if err != nil {
log.Error("failed to get chunk hash", "err", err) log.Error("failed to get chunk hash", "err", err)
return nil, err return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
} }
var totalL2TxGas uint64 var totalL2TxGas uint64
@@ -159,18 +158,24 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *bridgeTypes.Chunk, dbTX
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(), EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(), EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: totalL2TxGas, TotalL2TxGas: totalL2TxGas,
TotalL2TxNum: totalL2TxNum, TotalL2TxNum: uint32(totalL2TxNum),
TotalL1CommitCalldataSize: totalL1CommitCalldataSize, TotalL1CommitCalldataSize: uint32(totalL1CommitCalldataSize),
TotalL1CommitGas: totalL1CommitGas, TotalL1CommitGas: totalL1CommitGas,
StartBlockTime: chunk.Blocks[0].Header.Time, StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore, TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore), TotalL1MessagesPoppedInChunk: uint32(chunk.NumL1Messages(totalL1MessagePoppedBefore)),
ProvingStatus: int16(types.ProvingTaskUnassigned), ProvingStatus: int16(types.ProvingTaskUnassigned),
} }
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
if err := db.Create(&newChunk).Error; err != nil { if err := db.Create(&newChunk).Error; err != nil {
log.Error("failed to insert chunk", "hash", hash, "err", err) return nil, fmt.Errorf("Chunk.InsertChunk error: %w, chunk hash: %v", err, newChunk.Hash)
return nil, err
} }
return &newChunk, nil return &newChunk, nil
@@ -178,11 +183,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *bridgeTypes.Chunk, dbTX
// UpdateProvingStatus updates the proving status of a chunk. // UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status) updateFields["proving_status"] = int(status)
@@ -193,11 +193,18 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = nil updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified: case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now() updateFields["proved_at"] = time.Now()
default:
} }
if err := db.Model(&Chunk{}).Where("hash", hash).Updates(updateFields).Error; err != nil { db := o.db
return err if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatus error: %w, chunk hash: %v, status: %v", err, hash, status.String())
} }
return nil return nil
} }
@@ -209,10 +216,12 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
if len(dbTX) > 0 && dbTX[0] != nil { if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0] db = dbTX[0]
} }
db = db.Model(&Chunk{}).Where("index >= ? AND index <= ?", startIndex, endIndex) db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
if err := db.Update("batch_hash", batchHash).Error; err != nil { if err := db.Update("batch_hash", batchHash).Error; err != nil {
return err return fmt.Errorf("Chunk.UpdateBatchHashInRange error: %w, start index: %v, end index: %v, batch hash: %v", err, startIndex, endIndex, batchHash)
} }
return nil return nil
} }

View File

@@ -2,8 +2,9 @@ package orm
import ( import (
"context" "context"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/common/types"
@@ -13,14 +14,23 @@ import (
type L1Block struct { type L1Block struct {
db *gorm.DB `gorm:"column:-"` db *gorm.DB `gorm:"column:-"`
Number uint64 `json:"number" gorm:"column:number"` // block
Hash string `json:"hash" gorm:"column:hash"` Number uint64 `json:"number" gorm:"column:number"`
HeaderRLP string `json:"header_rlp" gorm:"column:header_rlp"` Hash string `json:"hash" gorm:"column:hash"`
BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"` BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"`
BlockStatus int `json:"block_status" gorm:"column:block_status;default:1"`
ImportTxHash string `json:"import_tx_hash" gorm:"column:import_tx_hash;default:NULL"` // import
GasOracleStatus int `json:"oracle_status" gorm:"column:oracle_status;default:1"` BlockStatus int16 `json:"block_status" gorm:"column:block_status;default:1"`
ImportTxHash string `json:"import_tx_hash" gorm:"column:import_tx_hash;default:NULL"`
// oracle
GasOracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"` OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
} }
// NewL1Block create an l1Block instance // NewL1Block create an l1Block instance
@@ -34,54 +44,64 @@ func (*L1Block) TableName() string {
} }
// GetLatestL1BlockHeight get the latest l1 block height // GetLatestL1BlockHeight get the latest l1 block height
func (l *L1Block) GetLatestL1BlockHeight() (uint64, error) { func (o *L1Block) GetLatestL1BlockHeight(ctx context.Context) (uint64, error) {
result := l.db.Model(&L1Block{}).Select("COALESCE(MAX(number), 0)").Row() db := o.db.WithContext(ctx)
if result.Err() != nil { db = db.Model(&L1Block{})
return 0, result.Err() db = db.Select("COALESCE(MAX(number), 0)")
}
var maxNumber uint64 var maxNumber uint64
if err := result.Scan(&maxNumber); err != nil { if err := db.Row().Scan(&maxNumber); err != nil {
return 0, err return 0, fmt.Errorf("L1Block.GetLatestL1BlockHeight error: %w", err)
} }
return maxNumber, nil return maxNumber, nil
} }
// GetL1Blocks get the l1 blocks // GetL1Blocks get the l1 blocks
func (l *L1Block) GetL1Blocks(fields map[string]interface{}) ([]L1Block, error) { func (o *L1Block) GetL1Blocks(ctx context.Context, fields map[string]interface{}) ([]L1Block, error) {
var l1Blocks []L1Block db := o.db.WithContext(ctx)
db := l.db db = db.Model(&L1Block{})
for key, value := range fields { for key, value := range fields {
db = db.Where(key, value) db = db.Where(key, value)
} }
db = db.Order("number ASC") db = db.Order("number ASC")
var l1Blocks []L1Block
if err := db.Find(&l1Blocks).Error; err != nil { if err := db.Find(&l1Blocks).Error; err != nil {
return nil, err return nil, fmt.Errorf("L1Block.GetL1Blocks error: %w, fields: %v", err, fields)
} }
return l1Blocks, nil return l1Blocks, nil
} }
// InsertL1Blocks batch insert l1 blocks // InsertL1Blocks batch insert l1 blocks
func (l *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error { func (o *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {
if len(blocks) == 0 { if len(blocks) == 0 {
return nil return nil
} }
err := l.db.WithContext(ctx).Create(&blocks).Error db := o.db.WithContext(ctx)
if err != nil { db = db.Model(&L1Block{})
log.Error("failed to insert L1 Blocks", "err", err)
if err := db.Create(&blocks).Error; err != nil {
return fmt.Errorf("L1Block.InsertL1Blocks error: %w", err)
} }
return err return nil
} }
// UpdateL1GasOracleStatusAndOracleTxHash update l1 gas oracle status and oracle tx hash // UpdateL1GasOracleStatusAndOracleTxHash update l1 gas oracle status and oracle tx hash
func (l *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error { func (o *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error {
updateFields := map[string]interface{}{ updateFields := map[string]interface{}{
"oracle_status": int(status), "oracle_status": int(status),
"oracle_tx_hash": txHash, "oracle_tx_hash": txHash,
} }
if err := l.db.WithContext(ctx).Model(&L1Block{}).Where("hash", blockHash).Updates(updateFields).Error; err != nil {
return err db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})
db = db.Where("hash", blockHash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("L1Block.UpdateL1GasOracleStatusAndOracleTxHash error: %w, block hash: %v, status: %v, tx hash: %v", err, blockHash, status.String(), txHash)
} }
return nil return nil
} }

View File

@@ -3,6 +3,7 @@ package orm
import ( import (
"context" "context"
"database/sql" "database/sql"
"time"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
@@ -25,6 +26,11 @@ type L1Message struct {
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash"` Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash"`
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:NULL"` Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:NULL"`
Status int `json:"status" gorm:"column:status;default:1"` Status int `json:"status" gorm:"column:status;default:1"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
} }
// NewL1Message create an L1MessageOrm instance // NewL1Message create an L1MessageOrm instance

View File

@@ -3,31 +3,39 @@ package orm
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"time"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/bridge/internal/types" "scroll-tech/common/types"
) )
// L2Block represents a l2 block in the database. // L2Block represents a l2 block in the database.
type L2Block struct { type L2Block struct {
db *gorm.DB `gorm:"column:-"` db *gorm.DB `gorm:"column:-"`
// block
Number uint64 `json:"number" gorm:"number"` Number uint64 `json:"number" gorm:"number"`
Hash string `json:"hash" gorm:"hash"` Hash string `json:"hash" gorm:"hash"`
ParentHash string `json:"parent_hash" gorm:"parent_hash"` ParentHash string `json:"parent_hash" gorm:"parent_hash"`
Header string `json:"header" gorm:"header"` Header string `json:"header" gorm:"header"`
Transactions string `json:"transactions" gorm:"transactions"` Transactions string `json:"transactions" gorm:"transactions"`
WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"` WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"`
TxNum uint64 `json:"tx_num" gorm:"tx_num"` TxNum uint32 `json:"tx_num" gorm:"tx_num"`
GasUsed uint64 `json:"gas_used" gorm:"gas_used"` GasUsed uint64 `json:"gas_used" gorm:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"` BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
// chunk
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
} }
// NewL2Block creates a new L2Block instance // NewL2Block creates a new L2Block instance
@@ -42,25 +50,30 @@ func (*L2Block) TableName() string {
// GetL2BlocksLatestHeight retrieves the height of the latest L2 block. // GetL2BlocksLatestHeight retrieves the height of the latest L2 block.
// If the l2_block table is empty, it returns 0 to represent the genesis block height. // If the l2_block table is empty, it returns 0 to represent the genesis block height.
// In case of an error, it returns -1 along with the error. func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) {
func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (int64, error) { db := o.db.WithContext(ctx)
var maxNumber int64 db = db.Model(&L2Block{})
if err := o.db.WithContext(ctx).Model(&L2Block{}).Select("COALESCE(MAX(number), 0)").Row().Scan(&maxNumber); err != nil { db = db.Select("COALESCE(MAX(number), 0)")
return -1, err
}
var maxNumber uint64
if err := db.Row().Scan(&maxNumber); err != nil {
return 0, fmt.Errorf("L2Block.GetL2BlocksLatestHeight error: %w", err)
}
return maxNumber, nil return maxNumber, nil
} }
// GetUnchunkedBlocks get the l2 blocks that have not been put into a chunk. // GetUnchunkedBlocks get the l2 blocks that have not been put into a chunk.
// The returned blocks are sorted in ascending order by their block number. // The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) { func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Where("chunk_hash IS NULL")
db = db.Order("number ASC")
var l2Blocks []L2Block var l2Blocks []L2Block
if err := o.db.WithContext(ctx).Select("header, transactions, withdraw_trie_root"). if err := db.Find(&l2Blocks).Error; err != nil {
Where("chunk_hash IS NULL"). return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
Order("number asc").
Find(&l2Blocks).Error; err != nil {
return nil, err
} }
var wrappedBlocks []*types.WrappedBlock var wrappedBlocks []*types.WrappedBlock
@@ -68,12 +81,12 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
var wrappedBlock types.WrappedBlock var wrappedBlock types.WrappedBlock
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil { if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
return nil, err return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
} }
wrappedBlock.Header = &gethTypes.Header{} wrappedBlock.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil { if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
return nil, err return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
} }
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot) wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
@@ -87,6 +100,7 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
// The returned L2Blocks are sorted in ascending order by their block number. // The returned L2Blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*L2Block, error) { func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*L2Block, error) {
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
for key, value := range fields { for key, value := range fields {
db = db.Where(key, value) db = db.Where(key, value)
@@ -104,7 +118,7 @@ func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}
var l2Blocks []*L2Block var l2Blocks []*L2Block
if err := db.Find(&l2Blocks).Error; err != nil { if err := db.Find(&l2Blocks).Error; err != nil {
return nil, err return nil, fmt.Errorf("L2Block.GetL2Blocks error: %w, fields: %v, orderByList: %v", err, fields, orderByList)
} }
return l2Blocks, nil return l2Blocks, nil
} }
@@ -114,20 +128,23 @@ func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}
// The returned blocks are sorted in ascending order by their block number. // The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint64, endBlockNumber uint64) ([]*types.WrappedBlock, error) { func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint64, endBlockNumber uint64) ([]*types.WrappedBlock, error) {
if startBlockNumber > endBlockNumber { if startBlockNumber > endBlockNumber {
return nil, errors.New("start block number should be less than or equal to end block number") return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: start block number should be less than or equal to end block number, start block: %v, end block: %v", startBlockNumber, endBlockNumber)
} }
var l2Blocks []L2Block
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber) db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
db = db.Order("number ASC") db = db.Order("number ASC")
var l2Blocks []L2Block
if err := db.Find(&l2Blocks).Error; err != nil { if err := db.Find(&l2Blocks).Error; err != nil {
return nil, err return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
} }
// sanity check
if uint64(len(l2Blocks)) != endBlockNumber-startBlockNumber+1 { if uint64(len(l2Blocks)) != endBlockNumber-startBlockNumber+1 {
return nil, errors.New("number of blocks not expected in the specified range") return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: unexpected number of results, expected: %v, got: %v", endBlockNumber-startBlockNumber+1, len(l2Blocks))
} }
var wrappedBlocks []*types.WrappedBlock var wrappedBlocks []*types.WrappedBlock
@@ -135,12 +152,12 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6
var wrappedBlock types.WrappedBlock var wrappedBlock types.WrappedBlock
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil { if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
return nil, err return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
} }
wrappedBlock.Header = &gethTypes.Header{} wrappedBlock.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil { if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
return nil, err return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
} }
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot) wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
@@ -157,13 +174,13 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
header, err := json.Marshal(block.Header) header, err := json.Marshal(block.Header)
if err != nil { if err != nil {
log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err) log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err)
return err return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
} }
txs, err := json.Marshal(block.Transactions) txs, err := json.Marshal(block.Transactions)
if err != nil { if err != nil {
log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err) log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err)
return err return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
} }
l2Block := L2Block{ l2Block := L2Block{
@@ -172,7 +189,7 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
ParentHash: block.Header.ParentHash.String(), ParentHash: block.Header.ParentHash.String(),
Transactions: string(txs), Transactions: string(txs),
WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(), WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(),
TxNum: uint64(len(block.Transactions)), TxNum: uint32(len(block.Transactions)),
GasUsed: block.Header.GasUsed, GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time, BlockTimestamp: block.Header.Time,
Header: string(header), Header: string(header),
@@ -180,9 +197,11 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
l2Blocks = append(l2Blocks, l2Block) l2Blocks = append(l2Blocks, l2Block)
} }
if err := o.db.WithContext(ctx).Create(&l2Blocks).Error; err != nil { db := o.db.WithContext(ctx)
log.Error("failed to insert l2Blocks", "err", err) db = db.Model(&L2Block{})
return err
if err := db.Create(&l2Blocks).Error; err != nil {
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
} }
return nil return nil
} }
@@ -196,13 +215,19 @@ func (o *L2Block) UpdateChunkHashInRange(ctx context.Context, startIndex uint64,
if len(dbTX) > 0 && dbTX[0] != nil { if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0] db = dbTX[0]
} }
db = db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Where("number >= ? AND number <= ?", startIndex, endIndex)
db = db.WithContext(ctx).Model(&L2Block{}).Where("number >= ? AND number <= ?", startIndex, endIndex)
tx := db.Update("chunk_hash", chunkHash) tx := db.Update("chunk_hash", chunkHash)
if tx.Error != nil {
if tx.RowsAffected != int64(endIndex-startIndex+1) { return fmt.Errorf("L2Block.UpdateChunkHashInRange error: %w, start index: %v, end index: %v, chunk hash: %v", tx.Error, startIndex, endIndex, chunkHash)
return fmt.Errorf("expected %d rows to be updated, got %d", endIndex-startIndex+1, tx.RowsAffected)
} }
return tx.Error // sanity check
if uint64(tx.RowsAffected) != endIndex-startIndex+1 {
return fmt.Errorf("L2Block.UpdateChunkHashInRange: incorrect number of rows affected, expected: %v, got: %v", endIndex-startIndex+1, tx.RowsAffected)
}
return nil
} }

View File

@@ -1,61 +0,0 @@
package migrate
import (
"database/sql"
"embed"
"os"
"strconv"
"github.com/pressly/goose/v3"
)
// embedMigrations bundles every migrations/*.sql file into the binary, so
// the migration SQL ships with the executable instead of being read from disk.
//go:embed migrations/*.sql
var embedMigrations embed.FS

// MigrationsDir is the directory, inside the embedded FS, that goose scans
// for migration files.
const MigrationsDir string = "migrations"
// init configures goose before any migration runs: migrations come from the
// embedded FS, are applied in sequential-number order, and are tracked in the
// "scroll_migrations" table. SQL logging is toggled by LOG_SQL_MIGRATIONS
// (parse errors silently fall back to quiet mode).
func init() {
	logSQL, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))

	goose.SetBaseFS(embedMigrations)
	goose.SetSequential(true)
	goose.SetTableName("scroll_migrations")
	goose.SetVerbose(logSQL)
}
// Migrate applies every pending "up" migration from MigrationsDir,
// tolerating out-of-order ("missing") migrations.
func Migrate(db *sql.DB) error {
	err := goose.Up(db, MigrationsDir, goose.WithAllowMissing())
	return err
}
// Rollback reverts migrations: down to *version when version is non-nil,
// otherwise a single step down.
func Rollback(db *sql.DB, version *int64) error {
	if version == nil {
		return goose.Down(db, MigrationsDir)
	}
	return goose.DownTo(db, MigrationsDir, *version)
}
// ResetDB rolls every migration back to version 0 and then re-applies all of
// them, leaving a freshly migrated schema.
func ResetDB(db *sql.DB) error {
	zero := new(int64) // roll all the way down to version 0
	if err := Rollback(db, zero); err != nil {
		return err
	}
	return Migrate(db)
}
// Current returns the schema version currently recorded in the database.
func Current(db *sql.DB) (int64, error) {
	version, err := goose.GetDBVersion(db)
	return version, err
}
// Status reports whether the migration state is readable; it delegates to
// goose.Version for MigrationsDir and surfaces any error.
func Status(db *sql.DB) error {
	err := goose.Version(db, MigrationsDir)
	return err
}
// Create scaffolds a new migration file named `name` of the given
// migrationType inside MigrationsDir.
func Create(db *sql.DB, name, migrationType string) error {
	err := goose.Create(db, MigrationsDir, name, migrationType)
	return err
}

View File

@@ -1,86 +0,0 @@
package migrate
import (
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/database"
)
var (
	// base manages the dockerized test environment (database container).
	base *docker.App
	// pgDB is the shared Postgres handle used by all subtests; initialized
	// once in initEnv.
	pgDB *sqlx.DB
)
// initEnv starts the database container and initializes the shared pgDB
// handle through the ORM factory.
func initEnv(t *testing.T) error {
	// Boot the db container first; the factory below connects to it.
	base.RunDBImage(t)

	ormFactory, err := database.NewOrmFactory(base.DBConfig)
	if err != nil {
		return err
	}
	pgDB = ormFactory.GetDB()
	return nil
}
// TestMigrate exercises the migration lifecycle against a containerized
// database: current version, status, reset, migrate up, and rollback.
func TestMigrate(t *testing.T) {
	base = docker.NewDockerApp()
	if err := initEnv(t); err != nil {
		t.Fatal(err)
	}
	// Registered before the subtests; runs after all of them complete.
	t.Cleanup(func() {
		base.Free()
	})

	t.Run("testCurrent", testCurrent)
	t.Run("testStatus", testStatus)
	t.Run("testResetDB", testResetDB)
	t.Run("testMigrate", testMigrate)
	t.Run("testRollback", testRollback)
}
// testCurrent verifies a fresh database reports schema version 0.
func testCurrent(t *testing.T) {
	version, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, 0, int(version))
}
func testStatus(t *testing.T) {
status := Status(pgDB.DB)
assert.NoError(t, status)
}
// testResetDB resets the schema and checks the resulting version.
func testResetDB(t *testing.T) {
	assert.NoError(t, ResetDB(pgDB.DB))

	version, err := Current(pgDB.DB)
	assert.NoError(t, err)
	// 5 is the number of applied migrations after a full reset
	// (the original comment called this "total number of tables").
	assert.Equal(t, 5, int(version))
}
// testMigrate applies all pending migrations and expects a positive version.
func testMigrate(t *testing.T) {
	assert.NoError(t, Migrate(pgDB.DB))

	version, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, version > 0)
}
// testRollback rolls back one migration and verifies the version drops by
// exactly one.
func testRollback(t *testing.T) {
	before, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, before > 0)

	assert.NoError(t, Rollback(pgDB.DB, nil))

	after, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, after+1 == before)
}

View File

@@ -1,37 +0,0 @@
-- +goose Up
-- +goose StatementBegin
-- l1_message records bridge messages observed on L1, together with their
-- relay status (see the status column comment below).
create table l1_message
(
    queue_index BIGINT NOT NULL,      -- position in the L1 message queue; unique
    msg_hash VARCHAR NOT NULL,        -- message hash; unique
    height BIGINT NOT NULL,           -- block height (indexed)
    gas_limit BIGINT NOT NULL,
    sender VARCHAR NOT NULL,
    target VARCHAR NOT NULL,
    value VARCHAR NOT NULL,
    calldata TEXT NOT NULL,
    layer1_hash VARCHAR NOT NULL,
    layer2_hash VARCHAR DEFAULT NULL, -- nullable: populated only once a layer-2 tx exists
    status INTEGER DEFAULT 1,
    created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- Enumerates the meaning of the integer status values.
comment
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l1_message_hash_uindex
    on l1_message (msg_hash);

create unique index l1_message_nonce_uindex
    on l1_message (queue_index);

create index l1_message_height_index
    on l1_message (height);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l1_message;
-- +goose StatementEnd

View File

@@ -10,13 +10,11 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/database/migrate"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
var ( var (
@@ -27,10 +25,10 @@ var (
chunkOrm *Chunk chunkOrm *Chunk
batchOrm *Batch batchOrm *Batch
wrappedBlock1 *bridgeTypes.WrappedBlock wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *bridgeTypes.WrappedBlock wrappedBlock2 *types.WrappedBlock
chunk1 *bridgeTypes.Chunk chunk1 *types.Chunk
chunk2 *bridgeTypes.Chunk chunk2 *types.Chunk
chunkHash1 common.Hash chunkHash1 common.Hash
chunkHash2 common.Hash chunkHash2 common.Hash
) )
@@ -46,8 +44,8 @@ func setupEnv(t *testing.T) {
base = docker.NewDockerApp() base = docker.NewDockerApp()
base.RunDBImage(t) base.RunDBImage(t)
var err error var err error
db, err = utils.InitDB( db, err = database.InitDB(
&config.DBConfig{ &database.Config{
DSN: base.DBConfig.DSN, DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName, DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum, MaxOpenNum: base.DBConfig.MaxOpenNum,
@@ -64,28 +62,22 @@ func setupEnv(t *testing.T) {
l2BlockOrm = NewL2Block(db) l2BlockOrm = NewL2Block(db)
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
if err != nil { assert.NoError(t, err)
t.Fatalf("failed to read file: %v", err) wrappedBlock1 = &types.WrappedBlock{}
} err = json.Unmarshal(templateBlockTrace, wrappedBlock1)
wrappedBlock1 = &bridgeTypes.WrappedBlock{} assert.NoError(t, err)
if err = json.Unmarshal(templateBlockTrace, wrappedBlock1); err != nil {
t.Fatalf("failed to unmarshal block trace: %v", err)
}
templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json") templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json")
if err != nil { assert.NoError(t, err)
t.Fatalf("failed to read file: %v", err) wrappedBlock2 = &types.WrappedBlock{}
} err = json.Unmarshal(templateBlockTrace, wrappedBlock2)
wrappedBlock2 = &bridgeTypes.WrappedBlock{} assert.NoError(t, err)
if err = json.Unmarshal(templateBlockTrace, wrappedBlock2); err != nil {
t.Fatalf("failed to unmarshal block trace: %v", err)
}
chunk1 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1}} chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}}
chunkHash1, err = chunk1.Hash(0) chunkHash1, err = chunk1.Hash(0)
assert.NoError(t, err) assert.NoError(t, err)
chunk2 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock2}} chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}}
chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0)) chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
assert.NoError(t, err) assert.NoError(t, err)
} }
@@ -102,12 +94,12 @@ func TestL2BlockOrm(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB)) assert.NoError(t, migrate.ResetDB(sqlDB))
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}) err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err) assert.NoError(t, err)
height, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background()) height, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, int64(3), height) assert.Equal(t, uint64(3), height)
blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background()) blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background())
assert.NoError(t, err) assert.NoError(t, err)
@@ -135,9 +127,6 @@ func TestChunkOrm(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB)) assert.NoError(t, migrate.ResetDB(sqlDB))
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1) dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex()) assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
@@ -177,35 +166,24 @@ func TestBatchOrm(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB)) assert.NoError(t, migrate.ResetDB(sqlDB))
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}) batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
assert.NoError(t, err)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
assert.NoError(t, err) assert.NoError(t, err)
hash1 := batch1.Hash hash1 := batch1.Hash
batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0) batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
assert.NoError(t, err) assert.NoError(t, err)
batchHeader1, err := bridgeTypes.DecodeBatchHeader(batch1.BatchHeader) batchHeader1, err := types.DecodeBatchHeader(batch1.BatchHeader)
assert.NoError(t, err) assert.NoError(t, err)
batchHash1 := batchHeader1.Hash().Hex() batchHash1 := batchHeader1.Hash().Hex()
assert.Equal(t, hash1, batchHash1) assert.Equal(t, hash1, batchHash1)
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2}) batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
assert.NoError(t, err) assert.NoError(t, err)
hash2 := batch2.Hash hash2 := batch2.Hash
batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1) batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err) assert.NoError(t, err)
batchHeader2, err := bridgeTypes.DecodeBatchHeader(batch2.BatchHeader) batchHeader2, err := types.DecodeBatchHeader(batch2.BatchHeader)
assert.NoError(t, err) assert.NoError(t, err)
batchHash2 := batchHeader2.Hash().Hex() batchHash2 := batchHeader2.Hash().Hex()
assert.Equal(t, hash2, batchHash2) assert.Equal(t, hash2, batchHash2)
@@ -224,32 +202,11 @@ func TestBatchOrm(t *testing.T) {
assert.Equal(t, types.RollupPending, rollupStatus[0]) assert.Equal(t, types.RollupPending, rollupStatus[0])
assert.Equal(t, types.RollupPending, rollupStatus[1]) assert.Equal(t, types.RollupPending, rollupStatus[1])
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash1, types.ProvingTaskSkipped)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitted)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskFailed)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupCommitted)
assert.NoError(t, err)
count, err = batchOrm.UpdateSkippedBatches(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(2), count)
count, err = batchOrm.UpdateSkippedBatches(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(0), count)
batch, err := batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizationSkipped, types.RollupStatus(batch.RollupStatus))
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified) err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err) assert.NoError(t, err)
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1) dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
assert.Error(t, err, gorm.ErrRecordNotFound) assert.Error(t, err)
assert.Nil(t, dbProof) assert.Nil(t, dbProof)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified) err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)

View File

@@ -1,136 +0,0 @@
package types
import (
"encoding/binary"
"errors"
"math"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
)
// Per-byte L1 calldata gas costs used by the commit-gas estimate; the 16/4
// split matches Ethereum's post-EIP-2028 calldata pricing.
const nonZeroByteGas uint64 = 16
const zeroByteGas uint64 = 4

// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
	Header *types.Header `json:"header"`
	// Transactions is only used for recover types.Transactions, the from of types.TransactionData field is missing.
	Transactions []*types.TransactionData `json:"transactions"`
	WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}
// NumL1Messages returns the number of L1 messages in this block: the sum of
// included and skipped L1 messages, derived from the highest L1-message queue
// index (tx nonce) seen in the block.
func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
	found := false
	var lastQueueIndex uint64
	for _, txData := range w.Transactions {
		if txData.Type == types.L1MessageTxType {
			found = true
			lastQueueIndex = txData.Nonce
		}
	}
	if !found {
		return 0
	}
	// The last queue index popped before this block is
	// totalL1MessagePoppedBefore - 1, hence the +1.
	// TODO: cache results
	return lastQueueIndex - totalL1MessagePoppedBefore + 1
}
// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
//
// Fixed 60-byte layout (big-endian):
//   [0:8)   block number
//   [8:16)  timestamp
//   [16:48) base fee (left zeroed below — see TODO)
//   [48:56) gas limit
//   [56:58) total tx count
//   [58:60) L1 message count
// Returns an error if any field overflows its slot.
func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
	bytes := make([]byte, 60)
	if !w.Header.Number.IsUint64() {
		return nil, errors.New("block number is not uint64")
	}
	// Counts are stored in 2 bytes each, so both must fit in uint16.
	if len(w.Transactions) > math.MaxUint16 {
		return nil, errors.New("number of transactions exceeds max uint16")
	}
	numL1Messages := w.NumL1Messages(totalL1MessagePoppedBefore)
	if numL1Messages > math.MaxUint16 {
		return nil, errors.New("number of L1 messages exceeds max uint16")
	}
	binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
	binary.BigEndian.PutUint64(bytes[8:], w.Header.Time)
	// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
	binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
	binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions)))
	binary.BigEndian.PutUint16(bytes[58:], uint16(numL1Messages))
	return bytes, nil
}
// EstimateL1CommitCalldataSize calculates the calldata size in l1 commit
// approximately, as the summed payload length of all non-L1-message txs.
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
// This needs to be adjusted in the future.
func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
	var total uint64
	for _, txData := range w.Transactions {
		// L1 message txs are not part of the commit calldata.
		if txData.Type != types.L1MessageTxType {
			total += uint64(len(txData.Data))
		}
	}
	return total
}
// EstimateL1CommitGas calculates the calldata gas in l1 commit approximately:
// each non-L1-message tx is re-encoded as a legacy RLP tx and priced per byte
// (zero vs non-zero), plus 4 length-prefix bytes per tx.
// TODO: This will need to be adjusted.
// The part added here is only the calldata cost,
// but we have execution cost for verifying blocks / chunks / batches and storing the batch hash.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
	// calldataGas prices a byte slice at the L1 calldata rates.
	calldataGas := func(data []byte) uint64 {
		var g uint64
		for _, b := range data {
			if b == 0 {
				g += zeroByteGas
			} else {
				g += nonZeroByteGas
			}
		}
		return g
	}

	var total uint64
	for _, txData := range w.Transactions {
		if txData.Type == types.L1MessageTxType {
			continue
		}
		payload, _ := hexutil.Decode(txData.Data)
		tx := types.NewTx(&types.LegacyTx{
			Nonce:    txData.Nonce,
			To:       txData.To,
			Value:    txData.Value.ToInt(),
			Gas:      txData.Gas,
			GasPrice: txData.GasPrice.ToInt(),
			Data:     payload,
			V:        txData.V.ToInt(),
			R:        txData.R.ToInt(),
			S:        txData.S.ToInt(),
		})
		rlpTxData, _ := tx.MarshalBinary()
		total += calldataGas(rlpTxData)

		// 4-byte big-endian length prefix per tx, priced at the same rates.
		var txLen [4]byte
		binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
		total += calldataGas(txLen[:])
	}
	return total
}
// L2TxsNum calculates the number of l2 txs, i.e. all transactions that are
// not L1 message txs.
func (w *WrappedBlock) L2TxsNum() uint64 {
	count := uint64(0)
	for _, txData := range w.Transactions {
		if txData.Type == types.L1MessageTxType {
			continue
		}
		count++
	}
	return count
}

View File

@@ -1,43 +0,0 @@
package utils
import (
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"scroll-tech/bridge/internal/config"
)
// InitDB opens a gorm Postgres handle from the given config, applies the
// connection-pool limits, and verifies connectivity with a ping.
func InitDB(config *config.DBConfig) (*gorm.DB, error) {
	gormDB, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Info),
	})
	if err != nil {
		return nil, err
	}

	sqlDB, err := gormDB.DB()
	if err != nil {
		return nil, err
	}
	sqlDB.SetMaxOpenConns(config.MaxOpenNum)
	sqlDB.SetMaxIdleConns(config.MaxIdleNum)

	// Fail fast if the database is unreachable.
	if err := sqlDB.Ping(); err != nil {
		return nil, err
	}
	return gormDB, nil
}
// CloseDB close the db handler. notice the db handler only can close when
// the program exits.
func CloseDB(db *gorm.DB) error {
	sqlDB, err := db.DB()
	if err != nil {
		return err
	}
	// Close's error (nil or not) is exactly what the caller should see.
	return sqlDB.Close()
}

View File

@@ -11,12 +11,12 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/database/migrate"
bcmd "scroll-tech/bridge/cmd" bcmd "scroll-tech/bridge/cmd"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
"scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/mock_bridge" "scroll-tech/bridge/mock_bridge"
) )
@@ -46,13 +46,13 @@ var (
) )
func setupDB(t *testing.T) *gorm.DB { func setupDB(t *testing.T) *gorm.DB {
cfg := &config.DBConfig{ cfg := &database.Config{
DSN: base.DBConfig.DSN, DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName, DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum, MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum, MaxIdleNum: base.DBConfig.MaxIdleNum,
} }
db, err := utils.InitDB(cfg) db, err := database.InitDB(cfg)
assert.NoError(t, err) assert.NoError(t, err)
sqlDB, err := db.DB() sqlDB, err := db.DB()
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -9,18 +9,17 @@ import (
gethTypes "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
func testImportL1GasPrice(t *testing.T) { func testImportL1GasPrice(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
prepareContracts(t) prepareContracts(t)
@@ -44,10 +43,10 @@ func testImportL1GasPrice(t *testing.T) {
l1BlockOrm := orm.NewL1Block(db) l1BlockOrm := orm.NewL1Block(db)
// check db status // check db status
latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight() latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, number, latestBlockHeight) assert.Equal(t, number, latestBlockHeight)
blocks, err := l1BlockOrm.GetL1Blocks(map[string]interface{}{"number": latestBlockHeight}) blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(blocks), 1) assert.Equal(t, len(blocks), 1)
assert.Empty(t, blocks[0].OracleTxHash) assert.Empty(t, blocks[0].OracleTxHash)
@@ -55,7 +54,7 @@ func testImportL1GasPrice(t *testing.T) {
// relay gas price // relay gas price
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
blocks, err = l1BlockOrm.GetL1Blocks(map[string]interface{}{"number": latestBlockHeight}) blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(blocks), 1) assert.Equal(t, len(blocks), 1)
assert.NotEmpty(t, blocks[0].OracleTxHash) assert.NotEmpty(t, blocks[0].OracleTxHash)
@@ -64,7 +63,7 @@ func testImportL1GasPrice(t *testing.T) {
func testImportL2GasPrice(t *testing.T) { func testImportL2GasPrice(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
prepareContracts(t) prepareContracts(t)
l2Cfg := bridgeApp.Config.L2Config l2Cfg := bridgeApp.Config.L2Config
@@ -72,8 +71,8 @@ func testImportL2GasPrice(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
// add fake chunk // add fake chunk
chunk := &bridgeTypes.Chunk{ chunk := &types.Chunk{
Blocks: []*bridgeTypes.WrappedBlock{ Blocks: []*types.WrappedBlock{
{ {
Header: &gethTypes.Header{ Header: &gethTypes.Header{
Number: big.NewInt(1), Number: big.NewInt(1),
@@ -90,7 +89,7 @@ func testImportL2GasPrice(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
batchOrm := orm.NewBatch(db) batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*bridgeTypes.Chunk{chunk}) _, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*types.Chunk{chunk})
assert.NoError(t, err) assert.NoError(t, err)
// check db status // check db status

View File

@@ -11,17 +11,17 @@ import (
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
) )
func testRelayL1MessageSucceed(t *testing.T) { func testRelayL1MessageSucceed(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
prepareContracts(t) prepareContracts(t)

View File

@@ -11,16 +11,15 @@ import (
_ "scroll-tech/bridge/cmd/msg_relayer/app" _ "scroll-tech/bridge/cmd/msg_relayer/app"
_ "scroll-tech/bridge/cmd/rollup_relayer/app" _ "scroll-tech/bridge/cmd/rollup_relayer/app"
"scroll-tech/common/database"
cutils "scroll-tech/common/utils" cutils "scroll-tech/common/utils"
"scroll-tech/bridge/internal/utils"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func testProcessStart(t *testing.T) { func testProcessStart(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
bridgeApp.RunApp(t, cutils.EventWatcherApp) bridgeApp.RunApp(t, cutils.EventWatcherApp)
bridgeApp.RunApp(t, cutils.GasOracleApp) bridgeApp.RunApp(t, cutils.GasOracleApp)
@@ -32,7 +31,7 @@ func testProcessStart(t *testing.T) {
func testProcessStartEnableMetrics(t *testing.T) { func testProcessStartEnableMetrics(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
port, err := rand.Int(rand.Reader, big.NewInt(2000)) port, err := rand.Int(rand.Reader, big.NewInt(2000))
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -10,6 +10,7 @@ import (
gethTypes "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message" "scroll-tech/common/types/message"
@@ -17,13 +18,11 @@ import (
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
func testCommitBatchAndFinalizeBatch(t *testing.T) { func testCommitBatchAndFinalizeBatch(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
prepareContracts(t) prepareContracts(t)
@@ -37,7 +36,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db) l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// add some blocks to db // add some blocks to db
var wrappedBlocks []*bridgeTypes.WrappedBlock var wrappedBlocks []*types.WrappedBlock
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
header := gethTypes.Header{ header := gethTypes.Header{
Number: big.NewInt(int64(i)), Number: big.NewInt(int64(i)),
@@ -45,7 +44,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
Difficulty: big.NewInt(0), Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0), BaseFee: big.NewInt(0),
} }
wrappedBlocks = append(wrappedBlocks, &bridgeTypes.WrappedBlock{ wrappedBlocks = append(wrappedBlocks, &types.WrappedBlock{
Header: &header, Header: &header,
Transactions: nil, Transactions: nil,
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},

View File

@@ -1,56 +0,0 @@
package utils
import (
"context"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
)
// ethClient is the minimal subset of an Ethereum RPC client needed by
// GetLatestConfirmedBlockNumber: the chain tip and header lookup by number.
type ethClient interface {
	BlockNumber(ctx context.Context) (uint64, error)
	HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
}
// GetLatestConfirmedBlockNumber get confirmed block number by rpc.BlockNumber type:
//   - safe / finalized: the height of the corresponding tagged header;
//   - latest: the current chain tip;
//   - a non-negative integer N: the tip minus N confirmations (0 if the
//     chain is shorter than N);
//   - anything else is rejected.
func GetLatestConfirmedBlockNumber(ctx context.Context, client ethClient, confirm rpc.BlockNumber) (uint64, error) {
	switch {
	case confirm == rpc.SafeBlockNumber || confirm == rpc.FinalizedBlockNumber:
		tag := big.NewInt(int64(rpc.SafeBlockNumber))
		if confirm == rpc.FinalizedBlockNumber {
			tag = big.NewInt(int64(rpc.FinalizedBlockNumber))
		}
		header, err := client.HeaderByNumber(ctx, tag)
		if err != nil {
			return 0, err
		}
		if !header.Number.IsInt64() {
			return 0, fmt.Errorf("received invalid block confirm: %v", header.Number)
		}
		return header.Number.Uint64(), nil

	case confirm == rpc.LatestBlockNumber:
		return client.BlockNumber(ctx)

	case confirm.Int64() >= 0: // If it's positive integer, consider it as a certain confirm value.
		tip, err := client.BlockNumber(ctx)
		if err != nil {
			return 0, err
		}
		depth := uint64(confirm.Int64())
		if tip < depth {
			return 0, nil
		}
		return tip - depth, nil

	default:
		return 0, fmt.Errorf("unknown confirmation type: %v", confirm)
	}
}

View File

@@ -1,134 +0,0 @@
package utils
import (
"context"
"encoding/json"
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/go-ethereum/common/math"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
)
var (
	// tests enumerates JSON encodings of rpc.BlockNumber: input is the raw
	// JSON text, mustFail marks inputs that must be rejected by Unmarshal,
	// and expected is the decoded value for accepted inputs.
	tests = []struct {
		input    string
		mustFail bool
		expected rpc.BlockNumber
	}{
		{`"0x"`, true, rpc.BlockNumber(0)},
		{`"0x0"`, false, rpc.BlockNumber(0)},
		{`"0X1"`, false, rpc.BlockNumber(1)},
		{`"0x00"`, true, rpc.BlockNumber(0)},
		{`"0x01"`, true, rpc.BlockNumber(0)},
		{`"0x1"`, false, rpc.BlockNumber(1)},
		{`"0x12"`, false, rpc.BlockNumber(18)},
		{`"0x7fffffffffffffff"`, false, rpc.BlockNumber(math.MaxInt64)},
		{`"0x8000000000000000"`, true, rpc.BlockNumber(0)},
		{"0", true, rpc.BlockNumber(0)},
		{`"ff"`, true, rpc.BlockNumber(0)},
		{`"safe"`, false, rpc.SafeBlockNumber},
		{`"finalized"`, false, rpc.FinalizedBlockNumber},
		{`"pending"`, false, rpc.PendingBlockNumber},
		{`"latest"`, false, rpc.LatestBlockNumber},
		{`"earliest"`, false, rpc.EarliestBlockNumber},
		{`someString`, true, rpc.BlockNumber(0)},
		{`""`, true, rpc.BlockNumber(0)},
		{``, true, rpc.BlockNumber(0)},
	}
)
// TestUnmarshalJSON decodes each table entry and checks both the error
// expectation and (for accepted inputs) the decoded value.
func TestUnmarshalJSON(t *testing.T) {
	for i, tc := range tests {
		var got rpc.BlockNumber
		err := json.Unmarshal([]byte(tc.input), &got)
		if tc.mustFail && err == nil {
			t.Errorf("Test %d should fail", i)
			continue
		}
		if !tc.mustFail && err != nil {
			t.Errorf("Test %d should pass but got err: %v", i, err)
			continue
		}
		// For mustFail entries that did fail, got is still the zero value,
		// which the table encodes as expected == 0.
		if got != tc.expected {
			t.Errorf("Test %d got unexpected value, want %d, got %d", i, tc.expected, got)
		}
	}
}
// TestMarshalJSON round-trips only the accepted (non-mustFail) table entries:
// decode the input, re-marshal, and compare against the marshaled expected
// value. Failing inputs are skipped because they never decode.
func TestMarshalJSON(t *testing.T) {
	for i, test := range tests {
		var num rpc.BlockNumber
		want, err := json.Marshal(test.expected)
		assert.NoError(t, err)
		if !test.mustFail {
			err = json.Unmarshal([]byte(test.input), &num)
			assert.NoError(t, err)
			got, err := json.Marshal(&num)
			assert.NoError(t, err)
			if string(want) != string(got) {
				t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
			}
		}
	}
}
// MockEthClient is a stub ethClient whose chain tip is fixed at val.
type MockEthClient struct {
	val uint64
}

// BlockNumber returns the configured tip height.
func (e MockEthClient) BlockNumber(ctx context.Context) (uint64, error) {
	return e.val, nil
}

// HeaderByNumber resolves the special negative tags relative to the tip
// (latest = tip, safe = tip-6, finalized = tip-12), takes any other number
// literally, and clamps the result at 0.
func (e MockEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
	height := number.Int64()
	switch height {
	case int64(rpc.LatestBlockNumber):
		height = int64(e.val)
	case int64(rpc.SafeBlockNumber):
		height = int64(e.val) - 6
	case int64(rpc.FinalizedBlockNumber):
		height = int64(e.val) - 12
	}
	if height < 0 {
		height = 0
	}
	return &types.Header{Number: new(big.Int).SetInt64(height)}, nil
}
func TestGetLatestConfirmedBlockNumber(t *testing.T) {
ctx := context.Background()
client := MockEthClient{}
testCases := []struct {
blockNumber uint64
confirmation rpc.BlockNumber
expectedResult uint64
}{
{5, 6, 0},
{7, 6, 1},
{10, 2, 8},
{0, 1, 0},
{3, 0, 3},
{15, 15, 0},
{16, rpc.SafeBlockNumber, 10},
{22, rpc.FinalizedBlockNumber, 10},
{10, rpc.LatestBlockNumber, 10},
{5, rpc.SafeBlockNumber, 0},
{11, rpc.FinalizedBlockNumber, 0},
}
for _, testCase := range testCases {
client.val = testCase.blockNumber
confirmed, err := GetLatestConfirmedBlockNumber(ctx, &client, testCase.confirmation)
assert.NoError(t, err)
assert.Equal(t, testCase.expectedResult, confirmed)
}
}

View File

@@ -1,65 +0,0 @@
package utils
import (
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
bridgeabi "scroll-tech/bridge/abi"
)
// Keccak2 computes keccak256(a || b), the hash of the concatenation of two
// 32-byte hashes (e.g. a Merkle-tree parent node).
func Keccak2(a common.Hash, b common.Hash) common.Hash {
	combined := make([]byte, 0, 64)
	combined = append(combined, a.Bytes()...)
	combined = append(combined, b.Bytes()...)
	return common.BytesToHash(crypto.Keccak256(combined))
}
// ComputeMessageHash computes the message hash as
// keccak256(ABI-encoded relayMessage(sender, target, value, messageNonce, message))
// using the L2ScrollMessenger ABI.
func ComputeMessageHash(
	sender common.Address,
	target common.Address,
	value *big.Int,
	messageNonce *big.Int,
	message []byte,
) common.Hash {
	// NOTE(review): the Pack error is deliberately discarded; if packing
	// ever failed, this would silently hash nil data. The arguments are
	// presumed to match the relayMessage signature — confirm against the
	// L2ScrollMessenger ABI definition.
	data, _ := bridgeabi.L2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
	return common.BytesToHash(crypto.Keccak256(data))
}
// BufferToUint256Le convert bytes array to uint256 array assuming little-endian
func BufferToUint256Le(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
v := big.NewInt(0)
shft := big.NewInt(1)
for j := 0; j < 32; j++ {
v = new(big.Int).Add(v, new(big.Int).Mul(shft, big.NewInt(int64(buffer[i*32+j]))))
shft = new(big.Int).Mul(shft, big.NewInt(256))
}
buffer256[i] = v
}
return buffer256
}
// UnpackLog unpacks a retrieved log into the provided output structure.
// It verifies the log's first topic matches the named event's signature,
// decodes the non-indexed data section (if any) into out, and then parses
// the indexed arguments from the remaining topics.
// Returns an error on a topic-less log, a signature mismatch, or a decode
// failure.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
	// Guard against logs with no topics at all; indexing Topics[0]
	// unconditionally would panic on such a log.
	if len(log.Topics) == 0 {
		return fmt.Errorf("log has no topics")
	}
	if log.Topics[0] != c.Events[event].ID {
		return fmt.Errorf("event signature mismatch")
	}
	if len(log.Data) > 0 {
		if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
			return err
		}
	}
	// Collect only the indexed inputs; their values live in the topics,
	// not in the data section.
	var indexed abi.Arguments
	for _, arg := range c.Events[event].Inputs {
		if arg.Indexed {
			indexed = append(indexed, arg)
		}
	}
	return abi.ParseTopics(out, indexed, log.Topics[1:])
}

View File

@@ -1,49 +0,0 @@
package utils
import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
// TestKeccak2 checks Keccak2 against known hash-pair vectors: the zero-leaf
// parent, its own parent one level up, and a distinct (1, 2) pair.
func TestKeccak2(t *testing.T) {
	vectors := []struct {
		a, b, want string
	}{
		{
			"0x0000000000000000000000000000000000000000000000000000000000000000",
			"0x0000000000000000000000000000000000000000000000000000000000000000",
			"0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5",
		},
		{
			"0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5",
			"0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5",
			"0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30",
		},
		{
			"0x0000000000000000000000000000000000000000000000000000000000000001",
			"0x0000000000000000000000000000000000000000000000000000000000000002",
			"0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0",
		},
	}
	for _, v := range vectors {
		got := Keccak2(common.HexToHash(v.a), common.HexToHash(v.b))
		if got != common.HexToHash(v.want) {
			t.Fatalf("Invalid keccak, want %s, got %s", v.want, got.Hex())
		}
	}
}
func TestComputeMessageHash(t *testing.T) {
hash := ComputeMessageHash(
common.HexToAddress("0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63"),
common.HexToAddress("0x4592D8f8D7B001e72Cb26A73e4Fa1806a51aC79d"),
big.NewInt(0),
big.NewInt(1),
[]byte("testbridgecontract"),
)
assert.Equal(t, "0xda253c04595a49017bb54b1b46088c69752b5ad2f0c47971ac76b8b25abec202", hash.String())
}
func TestBufferToUint256Le(t *testing.T) {
input := []byte{
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}
expectedOutput := []*big.Int{big.NewInt(1)}
result := BufferToUint256Le(input)
assert.Equal(t, expectedOutput, result)
}

View File

@@ -1,7 +1,7 @@
package config package database
// DBConfig db config // Config db config
type DBConfig struct { type Config struct {
// data source name // data source name
DSN string `json:"dsn"` DSN string `json:"dsn"`
DriverName string `json:"driver_name"` DriverName string `json:"driver_name"`

80
common/database/db.go Normal file
View File

@@ -0,0 +1,80 @@
package database
import (
"context"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"gorm.io/gorm/utils"
)
// gormLogger adapts gorm's logger.Interface onto a geth log.Logger.
type gormLogger struct {
	gethLogger log.Logger
}

// LogMode implements logger.Interface. The requested level is ignored: the
// underlying geth logger controls verbosity itself.
func (g *gormLogger) LogMode(level logger.LogLevel) logger.Interface {
	return g
}

// Info formats and forwards an info-level message.
func (g *gormLogger) Info(_ context.Context, msg string, data ...interface{}) {
	g.gethLogger.Info("gorm", "info message", fmt.Sprintf(msg, data...))
}

// Warn formats and forwards a warn-level message.
func (g *gormLogger) Warn(_ context.Context, msg string, data ...interface{}) {
	g.gethLogger.Warn("gorm", "warn message", fmt.Sprintf(msg, data...))
}

// Error formats and forwards an error-level message.
func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
	g.gethLogger.Error("gorm", "err message", fmt.Sprintf(msg, data...))
}

// Trace logs one executed SQL statement at debug level with its call site,
// elapsed time, affected row count, and error (if any).
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
	// Measure before invoking fc, as the original did, so the cost covers
	// only the query execution window handed to us by gorm.
	cost := time.Since(begin)
	sqlText, affected := fc()
	g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", cost, "sql", sqlText, "rowsAffected", affected, "err", err)
}
// InitDB opens a gorm postgres handle from the given config, wires the geth
// root logger in as gorm's logger, applies the connection-pool limits, and
// verifies connectivity with a ping before returning the handle.
func InitDB(config *Config) (*gorm.DB, error) {
	tmpGormLogger := gormLogger{
		gethLogger: log.Root(),
	}
	db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
		Logger: &tmpGormLogger,
	})
	if err != nil {
		return nil, err
	}
	sqlDB, err := db.DB()
	if err != nil {
		return nil, err
	}
	sqlDB.SetMaxOpenConns(config.MaxOpenNum)
	sqlDB.SetMaxIdleConns(config.MaxIdleNum)
	if err = sqlDB.Ping(); err != nil {
		// Close the pool we just opened so a failed ping does not leak
		// connections (previously the pool was abandoned open).
		_ = sqlDB.Close()
		return nil, err
	}
	return db, nil
}
// CloseDB closes the underlying sql.DB connection pool. Note: the handle
// should only be closed when the program exits.
func CloseDB(db *gorm.DB) error {
	sqlDB, err := db.DB()
	if err != nil {
		return err
	}
	return sqlDB.Close()
}

View File

@@ -0,0 +1,35 @@
package database
import (
"context"
"errors"
"io"
"os"
"testing"
"time"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/scroll-tech/go-ethereum/log"
)
// TestGormLogger exercises every gormLogger method against a trace-level
// geth root logger writing to stderr (colorized when attached to a TTY).
// It is a smoke test: success means none of the calls panic.
func TestGormLogger(t *testing.T) {
	writer := io.Writer(os.Stderr)
	colorize := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
	if colorize {
		writer = colorable.NewColorableStderr()
	}
	handler := log.NewGlogHandler(log.StreamHandler(writer, log.TerminalFormat(colorize)))
	// Allow everything through so Trace's debug output is emitted too.
	handler.Verbosity(log.LvlTrace)
	log.Root().SetHandler(handler)

	gl := gormLogger{gethLogger: log.Root()}
	gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
	gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
	gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
	gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
}

View File

@@ -13,6 +13,8 @@ require (
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/stretchr/testify v1.8.2 github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
gorm.io/driver/postgres v1.5.0
gorm.io/gorm v1.25.1
) )
require ( require (
@@ -51,7 +53,12 @@ require (
github.com/influxdata/influxdb v1.8.3 // indirect github.com/influxdata/influxdb v1.8.3 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kr/pretty v0.3.1 // indirect github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect
@@ -93,7 +100,6 @@ require (
golang.org/x/text v0.10.0 // indirect golang.org/x/text v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.8.0 // indirect golang.org/x/tools v0.8.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect

View File

@@ -212,9 +212,20 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
@@ -240,6 +251,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -354,6 +366,7 @@ github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
@@ -393,6 +406,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -415,6 +429,7 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRT
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -432,6 +447,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -462,6 +479,7 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -485,6 +503,9 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -500,6 +521,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -537,21 +559,29 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -591,6 +621,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -662,6 +693,11 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64=
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=

View File

@@ -1898,7 +1898,7 @@ source = "git+https://github.com/scroll-tech/halo2-snark-aggregator?branch=scrol
dependencies = [ dependencies = [
"group", "group",
"halo2_proofs", "halo2_proofs",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)",
"num-bigint", "num-bigint",
"num-integer", "num-integer",
] ]
@@ -1929,7 +1929,7 @@ dependencies = [
"digest 0.10.3", "digest 0.10.3",
"group", "group",
"halo2_proofs", "halo2_proofs",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)",
"log", "log",
"num-bigint", "num-bigint",
"poseidon", "poseidon",
@@ -1947,7 +1947,7 @@ dependencies = [
"halo2-ecc-circuit-lib", "halo2-ecc-circuit-lib",
"halo2-snark-aggregator-api", "halo2-snark-aggregator-api",
"halo2_proofs", "halo2_proofs",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)",
"log", "log",
"rand 0.8.5", "rand 0.8.5",
"rand_core 0.6.3", "rand_core 0.6.3",
@@ -1966,7 +1966,7 @@ dependencies = [
"halo2-snark-aggregator-api", "halo2-snark-aggregator-api",
"halo2-snark-aggregator-circuit", "halo2-snark-aggregator-circuit",
"halo2_proofs", "halo2_proofs",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)",
"log", "log",
"num-bigint", "num-bigint",
"sha3 0.10.1", "sha3 0.10.1",
@@ -2015,7 +2015,7 @@ dependencies = [
"cfg-if 0.1.10", "cfg-if 0.1.10",
"ff", "ff",
"group", "group",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)",
"log", "log",
"num-bigint", "num-bigint",
"num-integer", "num-integer",
@@ -2030,7 +2030,7 @@ dependencies = [
[[package]] [[package]]
name = "halo2curves" name = "halo2curves"
version = "0.3.1" version = "0.3.1"
source = "git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1#9b67e19bca30a35208b0c1b41c1723771e2c9f49" source = "git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19"
dependencies = [ dependencies = [
"ff", "ff",
"group", "group",
@@ -3245,7 +3245,7 @@ version = "0.2.0"
source = "git+https://github.com/scroll-tech/poseidon.git?branch=scroll-dev-0220#2fb4a2385bada39b50dce12fe50cb80d2fd33476" source = "git+https://github.com/scroll-tech/poseidon.git?branch=scroll-dev-0220#2fb4a2385bada39b50dce12fe50cb80d2fd33476"
dependencies = [ dependencies = [
"group", "group",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)",
"subtle", "subtle",
] ]
@@ -4121,7 +4121,7 @@ source = "git+https://github.com/privacy-scaling-explorations/snark-verifier?tag
dependencies = [ dependencies = [
"ecc", "ecc",
"halo2_proofs", "halo2_proofs",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)",
"hex", "hex",
"itertools", "itertools",
"lazy_static", "lazy_static",
@@ -5051,7 +5051,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]] [[package]]
name = "types" name = "types"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/scroll-tech/scroll-zkevm?rev=78ab7a7#78ab7a770f7753fa88c8aab4969296f06e1c811a" source = "git+https://github.com/scroll-tech/scroll-prover?rev=78ab7a7#78ab7a770f7753fa88c8aab4969296f06e1c811a"
dependencies = [ dependencies = [
"base64 0.13.0", "base64 0.13.0",
"blake2", "blake2",
@@ -5727,7 +5727,7 @@ dependencies = [
[[package]] [[package]]
name = "zkevm" name = "zkevm"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/scroll-tech/scroll-zkevm?rev=78ab7a7#78ab7a770f7753fa88c8aab4969296f06e1c811a" source = "git+https://github.com/scroll-tech/scroll-prover?rev=78ab7a7#78ab7a770f7753fa88c8aab4969296f06e1c811a"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"blake2", "blake2",

View File

@@ -7,14 +7,17 @@ edition = "2021"
[lib] [lib]
crate-type = ["cdylib"] crate-type = ["cdylib"]
# `//` is used to skip https://github.com/rust-lang/cargo/issues/5478#issuecomment-522719793.
[patch."https://github.com/privacy-scaling-explorations/halo2curves.git"]
halo2curves = { git = 'https://github.com/privacy-scaling-explorations//halo2curves.git', rev = "9b67e19" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"] [patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "scroll-dev-0220" } halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "scroll-dev-0220" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"] [patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "scroll-dev-0220" } poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "scroll-dev-0220" }
[dependencies] [dependencies]
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", rev="78ab7a7" } zkevm = { git = "https://github.com/scroll-tech/scroll-prover", rev="78ab7a7" }
types = { git = "https://github.com/scroll-tech/scroll-zkevm", rev="78ab7a7" } types = { git = "https://github.com/scroll-tech/scroll-prover", rev="78ab7a7" }
halo2_proofs = { git = "https://github.com/privacy-scaling-explorations/halo2.git", tag = "v2022_09_10" } halo2_proofs = { git = "https://github.com/privacy-scaling-explorations/halo2.git", tag = "v2022_09_10" }
log = "0.4" log = "0.4"
@@ -32,4 +35,4 @@ debug-assertions = true
[profile.release] [profile.release]
opt-level = 3 opt-level = 3
debug-assertions = true debug-assertions = true

View File

@@ -1,5 +1,5 @@
init_prover(char *params_path, char *seed_path); void init_prover(char *params_path, char *seed_path);
char* create_agg_proof(char *trace); char* create_agg_proof(char *trace);
char* create_agg_proof_multi(char *trace); char* create_agg_proof_multi(char *trace);
init_verifier(char *params_path, char *agg_vk_path); void init_verifier(char *params_path, char *agg_vk_path);
char verify_agg_proof(char *proof); char verify_agg_proof(char *proof);

View File

@@ -1,236 +0,0 @@
package types
import (
"bufio"
"bytes"
"encoding/binary"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
abi "scroll-tech/bridge/abi"
)
// PublicInputHashConfig is the configuration of how to compute the public input hash.
type PublicInputHashConfig struct {
	// MaxTxNum is the number of tx-hash slots hashed per batch; batches with
	// fewer txs are padded with PaddingTxHash up to this count (see Hash).
	MaxTxNum int `json:"max_tx_num"`
	// PaddingTxHash is the hash written into each unused tx slot.
	PaddingTxHash common.Hash `json:"padding_tx_hash"`
}

// defaultMaxTxNum is the tx-slot count used by Hash when piCfg is nil.
const defaultMaxTxNum = 44

// defaultPaddingTxHash is the all-zero padding hash used when piCfg is nil.
var defaultPaddingTxHash = [32]byte{}

// BatchData contains info of batch to be committed.
type BatchData struct {
	// Batch is the on-chain batch representation (contract ABI struct).
	Batch abi.IScrollChainBatch
	// TxHashes holds the hash of every L2 transaction included in the batch.
	TxHashes []common.Hash
	// TotalTxNum is the total transaction count across all blocks.
	TotalTxNum uint64
	// TotalL1TxNum — presumably the count of L1 message txs; it is not
	// populated anywhere in this file (TODO confirm where it is set).
	TotalL1TxNum uint64
	// TotalL2Gas is the sum of GasUsed over all block headers.
	TotalL2Gas uint64

	// cache for the BatchHash
	hash *common.Hash

	// The config to compute the public input hash, or the block hash.
	// If it is nil, the hash calculation will use `defaultMaxTxNum` and `defaultPaddingTxHash`.
	piCfg *PublicInputHashConfig
}
// Timestamp returns the timestamp of the first block in this batch,
// or 0 when the batch contains no blocks.
func (b *BatchData) Timestamp() uint64 {
	blocks := b.Batch.Blocks
	if len(blocks) > 0 {
		return blocks[0].Timestamp
	}
	return 0
}
// Hash calculates the hash of this batch.
// The digest is a Keccak hash over: the three state roots, every block
// context (fixed-width big-endian fields), and the tx-hash list padded to a
// fixed slot count. The result is cached in b.hash; clear the cache before
// recomputing after mutating Batch or TxHashes.
func (b *BatchData) Hash() *common.Hash {
	if b.hash != nil {
		return b.hash
	}

	// buf is a scratch buffer for fixed-width big-endian integer encoding.
	buf := make([]byte, 8)
	hasher := crypto.NewKeccakState()

	// 1. hash PrevStateRoot, NewStateRoot, WithdrawTrieRoot
	// @todo: panic on error here.
	_, _ = hasher.Write(b.Batch.PrevStateRoot[:])
	_, _ = hasher.Write(b.Batch.NewStateRoot[:])
	_, _ = hasher.Write(b.Batch.WithdrawTrieRoot[:])

	// 2. hash all block contexts
	for _, block := range b.Batch.Blocks {
		// write BlockHash & ParentHash
		_, _ = hasher.Write(block.BlockHash[:])
		_, _ = hasher.Write(block.ParentHash[:])
		// write BlockNumber (8 bytes, big-endian)
		binary.BigEndian.PutUint64(buf, block.BlockNumber)
		_, _ = hasher.Write(buf)
		// write Timestamp (8 bytes, big-endian)
		binary.BigEndian.PutUint64(buf, block.Timestamp)
		_, _ = hasher.Write(buf)
		// write BaseFee as a right-aligned 32-byte value; a nil base fee
		// hashes as 32 zero bytes.
		var baseFee [32]byte
		if block.BaseFee != nil {
			baseFee = newByte32FromBytes(block.BaseFee.Bytes())
		}
		_, _ = hasher.Write(baseFee[:])
		// write GasLimit (8 bytes, big-endian)
		binary.BigEndian.PutUint64(buf, block.GasLimit)
		_, _ = hasher.Write(buf)
		// write NumTransactions (2 bytes, big-endian)
		binary.BigEndian.PutUint16(buf[:2], block.NumTransactions)
		_, _ = hasher.Write(buf[:2])
		// write NumL1Messages (2 bytes, big-endian)
		binary.BigEndian.PutUint16(buf[:2], block.NumL1Messages)
		_, _ = hasher.Write(buf[:2])
	}

	// 3. add all tx hashes
	for _, txHash := range b.TxHashes {
		_, _ = hasher.Write(txHash[:])
	}

	// 4. append empty tx hash up to MaxTxNum
	// Padding keeps the pre-image length independent of the actual tx count.
	maxTxNum := defaultMaxTxNum
	paddingTxHash := common.Hash(defaultPaddingTxHash)
	if b.piCfg != nil {
		maxTxNum = b.piCfg.MaxTxNum
		paddingTxHash = b.piCfg.PaddingTxHash
	}
	for i := len(b.TxHashes); i < maxTxNum; i++ {
		_, _ = hasher.Write(paddingTxHash[:])
	}

	b.hash = new(common.Hash)
	_, _ = hasher.Read(b.hash[:])
	return b.hash
}
// NewBatchData creates a BatchData given the parent batch information and the traces of the blocks
// included in this batch. It fills in the on-chain block contexts, serializes
// every L2 transaction as a 4-byte big-endian length prefix followed by its
// RLP encoding, and records the per-batch totals.
func NewBatchData(parentBatch *BlockBatch, blocks []*WrappedBlock, piCfg *PublicInputHashConfig) *BatchData {
	batchData := new(BatchData)
	batch := &batchData.Batch

	// set BatchIndex, ParentBatchHash
	batch.BatchIndex = parentBatch.Index + 1
	batch.ParentBatchHash = common.HexToHash(parentBatch.Hash)
	batch.Blocks = make([]abi.IScrollChainBlockContext, len(blocks))

	// Accumulates the length-prefixed RLP encodings of all L2 transactions.
	var batchTxDataBuf bytes.Buffer
	batchTxDataWriter := bufio.NewWriter(&batchTxDataBuf)

	for i, block := range blocks {
		batchData.TotalTxNum += uint64(len(block.Transactions))
		batchData.TotalL2Gas += block.Header.GasUsed

		// set baseFee to 0 when it's nil in the block header
		baseFee := block.Header.BaseFee
		if baseFee == nil {
			baseFee = big.NewInt(0)
		}

		batch.Blocks[i] = abi.IScrollChainBlockContext{
			BlockHash:       block.Header.Hash(),
			ParentHash:      block.Header.ParentHash,
			BlockNumber:     block.Header.Number.Uint64(),
			Timestamp:       block.Header.Time,
			BaseFee:         baseFee,
			GasLimit:        block.Header.GasLimit,
			NumTransactions: uint16(len(block.Transactions)),
			NumL1Messages:   0, // TODO: currently use 0, will re-enable after we use l2geth to include L1 messages
		}

		// fill in RLP-encoded transactions
		for _, txData := range block.Transactions {
			// NOTE(review): decode and marshal errors are silently ignored;
			// a malformed tx contributes empty data — confirm this is intended.
			data, _ := hexutil.Decode(txData.Data)
			// right now we only support legacy tx
			tx := types.NewTx(&types.LegacyTx{
				Nonce:    txData.Nonce,
				To:       txData.To,
				Value:    txData.Value.ToInt(),
				Gas:      txData.Gas,
				GasPrice: txData.GasPrice.ToInt(),
				Data:     data,
				V:        txData.V.ToInt(),
				R:        txData.R.ToInt(),
				S:        txData.S.ToInt(),
			})
			rlpTxData, _ := tx.MarshalBinary()
			// Each tx is stored as: 4-byte big-endian length, then RLP bytes.
			var txLen [4]byte
			binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
			_, _ = batchTxDataWriter.Write(txLen[:])
			_, _ = batchTxDataWriter.Write(rlpTxData)
			batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
		}

		if i == 0 {
			batch.PrevStateRoot = common.HexToHash(parentBatch.StateRoot)
		}

		// set NewStateRoot & WithdrawTrieRoot from the last block
		if i == len(blocks)-1 {
			batch.NewStateRoot = block.Header.Root
			batch.WithdrawTrieRoot = block.WithdrawTrieRoot
		}
	}

	if err := batchTxDataWriter.Flush(); err != nil {
		panic("Buffered I/O flush failed")
	}

	batch.L2Transactions = batchTxDataBuf.Bytes()
	batchData.piCfg = piCfg

	return batchData
}
// NewGenesisBatchData generates the batch that contains the genesis block.
// It panics when the supplied trace is not for block number 0.
func NewGenesisBatchData(genesisBlockTrace *WrappedBlock) *BatchData {
	hdr := genesisBlockTrace.Header
	if hdr.Number.Uint64() != 0 {
		panic("invalid genesis block trace: block number is not 0")
	}

	result := new(BatchData)
	// Genesis batch: index 0; PrevStateRoot, WithdrawTrieRoot and
	// ParentBatchHash stay zero, and L2Transactions stays empty.
	result.Batch.BatchIndex = 0
	result.Batch.NewStateRoot = hdr.Root
	// The single block context is built from the genesis header, with no txs.
	result.Batch.Blocks = []abi.IScrollChainBlockContext{{
		BlockHash:       hdr.Hash(),
		ParentHash:      hdr.ParentHash,
		BlockNumber:     hdr.Number.Uint64(),
		Timestamp:       hdr.Time,
		BaseFee:         hdr.BaseFee,
		GasLimit:        hdr.GasLimit,
		NumTransactions: 0,
		NumL1Messages:   0,
	}}
	return result
}
// newByte32FromBytes right-aligns b into a fixed 32-byte array (big-endian).
// Inputs longer than 32 bytes keep only their trailing (least-significant)
// 32 bytes; shorter inputs are zero-padded on the left.
func newByte32FromBytes(b []byte) [32]byte {
	var out [32]byte
	src := b
	if n := len(src); n > 32 {
		src = src[n-32:]
	}
	copy(out[32-len(src):], src)
	return out
}

View File

@@ -11,7 +11,7 @@ import (
func TestNewBatchHeader(t *testing.T) { func TestNewBatchHeader(t *testing.T) {
// Without L1 Msg // Without L1 Msg
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock := &WrappedBlock{} wrappedBlock := &WrappedBlock{}
@@ -36,7 +36,7 @@ func TestNewBatchHeader(t *testing.T) {
assert.Equal(t, 0, len(batchHeader.skippedL1MessageBitmap)) assert.Equal(t, 0, len(batchHeader.skippedL1MessageBitmap))
// 1 L1 Msg in 1 bitmap // 1 L1 Msg in 1 bitmap
templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json") templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{} wrappedBlock2 := &WrappedBlock{}
@@ -54,7 +54,7 @@ func TestNewBatchHeader(t *testing.T) {
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap)) assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs // many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs
templateBlockTrace3, err := os.ReadFile("../../../common/testdata/blockTrace_05.json") templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_05.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock3 := &WrappedBlock{} wrappedBlock3 := &WrappedBlock{}
@@ -87,7 +87,7 @@ func TestNewBatchHeader(t *testing.T) {
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap)) assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many sparse L1 Msgs in 1 bitmap // many sparse L1 Msgs in 1 bitmap
templateBlockTrace4, err := os.ReadFile("../../../common/testdata/blockTrace_06.json") templateBlockTrace4, err := os.ReadFile("../testdata/blockTrace_06.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock4 := &WrappedBlock{} wrappedBlock4 := &WrappedBlock{}
@@ -106,7 +106,7 @@ func TestNewBatchHeader(t *testing.T) {
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap)) assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many L1 Msgs in each of 2 bitmaps // many L1 Msgs in each of 2 bitmaps
templateBlockTrace5, err := os.ReadFile("../../../common/testdata/blockTrace_07.json") templateBlockTrace5, err := os.ReadFile("../testdata/blockTrace_07.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock5 := &WrappedBlock{} wrappedBlock5 := &WrappedBlock{}
@@ -127,7 +127,7 @@ func TestNewBatchHeader(t *testing.T) {
func TestBatchHeaderEncode(t *testing.T) { func TestBatchHeaderEncode(t *testing.T) {
// Without L1 Msg // Without L1 Msg
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock := &WrappedBlock{} wrappedBlock := &WrappedBlock{}
@@ -154,7 +154,7 @@ func TestBatchHeaderEncode(t *testing.T) {
assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes)) assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes))
// With L1 Msg // With L1 Msg
templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json") templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{} wrappedBlock2 := &WrappedBlock{}
@@ -174,7 +174,7 @@ func TestBatchHeaderEncode(t *testing.T) {
func TestBatchHeaderHash(t *testing.T) { func TestBatchHeaderHash(t *testing.T) {
// Without L1 Msg // Without L1 Msg
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock := &WrappedBlock{} wrappedBlock := &WrappedBlock{}
@@ -199,7 +199,7 @@ func TestBatchHeaderHash(t *testing.T) {
hash := batchHeader.Hash() hash := batchHeader.Hash()
assert.Equal(t, "d69da4357da0073f4093c76e49f077e21bb52f48f57ee3e1fbd9c38a2881af81", common.Bytes2Hex(hash.Bytes())) assert.Equal(t, "d69da4357da0073f4093c76e49f077e21bb52f48f57ee3e1fbd9c38a2881af81", common.Bytes2Hex(hash.Bytes()))
templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json") templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{} wrappedBlock2 := &WrappedBlock{}
@@ -216,7 +216,7 @@ func TestBatchHeaderHash(t *testing.T) {
assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes())) assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes()))
// With L1 Msg // With L1 Msg
templateBlockTrace3, err := os.ReadFile("../../../common/testdata/blockTrace_04.json") templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock3 := &WrappedBlock{} wrappedBlock3 := &WrappedBlock{}

View File

@@ -1,143 +0,0 @@
package types
import (
"encoding/json"
"math/big"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
abi "scroll-tech/bridge/abi"
)
// TestBatchHash verifies BatchData.Hash against two golden vectors and checks
// that the hash is recomputed after the cache is cleared.
func TestBatchHash(t *testing.T) {
	// A raw dynamic-fee (type 0x02) tx; only its hash feeds the batch hash.
	txBytes := common.Hex2Bytes("02f8710582fd14808506e38dccc9825208944d496ccc28058b1d74b7a19541663e21154f9c848801561db11e24a43380c080a0d890606d7a35b2ab0f9b866d62c092d5b163f3e6a55537ae1485aac08c3f8ff7a023997be2d32f53e146b160fff0ba81e81dbb4491c865ab174d15c5b3d28c41ae")
	tx := new(geth_types.Transaction)
	if err := tx.UnmarshalBinary(txBytes); err != nil {
		t.Fatalf("invalid tx hex string: %s", err)
	}

	batchData := new(BatchData)
	batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
	// Fixed padding config so the expected hashes below are stable.
	batchData.piCfg = &PublicInputHashConfig{
		MaxTxNum:      4,
		PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
	}

	batch := &batchData.Batch
	batch.PrevStateRoot = common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000cafe")
	block := abi.IScrollChainBlockContext{
		BlockNumber:     51966,
		Timestamp:       123456789,
		BaseFee:         new(big.Int).SetUint64(0),
		GasLimit:        10000000000000000,
		NumTransactions: 1,
		NumL1Messages:   0,
	}
	batch.Blocks = append(batch.Blocks, block)

	hash := batchData.Hash()
	assert.Equal(t, *hash, common.HexToHash("0xa9f2ca3175794f91226a410ba1e60fff07a405c957562675c4149b77e659d805"))

	// use a different tx hash
	txBytes = common.Hex2Bytes("f8628001830f424094000000000000000000000000000000000000bbbb8080820a97a064e07cd8f939e2117724bdcbadc80dda421381cbc2a1f4e0d093d9cc5c5cf68ea03e264227f80852d88743cd9e43998f2746b619180366a87e4531debf9c3fa5dc")
	tx = new(geth_types.Transaction)
	if err := tx.UnmarshalBinary(txBytes); err != nil {
		t.Fatalf("invalid tx hex string: %s", err)
	}
	batchData.TxHashes[0] = tx.Hash()
	batchData.hash = nil // clear the cache
	assert.Equal(t, *batchData.Hash(), common.HexToHash("0x398cb22bbfa1665c1b342b813267538a4c933d7f92d8bd9184aba0dd1122987b"))
}
// TestNewGenesisBatch verifies NewGenesisBatchData against a fixed genesis
// header: both the header hash and the resulting batch hash are pinned.
func TestNewGenesisBatch(t *testing.T) {
	genesisBlock := &geth_types.Header{
		UncleHash:   common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		Root:        common.HexToHash("0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5"),
		TxHash:      common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
		ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
		Difficulty:  big.NewInt(1),
		Number:      big.NewInt(0),
		GasLimit:    940000000,
		GasUsed:     0,
		Time:        1639724192,
		Extra:       common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000214f8d488aa9ebf83e30bad45fb8f9c8ee2509f5511caff794753d07e9dfb218cfc233bb62d2c57022783094e1a7edb6f069f8424bb68496a0926b130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
		BaseFee:     big.NewInt(1000000000),
	}
	// Sanity-check the fixture before testing the batch itself.
	assert.Equal(
		t,
		genesisBlock.Hash().Hex(),
		"0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
		"wrong genesis block header",
	)

	blockTrace := &WrappedBlock{genesisBlock, nil, common.Hash{}}
	batchData := NewGenesisBatchData(blockTrace)
	t.Log(batchData.Batch.Blocks[0])
	// Fixed padding config so the expected batch hash is stable.
	batchData.piCfg = &PublicInputHashConfig{
		MaxTxNum:      25,
		PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
	}
	assert.Equal(
		t,
		batchData.Hash().Hex(),
		"0x65cf210e30f75cf8fd198df124255f73bc08d6324759e828a784fa938e7ac43d",
		"wrong genesis batch hash",
	)
}
// TestNewBatchData builds two consecutive batches from recorded block traces
// and pins their hashes, with the second batch chained onto the first.
func TestNewBatchData(t *testing.T) {
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)
	wrappedBlock := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))

	parentBatch := &BlockBatch{
		Index:     1,
		Hash:      "0x0000000000000000000000000000000000000000",
		StateRoot: "0x0000000000000000000000000000000000000000",
	}
	batchData1 := NewBatchData(parentBatch, []*WrappedBlock{wrappedBlock}, nil)
	assert.NotNil(t, batchData1)
	assert.NotNil(t, batchData1.Batch)
	assert.Equal(t, "0xac4487c0d8f429dafda3c68cbb8983ac08af83c03c83c365d7df02864f80af37", batchData1.Hash().Hex())

	templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
	assert.NoError(t, err)
	wrappedBlock2 := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock2))

	// The second parent is derived from batch 1, exercising the chaining of
	// BatchIndex, ParentBatchHash and PrevStateRoot.
	parentBatch2 := &BlockBatch{
		Index:     batchData1.Batch.BatchIndex,
		Hash:      batchData1.Hash().Hex(),
		StateRoot: batchData1.Batch.NewStateRoot.Hex(),
	}
	batchData2 := NewBatchData(parentBatch2, []*WrappedBlock{wrappedBlock2}, nil)
	assert.NotNil(t, batchData2)
	assert.NotNil(t, batchData2.Batch)
	assert.Equal(t, "0x8f1447573740b3e75b979879866b8ad02eecf88e1946275eb8cf14ab95876efc", batchData2.Hash().Hex())
}
// TestBatchDataTimestamp checks BatchData.Timestamp for the empty-batch case
// and confirms that the first block's timestamp is returned otherwise.
func TestBatchDataTimestamp(t *testing.T) {
	// Test case 1: when the batch data contains no blocks.
	assert.Equal(t, uint64(0), (&BatchData{}).Timestamp())

	// Test case 2: when the batch data contains blocks.
	batchData := &BatchData{
		Batch: abi.IScrollChainBatch{
			Blocks: []abi.IScrollChainBlockContext{
				{Timestamp: 123456789},
				{Timestamp: 234567891},
			},
		},
	}
	assert.Equal(t, uint64(123456789), batchData.Timestamp())
}

View File

@@ -6,9 +6,13 @@ import (
"math" "math"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/core/types"
) )
const nonZeroByteGas uint64 = 16
const zeroByteGas uint64 = 4
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash. // WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct { type WrappedBlock struct {
Header *types.Header `json:"header"` Header *types.Header `json:"header"`
@@ -22,7 +26,7 @@ type WrappedBlock struct {
func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 { func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
var lastQueueIndex *uint64 var lastQueueIndex *uint64
for _, txData := range w.Transactions { for _, txData := range w.Transactions {
if txData.Type == 0x7E { if txData.Type == types.L1MessageTxType {
lastQueueIndex = &txData.Nonce lastQueueIndex = &txData.Nonce
} }
} }
@@ -59,3 +63,74 @@ func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error)
return bytes, nil return bytes, nil
} }
// EstimateL1CommitCalldataSize calculates the calldata size in l1 commit approximately.
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
// This needs to be adjusted in the future.
func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
	var total uint64
	for _, tx := range w.Transactions {
		// L1 message transactions are not included in the commit calldata.
		if tx.Type != types.L1MessageTxType {
			total += uint64(len(tx.Data))
		}
	}
	return total
}
// EstimateL1CommitGas calculates the calldata gas in l1 commit approximately.
// TODO: This will need to be adjusted.
// The part added here is only the calldata cost,
// but we have execution cost for verifying blocks / chunks / batches and storing the batch hash.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
	// calldataGas returns the intrinsic calldata cost of data: zeroByteGas
	// per zero byte and nonZeroByteGas per non-zero byte (the 4/16 split of
	// EIP-2028). Extracted to avoid duplicating the loop below.
	calldataGas := func(data []byte) uint64 {
		var g uint64
		for _, b := range data {
			if b == 0 {
				g += zeroByteGas
			} else {
				g += nonZeroByteGas
			}
		}
		return g
	}

	var total uint64
	for _, txData := range w.Transactions {
		// L1 message txs are not part of the commit calldata.
		if txData.Type == types.L1MessageTxType {
			continue
		}
		// NOTE(review): decode/marshal errors are silently ignored (same as
		// before); a malformed tx contributes only its 4-byte length prefix.
		data, _ := hexutil.Decode(txData.Data)
		// right now we only support legacy tx
		tx := types.NewTx(&types.LegacyTx{
			Nonce:    txData.Nonce,
			To:       txData.To,
			Value:    txData.Value.ToInt(),
			Gas:      txData.Gas,
			GasPrice: txData.GasPrice.ToInt(),
			Data:     data,
			V:        txData.V.ToInt(),
			R:        txData.R.ToInt(),
			S:        txData.S.ToInt(),
		})
		rlpTxData, _ := tx.MarshalBinary()
		total += calldataGas(rlpTxData)

		// The 4-byte big-endian length prefix stored before each tx payload
		// is committed as calldata too.
		var txLen [4]byte
		binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
		total += calldataGas(txLen[:])
	}
	return total
}
// L2TxsNum calculates the number of l2 txs.
func (w *WrappedBlock) L2TxsNum() uint64 {
	var n uint64
	for _, tx := range w.Transactions {
		// Skip L1 message txs; only plain L2 transactions are counted.
		if tx.Type == types.L1MessageTxType {
			continue
		}
		n++
	}
	return n
}

View File

@@ -32,7 +32,7 @@ func TestChunkEncode(t *testing.T) {
assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte") assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte")
// Test case 3: when the chunk contains one block. // Test case 3: when the chunk contains one block.
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock := &WrappedBlock{} wrappedBlock := &WrappedBlock{}
@@ -50,7 +50,7 @@ func TestChunkEncode(t *testing.T) {
assert.Equal(t, "0100000000000000020000000063807b2a0000000000000000000000000000000000000000000000000000000000000000000355418d1e81840002000000000073f87180843b9aec2e8307a12094c0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca28a152d02c7e14af60000008083019ecea0ab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514a034cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b00000073f87101843b9aec2e8307a1209401bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed8a152d02c7e14af60000008083019ecea0f039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316a05a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1", hexString) assert.Equal(t, "0100000000000000020000000063807b2a0000000000000000000000000000000000000000000000000000000000000000000355418d1e81840002000000000073f87180843b9aec2e8307a12094c0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca28a152d02c7e14af60000008083019ecea0ab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514a034cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b00000073f87101843b9aec2e8307a1209401bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed8a152d02c7e14af60000008083019ecea0f039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316a05a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1", hexString)
// Test case 4: when the chunk contains one block with 1 L1MsgTx // Test case 4: when the chunk contains one block with 1 L1MsgTx
templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json") templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{} wrappedBlock2 := &WrappedBlock{}
@@ -92,7 +92,7 @@ func TestChunkHash(t *testing.T) {
assert.Contains(t, err.Error(), "number of blocks is 0") assert.Contains(t, err.Error(), "number of blocks is 0")
// Test case 2: successfully hashing a chunk on one block // Test case 2: successfully hashing a chunk on one block
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock := &WrappedBlock{} wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock)) assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
@@ -106,7 +106,7 @@ func TestChunkHash(t *testing.T) {
assert.Equal(t, "0x78c839dfc494396c16b40946f32b3f4c3e8c2d4bfd04aefcf235edec474482f8", hash.Hex()) assert.Equal(t, "0x78c839dfc494396c16b40946f32b3f4c3e8c2d4bfd04aefcf235edec474482f8", hash.Hex())
// Test case 3: successfully hashing a chunk on two blocks // Test case 3: successfully hashing a chunk on two blocks
templateBlockTrace1, err := os.ReadFile("../../../common/testdata/blockTrace_03.json") templateBlockTrace1, err := os.ReadFile("../testdata/blockTrace_03.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock1 := &WrappedBlock{} wrappedBlock1 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace1, wrappedBlock1)) assert.NoError(t, json.Unmarshal(templateBlockTrace1, wrappedBlock1))
@@ -121,7 +121,7 @@ func TestChunkHash(t *testing.T) {
assert.Equal(t, "0xaa9e494f72bc6965857856f0fae6916f27b2a6591c714a573b2fab46df03b8ae", hash.Hex()) assert.Equal(t, "0xaa9e494f72bc6965857856f0fae6916f27b2a6591c714a573b2fab46df03b8ae", hash.Hex())
// Test case 4: successfully hashing a chunk on two blocks each with L1 and L2 txs // Test case 4: successfully hashing a chunk on two blocks each with L1 and L2 txs
templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json") templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{} wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2)) assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))

View File

@@ -2,17 +2,12 @@
package types package types
import ( import (
"database/sql"
"fmt" "fmt"
"time"
) )
// L1BlockStatus represents current l1 block processing status // L1BlockStatus represents current l1 block processing status
type L1BlockStatus int type L1BlockStatus int
// GasOracleStatus represents current gas oracle processing status
type GasOracleStatus int
const ( const (
// L1BlockUndefined : undefined l1 block status // L1BlockUndefined : undefined l1 block status
L1BlockUndefined L1BlockStatus = iota L1BlockUndefined L1BlockStatus = iota
@@ -30,6 +25,9 @@ const (
L1BlockFailed L1BlockFailed
) )
// GasOracleStatus represents current gas oracle processing status
type GasOracleStatus int
const ( const (
// GasOracleUndefined : undefined gas oracle status // GasOracleUndefined : undefined gas oracle status
GasOracleUndefined GasOracleStatus = iota GasOracleUndefined GasOracleStatus = iota
@@ -47,18 +45,21 @@ const (
GasOracleFailed GasOracleFailed
) )
// L1BlockInfo is structure of stored l1 block func (s GasOracleStatus) String() string {
type L1BlockInfo struct { switch s {
Number uint64 `json:"number" db:"number"` case GasOracleUndefined:
Hash string `json:"hash" db:"hash"` return "GasOracleUndefined"
HeaderRLP string `json:"header_rlp" db:"header_rlp"` case GasOraclePending:
BaseFee uint64 `json:"base_fee" db:"base_fee"` return "GasOraclePending"
case GasOracleImporting:
BlockStatus L1BlockStatus `json:"block_status" db:"block_status"` return "GasOracleImporting"
GasOracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"` case GasOracleImported:
return "GasOracleImported"
ImportTxHash sql.NullString `json:"import_tx_hash" db:"import_tx_hash"` case GasOracleFailed:
OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"` return "GasOracleFailed"
default:
return fmt.Sprintf("Undefined (%d)", int32(s))
}
} }
// MsgStatus represents current layer1 transaction processing status // MsgStatus represents current layer1 transaction processing status
@@ -87,50 +88,14 @@ const (
MsgRelayFailed MsgRelayFailed
) )
// L1Message is structure of stored layer1 bridge message
type L1Message struct {
QueueIndex uint64 `json:"queue_index" db:"queue_index"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Value string `json:"value" db:"value"`
Target string `json:"target" db:"target"`
Calldata string `json:"calldata" db:"calldata"`
GasLimit uint64 `json:"gas_limit" db:"gas_limit"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Status MsgStatus `json:"status" db:"status"`
}
// L2Message is structure of stored layer2 bridge message
type L2Message struct {
Nonce uint64 `json:"nonce" db:"nonce"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Value string `json:"value" db:"value"`
Target string `json:"target" db:"target"`
Calldata string `json:"calldata" db:"calldata"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Status MsgStatus `json:"status" db:"status"`
}
// BlockInfo is structure of stored `block_trace` without `trace`
type BlockInfo struct {
Number uint64 `json:"number" db:"number"`
Hash string `json:"hash" db:"hash"`
ParentHash string `json:"parent_hash" db:"parent_hash"`
BatchHash sql.NullString `json:"batch_hash" db:"batch_hash"`
TxNum uint64 `json:"tx_num" db:"tx_num"`
GasUsed uint64 `json:"gas_used" db:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" db:"block_timestamp"`
}
// RollerProveStatus is the roller prove status of a block batch (session) // RollerProveStatus is the roller prove status of a block batch (session)
type RollerProveStatus int32 type RollerProveStatus int32
const ( const (
// RollerProveStatusUndefined indicates an unknown roller proving status
RollerProveStatusUndefined RollerProveStatus = iota
// RollerAssigned indicates roller assigned but has not submitted proof // RollerAssigned indicates roller assigned but has not submitted proof
RollerAssigned RollerProveStatus = iota RollerAssigned
// RollerProofValid indicates roller has submitted valid proof // RollerProofValid indicates roller has submitted valid proof
RollerProofValid RollerProofValid
// RollerProofInvalid indicates roller has submitted invalid proof // RollerProofInvalid indicates roller has submitted invalid proof
@@ -150,27 +115,19 @@ func (s RollerProveStatus) String() string {
} }
} }
// RollerStatus is the roller name and roller prove status // RollerFailureType is the type of a roller session's failure
type RollerStatus struct { type RollerFailureType int
PublicKey string `json:"public_key"`
Name string `json:"name"`
Status RollerProveStatus `json:"status"`
}
// SessionInfo is assigned rollers info of a block batch (session) const (
type SessionInfo struct { // RollerFailureTypeUndefined indicates an unknown roller failure type
ID int `json:"id" db:"id"` RollerFailureTypeUndefined RollerFailureType = iota
TaskID string `json:"task_id" db:"task_id"` )
RollerPublicKey string `json:"roller_public_key" db:"roller_public_key"`
ProveType int16 `json:"prove_type" db:"prove_type"` func (s RollerFailureType) String() string {
RollerName string `json:"roller_name" db:"roller_name"` switch s {
ProvingStatus int16 `json:"proving_status" db:"proving_status"` default:
FailureType int16 `json:"failure_type" db:"failure_type"` return fmt.Sprintf("Undefined (%d)", int32(s))
Reward uint64 `json:"reward" db:"reward"` }
Proof []byte `json:"proof" db:"proof"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
} }
// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted) // ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
@@ -181,8 +138,6 @@ const (
ProvingStatusUndefined ProvingStatus = iota ProvingStatusUndefined ProvingStatus = iota
// ProvingTaskUnassigned : proving_task is not assigned to be proved // ProvingTaskUnassigned : proving_task is not assigned to be proved
ProvingTaskUnassigned ProvingTaskUnassigned
// ProvingTaskSkipped : proving_task is skipped for proof generation
ProvingTaskSkipped
// ProvingTaskAssigned : proving_task is assigned to be proved // ProvingTaskAssigned : proving_task is assigned to be proved
ProvingTaskAssigned ProvingTaskAssigned
// ProvingTaskProved : proof has been returned by prover // ProvingTaskProved : proof has been returned by prover
@@ -197,8 +152,6 @@ func (ps ProvingStatus) String() string {
switch ps { switch ps {
case ProvingTaskUnassigned: case ProvingTaskUnassigned:
return "unassigned" return "unassigned"
case ProvingTaskSkipped:
return "skipped"
case ProvingTaskAssigned: case ProvingTaskAssigned:
return "assigned" return "assigned"
case ProvingTaskProved: case ProvingTaskProved:
@@ -208,7 +161,32 @@ func (ps ProvingStatus) String() string {
case ProvingTaskFailed: case ProvingTaskFailed:
return "failed" return "failed"
default: default:
return "undefined" return fmt.Sprintf("Undefined (%d)", int32(ps))
}
}
// ChunkProofsStatus describes the proving status of chunks that belong to a batch.
type ChunkProofsStatus int
const (
// ChunkProofsStatusUndefined represents an undefined chunk proofs status
ChunkProofsStatusUndefined ChunkProofsStatus = iota
// ChunkProofsStatusPending means that some chunks that belong to this batch have not been proven
ChunkProofsStatusPending
// ChunkProofsStatusReady means that all chunks that belong to this batch have been proven
ChunkProofsStatusReady
)
func (s ChunkProofsStatus) String() string {
switch s {
case ChunkProofsStatusPending:
return "ChunkProofsStatusPending"
case ChunkProofsStatusReady:
return "ChunkProofsStatusReady"
default:
return fmt.Sprintf("Undefined (%d)", int32(s))
} }
} }
@@ -228,51 +206,29 @@ const (
RollupFinalizing RollupFinalizing
// RollupFinalized : finalize transaction is confirmed to layer1 // RollupFinalized : finalize transaction is confirmed to layer1
RollupFinalized RollupFinalized
// RollupFinalizationSkipped : batch finalization is skipped
RollupFinalizationSkipped
// RollupCommitFailed : rollup commit transaction confirmed but failed // RollupCommitFailed : rollup commit transaction confirmed but failed
RollupCommitFailed RollupCommitFailed
// RollupFinalizeFailed : rollup finalize transaction is confirmed but failed // RollupFinalizeFailed : rollup finalize transaction is confirmed but failed
RollupFinalizeFailed RollupFinalizeFailed
) )
// BlockBatch is structure of stored block_batch func (s RollupStatus) String() string {
type BlockBatch struct { switch s {
Hash string `json:"hash" db:"hash"` case RollupPending:
Index uint64 `json:"index" db:"index"` return "RollupPending"
ParentHash string `json:"parent_hash" db:"parent_hash"` case RollupCommitting:
StartBlockNumber uint64 `json:"start_block_number" db:"start_block_number"` return "RollupCommitting"
StartBlockHash string `json:"start_block_hash" db:"start_block_hash"` case RollupCommitted:
EndBlockNumber uint64 `json:"end_block_number" db:"end_block_number"` return "RollupCommitted"
EndBlockHash string `json:"end_block_hash" db:"end_block_hash"` case RollupFinalizing:
StateRoot string `json:"state_root" db:"state_root"` return "RollupFinalizing"
TotalTxNum uint64 `json:"total_tx_num" db:"total_tx_num"` case RollupFinalized:
TotalL1TxNum uint64 `json:"total_l1_tx_num" db:"total_l1_tx_num"` return "RollupFinalized"
TotalL2Gas uint64 `json:"total_l2_gas" db:"total_l2_gas"` case RollupCommitFailed:
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"` return "RollupCommitFailed"
Proof []byte `json:"proof" db:"proof"` case RollupFinalizeFailed:
ProofTimeSec uint64 `json:"proof_time_sec" db:"proof_time_sec"` return "RollupFinalizeFailed"
RollupStatus RollupStatus `json:"rollup_status" db:"rollup_status"` default:
OracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"` return fmt.Sprintf("Undefined (%d)", int32(s))
CommitTxHash sql.NullString `json:"commit_tx_hash" db:"commit_tx_hash"` }
FinalizeTxHash sql.NullString `json:"finalize_tx_hash" db:"finalize_tx_hash"`
OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" db:"prover_assigned_at"`
ProvedAt *time.Time `json:"proved_at" db:"proved_at"`
CommittedAt *time.Time `json:"committed_at" db:"committed_at"`
FinalizedAt *time.Time `json:"finalized_at" db:"finalized_at"`
}
// AggTask is a wrapper type around db AggProveTask type.
type AggTask struct {
ID string `json:"id" db:"id"`
StartBatchIndex uint64 `json:"start_batch_index" db:"start_batch_index"`
StartBatchHash string `json:"start_batch_hash" db:"start_batch_hash"`
EndBatchIndex uint64 `json:"end_batch_index" db:"end_batch_index"`
EndBatchHash string `json:"end_batch_hash" db:"end_batch_hash"`
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
Proof []byte `json:"proof" db:"proof"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
} }

View File

@@ -52,11 +52,6 @@ func TestProvingStatus(t *testing.T) {
ProvingTaskUnassigned, ProvingTaskUnassigned,
"unassigned", "unassigned",
}, },
{
"ProvingTaskSkipped",
ProvingTaskSkipped,
"skipped",
},
{ {
"ProvingTaskAssigned", "ProvingTaskAssigned",
ProvingTaskAssigned, ProvingTaskAssigned,
@@ -80,7 +75,7 @@ func TestProvingStatus(t *testing.T) {
{ {
"Undefined", "Undefined",
ProvingStatus(999), // Invalid value. ProvingStatus(999), // Invalid value.
"undefined", "Undefined (999)",
}, },
} }

View File

@@ -23,25 +23,27 @@ const (
StatusProofError StatusProofError
) )
// ProveType represents the type of roller. // ProofType represents the type of roller.
type ProveType uint8 type ProofType uint8
func (r ProveType) String() string { func (r ProofType) String() string {
switch r { switch r {
case BasicProve: case ProofTypeChunk:
return "Basic Prove" return "proof type chunk"
case AggregatorProve: case ProofTypeBatch:
return "Aggregator Prove" return "proof type batch"
default: default:
return "Illegal Prove type" return "illegal proof type"
} }
} }
const ( const (
// BasicProve is default roller, it only generates zk proof from traces. // ProofTypeUndefined is an unknown proof type
BasicProve ProveType = iota ProofTypeUndefined ProofType = iota
// AggregatorProve generates zk proof from other zk proofs and aggregate them into one proof. // ProofTypeChunk is default roller, it only generates zk proof from traces.
AggregatorProve ProofTypeChunk
// ProofTypeBatch generates zk proof from other zk proofs and aggregate them into one proof.
ProofTypeBatch
) )
// AuthMsg is the first message exchanged from the Roller to the Sequencer. // AuthMsg is the first message exchanged from the Roller to the Sequencer.
@@ -59,11 +61,9 @@ type Identity struct {
// Roller name // Roller name
Name string `json:"name"` Name string `json:"name"`
// Roller RollerType // Roller RollerType
RollerType ProveType `json:"roller_type,omitempty"` RollerType ProofType `json:"roller_type,omitempty"`
// Unverified Unix timestamp of message creation
Timestamp uint32 `json:"timestamp"`
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version. // Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version.
// curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha" // curl -sL https://api.github.com/repos/scroll-tech/scroll-prover/commits | jq -r ".[0].sha"
Version string `json:"version"` Version string `json:"version"`
// Random unique token generated by manager // Random unique token generated by manager
Token string `json:"token"` Token string `json:"token"`
@@ -203,7 +203,7 @@ func (a *ProofMsg) PublicKey() (string, error) {
// TaskMsg is a wrapper type around db ProveTask type. // TaskMsg is a wrapper type around db ProveTask type.
type TaskMsg struct { type TaskMsg struct {
ID string `json:"id"` ID string `json:"id"`
Type ProveType `json:"type,omitempty"` Type ProofType `json:"type,omitempty"`
// For decentralization, basic rollers will get block hashes from the coordinator. So that they can refer to the block hashes and fetch traces locally. Only applicable for basic rollers. // For decentralization, basic rollers will get block hashes from the coordinator. So that they can refer to the block hashes and fetch traces locally. Only applicable for basic rollers.
BlockHashes []common.Hash `json:"block_hashes,omitempty"` BlockHashes []common.Hash `json:"block_hashes,omitempty"`
// Only applicable for aggregator rollers. // Only applicable for aggregator rollers.
@@ -214,7 +214,7 @@ type TaskMsg struct {
// the proof generation succeeded, and an error message if proof generation failed. // the proof generation succeeded, and an error message if proof generation failed.
type ProofDetail struct { type ProofDetail struct {
ID string `json:"id"` ID string `json:"id"`
Type ProveType `json:"type,omitempty"` Type ProofType `json:"type,omitempty"`
Status RespStatus `json:"status"` Status RespStatus `json:"status"`
Proof *AggProof `json:"proof"` Proof *AggProof `json:"proof"`
Error string `json:"error,omitempty"` Error string `json:"error,omitempty"`

View File

@@ -3,7 +3,6 @@ package message
import ( import (
"encoding/hex" "encoding/hex"
"testing" "testing"
"time"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/crypto"
@@ -16,10 +15,9 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
authMsg := &AuthMsg{ authMsg := &AuthMsg{
Identity: &Identity{ Identity: &Identity{
Name: "testName", Name: "testName",
Timestamp: uint32(time.Now().Unix()), Version: "testVersion",
Version: "testVersion", Token: "testToken",
Token: "testToken",
}, },
} }
assert.NoError(t, authMsg.SignWithKey(privkey)) assert.NoError(t, authMsg.SignWithKey(privkey))
@@ -49,15 +47,14 @@ func TestGenerateToken(t *testing.T) {
func TestIdentityHash(t *testing.T) { func TestIdentityHash(t *testing.T) {
identity := &Identity{ identity := &Identity{
Name: "testName", Name: "testName",
RollerType: BasicProve, RollerType: ProofTypeChunk,
Timestamp: uint32(1622428800),
Version: "testVersion", Version: "testVersion",
Token: "testToken", Token: "testToken",
} }
hash, err := identity.Hash() hash, err := identity.Hash()
assert.NoError(t, err) assert.NoError(t, err)
expectedHash := "b3f152958dc881446fc131a250526139d909710c6b91b4d3281ceded28ce2e32" expectedHash := "c0411a19531fb8c6133b2bae91f361c14e65f2d318aef72b83519e6061cad001"
assert.Equal(t, expectedHash, hex.EncodeToString(hash)) assert.Equal(t, expectedHash, hex.EncodeToString(hash))
} }
@@ -68,7 +65,7 @@ func TestProofMessageSignVerifyPublicKey(t *testing.T) {
proofMsg := &ProofMsg{ proofMsg := &ProofMsg{
ProofDetail: &ProofDetail{ ProofDetail: &ProofDetail{
ID: "testID", ID: "testID",
Type: BasicProve, Type: ProofTypeChunk,
Status: StatusOk, Status: StatusOk,
Proof: &AggProof{ Proof: &AggProof{
Proof: []byte("testProof"), Proof: []byte("testProof"),
@@ -96,7 +93,7 @@ func TestProofMessageSignVerifyPublicKey(t *testing.T) {
func TestProofDetailHash(t *testing.T) { func TestProofDetailHash(t *testing.T) {
proofDetail := &ProofDetail{ proofDetail := &ProofDetail{
ID: "testID", ID: "testID",
Type: BasicProve, Type: ProofTypeChunk,
Status: StatusOk, Status: StatusOk,
Proof: &AggProof{ Proof: &AggProof{
Proof: []byte("testProof"), Proof: []byte("testProof"),
@@ -109,19 +106,19 @@ func TestProofDetailHash(t *testing.T) {
} }
hash, err := proofDetail.Hash() hash, err := proofDetail.Hash()
assert.NoError(t, err) assert.NoError(t, err)
expectedHash := "fdfaae752d6fd72a7fdd2ad034ef504d3acda9e691a799323cfa6e371684ba2b" expectedHash := "8ad894c2047166a98b1a389b716b06b01dc1bd29e950e2687ffbcb3c328edda5"
assert.Equal(t, expectedHash, hex.EncodeToString(hash)) assert.Equal(t, expectedHash, hex.EncodeToString(hash))
} }
func TestProveTypeString(t *testing.T) { func TestProveTypeString(t *testing.T) {
basicProve := ProveType(0) proofTypeChunk := ProofType(1)
assert.Equal(t, "Basic Prove", basicProve.String()) assert.Equal(t, "proof type chunk", proofTypeChunk.String())
aggregatorProve := ProveType(1) proofTypeBatch := ProofType(2)
assert.Equal(t, "Aggregator Prove", aggregatorProve.String()) assert.Equal(t, "proof type batch", proofTypeBatch.String())
illegalProve := ProveType(3) illegalProof := ProofType(3)
assert.Equal(t, "Illegal Prove type", illegalProve.String()) assert.Equal(t, "illegal proof type", illegalProof.String())
} }
func TestProofMsgPublicKey(t *testing.T) { func TestProofMsgPublicKey(t *testing.T) {
@@ -131,7 +128,7 @@ func TestProofMsgPublicKey(t *testing.T) {
proofMsg := &ProofMsg{ proofMsg := &ProofMsg{
ProofDetail: &ProofDetail{ ProofDetail: &ProofDetail{
ID: "testID", ID: "testID",
Type: BasicProve, Type: ProofTypeChunk,
Status: StatusOk, Status: StatusOk,
Proof: &AggProof{ Proof: &AggProof{
Proof: []byte("testProof"), Proof: []byte("testProof"),

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug" "runtime/debug"
) )
var tag = "v4.0.7" var tag = "v4.0.19"
var commit = func() string { var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok { if info, ok := debug.ReadBuildInfo(); ok {
@@ -22,7 +22,7 @@ var commit = func() string {
return "" return ""
}() }()
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-zkevm // ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover
var ZkVersion string var ZkVersion string
// Version denote the version of scroll protocol, including the l2geth, relayer, coordinator, roller, contracts and etc. // Version denote the version of scroll protocol, including the l2geth, relayer, coordinator, roller, contracts and etc.

View File

@@ -181,6 +181,22 @@ The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
|---|---|---| |---|---|---|
| _0 | address | undefined | | _0 | address | undefined |
### onDropMessage
```solidity
function onDropMessage(bytes _message) external payable
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _message | bytes | undefined |
### onERC1155BatchReceived ### onERC1155BatchReceived
```solidity ```solidity
@@ -380,6 +396,25 @@ Emitted when the ERC1155 NFT is batch deposited to gateway in layer 1.
| _tokenIds | uint256[] | undefined | | _tokenIds | uint256[] | undefined |
| _amounts | uint256[] | undefined | | _amounts | uint256[] | undefined |
### BatchRefundERC1155
```solidity
event BatchRefundERC1155(address indexed token, address indexed recipient, uint256[] tokenIds, uint256[] amounts)
```
Emitted when some ERC1155 token is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| tokenIds | uint256[] | undefined |
| amounts | uint256[] | undefined |
### DepositERC1155 ### DepositERC1155
```solidity ```solidity
@@ -460,6 +495,25 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined | | previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined | | newOwner `indexed` | address | undefined |
### RefundERC1155
```solidity
event RefundERC1155(address indexed token, address indexed recipient, uint256 tokenId, uint256 amount)
```
Emitted when some ERC1155 token is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| tokenId | uint256 | undefined |
| amount | uint256 | undefined |
### UpdateTokenMapping ### UpdateTokenMapping
```solidity ```solidity

View File

@@ -175,6 +175,22 @@ The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
|---|---|---| |---|---|---|
| _0 | address | undefined | | _0 | address | undefined |
### onDropMessage
```solidity
function onDropMessage(bytes _message) external payable
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _message | bytes | undefined |
### onERC721Received ### onERC721Received
```solidity ```solidity
@@ -324,6 +340,24 @@ Emitted when the ERC721 NFT is batch deposited to gateway in layer 1.
| _to | address | undefined | | _to | address | undefined |
| _tokenIds | uint256[] | undefined | | _tokenIds | uint256[] | undefined |
### BatchRefundERC721
```solidity
event BatchRefundERC721(address indexed token, address indexed recipient, uint256[] tokenIds)
```
Emitted when a batch of ERC721 tokens are refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| tokenIds | uint256[] | undefined |
### DepositERC721 ### DepositERC721
```solidity ```solidity
@@ -401,6 +435,24 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined | | previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined | | newOwner `indexed` | address | undefined |
### RefundERC721
```solidity
event RefundERC721(address indexed token, address indexed recipient, uint256 tokenId)
```
Emitted when some ERC721 token is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| tokenId | uint256 | undefined |
### UpdateTokenMapping ### UpdateTokenMapping
```solidity ```solidity

View File

@@ -217,6 +217,23 @@ Complete ETH withdraw from L2 to L1 and send fund to recipient&#39;s account in
| _2 | uint256 | undefined | | _2 | uint256 | undefined |
| _3 | bytes | undefined | | _3 | bytes | undefined |
### gatewayInContext
```solidity
function gatewayInContext() external view returns (address)
```
The address of gateway in current execution context.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### getERC20Gateway ### getERC20Gateway
```solidity ```solidity
@@ -306,6 +323,30 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.* *Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
### requestERC20
```solidity
function requestERC20(address _sender, address _token, uint256 _amount) external nonpayable returns (uint256)
```
Request ERC20 token transfer from users to gateways.
*All the gateways should have reentrancy guard to prevent potential attack though this function.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _sender | address | undefined |
| _token | address | undefined |
| _amount | uint256 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### setDefaultERC20Gateway ### setDefaultERC20Gateway
```solidity ```solidity
@@ -472,6 +513,41 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined | | previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined | | newOwner `indexed` | address | undefined |
### RefundERC20
```solidity
event RefundERC20(address indexed token, address indexed recipient, uint256 amount)
```
Emitted when some ERC20 token is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### RefundETH
```solidity
event RefundETH(address indexed recipient, uint256 amount)
```
Emitted when some ETH is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### SetDefaultERC20Gateway ### SetDefaultERC20Gateway
```solidity ```solidity

View File

@@ -27,6 +27,26 @@ The address of counterpart ScrollMessenger contract in L1/L2.
|---|---|---| |---|---|---|
| _0 | address | undefined | | _0 | address | undefined |
### dropMessage
```solidity
function dropMessage(address _from, address _to, uint256 _value, uint256 _messageNonce, bytes _message) external nonpayable
```
Drop a skipped message.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _from | address | undefined |
| _to | address | undefined |
| _value | uint256 | undefined |
| _messageNonce | uint256 | undefined |
| _message | bytes | undefined |
### feeVault ### feeVault
```solidity ```solidity
@@ -63,6 +83,28 @@ Initialize the storage of L1ScrollMessenger.
| _rollup | address | The address of ScrollChain contract. | | _rollup | address | The address of ScrollChain contract. |
| _messageQueue | address | The address of L1MessageQueue contract. | | _messageQueue | address | The address of L1MessageQueue contract. |
### isL1MessageDropped
```solidity
function isL1MessageDropped(bytes32) external view returns (bool)
```
Mappging from L1 message hash to drop status.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### isL1MessageSent ### isL1MessageSent
```solidity ```solidity
@@ -107,6 +149,23 @@ Mapping from L2 message hash to a boolean value indicating if the message has be
|---|---|---| |---|---|---|
| _0 | bool | undefined | | _0 | bool | undefined |
### maxReplayTimes
```solidity
function maxReplayTimes() external view returns (uint256)
```
The maximum number of times each L1 message can be replayed.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### messageQueue ### messageQueue
```solidity ```solidity
@@ -158,6 +217,28 @@ function paused() external view returns (bool)
|---|---|---| |---|---|---|
| _0 | bool | undefined | | _0 | bool | undefined |
### prevReplayIndex
```solidity
function prevReplayIndex(uint256) external view returns (uint256)
```
Mapping from queue index to previous replay queue index.
*If a message `x` was replayed 3 times with index `q1`, `q2` and `q3`, the value of `prevReplayIndex` and `replayStates` will be `replayStates[hash(x)].lastIndex = q3`, `replayStates[hash(x)].times = 3`, `prevReplayIndex[q3] = q2`, `prevReplayIndex[q2] = q1` and `prevReplayIndex[q1] = x`.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### relayMessageWithProof ### relayMessageWithProof
```solidity ```solidity
@@ -193,7 +274,7 @@ function renounceOwnership() external nonpayable
### replayMessage ### replayMessage
```solidity ```solidity
function replayMessage(address _from, address _to, uint256 _value, uint256 _queueIndex, bytes _message, uint32 _newGasLimit, address _refundAddress) external payable function replayMessage(address _from, address _to, uint256 _value, uint256 _messageNonce, bytes _message, uint32 _newGasLimit, address _refundAddress) external payable
``` ```
Replay an existing message. Replay an existing message.
@@ -207,11 +288,34 @@ Replay an existing message.
| _from | address | undefined | | _from | address | undefined |
| _to | address | undefined | | _to | address | undefined |
| _value | uint256 | undefined | | _value | uint256 | undefined |
| _queueIndex | uint256 | undefined | | _messageNonce | uint256 | undefined |
| _message | bytes | undefined | | _message | bytes | undefined |
| _newGasLimit | uint32 | undefined | | _newGasLimit | uint32 | undefined |
| _refundAddress | address | undefined | | _refundAddress | address | undefined |
### replayStates
```solidity
function replayStates(bytes32) external view returns (uint128 times, uint128 lastIndex)
```
Mapping from L1 message hash to replay state.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| times | uint128 | undefined |
| lastIndex | uint128 | undefined |
### rollup ### rollup
```solidity ```solidity
@@ -316,6 +420,22 @@ Update fee vault contract.
|---|---|---| |---|---|---|
| _newFeeVault | address | The address of new fee vault contract. | | _newFeeVault | address | The address of new fee vault contract. |
### updateMaxReplayTimes
```solidity
function updateMaxReplayTimes(uint256 _maxReplayTimes) external nonpayable
```
Update max replay times.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _maxReplayTimes | uint256 | The new max replay times. |
### xDomainMessageSender ### xDomainMessageSender
```solidity ```solidity
@@ -456,5 +576,21 @@ Emitted when owner updates fee vault contract.
| _oldFeeVault | address | undefined | | _oldFeeVault | address | undefined |
| _newFeeVault | address | undefined | | _newFeeVault | address | undefined |
### UpdateMaxReplayTimes
```solidity
event UpdateMaxReplayTimes(uint256 maxReplayTimes)
```
Emitted when the maximum number of times each message can be replayed is updated.
#### Parameters
| Name | Type | Description |
|---|---|---|
| maxReplayTimes | uint256 | undefined |

View File

@@ -198,6 +198,22 @@ The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
|---|---|---| |---|---|---|
| _0 | address | undefined | | _0 | address | undefined |
### onDropMessage
```solidity
function onDropMessage(bytes _message) external payable
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _message | bytes | undefined |
### router ### router
```solidity ```solidity
@@ -261,5 +277,23 @@ Emitted when ERC20 token is withdrawn from L2 to L1 and transfer to recipient.
| amount | uint256 | undefined | | amount | uint256 | undefined |
| data | bytes | undefined | | data | bytes | undefined |
### RefundERC20
```solidity
event RefundERC20(address indexed token, address indexed recipient, uint256 amount)
```
Emitted when some ERC20 token is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |

View File

@@ -196,6 +196,22 @@ The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
|---|---|---| |---|---|---|
| _0 | address | undefined | | _0 | address | undefined |
### onDropMessage
```solidity
function onDropMessage(bytes _message) external payable
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _message | bytes | undefined |
### router ### router
```solidity ```solidity
@@ -259,5 +275,23 @@ Emitted when ERC20 token is withdrawn from L2 to L1 and transfer to recipient.
| amount | uint256 | undefined | | amount | uint256 | undefined |
| data | bytes | undefined | | data | bytes | undefined |
### RefundERC20
```solidity
event RefundERC20(address indexed token, address indexed recipient, uint256 amount)
```
Emitted when some ERC20 token is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |

View File

@@ -2,16 +2,7 @@
/* eslint-disable node/no-missing-import */ /* eslint-disable node/no-missing-import */
import { expect } from "chai"; import { expect } from "chai";
import { BigNumber, constants } from "ethers"; import { BigNumber, constants } from "ethers";
import { import { concat, getAddress, hexlify, keccak256, randomBytes, RLP, stripZeros } from "ethers/lib/utils";
concat,
getAddress,
hexlify,
keccak256,
randomBytes,
RLP,
stripZeros,
TransactionTypes,
} from "ethers/lib/utils";
import { ethers } from "hardhat"; import { ethers } from "hardhat";
import { L1MessageQueue, L2GasPriceOracle } from "../typechain"; import { L1MessageQueue, L2GasPriceOracle } from "../typechain";
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers"; import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
@@ -294,4 +285,55 @@ describe("L1MessageQueue", async () => {
} }
}); });
}); });
context("#dropCrossDomainMessage", async () => {
it("should revert, when non-messenger call", async () => {
await expect(queue.connect(signer).dropCrossDomainMessage(0)).to.revertedWith(
"Only callable by the L1ScrollMessenger"
);
});
it("should revert, when drop executed message", async () => {
// append 10 messages
for (let i = 0; i < 10; i++) {
await queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 1000000, "0x");
}
// pop 5 messages with no skip
await expect(queue.connect(scrollChain).popCrossDomainMessage(0, 5, 0))
.to.emit(queue, "DequeueTransaction")
.withArgs(0, 5, 0);
for (let i = 0; i < 5; i++) {
expect(await queue.getCrossDomainMessage(i)).to.eq(constants.HashZero);
}
expect(await queue.pendingQueueIndex()).to.eq(5);
for (let i = 0; i < 5; i++) {
await expect(queue.connect(messenger).dropCrossDomainMessage(i)).to.revertedWith(
"message already dropped or executed"
);
}
// drop pending message
for (let i = 6; i < 10; i++) {
await expect(queue.connect(messenger).dropCrossDomainMessage(i)).to.revertedWith("cannot drop pending message");
}
});
it("should succeed", async () => {
// append 10 messages
for (let i = 0; i < 10; i++) {
await queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 1000000, "0x");
}
// pop 10 messages, all skipped
await expect(queue.connect(scrollChain).popCrossDomainMessage(0, 10, 0x3ff))
.to.emit(queue, "DequeueTransaction")
.withArgs(0, 10, 0x3ff);
for (let i = 0; i < 10; i++) {
expect(BigNumber.from(await queue.getCrossDomainMessage(i))).to.gt(constants.Zero);
await expect(queue.connect(messenger).dropCrossDomainMessage(i)).to.emit(queue, "DropTransaction").withArgs(i);
expect(await queue.getCrossDomainMessage(i)).to.eq(constants.HashZero);
}
});
});
}); });

View File

@@ -5,6 +5,14 @@ pragma solidity ^0.8.0;
import {IScrollMessenger} from "../libraries/IScrollMessenger.sol"; import {IScrollMessenger} from "../libraries/IScrollMessenger.sol";
interface IL1ScrollMessenger is IScrollMessenger { interface IL1ScrollMessenger is IScrollMessenger {
/**********
* Events *
**********/
/// @notice Emitted when the maximum number of times each message can be replayed is updated.
/// @param maxReplayTimes The new maximum number of times each message can be replayed.
event UpdateMaxReplayTimes(uint256 maxReplayTimes);
/*********** /***********
* Structs * * Structs *
***********/ ***********/
@@ -40,7 +48,7 @@ interface IL1ScrollMessenger is IScrollMessenger {
/// @param from The address of the sender of the message. /// @param from The address of the sender of the message.
/// @param to The address of the recipient of the message. /// @param to The address of the recipient of the message.
/// @param value The msg.value passed to the message call. /// @param value The msg.value passed to the message call.
/// @param queueIndex The queue index for the message to replay. /// @param messageNonce The nonce for the message to replay.
/// @param message The content of the message. /// @param message The content of the message.
/// @param newGasLimit New gas limit to be used for this message. /// @param newGasLimit New gas limit to be used for this message.
/// @param refundAddress The address of account who will receive the refunded fee. /// @param refundAddress The address of account who will receive the refunded fee.
@@ -48,9 +56,23 @@ interface IL1ScrollMessenger is IScrollMessenger {
address from, address from,
address to, address to,
uint256 value, uint256 value,
uint256 queueIndex, uint256 messageNonce,
bytes memory message, bytes memory message,
uint32 newGasLimit, uint32 newGasLimit,
address refundAddress address refundAddress
) external payable; ) external payable;
/// @notice Drop a skipped message.
/// @param from The address of the sender of the message.
/// @param to The address of the recipient of the message.
/// @param value The msg.value passed to the message call.
/// @param messageNonce The nonce for the message to drop.
/// @param message The content of the message.
function dropMessage(
address from,
address to,
uint256 value,
uint256 messageNonce,
bytes memory message
) external;
} }

View File

@@ -13,7 +13,10 @@ import {ScrollMessengerBase} from "../libraries/ScrollMessengerBase.sol";
import {AddressAliasHelper} from "../libraries/common/AddressAliasHelper.sol"; import {AddressAliasHelper} from "../libraries/common/AddressAliasHelper.sol";
import {WithdrawTrieVerifier} from "../libraries/verifier/WithdrawTrieVerifier.sol"; import {WithdrawTrieVerifier} from "../libraries/verifier/WithdrawTrieVerifier.sol";
import {IMessageDropCallback} from "../libraries/callbacks/IMessageDropCallback.sol";
// solhint-disable avoid-low-level-calls // solhint-disable avoid-low-level-calls
// solhint-disable reason-string
/// @title L1ScrollMessenger /// @title L1ScrollMessenger
/// @notice The `L1ScrollMessenger` contract can: /// @notice The `L1ScrollMessenger` contract can:
@@ -26,6 +29,17 @@ import {WithdrawTrieVerifier} from "../libraries/verifier/WithdrawTrieVerifier.s
/// @dev All deposited Ether (including `WETH` deposited through `L1WETHGateway`) will be locked in /// @dev All deposited Ether (including `WETH` deposited through `L1WETHGateway`) will be locked in
/// this contract. /// this contract.
contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1ScrollMessenger { contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1ScrollMessenger {
/***********
* Structs *
***********/
struct ReplayState {
// The number of replayed times.
uint128 times;
// The queue index of the latest replayed one. If it is zero, it means the message has not been replayed.
uint128 lastIndex;
}
/************* /*************
* Variables * * Variables *
*************/ *************/
@@ -36,12 +50,34 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
/// @notice Mapping from L2 message hash to a boolean value indicating if the message has been successfully executed. /// @notice Mapping from L2 message hash to a boolean value indicating if the message has been successfully executed.
mapping(bytes32 => bool) public isL2MessageExecuted; mapping(bytes32 => bool) public isL2MessageExecuted;
/// @notice Mapping from L1 message hash to drop status.
mapping(bytes32 => bool) public isL1MessageDropped;
/// @notice The address of Rollup contract. /// @notice The address of Rollup contract.
address public rollup; address public rollup;
/// @notice The address of L1MessageQueue contract. /// @notice The address of L1MessageQueue contract.
address public messageQueue; address public messageQueue;
/// @notice The maximum number of times each L1 message can be replayed.
uint256 public maxReplayTimes;
/// @notice Mapping from L1 message hash to replay state.
mapping(bytes32 => ReplayState) public replayStates;
/// @notice Mapping from queue index to previous replay queue index.
///
/// @dev If a message `x` was replayed 3 times with index `q1`, `q2` and `q3`, the
/// value of `prevReplayIndex` and `replayStates` will be `replayStates[hash(x)].lastIndex = q3`,
/// `replayStates[hash(x)].times = 3`, `prevReplayIndex[q3] = q2`, `prevReplayIndex[q2] = q1`,
/// `prevReplayIndex[q1] = x` and `prevReplayIndex[x]=nil`.
///
/// @dev The index `x` that `prevReplayIndex[x]=nil` is used as the termination of the list.
/// Usually we use `0` to represent `nil`, but we cannot distinguish it from the first message
/// with index zero. So a nonzero offset `1` is added to the value of `prevReplayIndex[x]` to
/// avoid such situation.
mapping(uint256 => uint256) public prevReplayIndex;
/*************** /***************
* Constructor * * Constructor *
***************/ ***************/
@@ -62,9 +98,6 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
rollup = _rollup; rollup = _rollup;
messageQueue = _messageQueue; messageQueue = _messageQueue;
// initialize to a nonzero value
xDomainMessageSender = ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER;
} }
/***************************** /*****************************
@@ -78,7 +111,7 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
bytes memory _message, bytes memory _message,
uint256 _gasLimit uint256 _gasLimit
) external payable override whenNotPaused { ) external payable override whenNotPaused {
_sendMessage(_to, _value, _message, _gasLimit, tx.origin); _sendMessage(_to, _value, _message, _gasLimit, msg.sender);
} }
/// @inheritdoc IScrollMessenger /// @inheritdoc IScrollMessenger
@@ -100,12 +133,7 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
uint256 _nonce, uint256 _nonce,
bytes memory _message, bytes memory _message,
L2MessageProof memory _proof L2MessageProof memory _proof
) external override whenNotPaused { ) external override whenNotPaused notInExecution {
require(
xDomainMessageSender == ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER,
"Message is already in execution"
);
bytes32 _xDomainCalldataHash = keccak256(_encodeXDomainCalldata(_from, _to, _value, _nonce, _message)); bytes32 _xDomainCalldataHash = keccak256(_encodeXDomainCalldata(_from, _to, _value, _nonce, _message));
require(!isL2MessageExecuted[_xDomainCalldataHash], "Message was already successfully executed"); require(!isL2MessageExecuted[_xDomainCalldataHash], "Message was already successfully executed");
@@ -144,21 +172,23 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
address _from, address _from,
address _to, address _to,
uint256 _value, uint256 _value,
uint256 _queueIndex, uint256 _messageNonce,
bytes memory _message, bytes memory _message,
uint32 _newGasLimit, uint32 _newGasLimit,
address _refundAddress address _refundAddress
) external payable override whenNotPaused { ) external payable override whenNotPaused notInExecution {
// We will use a different `queueIndex` for the replaced message. However, the original `queueIndex` or `nonce` // We will use a different `queueIndex` for the replaced message. However, the original `queueIndex` or `nonce`
// is encoded in the `_message`. We will check the `xDomainCalldata` in layer 2 to avoid duplicated execution. // is encoded in the `_message`. We will check the `xDomainCalldata` in layer 2 to avoid duplicated execution.
// So, only one message will succeed in layer 2. If one of the message is executed successfully, the other one // So, only one message will succeed in layer 2. If one of the message is executed successfully, the other one
// will revert with "Message was already successfully executed". // will revert with "Message was already successfully executed".
address _messageQueue = messageQueue; address _messageQueue = messageQueue;
address _counterpart = counterpart; address _counterpart = counterpart;
bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _queueIndex, _message); bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _messageNonce, _message);
bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata); bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);
require(isL1MessageSent[_xDomainCalldataHash], "Provided message has not been enqueued"); require(isL1MessageSent[_xDomainCalldataHash], "Provided message has not been enqueued");
// cannot replay dropped message
require(!isL1MessageDropped[_xDomainCalldataHash], "Message already dropped");
// compute and deduct the messaging fee to fee vault. // compute and deduct the messaging fee to fee vault.
uint256 _fee = IL1MessageQueue(_messageQueue).estimateCrossDomainMessageFee(_newGasLimit); uint256 _fee = IL1MessageQueue(_messageQueue).estimateCrossDomainMessageFee(_newGasLimit);
@@ -171,8 +201,28 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
} }
// enqueue the new transaction // enqueue the new transaction
uint256 _nextQueueIndex = IL1MessageQueue(_messageQueue).nextCrossDomainMessageIndex();
IL1MessageQueue(_messageQueue).appendCrossDomainMessage(_counterpart, _newGasLimit, _xDomainCalldata); IL1MessageQueue(_messageQueue).appendCrossDomainMessage(_counterpart, _newGasLimit, _xDomainCalldata);
ReplayState memory _replayState = replayStates[_xDomainCalldataHash];
// update the replayed message chain.
unchecked {
if (_replayState.lastIndex == 0) {
// the message has not been replayed before.
prevReplayIndex[_nextQueueIndex] = _messageNonce + 1;
} else {
prevReplayIndex[_nextQueueIndex] = _replayState.lastIndex + 1;
}
}
_replayState.lastIndex = uint128(_nextQueueIndex);
// update replay times
require(_replayState.times < maxReplayTimes, "Exceed maximum replay times");
unchecked {
_replayState.times += 1;
}
replayStates[_xDomainCalldataHash] = _replayState;
// refund fee to `_refundAddress` // refund fee to `_refundAddress`
unchecked { unchecked {
uint256 _refund = msg.value - _fee; uint256 _refund = msg.value - _fee;
@@ -183,6 +233,60 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
} }
} }
/// @inheritdoc IL1ScrollMessenger
function dropMessage(
address _from,
address _to,
uint256 _value,
uint256 _messageNonce,
bytes memory _message
) external override whenNotPaused notInExecution {
// The criteria for dropping a message:
// 1. The message is a L1 message.
// 2. The message has not been dropped before.
// 3. the message and all of its replacement are finalized in L1.
// 4. the message and all of its replacement are skipped.
//
// Possible denial of service attack:
// + replayMessage is called every time someone wants to drop the message.
// + replayMessage is called so many times for a skipped message, thus resulting in a long list.
//
// We limit the number of `replayMessage` calls of each message, which may solve the above problem.
address _messageQueue = messageQueue;
// check message exists
bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _messageNonce, _message);
bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);
require(isL1MessageSent[_xDomainCalldataHash], "Provided message has not been enqueued");
// check message not dropped
require(!isL1MessageDropped[_xDomainCalldataHash], "Message already dropped");
// check message is finalized
uint256 _lastIndex = replayStates[_xDomainCalldataHash].lastIndex;
if (_lastIndex == 0) _lastIndex = _messageNonce;
// check message is skipped and drop it.
// @note If the list is very long, the message may never be dropped.
while (true) {
IL1MessageQueue(_messageQueue).dropCrossDomainMessage(_lastIndex);
_lastIndex = prevReplayIndex[_lastIndex];
if (_lastIndex == 0) break;
unchecked {
_lastIndex = _lastIndex - 1;
}
}
isL1MessageDropped[_xDomainCalldataHash] = true;
// set execution context
xDomainMessageSender = ScrollConstants.DROP_XDOMAIN_MESSAGE_SENDER;
IMessageDropCallback(_from).onDropMessage{value: _value}(_message);
// clear execution context
xDomainMessageSender = ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER;
}
/************************ /************************
* Restricted Functions * * Restricted Functions *
************************/ ************************/
@@ -198,6 +302,15 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
} }
} }
/// @notice Update max replay times.
/// @dev This function can only be called by the contract owner.
/// @param _maxReplayTimes The new max replay times.
function updateMaxReplayTimes(uint256 _maxReplayTimes) external onlyOwner {
maxReplayTimes = _maxReplayTimes;
emit UpdateMaxReplayTimes(_maxReplayTimes);
}
/********************** /**********************
* Internal Functions * * Internal Functions *
**********************/ **********************/

View File

@@ -72,6 +72,20 @@ interface IL1ERC1155Gateway {
uint256[] _amounts uint256[] _amounts
); );
/// @notice Emitted when some ERC1155 token is refunded.
/// @param token The address of the token in L1.
/// @param recipient The address of receiver in L1.
/// @param tokenId The id of token refunded.
/// @param amount The amount of token refunded.
event RefundERC1155(address indexed token, address indexed recipient, uint256 tokenId, uint256 amount);
/// @notice Emitted when some ERC1155 token is refunded.
/// @param token The address of the token in L1.
/// @param recipient The address of receiver in L1.
/// @param tokenIds The list of ids of token refunded.
/// @param amounts The list of amount of token refunded.
event BatchRefundERC1155(address indexed token, address indexed recipient, uint256[] tokenIds, uint256[] amounts);
/************************* /*************************
* Public View Functions * * Public View Functions *
*************************/ *************************/

View File

@@ -39,6 +39,12 @@ interface IL1ERC20Gateway {
bytes data bytes data
); );
/// @notice Emitted when some ERC20 token is refunded.
/// @param token The address of the token in L1.
/// @param recipient The address of receiver in L1.
/// @param amount The amount of token refunded to receiver.
event RefundERC20(address indexed token, address indexed recipient, uint256 amount);
/************************* /*************************
* Public View Functions * * Public View Functions *
*************************/ *************************/

View File

@@ -64,6 +64,18 @@ interface IL1ERC721Gateway {
uint256[] _tokenIds uint256[] _tokenIds
); );
/// @notice Emitted when some ERC721 token is refunded.
/// @param token The address of the token in L1.
/// @param recipient The address of receiver in L1.
/// @param tokenId The id of token refunded.
event RefundERC721(address indexed token, address indexed recipient, uint256 tokenId);
/// @notice Emitted when a batch of ERC721 tokens are refunded.
/// @param token The address of the token in L1.
/// @param recipient The address of receiver in L1.
/// @param tokenIds The list of token ids of the ERC721 NFT refunded.
event BatchRefundERC721(address indexed token, address indexed recipient, uint256[] tokenIds);
/***************************** /*****************************
* Public Mutating Functions * * Public Mutating Functions *
*****************************/ *****************************/

View File

@@ -21,6 +21,11 @@ interface IL1ETHGateway {
/// @param data The optional calldata passed to recipient in L2. /// @param data The optional calldata passed to recipient in L2.
event DepositETH(address indexed from, address indexed to, uint256 amount, bytes data); event DepositETH(address indexed from, address indexed to, uint256 amount, bytes data);
/// @notice Emitted when some ETH is refunded.
/// @param recipient The address of receiver in L1.
/// @param amount The amount of ETH refunded to receiver.
event RefundETH(address indexed recipient, uint256 amount);
/***************************** /*****************************
* Public Mutating Functions * * Public Mutating Functions *
*****************************/ *****************************/

View File

@@ -31,6 +31,20 @@ interface IL1GatewayRouter is IL1ETHGateway, IL1ERC20Gateway {
/// @param _token The address of token to query. /// @param _token The address of token to query.
function getERC20Gateway(address _token) external view returns (address); function getERC20Gateway(address _token) external view returns (address);
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Request ERC20 token transfer from users to gateways.
/// @param sender The address of sender to request fund.
/// @param token The address of token to request.
/// @param amount The amount of token to request.
function requestERC20(
address sender,
address token,
uint256 amount
) external returns (uint256);
/************************ /************************
* Restricted Functions * * Restricted Functions *
************************/ ************************/

View File

@@ -65,32 +65,6 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
return tokenMapping[_l1Token]; return tokenMapping[_l1Token];
} }
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IL1ERC20Gateway
function finalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address _from,
address _to,
uint256 _amount,
bytes calldata _data
) external payable override onlyCallByCounterpart nonReentrant {
require(msg.value == 0, "nonzero msg.value");
require(_l2Token != address(0), "token address cannot be 0");
require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");
// @note can possible trigger reentrant call to this contract or messenger,
// but it seems not a big problem.
IERC20Upgradeable(_l1Token).safeTransfer(_to, _amount);
_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}
/************************ /************************
* Restricted Functions * * Restricted Functions *
************************/ ************************/
@@ -110,6 +84,29 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
* Internal Functions * * Internal Functions *
**********************/ **********************/
/// @inheritdoc L1ERC20Gateway
function _beforeFinalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address,
address,
uint256,
bytes calldata
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
require(_l2Token != address(0), "token address cannot be 0");
require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");
}
/// @inheritdoc L1ERC20Gateway
function _beforeDropMessage(
address,
address,
uint256
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
}
/// @inheritdoc L1ERC20Gateway /// @inheritdoc L1ERC20Gateway
function _deposit( function _deposit(
address _token, address _token,
@@ -121,25 +118,11 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
address _l2Token = tokenMapping[_token]; address _l2Token = tokenMapping[_token];
require(_l2Token != address(0), "no corresponding l2 token"); require(_l2Token != address(0), "no corresponding l2 token");
// 1. Extract real sender if this call is from L1GatewayRouter. // 1. Transfer token into this contract.
address _from = msg.sender; address _from;
if (router == msg.sender) { (_from, _amount, _data) = _transferERC20In(_token, _amount, _data);
(_from, _data) = abi.decode(_data, (address, bytes));
}
// 2. Transfer token into this contract. // 2. Generate message passed to L2CustomERC20Gateway.
{
// common practice to handle fee on transfer token.
uint256 _before = IERC20Upgradeable(_token).balanceOf(address(this));
IERC20Upgradeable(_token).safeTransferFrom(_from, address(this), _amount);
uint256 _after = IERC20Upgradeable(_token).balanceOf(address(this));
// no unchecked here, since some weird token may return arbitrary balance.
_amount = _after - _before;
// ignore weird fee on transfer token
require(_amount > 0, "deposit zero amount");
}
// 3. Generate message passed to L2StandardERC20Gateway.
bytes memory _message = abi.encodeWithSelector( bytes memory _message = abi.encodeWithSelector(
IL2ERC20Gateway.finalizeDepositERC20.selector, IL2ERC20Gateway.finalizeDepositERC20.selector,
_token, _token,
@@ -150,8 +133,8 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
_data _data
); );
// 4. Send message to L1ScrollMessenger. // 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit); IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, _from);
emit DepositERC20(_token, _l2Token, _from, _to, _amount, _data); emit DepositERC20(_token, _l2Token, _from, _to, _amount, _data);
} }

View File

@@ -10,6 +10,7 @@ import {IL2ERC1155Gateway} from "../../L2/gateways/IL2ERC1155Gateway.sol";
import {IL1ScrollMessenger} from "../IL1ScrollMessenger.sol"; import {IL1ScrollMessenger} from "../IL1ScrollMessenger.sol";
import {IL1ERC1155Gateway} from "./IL1ERC1155Gateway.sol"; import {IL1ERC1155Gateway} from "./IL1ERC1155Gateway.sol";
import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallback.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol"; import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// @title L1ERC1155Gateway /// @title L1ERC1155Gateway
@@ -19,7 +20,13 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// NFT will be transfer to the recipient directly. /// NFT will be transfer to the recipient directly.
/// ///
/// This will be changed if we have more specific scenarios. /// This will be changed if we have more specific scenarios.
contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, ScrollGatewayBase, IL1ERC1155Gateway { contract L1ERC1155Gateway is
OwnableUpgradeable,
ERC1155HolderUpgradeable,
ScrollGatewayBase,
IL1ERC1155Gateway,
IMessageDropCallback
{
/********** /**********
* Events * * Events *
**********/ **********/
@@ -105,7 +112,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
address _to, address _to,
uint256 _tokenId, uint256 _tokenId,
uint256 _amount uint256 _amount
) external override onlyCallByCounterpart nonReentrant { ) external virtual onlyCallByCounterpart nonReentrant {
require(_l2Token != address(0), "token address cannot be 0"); require(_l2Token != address(0), "token address cannot be 0");
require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch"); require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");
@@ -122,7 +129,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
address _to, address _to,
uint256[] calldata _tokenIds, uint256[] calldata _tokenIds,
uint256[] calldata _amounts uint256[] calldata _amounts
) external override onlyCallByCounterpart nonReentrant { ) external virtual onlyCallByCounterpart nonReentrant {
require(_l2Token != address(0), "token address cannot be 0"); require(_l2Token != address(0), "token address cannot be 0");
require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch"); require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");
@@ -131,6 +138,31 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
emit FinalizeBatchWithdrawERC1155(_l1Token, _l2Token, _from, _to, _tokenIds, _amounts); emit FinalizeBatchWithdrawERC1155(_l1Token, _l2Token, _from, _to, _tokenIds, _amounts);
} }
/// @inheritdoc IMessageDropCallback
function onDropMessage(bytes calldata _message) external payable virtual onlyInDropContext nonReentrant {
require(msg.value == 0, "nonzero msg.value");
if (bytes4(_message[0:4]) == IL2ERC1155Gateway.finalizeDepositERC1155.selector) {
(address _token, , address _sender, , uint256 _tokenId, uint256 _amount) = abi.decode(
_message[4:],
(address, address, address, address, uint256, uint256)
);
IERC1155Upgradeable(_token).safeTransferFrom(address(this), _sender, _tokenId, _amount, "");
emit RefundERC1155(_token, _sender, _tokenId, _amount);
} else if (bytes4(_message[0:4]) == IL2ERC1155Gateway.finalizeBatchDepositERC1155.selector) {
(address _token, , address _sender, , uint256[] memory _tokenIds, uint256[] memory _amounts) = abi.decode(
_message[4:],
(address, address, address, address, uint256[], uint256[])
);
IERC1155Upgradeable(_token).safeBatchTransferFrom(address(this), _sender, _tokenIds, _amounts, "");
emit BatchRefundERC1155(_token, _sender, _tokenIds, _amounts);
} else {
revert("invalid selector");
}
}
/************************ /************************
* Restricted Functions * * Restricted Functions *
************************/ ************************/
@@ -162,7 +194,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
uint256 _tokenId, uint256 _tokenId,
uint256 _amount, uint256 _amount,
uint256 _gasLimit uint256 _gasLimit
) internal nonReentrant { ) internal virtual nonReentrant {
require(_amount > 0, "deposit zero amount"); require(_amount > 0, "deposit zero amount");
address _l2Token = tokenMapping[_token]; address _l2Token = tokenMapping[_token];
@@ -183,7 +215,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
); );
// 3. Send message to L1ScrollMessenger. // 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit); IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, msg.sender);
emit DepositERC1155(_token, _l2Token, msg.sender, _to, _tokenId, _amount); emit DepositERC1155(_token, _l2Token, msg.sender, _to, _tokenId, _amount);
} }
@@ -200,7 +232,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
uint256[] calldata _tokenIds, uint256[] calldata _tokenIds,
uint256[] calldata _amounts, uint256[] calldata _amounts,
uint256 _gasLimit uint256 _gasLimit
) internal nonReentrant { ) internal virtual nonReentrant {
require(_tokenIds.length > 0, "no token to deposit"); require(_tokenIds.length > 0, "no token to deposit");
require(_tokenIds.length == _amounts.length, "length mismatch"); require(_tokenIds.length == _amounts.length, "length mismatch");
@@ -226,7 +258,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
); );
// 3. Send message to L1ScrollMessenger. // 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit); IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, msg.sender);
emit BatchDepositERC1155(_token, _l2Token, msg.sender, _to, _tokenIds, _amounts); emit BatchDepositERC1155(_token, _l2Token, msg.sender, _to, _tokenIds, _amounts);
} }

View File

@@ -2,11 +2,25 @@
pragma solidity ^0.8.0; pragma solidity ^0.8.0;
import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol";
import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";
import {IL1ERC20Gateway} from "./IL1ERC20Gateway.sol"; import {IL1ERC20Gateway} from "./IL1ERC20Gateway.sol";
import {IL1GatewayRouter} from "./IL1GatewayRouter.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
import {IL2ERC20Gateway} from "../../L2/gateways/IL2ERC20Gateway.sol";
import {IScrollMessenger} from "../../libraries/IScrollMessenger.sol";
import {ScrollConstants} from "../../libraries/constants/ScrollConstants.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallback.sol";
// solhint-disable no-empty-blocks // solhint-disable no-empty-blocks
abstract contract L1ERC20Gateway is IL1ERC20Gateway { abstract contract L1ERC20Gateway is IL1ERC20Gateway, IMessageDropCallback, ScrollGatewayBase {
using SafeERC20 for IERC20;
/***************************** /*****************************
* Public Mutating Functions * * Public Mutating Functions *
*****************************/ *****************************/
@@ -41,10 +55,110 @@ abstract contract L1ERC20Gateway is IL1ERC20Gateway {
_deposit(_token, _to, _amount, _data, _gasLimit); _deposit(_token, _to, _amount, _data, _gasLimit);
} }
/// @inheritdoc IL1ERC20Gateway
function finalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address _from,
address _to,
uint256 _amount,
bytes calldata _data
) external payable override onlyCallByCounterpart nonReentrant {
_beforeFinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
// @note can possible trigger reentrant call to this contract or messenger,
// but it seems not a big problem.
IERC20(_l1Token).safeTransfer(_to, _amount);
_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}
/// @inheritdoc IMessageDropCallback
function onDropMessage(bytes calldata _message) external payable virtual onlyInDropContext nonReentrant {
// _message should start with 0x8431f5c1 => finalizeDepositERC20(address,address,address,address,uint256,bytes)
require(bytes4(_message[0:4]) == IL2ERC20Gateway.finalizeDepositERC20.selector, "invalid selector");
// decode (token, receiver, amount)
(address _token, , address _receiver, , uint256 _amount, ) = abi.decode(
_message[4:],
(address, address, address, address, uint256, bytes)
);
// do dome check for each custom gateway
_beforeDropMessage(_token, _receiver, _amount);
IERC20(_token).safeTransfer(_receiver, _amount);
emit RefundERC20(_token, _receiver, _amount);
}
/********************** /**********************
* Internal Functions * * Internal Functions *
**********************/ **********************/
/// @dev Internal function hook to perform checks and actions before finalizing the withdrawal.
/// @param _l1Token The address of corresponding L1 token in L1.
/// @param _l2Token The address of corresponding L2 token in L2.
/// @param _from The address of account who withdraw the token in L2.
/// @param _to The address of recipient in L1 to receive the token.
/// @param _amount The amount of the token to withdraw.
/// @param _data Optional data to forward to recipient's account.
function _beforeFinalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address _from,
address _to,
uint256 _amount,
bytes calldata _data
) internal virtual;
/// @dev Internal function hook to perform checks and actions before dropping the message.
/// @param _token The L1 token address.
/// @param _receiver The recipient address on L1.
/// @param _amount The amount of token to refund.
function _beforeDropMessage(
address _token,
address _receiver,
uint256 _amount
) internal virtual;
/// @dev Internal function to transfer ERC20 token to this contract.
/// @param _token The address of token to transfer.
/// @param _amount The amount of token to transfer.
/// @param _data The data passed by caller.
function _transferERC20In(
address _token,
uint256 _amount,
bytes memory _data
)
internal
returns (
address,
uint256,
bytes memory
)
{
address _from = msg.sender;
if (router == msg.sender) {
// Extract real sender if this call is from L1GatewayRouter.
(_from, _data) = abi.decode(_data, (address, bytes));
_amount = IL1GatewayRouter(msg.sender).requestERC20(_from, _token, _amount);
} else {
// common practice to handle fee on transfer token.
uint256 _before = IERC20(_token).balanceOf(address(this));
IERC20(_token).safeTransferFrom(_from, address(this), _amount);
uint256 _after = IERC20(_token).balanceOf(address(this));
// no unchecked here, since some weird token may return arbitrary balance.
_amount = _after - _before;
}
// ignore weird fee on transfer token
require(_amount > 0, "deposit zero amount");
return (_from, _amount, _data);
}
/// @dev Internal function to do all the deposit operations. /// @dev Internal function to do all the deposit operations.
/// ///
/// @param _token The token to deposit. /// @param _token The token to deposit.

View File

@@ -10,6 +10,7 @@ import {IL2ERC721Gateway} from "../../L2/gateways/IL2ERC721Gateway.sol";
import {IL1ScrollMessenger} from "../IL1ScrollMessenger.sol"; import {IL1ScrollMessenger} from "../IL1ScrollMessenger.sol";
import {IL1ERC721Gateway} from "./IL1ERC721Gateway.sol"; import {IL1ERC721Gateway} from "./IL1ERC721Gateway.sol";
import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallback.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol"; import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// @title L1ERC721Gateway /// @title L1ERC721Gateway
@@ -19,7 +20,13 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// NFT will be transfer to the recipient directly. /// NFT will be transfer to the recipient directly.
/// ///
/// This will be changed if we have more specific scenarios. /// This will be changed if we have more specific scenarios.
contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollGatewayBase, IL1ERC721Gateway { contract L1ERC721Gateway is
OwnableUpgradeable,
ERC721HolderUpgradeable,
ScrollGatewayBase,
IL1ERC721Gateway,
IMessageDropCallback
{
/********** /**********
* Events * * Events *
**********/ **********/
@@ -99,7 +106,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _from, address _from,
address _to, address _to,
uint256 _tokenId uint256 _tokenId
) external override onlyCallByCounterpart nonReentrant { ) external virtual onlyCallByCounterpart nonReentrant {
require(_l2Token != address(0), "token address cannot be 0"); require(_l2Token != address(0), "token address cannot be 0");
require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch"); require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");
@@ -115,7 +122,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _from, address _from,
address _to, address _to,
uint256[] calldata _tokenIds uint256[] calldata _tokenIds
) external override onlyCallByCounterpart nonReentrant { ) external virtual onlyCallByCounterpart nonReentrant {
require(_l2Token != address(0), "token address cannot be 0"); require(_l2Token != address(0), "token address cannot be 0");
require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch"); require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");
@@ -126,6 +133,32 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
emit FinalizeBatchWithdrawERC721(_l1Token, _l2Token, _from, _to, _tokenIds); emit FinalizeBatchWithdrawERC721(_l1Token, _l2Token, _from, _to, _tokenIds);
} }
/// @inheritdoc IMessageDropCallback
function onDropMessage(bytes calldata _message) external payable virtual onlyInDropContext nonReentrant {
require(msg.value == 0, "nonzero msg.value");
if (bytes4(_message[0:4]) == IL2ERC721Gateway.finalizeDepositERC721.selector) {
(address _token, , address _receiver, , uint256 _tokenId) = abi.decode(
_message[4:],
(address, address, address, address, uint256)
);
IERC721Upgradeable(_token).safeTransferFrom(address(this), _receiver, _tokenId);
emit RefundERC721(_token, _receiver, _tokenId);
} else if (bytes4(_message[0:4]) == IL2ERC721Gateway.finalizeBatchDepositERC721.selector) {
(address _token, , address _receiver, , uint256[] memory _tokenIds) = abi.decode(
_message[4:],
(address, address, address, address, uint256[])
);
for (uint256 i = 0; i < _tokenIds.length; i++) {
IERC721Upgradeable(_token).safeTransferFrom(address(this), _receiver, _tokenIds[i]);
}
emit BatchRefundERC721(_token, _receiver, _tokenIds);
} else {
revert("invalid selector");
}
}
/************************ /************************
* Restricted Functions * * Restricted Functions *
************************/ ************************/
@@ -155,7 +188,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _to, address _to,
uint256 _tokenId, uint256 _tokenId,
uint256 _gasLimit uint256 _gasLimit
) internal nonReentrant { ) internal virtual nonReentrant {
address _l2Token = tokenMapping[_token]; address _l2Token = tokenMapping[_token];
require(_l2Token != address(0), "no corresponding l2 token"); require(_l2Token != address(0), "no corresponding l2 token");
@@ -173,7 +206,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
); );
// 3. Send message to L1ScrollMessenger. // 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit); IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, msg.sender);
emit DepositERC721(_token, _l2Token, msg.sender, _to, _tokenId); emit DepositERC721(_token, _l2Token, msg.sender, _to, _tokenId);
} }
@@ -188,7 +221,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _to, address _to,
uint256[] calldata _tokenIds, uint256[] calldata _tokenIds,
uint256 _gasLimit uint256 _gasLimit
) internal nonReentrant { ) internal virtual nonReentrant {
require(_tokenIds.length > 0, "no token to deposit"); require(_tokenIds.length > 0, "no token to deposit");
address _l2Token = tokenMapping[_token]; address _l2Token = tokenMapping[_token];
@@ -210,7 +243,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
); );
// 3. Send message to L1ScrollMessenger. // 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit); IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, msg.sender);
emit BatchDepositERC721(_token, _l2Token, msg.sender, _to, _tokenIds); emit BatchDepositERC721(_token, _l2Token, msg.sender, _to, _tokenIds);
} }

View File

@@ -8,14 +8,17 @@ import {IL2ETHGateway} from "../../L2/gateways/IL2ETHGateway.sol";
import {IL1ScrollMessenger} from "../IL1ScrollMessenger.sol"; import {IL1ScrollMessenger} from "../IL1ScrollMessenger.sol";
import {IL1ETHGateway} from "./IL1ETHGateway.sol"; import {IL1ETHGateway} from "./IL1ETHGateway.sol";
import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallback.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol"; import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
// solhint-disable avoid-low-level-calls
/// @title L1ETHGateway /// @title L1ETHGateway
/// @notice The `L1ETHGateway` is used to deposit ETH in layer 1 and /// @notice The `L1ETHGateway` is used to deposit ETH in layer 1 and
/// finalize withdraw ETH from layer 2. /// finalize withdraw ETH from layer 2.
/// @dev The deposited ETH tokens are held in this gateway. On finalizing withdraw, the corresponding /// @dev The deposited ETH tokens are held in this gateway. On finalizing withdraw, the corresponding
/// ETH will be transfer to the recipient directly. /// ETH will be transfer to the recipient directly.
contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway { contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway, IMessageDropCallback {
/*************** /***************
* Constructor * * Constructor *
***************/ ***************/
@@ -72,7 +75,6 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
// @note can possible trigger reentrant call to messenger, // @note can possible trigger reentrant call to messenger,
// but it seems not a big problem. // but it seems not a big problem.
// solhint-disable-next-line avoid-low-level-calls
(bool _success, ) = _to.call{value: _amount}(""); (bool _success, ) = _to.call{value: _amount}("");
require(_success, "ETH transfer failed"); require(_success, "ETH transfer failed");
@@ -81,6 +83,22 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
emit FinalizeWithdrawETH(_from, _to, _amount, _data); emit FinalizeWithdrawETH(_from, _to, _amount, _data);
} }
/// @inheritdoc IMessageDropCallback
function onDropMessage(bytes calldata _message) external payable virtual onlyInDropContext nonReentrant {
// _message should start with 0x232e8748 => finalizeDepositETH(address,address,uint256,bytes)
require(bytes4(_message[0:4]) == IL2ETHGateway.finalizeDepositETH.selector, "invalid selector");
// decode (receiver, amount)
(address _receiver, , uint256 _amount, ) = abi.decode(_message[4:], (address, address, uint256, bytes));
require(_amount == msg.value, "msg.value mismatch");
(bool _success, ) = _receiver.call{value: _amount}("");
require(_success, "ETH transfer failed");
emit RefundETH(_receiver, _amount);
}
/********************** /**********************
* Internal Functions * * Internal Functions *
**********************/ **********************/
@@ -95,7 +113,7 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
uint256 _amount, uint256 _amount,
bytes memory _data, bytes memory _data,
uint256 _gasLimit uint256 _gasLimit
) internal nonReentrant { ) internal virtual nonReentrant {
require(_amount > 0, "deposit zero eth"); require(_amount > 0, "deposit zero eth");
// 1. Extract real sender if this call is from L1GatewayRouter. // 1. Extract real sender if this call is from L1GatewayRouter.
@@ -113,7 +131,7 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
_data _data
); );
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, _amount, _message, _gasLimit); IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, _amount, _message, _gasLimit, _from);
emit DepositETH(_from, _to, _amount, _data); emit DepositETH(_from, _to, _amount, _data);
} }

View File

@@ -3,8 +3,9 @@
pragma solidity ^0.8.0; pragma solidity ^0.8.0;
import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol";
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
import {SafeERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol";
import {IL2GatewayRouter} from "../../L2/gateways/IL2GatewayRouter.sol";
import {IScrollGateway} from "../../libraries/gateway/IScrollGateway.sol"; import {IScrollGateway} from "../../libraries/gateway/IScrollGateway.sol";
import {IL1ScrollMessenger} from "../IL1ScrollMessenger.sol"; import {IL1ScrollMessenger} from "../IL1ScrollMessenger.sol";
import {IL1ETHGateway} from "./IL1ETHGateway.sol"; import {IL1ETHGateway} from "./IL1ETHGateway.sol";
@@ -17,6 +18,8 @@ import {IL1GatewayRouter} from "./IL1GatewayRouter.sol";
/// @dev One can also use this contract to query L1/L2 token address mapping. /// @dev One can also use this contract to query L1/L2 token address mapping.
/// In the future, ERC-721 and ERC-1155 tokens will be added to the router too. /// In the future, ERC-721 and ERC-1155 tokens will be added to the router too.
contract L1GatewayRouter is OwnableUpgradeable, IL1GatewayRouter { contract L1GatewayRouter is OwnableUpgradeable, IL1GatewayRouter {
using SafeERC20Upgradeable for IERC20Upgradeable;
/************* /*************
* Variables * * Variables *
*************/ *************/
@@ -31,6 +34,23 @@ contract L1GatewayRouter is OwnableUpgradeable, IL1GatewayRouter {
// solhint-disable-next-line var-name-mixedcase // solhint-disable-next-line var-name-mixedcase
mapping(address => address) public ERC20Gateway; mapping(address => address) public ERC20Gateway;
/// @notice The address of gateway in current execution context.
address public gatewayInContext;
/**********************
* Function Modifiers *
**********************/
modifier onlyNotInContext() {
require(gatewayInContext == address(0), "Only not in context");
_;
}
modifier onlyInContext() {
require(msg.sender == gatewayInContext, "Only in deposit context");
_;
}
/*************** /***************
* Constructor * * Constructor *
***************/ ***************/
@@ -77,6 +97,23 @@ contract L1GatewayRouter is OwnableUpgradeable, IL1GatewayRouter {
return _gateway; return _gateway;
} }
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IL1GatewayRouter
/// @dev All the gateways should have reentrancy guard to prevent potential attack though this function.
function requestERC20(
address _sender,
address _token,
uint256 _amount
) external onlyInContext returns (uint256) {
uint256 _balance = IERC20Upgradeable(_token).balanceOf(msg.sender);
IERC20Upgradeable(_token).safeTransferFrom(_sender, msg.sender, _amount);
_amount = IERC20Upgradeable(_token).balanceOf(msg.sender) - _balance;
return _amount;
}
/************************************************* /*************************************************
* Public Mutating Functions from L1ERC20Gateway * * Public Mutating Functions from L1ERC20Gateway *
*************************************************/ *************************************************/
@@ -107,14 +144,20 @@ contract L1GatewayRouter is OwnableUpgradeable, IL1GatewayRouter {
uint256 _amount, uint256 _amount,
bytes memory _data, bytes memory _data,
uint256 _gasLimit uint256 _gasLimit
) public payable override { ) public payable override onlyNotInContext {
address _gateway = getERC20Gateway(_token); address _gateway = getERC20Gateway(_token);
require(_gateway != address(0), "no gateway available"); require(_gateway != address(0), "no gateway available");
// enter deposit context
gatewayInContext = _gateway;
// encode msg.sender with _data // encode msg.sender with _data
bytes memory _routerData = abi.encode(msg.sender, _data); bytes memory _routerData = abi.encode(msg.sender, _data);
IL1ERC20Gateway(_gateway).depositERC20AndCall{value: msg.value}(_token, _to, _amount, _routerData, _gasLimit); IL1ERC20Gateway(_gateway).depositERC20AndCall{value: msg.value}(_token, _to, _amount, _routerData, _gasLimit);
// leave deposit context
gatewayInContext = address(0);
} }
/// @inheritdoc IL1ERC20Gateway /// @inheritdoc IL1ERC20Gateway
@@ -153,7 +196,7 @@ contract L1GatewayRouter is OwnableUpgradeable, IL1GatewayRouter {
uint256 _amount, uint256 _amount,
bytes memory _data, bytes memory _data,
uint256 _gasLimit uint256 _gasLimit
) public payable override { ) public payable override onlyNotInContext {
address _gateway = ethGateway; address _gateway = ethGateway;
require(_gateway != address(0), "eth gateway available"); require(_gateway != address(0), "eth gateway available");

View File

@@ -80,35 +80,40 @@ contract L1StandardERC20Gateway is Initializable, ScrollGatewayBase, L1ERC20Gate
return Clones.predictDeterministicAddress(l2TokenImplementation, _salt, l2TokenFactory); return Clones.predictDeterministicAddress(l2TokenImplementation, _salt, l2TokenFactory);
} }
/***************************** /**********************
* Public Mutating Functions * * Internal Functions *
*****************************/ **********************/
/// @inheritdoc IL1ERC20Gateway /// @inheritdoc L1ERC20Gateway
function finalizeWithdrawERC20( function _beforeFinalizeWithdrawERC20(
address _l1Token, address _l1Token,
address _l2Token, address _l2Token,
address _from, address,
address _to, address,
uint256 _amount, uint256,
bytes calldata _data bytes calldata
) external payable override onlyCallByCounterpart nonReentrant { ) internal virtual override {
require(msg.value == 0, "nonzero msg.value"); require(msg.value == 0, "nonzero msg.value");
require(_l2Token != address(0), "token address cannot be 0"); require(_l2Token != address(0), "token address cannot be 0");
require(getL2ERC20Address(_l1Token) == _l2Token, "l2 token mismatch"); require(getL2ERC20Address(_l1Token) == _l2Token, "l2 token mismatch");
// @note can possible trigger reentrant call to messenger, // update `tokenMapping` on first withdraw
// but it seems not a big problem. address _storedL2Token = tokenMapping[_l1Token];
IERC20(_l1Token).safeTransfer(_to, _amount); if (_storedL2Token == address(0)) {
tokenMapping[_l1Token] = _l2Token;
_doCallback(_to, _data); } else {
require(_storedL2Token == _l2Token, "l2 token mismatch");
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data); }
} }
/********************** /// @inheritdoc L1ERC20Gateway
* Internal Functions * function _beforeDropMessage(
**********************/ address,
address,
uint256
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
}
/// @inheritdoc L1ERC20Gateway /// @inheritdoc L1ERC20Gateway
function _deposit( function _deposit(
@@ -120,37 +125,26 @@ contract L1StandardERC20Gateway is Initializable, ScrollGatewayBase, L1ERC20Gate
) internal virtual override nonReentrant { ) internal virtual override nonReentrant {
require(_amount > 0, "deposit zero amount"); require(_amount > 0, "deposit zero amount");
// 1. Extract real sender if this call is from L1GatewayRouter. // 1. Transfer token into this contract.
address _from = msg.sender; address _from;
if (router == msg.sender) { (_from, _amount, _data) = _transferERC20In(_token, _amount, _data);
(_from, _data) = abi.decode(_data, (address, bytes));
}
// 2. Transfer token into this contract. // 2. Generate message passed to L2StandardERC20Gateway.
{
// common practice to handle fee on transfer token.
uint256 _before = IERC20(_token).balanceOf(address(this));
IERC20(_token).safeTransferFrom(_from, address(this), _amount);
uint256 _after = IERC20(_token).balanceOf(address(this));
// no unchecked here, since some weird token may return arbitrary balance.
_amount = _after - _before;
// ignore weird fee on transfer token
require(_amount > 0, "deposit zero amount");
}
// 3. Generate message passed to L2StandardERC20Gateway.
address _l2Token = tokenMapping[_token]; address _l2Token = tokenMapping[_token];
bytes memory _l2Data = _data; bytes memory _l2Data;
if (_l2Token == address(0)) { if (_l2Token == address(0)) {
// It is a new token, compute and store mapping in storage. // @note we won't update `tokenMapping` here but update the `tokenMapping` on
// first successful withdraw. This will prevent user to set arbitrary token
// metadata by setting a very small `_gasLimit` on the first tx.
_l2Token = getL2ERC20Address(_token); _l2Token = getL2ERC20Address(_token);
tokenMapping[_token] = _l2Token;
// passing symbol/name/decimal in order to deploy in L2. // passing symbol/name/decimal in order to deploy in L2.
string memory _symbol = IERC20Metadata(_token).symbol(); string memory _symbol = IERC20Metadata(_token).symbol();
string memory _name = IERC20Metadata(_token).name(); string memory _name = IERC20Metadata(_token).name();
uint8 _decimals = IERC20Metadata(_token).decimals(); uint8 _decimals = IERC20Metadata(_token).decimals();
_l2Data = abi.encode(_data, abi.encode(_symbol, _name, _decimals)); _l2Data = abi.encode(true, abi.encode(_data, abi.encode(_symbol, _name, _decimals)));
} else {
_l2Data = abi.encode(false, _data);
} }
bytes memory _message = abi.encodeWithSelector( bytes memory _message = abi.encodeWithSelector(
IL2ERC20Gateway.finalizeDepositERC20.selector, IL2ERC20Gateway.finalizeDepositERC20.selector,
@@ -162,8 +156,8 @@ contract L1StandardERC20Gateway is Initializable, ScrollGatewayBase, L1ERC20Gate
_l2Data _l2Data
); );
// 4. Send message to L1ScrollMessenger. // 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit); IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, _from);
emit DepositERC20(_token, _l2Token, _from, _to, _amount, _data); emit DepositERC20(_token, _l2Token, _from, _to, _amount, _data);
} }

View File

@@ -70,34 +70,37 @@ contract L1WETHGateway is Initializable, ScrollGatewayBase, L1ERC20Gateway {
return l2WETH; return l2WETH;
} }
/***************************** /**********************
* Public Mutating Functions * * Internal Functions *
*****************************/ **********************/
/// @inheritdoc IL1ERC20Gateway /// @inheritdoc L1ERC20Gateway
function finalizeWithdrawERC20( function _beforeFinalizeWithdrawERC20(
address _l1Token, address _l1Token,
address _l2Token, address _l2Token,
address _from, address,
address _to, address,
uint256 _amount, uint256 _amount,
bytes calldata _data bytes calldata
) external payable override onlyCallByCounterpart nonReentrant { ) internal virtual override {
require(_l1Token == WETH, "l1 token not WETH"); require(_l1Token == WETH, "l1 token not WETH");
require(_l2Token == l2WETH, "l2 token not WETH"); require(_l2Token == l2WETH, "l2 token not WETH");
require(_amount == msg.value, "msg.value mismatch"); require(_amount == msg.value, "msg.value mismatch");
IWETH(_l1Token).deposit{value: _amount}(); IWETH(_l1Token).deposit{value: _amount}();
IERC20(_l1Token).safeTransfer(_to, _amount);
_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
} }
/********************** /// @inheritdoc L1ERC20Gateway
* Internal Functions * function _beforeDropMessage(
**********************/ address _token,
address,
uint256 _amount
) internal virtual override {
require(_token == WETH, "token not WETH");
require(_amount == msg.value, "msg.value mismatch");
IWETH(_token).deposit{value: _amount}();
}
/// @inheritdoc L1ERC20Gateway /// @inheritdoc L1ERC20Gateway
function _deposit( function _deposit(
@@ -110,17 +113,12 @@ contract L1WETHGateway is Initializable, ScrollGatewayBase, L1ERC20Gateway {
require(_amount > 0, "deposit zero amount"); require(_amount > 0, "deposit zero amount");
require(_token == WETH, "only WETH is allowed"); require(_token == WETH, "only WETH is allowed");
// 1. Extract real sender if this call is from L1GatewayRouter. // 1. Transfer token into this contract.
address _from = msg.sender; address _from;
if (router == msg.sender) { (_from, _amount, _data) = _transferERC20In(_token, _amount, _data);
(_from, _data) = abi.decode(_data, (address, bytes));
}
// 2. Transfer token into this contract.
IERC20(_token).safeTransferFrom(_from, address(this), _amount);
IWETH(_token).withdraw(_amount); IWETH(_token).withdraw(_amount);
// 3. Generate message passed to L2StandardERC20Gateway. // 2. Generate message passed to L2WETHGateway.
bytes memory _message = abi.encodeWithSelector( bytes memory _message = abi.encodeWithSelector(
IL2ERC20Gateway.finalizeDepositERC20.selector, IL2ERC20Gateway.finalizeDepositERC20.selector,
_token, _token,
@@ -131,12 +129,13 @@ contract L1WETHGateway is Initializable, ScrollGatewayBase, L1ERC20Gateway {
_data _data
); );
// 4. Send message to L1ScrollMessenger. // 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: _amount + msg.value}( IL1ScrollMessenger(messenger).sendMessage{value: _amount + msg.value}(
counterpart, counterpart,
_amount, _amount,
_message, _message,
_gasLimit _gasLimit,
_from
); );
emit DepositERC20(_token, l2WETH, _from, _to, _amount, _data); emit DepositERC20(_token, l2WETH, _from, _to, _amount, _data);

View File

@@ -0,0 +1,11 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import {L1CustomERC20Gateway} from "../L1CustomERC20Gateway.sol";
// solhint-disable no-empty-blocks
contract L1USDCGateway is L1CustomERC20Gateway {
}

Some files were not shown because too many files have changed in this diff Show More