Compare commits

..

29 Commits

Author SHA1 Message Date
colin
076cfc1eae refactor(relayer): optimize l1 block schema (#648)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 17:26:43 +08:00
Xi Lin
25e4b6c19d fix(contracts): OZ-L1-L04 Lost Funds in Messenger Contracts (#637)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 17:08:02 +08:00
Péter Garamvölgyi
bae38de0d3 fix(contracts): OZ-L1-L05 Outdated OpenZeppelin Library Version (#622)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: zimpha <zimpha@gmail.com>
2023-07-20 16:54:43 +08:00
Xi Lin
c9f623b12b fix(contracts): OZ-L1-L01 Batch Reverting Can Pause Finalization (#634)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 16:36:37 +08:00
Xi Lin
3491f0ebff fix(contracts): OZ-L1-L08 Batch Events Lack Information (#624)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 16:24:27 +08:00
Xi Lin
ff4a9e1dd2 fix(contracts): OZ-L1-L07 Lack of Logs on Sensitive Actions (#623)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 16:10:06 +08:00
Xi Lin
f8ec59f7e1 fix(contracts): OZ-L1-M02 Lack of Upgradeability Storage Gaps (#618)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 15:52:48 +08:00
Xi Lin
571a577231 doc(contracts)/fix(contracts): OZ-L1-M03 WithdrawTrieVerifier Proves Intermediate Nodes (#619)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 15:42:11 +08:00
Lawliet-Chan
e3b451c641 feat(prover-stats-api): add prover stats API (#635)
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 15:33:29 +08:00
Xi Lin
2d6a195d52 fix(contracts): OZ-L1-H03 Incorrect Depth Calculation for Extension Nodes Allows Denial-of-Service (#617)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 14:48:09 +08:00
ChuhanJin
577cc90a34 refactor(bridge-history-api): fix go lint (#638)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-18 18:35:45 +08:00
Ahmed Castro
ecd3a61a86 refactor(contracts): add token interfaces extensions (#654) 2023-07-18 15:58:10 +08:00
ChuhanJin
b4cb30e2a1 feat(bridge-history-api): add watch nft batch events watching (#643)
Co-authored-by: vincent <419436363@qq.com>
2023-07-17 21:10:03 +08:00
colin
d9b8891803 refactor(chunk & batch proposer): optimize logging (#653) 2023-07-17 15:28:25 +08:00
ChuhanJin
6db2c0a8cb fix(bridge-history-api): crossMsgs can be empty (#646)
Co-authored-by: vincent <419436363@qq.com>
2023-07-14 17:39:37 +08:00
ChuhanJin
0caf0d4052 fix(bridge-history-api): fix claimable api sql issue (#645)
Co-authored-by: vincent <419436363@qq.com>
2023-07-14 15:26:31 +08:00
vyzo
95f2f7da0f fix(bridge): adjust gas fee cap in resumbitTransaction for rising basefee (#625)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-07-13 17:29:17 +08:00
Xi Lin
d2a1459768 fix(contracts): fix dropping message with nonce 0 (#640)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-13 16:48:45 +08:00
colin
f38dda8e02 feat(relayer): remove 'skipped' proving/rollup status and related code/tests (#642) 2023-07-13 16:39:02 +08:00
HAOYUatHZ
189ef09938 ci(github): only trigger build when merging (#641) 2023-07-13 09:07:26 +08:00
Péter Garamvölgyi
b79832566c feat(coordinator): Remove timestamp from roller protocol (#236) 2023-07-11 20:35:28 -07:00
Xi Lin
6841ef264c feat(contracts): add refund for skipped messages (#561)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-07-11 09:16:37 -07:00
ChuhanJin
425f74e763 feat(bridge-history-api): add new api to fetch all claimable txs under one address (#607)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-07-11 23:10:06 +08:00
georgehao
d59b2b4c41 refactor(coordinator): refactor task_prover's reward to decimal (#633)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2023-07-11 19:25:57 +08:00
Xi Lin
2323dd0daa fix(contracts): OZ-L1-H05 Users Can Lose Refund by Default (#605)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-07-10 10:02:58 -07:00
Péter Garamvölgyi
207d13c453 refactor(db): revise types in DB migrations (#631) 2023-07-10 14:39:25 +02:00
Péter Garamvölgyi
357173848a refactor(orm): set status fields explicitly during DB operations (#628)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-07-10 10:57:33 +02:00
Xi Lin
535ec91141 fix(contracts): OZ-L1-H07 L2 Standard ERC-20 Token Metadata Can Be Set Arbitrarily (#606)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-10 15:31:11 +08:00
Péter Garamvölgyi
8a0b526391 refactor(orm): Make ORM usage consistent (#627) 2023-07-10 09:18:08 +02:00
167 changed files with 5667 additions and 1024 deletions

View File

@@ -25,20 +25,20 @@ defaults:
working-directory: 'bridge-history-api'
jobs:
# check:
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
# steps:
# - name: Install Go
# uses: actions/setup-go@v2
# with:
# go-version: 1.19.x
# - name: Checkout code
# uses: actions/checkout@v2
# - name: Lint
# run: |
# rm -rf $HOME/.cache/golangci-lint
# make lint
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest

80
.github/workflows/prover_stats_api.yml vendored Normal file
View File

@@ -0,0 +1,80 @@
name: ProverStatsAPI
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
defaults:
run:
working-directory: 'prover-stats-api'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: prover-stats-api
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/prover-stats-api/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi

View File

@@ -29,6 +29,25 @@ jobs:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: roller
compile:
if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
@@ -47,13 +66,6 @@ jobs:
- name: Test
run: |
make roller
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: roller
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest

View File

@@ -39,6 +39,12 @@ var (
L2WithdrawERC721Sig common.Hash
L2WithdrawERC1155Sig common.Hash
// batch nft sigs
L1BatchDepositERC721Sig common.Hash
L1BatchDepositERC1155Sig common.Hash
L2BatchWithdrawERC721Sig common.Hash
L2BatchWithdrawERC1155Sig common.Hash
// scroll mono repo
// ScrollChainABI holds information about ScrollChain's context and available invokable methods.
@@ -116,6 +122,12 @@ func init() {
L2ERC1155GatewayABI, _ = L2ERC1155GatewayMetaData.GetAbi()
L2WithdrawERC1155Sig = L2ERC1155GatewayABI.Events["WithdrawERC1155"].ID
// batch nft events
L1BatchDepositERC721Sig = L1ERC721GatewayABI.Events["BatchDepositERC721"].ID
L1BatchDepositERC1155Sig = L1ERC1155GatewayABI.Events["BatchDepositERC1155"].ID
L2BatchWithdrawERC721Sig = L2ERC721GatewayABI.Events["BatchWithdrawERC721"].ID
L2BatchWithdrawERC1155Sig = L2ERC1155GatewayABI.Events["BatchWithdrawERC1155"].ID
// scroll monorepo
ScrollChainABI, _ = ScrollChainMetaData.GetAbi()
ScrollChainV2ABI, _ = ScrollChainV2MetaData.GetAbi()
@@ -277,6 +289,23 @@ type ERC1155MessageEvent struct {
Amount *big.Int
}
type BatchERC721MessageEvent struct {
L1Token common.Address
L2Token common.Address
From common.Address
To common.Address
TokenIDs []*big.Int
}
type BatchERC1155MessageEvent struct {
L1Token common.Address
L2Token common.Address
From common.Address
To common.Address
TokenIDs []*big.Int
TokenAmounts []*big.Int
}
// scroll monorepo
// L1SentMessageEvent represents a SentMessage event raised by the L1ScrollMessenger contract.

View File

@@ -24,26 +24,40 @@ var (
var database db.OrmFactory
func pong(ctx iris.Context) {
ctx.WriteString("pong")
_, err := ctx.WriteString("pong")
if err != nil {
log.Error("failed to write pong", "err", err)
}
}
func setupQueryByAddressHandler(backend_app *mvc.Application) {
func setupQueryByAddressHandler(backendApp *mvc.Application) {
// Register Dependencies.
backend_app.Register(
backendApp.Register(
database,
service.NewHistoryService,
)
// Register Controllers.
backend_app.Handle(new(controller.QueryAddressController))
backendApp.Handle(new(controller.QueryAddressController))
}
func setupQueryByHashHandler(backend_app *mvc.Application) {
backend_app.Register(
func setupQueryClaimableHandler(backendApp *mvc.Application) {
// Register Dependencies.
backendApp.Register(
database,
service.NewHistoryService,
)
backend_app.Handle(new(controller.QueryHashController))
// Register Controllers.
backendApp.Handle(new(controller.QueryClaimableController))
}
func setupQueryByHashHandler(backendApp *mvc.Application) {
backendApp.Register(
database,
service.NewHistoryService,
)
backendApp.Handle(new(controller.QueryHashController))
}
func init() {
@@ -76,13 +90,18 @@ func action(ctx *cli.Context) error {
if err != nil {
log.Crit("can not connect to database", "err", err)
}
defer database.Close()
defer func() {
if err = database.Close(); err != nil {
log.Error("failed to close database", "err", err)
}
}()
bridgeApp := iris.New()
bridgeApp.UseRouter(corsOptions)
bridgeApp.Get("/ping", pong).Describe("healthcheck")
mvc.Configure(bridgeApp.Party("/api/txs"), setupQueryByAddressHandler)
mvc.Configure(bridgeApp.Party("/api/txsbyhashes"), setupQueryByHashHandler)
mvc.Configure(bridgeApp.Party("/api/claimable"), setupQueryClaimableHandler)
// TODO: make debug mode configurable
err = bridgeApp.Listen(cfg.Server.HostPort, iris.WithLogLevel("debug"))

View File

@@ -12,8 +12,8 @@ import (
"github.com/urfave/cli/v2"
"bridge-history-api/config"
"bridge-history-api/cross_msg"
"bridge-history-api/cross_msg/message_proof"
"bridge-history-api/crossmsg"
"bridge-history-api/crossmsg/messageproof"
"bridge-history-api/db"
cutils "bridge-history-api/utils"
)
@@ -54,15 +54,20 @@ func action(ctx *cli.Context) error {
if err != nil {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
db, err := db.NewOrmFactory(cfg)
defer db.Close()
defer func() {
if deferErr := db.Close(); deferErr != nil {
log.Error("failed to close db", "err", err)
}
}()
if err != nil {
log.Crit("failed to connect to db", "config file", cfgFile, "error", err)
}
l1worker := &cross_msg.FetchEventWorker{F: cross_msg.L1FetchAndSaveEvents, G: cross_msg.GetLatestL1ProcessedHeight, Name: "L1 events fetch Worker"}
l1worker := &crossmsg.FetchEventWorker{F: crossmsg.L1FetchAndSaveEvents, G: crossmsg.GetLatestL1ProcessedHeight, Name: "L1 events fetch Worker"}
l2worker := &cross_msg.FetchEventWorker{F: cross_msg.L2FetchAndSaveEvents, G: cross_msg.GetLatestL2ProcessedHeight, Name: "L2 events fetch Worker"}
l2worker := &crossmsg.FetchEventWorker{F: crossmsg.L2FetchAndSaveEvents, G: crossmsg.GetLatestL2ProcessedHeight, Name: "L2 events fetch Worker"}
l1AddressList := []common.Address{
common.HexToAddress(cfg.L1.CustomERC20GatewayAddr),
@@ -84,7 +89,7 @@ func action(ctx *cli.Context) error {
common.HexToAddress(cfg.L2.WETHGatewayAddr),
}
l1crossMsgFetcher, err := cross_msg.NewCrossMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, cross_msg.L1ReorgHandling)
l1crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, crossmsg.L1ReorgHandling)
if err != nil {
log.Crit("failed to create l1 cross message fetcher", "error", err)
}
@@ -92,7 +97,7 @@ func action(ctx *cli.Context) error {
go l1crossMsgFetcher.Start()
defer l1crossMsgFetcher.Stop()
l2crossMsgFetcher, err := cross_msg.NewCrossMsgFetcher(subCtx, cfg.L2, db, l2client, l2worker, l2AddressList, cross_msg.L2ReorgHandling)
l2crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L2, db, l2client, l2worker, l2AddressList, crossmsg.L2ReorgHandling)
if err != nil {
log.Crit("failed to create l2 cross message fetcher", "error", err)
}
@@ -101,17 +106,17 @@ func action(ctx *cli.Context) error {
defer l2crossMsgFetcher.Stop()
// BlockTimestamp fetcher for l1 and l2
l1BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db.UpdateL1BlockTimestamp, db.GetL1EarliestNoBlockTimestampHeight)
l1BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db.UpdateL1BlockTimestamp, db.GetL1EarliestNoBlockTimestampHeight)
go l1BlockTimeFetcher.Start()
defer l1BlockTimeFetcher.Stop()
l2BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, db.UpdateL2BlockTimestamp, db.GetL2EarliestNoBlockTimestampHeight)
l2BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, db.UpdateL2BlockTimestamp, db.GetL2EarliestNoBlockTimestampHeight)
go l2BlockTimeFetcher.Start()
defer l2BlockTimeFetcher.Stop()
// Proof updater and batch fetcher
l2msgProofUpdater := message_proof.NewMsgProofUpdater(subCtx, cfg.L1.Confirmation, cfg.BatchInfoFetcher.BatchIndexStartBlock, db)
batchFetcher := cross_msg.NewBatchInfoFetcher(subCtx, common.HexToAddress(cfg.BatchInfoFetcher.ScrollChainAddr), cfg.BatchInfoFetcher.BatchIndexStartBlock, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db, l2msgProofUpdater)
l2msgProofUpdater := messageproof.NewMsgProofUpdater(subCtx, cfg.L1.Confirmation, cfg.BatchInfoFetcher.BatchIndexStartBlock, db)
batchFetcher := crossmsg.NewBatchInfoFetcher(subCtx, common.HexToAddress(cfg.BatchInfoFetcher.ScrollChainAddr), cfg.BatchInfoFetcher.BatchIndexStartBlock, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db, l2msgProofUpdater)
go batchFetcher.Start()
defer batchFetcher.Stop()

View File

@@ -6,6 +6,7 @@ import (
"path/filepath"
)
// BatchInfoFetcherConfig is the configuration of BatchInfoFetcher
type BatchInfoFetcherConfig struct {
BatchIndexStartBlock uint64 `json:"batchIndexStartBlock"`
ScrollChainAddr string `json:"ScrollChainAddr"`
@@ -21,6 +22,7 @@ type DBConfig struct {
MaxIdleNum int `json:"maxIdleNum"`
}
// LayerConfig is the configuration of Layer1/Layer2
type LayerConfig struct {
Confirmation uint64 `json:"confirmation"`
Endpoint string `json:"endpoint"`
@@ -35,6 +37,7 @@ type LayerConfig struct {
CustomERC20GatewayAddr string `json:"CustomERC20GatewayAddr"`
}
// ServerConfig is the configuration of the bridge history backend server port
type ServerConfig struct {
HostPort string `json:"hostPort"`
}

View File

@@ -7,14 +7,36 @@ import (
"github.com/ethereum/go-ethereum/common"
)
// QueryAddressController contains the query by address service
type QueryAddressController struct {
Service service.HistoryService
}
// QueryHashController contains the query by hash service
type QueryHashController struct {
Service service.HistoryService
}
// QueryClaimableController contains the query claimable txs service
type QueryClaimableController struct {
Service service.HistoryService
}
// Get defines the http get method behavior for QueryClaimableController
func (c *QueryClaimableController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
txs, total, err := c.Service.GetClaimableTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
if err != nil {
return &model.QueryByAddressResponse{Message: "500", Data: &model.Data{}}, err
}
return &model.QueryByAddressResponse{Message: "ok",
Data: &model.Data{
Result: txs,
Total: total,
}}, nil
}
// Get defines the http get method behavior for QueryAddressController
func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
message, total, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
if err != nil {
@@ -28,6 +50,7 @@ func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.Qu
}}, nil
}
// Post defines the http post method behavior for QueryHashController
func (c *QueryHashController) Post(req model.QueryByHashRequest) (*model.QueryByHashResponse, error) {
result, err := c.Service.GetTxsByHashes(req.Txs)
if err != nil {

View File

@@ -1,4 +1,4 @@
package cross_msg
package crossmsg
import (
"context"
@@ -8,11 +8,12 @@ import (
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"bridge-history-api/cross_msg/message_proof"
"bridge-history-api/crossmsg/messageproof"
"bridge-history-api/db"
"bridge-history-api/utils"
)
// BatchInfoFetcher fetches batch info from l1 chain and update db
type BatchInfoFetcher struct {
ctx context.Context
scrollChainAddr common.Address
@@ -21,10 +22,11 @@ type BatchInfoFetcher struct {
blockTimeInSec int
client *ethclient.Client
db db.OrmFactory
msgProofUpdater *message_proof.MsgProofUpdater
msgProofUpdater *messageproof.MsgProofUpdater
}
func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db db.OrmFactory, msgProofUpdater *message_proof.MsgProofUpdater) *BatchInfoFetcher {
// NewBatchInfoFetcher creates a new BatchInfoFetcher instance
func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db db.OrmFactory, msgProofUpdater *messageproof.MsgProofUpdater) *BatchInfoFetcher {
return &BatchInfoFetcher{
ctx: ctx,
scrollChainAddr: scrollChainAddr,
@@ -37,13 +39,14 @@ func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, ba
}
}
// Start the BatchInfoFetcher
func (b *BatchInfoFetcher) Start() {
log.Info("BatchInfoFetcher Start")
// Fetch batch info at beginning
// Then start msg proof updater after db have some bridge batch
err := b.fetchBatchInfo()
if err != nil {
log.Error("fetch batch info at begining failed: ", "err", err)
log.Error("fetch batch info at beginning failed: ", "err", err)
}
go b.msgProofUpdater.Start()
@@ -65,6 +68,7 @@ func (b *BatchInfoFetcher) Start() {
}()
}
// Stop the BatchInfoFetcher and call msg proof updater to stop
func (b *BatchInfoFetcher) Stop() {
log.Info("BatchInfoFetcher Stop")
b.msgProofUpdater.Stop()

View File

@@ -1,4 +1,4 @@
package cross_msg
package crossmsg
import (
"context"
@@ -9,9 +9,13 @@ import (
"github.com/ethereum/go-ethereum/log"
)
// GetEarliestNoBlockTimestampHeightFunc is a function type that gets the earliest record without block timestamp from database
type GetEarliestNoBlockTimestampHeightFunc func() (uint64, error)
// UpdateBlockTimestampFunc is a function type that updates block timestamp into database
type UpdateBlockTimestampFunc func(height uint64, timestamp time.Time) error
// BlockTimestampFetcher fetches block timestamp from blockchain and saves them to database
type BlockTimestampFetcher struct {
ctx context.Context
confirmation uint64
@@ -21,6 +25,7 @@ type BlockTimestampFetcher struct {
getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc
}
// NewBlockTimestampFetcher creates a new BlockTimestampFetcher instance
func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTimeInSec int, client *ethclient.Client, updateBlockTimestampFunc UpdateBlockTimestampFunc, getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc) *BlockTimestampFetcher {
return &BlockTimestampFetcher{
ctx: ctx,
@@ -32,6 +37,7 @@ func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTim
}
}
// Start the BlockTimestampFetcher
func (b *BlockTimestampFetcher) Start() {
go func() {
tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
@@ -73,6 +79,7 @@ func (b *BlockTimestampFetcher) Start() {
}()
}
// Stop the BlockTimestampFetcher and log the info
func (b *BlockTimestampFetcher) Stop() {
log.Info("BlockTimestampFetcher Stop")
}

View File

@@ -1,4 +1,4 @@
package cross_msg
package crossmsg
import (
"context"
@@ -18,7 +18,8 @@ import (
"bridge-history-api/utils"
)
type CrossMsgFetcher struct {
// MsgFetcher fetches cross message events from blockchain and saves them to database
type MsgFetcher struct {
ctx context.Context
config *config.LayerConfig
db db.OrmFactory
@@ -32,8 +33,9 @@ type CrossMsgFetcher struct {
reorgEndCh chan struct{}
}
func NewCrossMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFactory, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*CrossMsgFetcher, error) {
crossMsgFetcher := &CrossMsgFetcher{
// NewMsgFetcher creates a new MsgFetcher instance
func NewMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFactory, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*MsgFetcher, error) {
msgFetcher := &MsgFetcher{
ctx: ctx,
config: config,
db: db,
@@ -45,11 +47,12 @@ func NewCrossMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.O
reorgStartCh: make(chan struct{}),
reorgEndCh: make(chan struct{}),
}
return crossMsgFetcher, nil
return msgFetcher, nil
}
func (c *CrossMsgFetcher) Start() {
log.Info("CrossMsgFetcher Start")
// Start the MsgFetcher
func (c *MsgFetcher) Start() {
log.Info("MsgFetcher Start")
// fetch missing events from finalized blocks, we don't handle reorgs here
c.forwardFetchAndSaveMissingEvents(c.config.Confirmation)
@@ -94,12 +97,13 @@ func (c *CrossMsgFetcher) Start() {
}()
}
func (c *CrossMsgFetcher) Stop() {
log.Info("CrossMsgFetcher Stop")
// Stop the MsgFetcher and log the info
func (c *MsgFetcher) Stop() {
log.Info("MsgFetcher Stop")
}
// forwardFetchAndSaveMissingEvents will fetch all events from the latest processed height to the latest block number.
func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
func (c *MsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
// if we fetch to the latest block, shall not exceed cachedHeaders
var number uint64
var err error
@@ -124,7 +128,7 @@ func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64)
if processedHeight <= 0 || processedHeight < int64(c.config.StartHeight) {
processedHeight = int64(c.config.StartHeight)
} else {
processedHeight += 1
processedHeight++
}
for from := processedHeight; from <= int64(number); from += fetchLimit {
to := from + fetchLimit - 1
@@ -139,7 +143,7 @@ func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64)
}
}
func (c *CrossMsgFetcher) fetchMissingLatestHeaders() {
func (c *MsgFetcher) fetchMissingLatestHeaders() {
var start int64
number, err := c.client.BlockNumber(c.ctx)
if err != nil {
@@ -159,7 +163,7 @@ func (c *CrossMsgFetcher) fetchMissingLatestHeaders() {
close(c.reorgEndCh)
return
default:
header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(int64(i)))
header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(i))
if err != nil {
log.Error("failed to get latest header", "err", err)
return
@@ -181,9 +185,9 @@ func (c *CrossMsgFetcher) fetchMissingLatestHeaders() {
c.mu.Lock()
index, ok, validHeaders := BackwardFindReorgBlock(c.ctx, c.cachedHeaders, c.client, header)
if !ok {
log.Error("Reorg happended too earlier than cached headers", "reorg height", header.Number)
num, err := utils.GetSafeBlockNumber(c.ctx, c.client, c.config.Confirmation)
if err != nil {
log.Error("Reorg happened too earlier than cached headers", "reorg height", header.Number)
num, getSafeErr := utils.GetSafeBlockNumber(c.ctx, c.client, c.config.Confirmation)
if getSafeErr != nil {
log.Crit("Can not get safe number during reorg, quit the process", "err", err)
}
// clear all our saved data, because no data is safe now

View File

@@ -1,4 +1,4 @@
package cross_msg
package crossmsg
import (
"context"
@@ -25,14 +25,15 @@ type FetchAndSave func(ctx context.Context, client *ethclient.Client, database d
// GetLatestProcessed is a function type that gets the latest processed block height from database
type GetLatestProcessed func(db db.OrmFactory) (int64, error)
type UpdateXHash func(ctx context.Context)
// FetchEventWorker defines worker with fetch and save function, processed number getter, and name
type FetchEventWorker struct {
F FetchAndSave
G GetLatestProcessed
Name string
}
// GetLatestL1ProcessedHeight gets the latest processed L1 height
func GetLatestL1ProcessedHeight(db db.OrmFactory) (int64, error) {
crossHeight, err := db.GetLatestL1ProcessedHeight()
if err != nil {
@@ -46,11 +47,11 @@ func GetLatestL1ProcessedHeight(db db.OrmFactory) (int64, error) {
}
if crossHeight > relayedHeight {
return crossHeight, nil
} else {
return relayedHeight, nil
}
return relayedHeight, nil
}
// GetLatestL2ProcessedHeight gets the latest processed L2 height
func GetLatestL2ProcessedHeight(db db.OrmFactory) (int64, error) {
crossHeight, err := db.GetLatestL2ProcessedHeight()
if err != nil {
@@ -77,6 +78,7 @@ func GetLatestL2ProcessedHeight(db db.OrmFactory) (int64, error) {
return maxHeight, nil
}
// L1FetchAndSaveEvents fetches and saves events on L1
func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
@@ -110,19 +112,25 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
}
err = database.BatchInsertL1CrossMsgDBTx(dbTx, depositL1CrossMsgs)
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Crit("l1FetchAndSaveEvents: Failed to insert cross msg event logs", "err", err)
}
err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsg)
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Crit("l1FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we can not insert into DB, there must be something wrong, need an on-call member to handle the database manually
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Error("l1FetchAndSaveEvents: Failed to commit db transaction", "err", err)
return err
}
@@ -130,6 +138,7 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
return nil
}
// L2FetchAndSaveEvents fetches and saves events on L2
func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
@@ -164,26 +173,34 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
}
err = database.BatchInsertL2CrossMsgDBTx(dbTx, depositL2CrossMsgs)
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Crit("l2FetchAndSaveEvents: Failed to insert cross msg event logs", "err", err)
}
err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsg)
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
}
err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs)
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we can not insert into DB, there must be something wrong, need an on-call member to handle the database manually
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Error("l2FetchAndSaveEvents: Failed to commit db transaction", "err", err)
return err
}
@@ -191,6 +208,7 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
return nil
}
// FetchAndSaveBatchIndex fetches and saves batch index
func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, scrollChainAddr common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
@@ -217,13 +235,17 @@ func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, datab
}
err = database.BatchInsertRollupBatchDBTx(dbTx, rollupBatches)
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Crit("FetchAndSaveBatchIndex: Failed to insert batch commit msg event logs", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we can not insert into DB, there must be something wrong, need an on-call member to handle the database manually
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Error("FetchAndSaveBatchIndex: Failed to commit db transaction", "err", err)
return err
}

View File

@@ -1,4 +1,4 @@
package message_proof
package messageproof
import (
"context"
@@ -14,12 +14,14 @@ import (
"bridge-history-api/db/orm"
)
// MsgProofUpdater is used to update message proof in db
type MsgProofUpdater struct {
ctx context.Context
db db.OrmFactory
withdrawTrie *WithdrawTrie
}
// NewMsgProofUpdater new MsgProofUpdater instance
func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock uint64, db db.OrmFactory) *MsgProofUpdater {
return &MsgProofUpdater{
ctx: ctx,
@@ -28,6 +30,7 @@ func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock ui
}
}
// Start the MsgProofUpdater
func (m *MsgProofUpdater) Start() {
log.Info("MsgProofUpdater Start")
m.initialize(m.ctx)
@@ -83,6 +86,7 @@ func (m *MsgProofUpdater) Start() {
}
// Stop the MsgProofUpdater
func (m *MsgProofUpdater) Stop() {
log.Info("MsgProofUpdater Stop")
}
@@ -112,7 +116,7 @@ func (m *MsgProofUpdater) initializeWithdrawTrie() error {
return fmt.Errorf("failed to get first l2 message: %v", err)
}
// no l2 message
// TO DO: check if we realy dont have l2 sent message with nonce 0
// TO DO: check if we really dont have l2 sent message with nonce 0
if firstMsg == nil {
log.Info("No first l2sentmsg in db")
return nil
@@ -183,7 +187,7 @@ func (m *MsgProofUpdater) updateMsgProof(msgs []*orm.L2SentMsg, proofs [][]byte,
if len(msgs) == 0 {
return nil
}
// this should not happend, but double checked
// this should not happen, but double check
if len(msgs) != len(proofs) {
return fmt.Errorf("illegal state: len(msgs) != len(proofs)")
}

View File

@@ -1,4 +1,4 @@
package message_proof
package messageproof
import (
"github.com/ethereum/go-ethereum/common"

View File

@@ -1,4 +1,4 @@
package message_proof
package messageproof
import (
"math/big"

View File

@@ -1,4 +1,4 @@
package cross_msg_test
package crossmsg_test
import (
"crypto/rand"
@@ -9,7 +9,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"bridge-history-api/cross_msg"
"bridge-history-api/crossmsg"
)
func TestMergeIntoList(t *testing.T) {
@@ -18,7 +18,7 @@ func TestMergeIntoList(t *testing.T) {
assert.Equal(t, headers[0].Hash(), headers[1].ParentHash)
headers2, err := generateHeaders(18)
assert.NoError(t, err)
result := cross_msg.MergeAddIntoHeaderList(headers, headers2, 64)
result := crossmsg.MergeAddIntoHeaderList(headers, headers2, 64)
assert.Equal(t, 64, len(result))
assert.Equal(t, headers2[len(headers2)-1], result[len(result)-1])
assert.NotEqual(t, headers[0], result[0])
@@ -53,7 +53,7 @@ func generateHeaders(amount int) ([]*types.Header, error) {
Time: uint64(i * 15),
Extra: []byte{},
MixDigest: common.Hash{},
Nonce: types.EncodeNonce(uint64(nonce.Uint64())),
Nonce: types.EncodeNonce(nonce.Uint64()),
}
headers[i] = header
}

View File

@@ -1,4 +1,4 @@
package cross_msg
package crossmsg
import (
"context"
@@ -10,6 +10,7 @@ import (
"bridge-history-api/db"
)
// ReorgHandling handles reorg function type
type ReorgHandling func(ctx context.Context, reorgHeight int64, db db.OrmFactory) error
func reverseArray(arr []*types.Header) []*types.Header {
@@ -20,10 +21,12 @@ func reverseArray(arr []*types.Header) []*types.Header {
return arr
}
// IsParentAndChild match the child header ParentHash with parent header Hash
func IsParentAndChild(parentHeader *types.Header, header *types.Header) bool {
return header.ParentHash == parentHeader.Hash()
}
// MergeAddIntoHeaderList merges two header lists, if exceed the max length then drop the oldest entries
func MergeAddIntoHeaderList(baseArr, extraArr []*types.Header, maxLength int) []*types.Header {
mergedArr := append(baseArr, extraArr...)
if len(mergedArr) <= maxLength {
@@ -34,11 +37,12 @@ func MergeAddIntoHeaderList(baseArr, extraArr []*types.Header, maxLength int) []
return mergedArr[startIndex:]
}
func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client *ethclient.Client, header *types.Header) (int, bool, []*types.Header) {
// BackwardFindReorgBlock finds the reorg block by backward search
func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client *ethclient.Client, lastHeader *types.Header) (int, bool, []*types.Header) {
maxStep := len(headers)
backwardHeaderList := []*types.Header{header}
backwardHeaderList := []*types.Header{lastHeader}
for iterRound := 0; iterRound < maxStep; iterRound++ {
header, err := client.HeaderByHash(ctx, header.ParentHash)
header, err := client.HeaderByHash(ctx, lastHeader.ParentHash)
if err != nil {
log.Error("BackwardFindReorgBlock failed", "error", err)
return -1, false, nil
@@ -50,10 +54,12 @@ func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client
return j, true, backwardHeaderList
}
}
lastHeader = header
}
return -1, false, nil
}
// L1ReorgHandling handles l1 reorg
func L1ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) error {
dbTx, err := db.Beginx()
if err != nil {
@@ -61,47 +67,64 @@ func L1ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) e
}
err = db.DeleteL1CrossMsgAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Crit("delete l1 cross msg from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL1RelayedHashAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Crit("delete l1 relayed hash from height", "height", reorgHeight, "err", err)
}
err = dbTx.Commit()
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Error("commit tx failed", "err", err)
return err
}
return nil
}
// L2ReorgHandling handles l2 reorg
func L2ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) error {
dbTx, err := db.Beginx()
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Crit("begin db tx failed", "err", err)
}
err = db.DeleteL2CrossMsgFromHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Crit("delete l2 cross msg from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL2RelayedHashAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Crit("delete l2 relayed hash from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL2SentMsgAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Crit("delete l2 sent msg from height", "height", reorgHeight, "err", err)
}
err = dbTx.Commit()
if err != nil {
dbTx.Rollback()
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
log.Error("commit tx failed", "err", err)
return err
}

View File

@@ -19,6 +19,8 @@ CREATE INDEX idx_l1_msg_relayed_msg ON relayed_msg (layer1_hash, deleted_at);
CREATE INDEX idx_l2_msg_relayed_msg ON relayed_msg (layer2_hash, deleted_at);
CREATE INDEX idx_msg_hash_deleted_at_relayed_msg on relayed_msg (msg_hash, deleted_at);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN

View File

@@ -4,6 +4,7 @@ create table l2_sent_msg
(
id BIGSERIAL PRIMARY KEY,
original_sender VARCHAR NOT NULL DEFAULT '',
tx_hash VARCHAR NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
@@ -24,6 +25,8 @@ on l2_sent_msg (msg_hash) where deleted_at IS NULL;
create unique index uk_nonce
on l2_sent_msg (nonce) where deleted_at IS NULL;
CREATE INDEX idx_msg_hash_deleted_at_l2_sent_msg on l2_sent_msg (msg_hash, deleted_at);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN

View File

@@ -11,6 +11,7 @@ type rollupBatchOrm struct {
db *sqlx.DB
}
// RollupBatch is the struct for rollup_batch table
type RollupBatch struct {
ID uint64 `json:"id" db:"id"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"`

View File

@@ -8,7 +8,10 @@ import (
"github.com/jmoiron/sqlx"
)
// AssetType can be ETH/ERC20/ERC1155/ERC721
type AssetType int
// MsgType can be layer1/layer2 msg
type MsgType int
func (a AssetType) String() string {
@@ -26,15 +29,22 @@ func (a AssetType) String() string {
}
const (
// ETH = 0
ETH AssetType = iota
// ERC20 = 1
ERC20
// ERC721 = 2
ERC721
// ERC1155 = 3
ERC1155
)
const (
// UnknownMsg = 0
UnknownMsg MsgType = iota
// Layer1Msg = 1
Layer1Msg
// Layer2Msg = 2
Layer2Msg
)
@@ -86,17 +96,20 @@ type L2CrossMsgOrm interface {
DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error
UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error
GetL2EarliestNoBlockTimestampHeight() (uint64, error)
GetL2CrossMsgByMsgHashList(msgHashList []string) ([]*CrossMsg, error)
}
// RelayedMsgOrm provides operations on relayed_msg table
type RelayedMsgOrm interface {
BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*RelayedMsg) error
GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error)
GetRelayedMsgByHash(msgHash string) (*RelayedMsg, error)
GetLatestRelayedHeightOnL1() (int64, error)
GetLatestRelayedHeightOnL2() (int64, error)
DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
}
// L2SentMsgOrm provides operations on l2_sent_msg table
type L2SentMsgOrm interface {
BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2SentMsg) error
GetL2SentMsgByHash(l2Hash string) (*L2SentMsg, error)
@@ -104,11 +117,14 @@ type L2SentMsgOrm interface {
GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error)
GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error)
GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error)
UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error
UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batchIndex uint64) error
GetLatestL2SentMsgBatchIndex() (int64, error)
GetClaimableL2SentMsgByAddressWithOffset(address string, offset int64, limit int64) ([]*L2SentMsg, error)
GetClaimableL2SentMsgByAddressTotalNum(address string) (uint64, error)
DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
}
// RollupBatchOrm provides operations on rollup_batch table
type RollupBatchOrm interface {
GetLatestRollupBatch() (*RollupBatch, error)
GetRollupBatchByIndex(index uint64) (*RollupBatch, error)

View File

@@ -37,7 +37,14 @@ func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, erro
func (l *l1CrossMsgOrm) GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = 1 AND deleted_at IS NULL;`, sender.String(), Layer1Msg)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {

View File

@@ -9,6 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
)
type l2CrossMsgOrm struct {
@@ -37,7 +38,14 @@ func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, erro
func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND deleted_at IS NULL;`, sender.String(), Layer2Msg)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {
@@ -140,3 +148,30 @@ func (l *l2CrossMsgOrm) GetL2EarliestNoBlockTimestampHeight() (uint64, error) {
}
return result, nil
}
func (l *l2CrossMsgOrm) GetL2CrossMsgByMsgHashList(msgHashList []string) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE msg_hash = ANY($1) AND msg_type = $2 AND deleted_at IS NULL;`, pq.Array(msgHashList), Layer2Msg)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, err
}
if len(results) == 0 {
log.Debug("no L2CrossMsg under given msg hashes", "msg hash list", msgHashList)
}
return results, nil
}

View File

@@ -9,9 +9,11 @@ import (
"github.com/jmoiron/sqlx"
)
// L2SentMsg defines the struct for l2_sent_msg table record
type L2SentMsg struct {
ID uint64 `json:"id" db:"id"`
OriginalSender string `json:"original_sender" db:"original_sender"`
TxHash string `json:"tx_hash" db:"tx_hash"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
@@ -53,6 +55,7 @@ func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2Sen
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"original_sender": msg.OriginalSender,
"tx_hash": msg.TxHash,
"sender": msg.Sender,
"target": msg.Target,
"value": msg.Value,
@@ -64,7 +67,7 @@ func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2Sen
"msg_data": msg.MsgData,
}
}
_, err = dbTx.NamedExec(`insert into l2_sent_msg(original_sender, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:original_sender, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
_, err = dbTx.NamedExec(`insert into l2_sent_msg(original_sender, tx_hash, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:original_sender, :tx_hash, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
if err != nil {
log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err)
return err
@@ -87,8 +90,8 @@ func (l *l2SentMsgOrm) GetLatestSentMsgHeightOnL2() (int64, error) {
return 0, nil
}
func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND deleted_at IS NULL;"), proof, batch_index, msgHash); err != nil {
func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batchIndex uint64) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND deleted_at IS NULL;"), proof, batchIndex, msgHash); err != nil {
return err
}
return nil
@@ -115,6 +118,11 @@ func (l *l2SentMsgOrm) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight u
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() {
msg := &L2SentMsg{}
if err = rows.StructScan(msg); err != nil {
@@ -149,3 +157,33 @@ func (l *l2SentMsgOrm) DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int6
_, err := dbTx.Exec(`UPDATE l2_sent_msg SET deleted_at = current_timestamp WHERE height > $1;`, height)
return err
}
func (l *l2SentMsgOrm) GetClaimableL2SentMsgByAddressWithOffset(address string, offset int64, limit int64) ([]*L2SentMsg, error) {
var results []*L2SentMsg
rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) ORDER BY id DESC LIMIT $2 OFFSET $3;`, address, limit, offset)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() {
msg := &L2SentMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
return results, err
}
func (l *l2SentMsgOrm) GetClaimableL2SentMsgByAddressTotalNum(address string) (uint64, error) {
var count uint64
row := l.db.QueryRowx(`SELECT COUNT(*) FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1);`, address)
if err := row.Scan(&count); err != nil {
return 0, err
}
return count, nil
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/jmoiron/sqlx"
)
// RelayedMsg is the struct for relayed_msg table
type RelayedMsg struct {
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
@@ -46,9 +47,9 @@ func (l *relayedMsgOrm) BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*Rel
return nil
}
func (l *relayedMsgOrm) GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error) {
func (l *relayedMsgOrm) GetRelayedMsgByHash(msgHash string) (*RelayedMsg, error) {
result := &RelayedMsg{}
row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msg_hash)
row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msgHash)
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil

View File

@@ -4,6 +4,7 @@ import (
"database/sql"
"errors"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //nolint:golint
@@ -82,6 +83,11 @@ func (o *ormFactory) GetCrossMsgsByAddressWithOffset(sender string, offset int64
if err != nil || rows == nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() {
msg := &orm.CrossMsg{}
if err = rows.StructScan(msg); err != nil {

View File

@@ -1,11 +1,13 @@
package model
// QueryByAddressRequest the request parameter of address api
type QueryByAddressRequest struct {
Address string `url:"address"`
Offset int `url:"offset"`
Limit int `url:"limit"`
}
// QueryByHashRequest the request parameter of hash api
type QueryByHashRequest struct {
Txs []string `url:"txs"`
}

View File

@@ -2,16 +2,19 @@ package model
import "bridge-history-api/service"
// Data the return struct of apis
type Data struct {
Result []*service.TxHistoryInfo `json:"result"`
Total uint64 `json:"total"`
}
// QueryByAddressResponse the schema of address api response
type QueryByAddressResponse struct {
Message string `json:"message"`
Data *Data `json:"data"`
}
// QueryByHashResponse the schema of hash api response
type QueryByHashResponse struct {
Message string `json:"message"`
Data *Data `json:"data"`

View File

@@ -11,6 +11,7 @@ import (
"bridge-history-api/db/orm"
)
// Finalized the schema of tx finalized infos
type Finalized struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
@@ -20,6 +21,7 @@ type Finalized struct {
BlockTimestamp *time.Time `json:"blockTimestamp"` // uselesss
}
// UserClaimInfo the schema of tx claim infos
type UserClaimInfo struct {
From string `json:"from"`
To string `json:"to"`
@@ -31,6 +33,7 @@ type UserClaimInfo struct {
BatchIndex string `json:"batch_index"`
}
// TxHistoryInfo the schema of tx history infos
type TxHistoryInfo struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
@@ -47,6 +50,7 @@ type TxHistoryInfo struct {
type HistoryService interface {
GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error)
GetClaimableTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
}
// NewHistoryService returns a service backed with a "db"
@@ -60,6 +64,7 @@ type historyBackend struct {
db db.OrmFactory
}
// GetCrossTxClaimInfo get UserClaimInfos by address
func GetCrossTxClaimInfo(msgHash string, db db.OrmFactory) *UserClaimInfo {
l2sentMsg, err := db.GetL2SentMsgByHash(msgHash)
if err != nil {
@@ -106,6 +111,50 @@ func updateCrossTxHash(msgHash string, txInfo *TxHistoryInfo, db db.OrmFactory)
}
// GetClaimableTxsByAddress get all claimable txs under given address
func (h *historyBackend) GetClaimableTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
var txHistories []*TxHistoryInfo
total, err := h.db.GetClaimableL2SentMsgByAddressTotalNum(address.Hex())
if err != nil || total == 0 {
return txHistories, 0, err
}
results, err := h.db.GetClaimableL2SentMsgByAddressWithOffset(address.Hex(), offset, limit)
if err != nil || len(results) == 0 {
return txHistories, 0, err
}
var msgHashList []string
for _, result := range results {
msgHashList = append(msgHashList, result.MsgHash)
}
crossMsgs, err := h.db.GetL2CrossMsgByMsgHashList(msgHashList)
// crossMsgs can be empty, because they can be emitted by user directly call contract
if err != nil {
return txHistories, 0, err
}
crossMsgMap := make(map[string]*orm.CrossMsg)
for _, crossMsg := range crossMsgs {
crossMsgMap[crossMsg.MsgHash] = crossMsg
}
for _, result := range results {
txInfo := &TxHistoryInfo{
Hash: result.TxHash,
IsL1: false,
BlockNumber: result.Height,
FinalizeTx: &Finalized{},
ClaimInfo: GetCrossTxClaimInfo(result.MsgHash, h.db),
}
if crossMsg, exist := crossMsgMap[result.MsgHash]; exist {
txInfo.Amount = crossMsg.Amount
txInfo.To = crossMsg.Target
txInfo.BlockTimestamp = crossMsg.Timestamp
txInfo.CreatedAt = crossMsg.CreatedAt
}
txHistories = append(txHistories, txInfo)
}
return txHistories, total, err
}
// GetTxsByAddress get all txs under given address
func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
var txHistories []*TxHistoryInfo
total, err := h.db.GetTotalCrossMsgCountByAddress(address.String())
@@ -137,6 +186,7 @@ func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, l
return txHistories, total, nil
}
// GetTxsByHashes get tx infos under given tx hashes
func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error) {
txHistories := make([]*TxHistoryInfo, 0)
for _, hash := range hashes {

View File

@@ -2,6 +2,8 @@ package utils
import (
"context"
"math/big"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
@@ -12,6 +14,7 @@ import (
"bridge-history-api/db/orm"
)
// CachedParsedTxCalldata store parsed batch infos
type CachedParsedTxCalldata struct {
CallDataIndex uint64
BatchIndices []uint64
@@ -19,6 +22,7 @@ type CachedParsedTxCalldata struct {
EndBlocks []uint64
}
// ParseBackendL1EventLogs parses L1 watched events
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, error) {
// Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up
@@ -108,7 +112,43 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
}
// since every deposit event will emit after a sent event, so can use this msg_hash as next withdraw event's msg_hash
msgHash = ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).Hex()
case backendabi.L1BatchDepositERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "BatchDepositERC721", vlog)
if err != nil {
log.Warn("Failed to unpack BatchDepositERC721 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
MsgHash: msgHash,
})
case backendabi.L1BatchDepositERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack BatchDepositERC1155 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
MsgHash: msgHash,
})
case backendabi.L1RelayedMessageEventSignature:
event := backendabi.L1RelayedMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
@@ -128,6 +168,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
return l1CrossMsg, relayedMsgs, nil
}
// ParseBackendL2EventLogs parses L2 watched events
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
// Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up
@@ -172,6 +213,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2WithdrawERC721Sig:
event := backendabi.ERC721MessageEvent{}
@@ -190,6 +232,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
@@ -209,6 +252,46 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2BatchWithdrawERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack BatchWithdrawERC721 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2BatchWithdrawERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack BatchWithdrawERC1155 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2SentMessageEventSignature:
event := backendabi.L2SentMessageEvent{}
@@ -222,6 +305,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
l2SentMsgs = append(l2SentMsgs,
&orm.L2SentMsg{
Sender: event.Sender.Hex(),
TxHash: vlog.TxHash.Hex(),
Target: event.Target.Hex(),
Value: event.Value.String(),
MsgHash: msgHash.Hex(),
@@ -247,6 +331,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
return l2CrossMsg, relayedMsgs, l2SentMsgs, nil
}
// ParseBatchInfoFromScrollChain parses ScrollChain events
func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
var rollupBatches []*orm.RollupBatch
cache := make(map[string]CachedParsedTxCalldata)
@@ -303,3 +388,13 @@ func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client
}
return rollupBatches, nil
}
func convertBigIntArrayToString(array []*big.Int) string {
stringArray := make([]string, len(array))
for i, num := range array {
stringArray[i] = num.String()
}
result := strings.Join(stringArray, ", ")
return result
}

View File

@@ -22,6 +22,7 @@ func Keccak2(a common.Hash, b common.Hash) common.Hash {
return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
}
// GetSafeBlockNumber get the safe block number, which is the current block number minus the confirmations
func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
number, err := client.BlockNumber(ctx)
if err != nil || number <= confirmations {

View File

@@ -40,7 +40,7 @@ type ChunkProposerConfig struct {
type BatchProposerConfig struct {
MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint64 `json:"max_l1_commit_calldata_size_per_batch"`
MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
}

View File

@@ -52,7 +52,7 @@ type Layer1Relayer struct {
gasPriceDiff uint64
l1MessageOrm *orm.L1Message
l1Block *orm.L1Block
l1BlockOrm *orm.L1Block
}
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
@@ -90,7 +90,7 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
l1Relayer := &Layer1Relayer{
ctx: ctx,
l1MessageOrm: orm.NewL1Message(db),
l1Block: orm.NewL1Block(db),
l1BlockOrm: orm.NewL1Block(db),
messageSender: messageSender,
l2MessengerABI: bridgeAbi.L2ScrollMessengerABI,
@@ -159,13 +159,13 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
// ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() {
latestBlockHeight, err := r.l1Block.GetLatestL1BlockHeight()
latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
return
}
blocks, err := r.l1Block.GetL1Blocks(map[string]interface{}{
blocks, err := r.l1BlockOrm.GetL1Blocks(r.ctx, map[string]interface{}{
"number": latestBlockHeight,
})
if err != nil {
@@ -197,7 +197,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return
}
err = r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return
@@ -232,14 +232,14 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
}

View File

@@ -128,8 +128,8 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
l1BlockOrm := orm.NewL1Block(db)
l1Block := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1},
{Hash: "gas-oracle-1", Number: 0, GasOracleStatus: int16(types.GasOraclePending)},
{Hash: "gas-oracle-2", Number: 1, GasOracleStatus: int16(types.GasOraclePending)},
}
// Insert test data.
assert.NoError(t, l1BlockOrm.InsertL1Blocks(context.Background(), l1Block))
@@ -153,8 +153,8 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
msg1, err1 := l1BlockOrm.GetL1Blocks(map[string]interface{}{"hash": "gas-oracle-1"})
msg2, err2 := l1BlockOrm.GetL1Blocks(map[string]interface{}{"hash": "gas-oracle-2"})
msg1, err1 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-1"})
msg2, err2 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-2"})
return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported &&
err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleFailed
})
@@ -175,28 +175,28 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
var l1BlockOrm *orm.L1Block
convey.Convey("GetLatestL1BlockHeight failure", t, func() {
targetErr := errors.New("GetLatestL1BlockHeight error")
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func() (uint64, error) {
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) {
return 0, targetErr
})
defer patchGuard.Reset()
l1Relayer.ProcessGasPriceOracle()
})
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func() (uint64, error) {
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) {
return 100, nil
})
defer patchGuard.Reset()
convey.Convey("GetL1Blocks failure", t, func() {
targetErr := errors.New("GetL1Blocks error")
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
return nil, targetErr
})
l1Relayer.ProcessGasPriceOracle()
})
convey.Convey("Block not exist", t, func() {
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
tmpInfo := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1},
@@ -206,12 +206,12 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
l1Relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
tmpInfo := []orm.L1Block{
{
Hash: "gas-oracle-1",
Number: 0,
GasOracleStatus: int(types.GasOraclePending),
GasOracleStatus: int16(types.GasOraclePending),
},
}
return tmpInfo, nil

View File

@@ -29,7 +29,6 @@ var (
bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesSkippedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
)
// Layer2Relayer is responsible for
@@ -394,15 +393,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// ProcessCommittedBatches submit proof to layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() {
// set skipped batches in a single db operation
if count, err := r.batchOrm.UpdateSkippedBatches(r.ctx); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
} else if count > 0 {
bridgeL2BatchesSkippedTotalCounter.Inc(int64(count))
log.Info("Skipping batches", "count", count)
}
// retrieves the earliest batch whose rollup status is 'committed'
fields := map[string]interface{}{
"rollup_status": types.RollupCommitted,
@@ -430,11 +420,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case types.ProvingTaskFailed, types.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
success := false
@@ -454,8 +439,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash)
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Info("Failed to upload the proof, change rollup status to RollupFinalizeFailed", "hash", hash)
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizeFailed); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
}
@@ -510,9 +495,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
r.processingFinalization.Store(txID, hash)
default:
log.Error("encounter unreachable case in ProcessCommittedBatches",
"block_status", status,
)
log.Error("encounter unreachable case in ProcessCommittedBatches", "proving status", status)
}
}

View File

@@ -90,7 +90,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
assert.Equal(t, types.RollupFinalizeFailed, statuses[0])
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err)
@@ -108,67 +108,6 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
assert.Equal(t, types.RollupFinalizing, statuses[0])
}
func testL2RelayerSkipBatches(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus) string {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, rollupStatus)
assert.NoError(t, err)
proof := &message.AggProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, provingStatus)
assert.NoError(t, err)
return batch.Hash
}
skipped := []string{
createBatch(types.RollupCommitted, types.ProvingTaskSkipped),
createBatch(types.RollupCommitted, types.ProvingTaskFailed),
}
notSkipped := []string{
createBatch(types.RollupPending, types.ProvingTaskSkipped),
createBatch(types.RollupCommitting, types.ProvingTaskSkipped),
createBatch(types.RollupFinalizing, types.ProvingTaskSkipped),
createBatch(types.RollupFinalized, types.ProvingTaskSkipped),
createBatch(types.RollupPending, types.ProvingTaskFailed),
createBatch(types.RollupCommitting, types.ProvingTaskFailed),
createBatch(types.RollupFinalizing, types.ProvingTaskFailed),
createBatch(types.RollupFinalized, types.ProvingTaskFailed),
createBatch(types.RollupCommitted, types.ProvingTaskVerified),
}
relayer.ProcessCommittedBatches()
for _, id := range skipped {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{id})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
}
for _, id := range notSkipped {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{id})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.NotEqual(t, types.RollupFinalizationSkipped, statuses[0])
}
}
func testL2RelayerRollupConfirm(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)

View File

@@ -97,7 +97,6 @@ func TestFunctions(t *testing.T) {
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)

View File

@@ -343,6 +343,20 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
if gasTipCap.Cmp(feeData.gasTipCap) < 0 {
gasTipCap = feeData.gasTipCap
}
// adjust for rising basefee
adjBaseFee := big.NewInt(0)
if feeGas := atomic.LoadUint64(&s.baseFeePerGas); feeGas != 0 {
adjBaseFee.SetUint64(feeGas)
}
adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
currentGasFeeCap := new(big.Int).Add(gasTipCap, adjBaseFee)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}
// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}

View File

@@ -65,6 +65,7 @@ func TestSender(t *testing.T) {
t.Run("test min gas limit", testMinGasLimit)
t.Run("test resubmit transaction", testResubmitTransaction)
t.Run("test resubmit transaction with rising base fee", testResubmitTransactionWithRisingBaseFee)
t.Run("test check pending transaction", testCheckPendingTransaction)
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
@@ -154,6 +155,43 @@ func testResubmitTransaction(t *testing.T) {
}
}
func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
txType := "DynamicFeeTx"
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
auth := s.auths.getAccount()
tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
s.baseFeePerGas = 1000
feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
assert.NoError(t, err)
// bump the basefee by 10x
s.baseFeePerGas *= 10
// resubmit and check that the gas fee has been adjusted accordingly
newTx, err := s.resubmitTransaction(feeData, auth, tx)
assert.NoError(t, err)
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
adjBaseFee := new(big.Int)
adjBaseFee.SetUint64(s.baseFeePerGas)
adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
expectedGasFeeCap := new(big.Int).Add(feeData.gasTipCap, adjBaseFee)
if expectedGasFeeCap.Cmp(maxGasPrice) > 0 {
expectedGasFeeCap = maxGasPrice
}
assert.Equal(t, expectedGasFeeCap.Int64(), newTx.GasFeeCap().Int64())
s.Stop()
}
func testCheckPendingTransaction(t *testing.T) {
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig

View File

@@ -19,13 +19,13 @@ type BatchProposer struct {
ctx context.Context
db *gorm.DB
batchOrm *orm.Batch
chunkOrm *orm.Chunk
l2Block *orm.L2Block
batchOrm *orm.Batch
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxChunkNumPerBatch uint64
maxL1CommitGasPerBatch uint64
maxL1CommitCalldataSizePerBatch uint64
maxL1CommitCalldataSizePerBatch uint32
minChunkNumPerBatch uint64
batchTimeoutSec uint64
}
@@ -37,7 +37,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
db: db,
batchOrm: orm.NewBatch(db),
chunkOrm: orm.NewChunk(db),
l2Block: orm.NewL2Block(db),
l2BlockOrm: orm.NewL2Block(db),
maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch,
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
@@ -93,7 +93,6 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
}
if len(dbChunks) == 0 {
log.Warn("No Unbatched Chunks")
return nil, nil
}
@@ -147,7 +146,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
}
if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
log.Warn("The payload size of the batch is less than the minimum limit",
log.Warn("The chunk number of the batch is less than the minimum limit",
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
)
return nil, nil
@@ -158,7 +157,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
chunks := make([]*types.Chunk, len(dbChunks))
for i, c := range dbChunks {
wrappedBlocks, err := p.l2Block.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
wrappedBlocks, err := p.l2BlockOrm.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil {
log.Error("Failed to fetch wrapped blocks",
"start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)

View File

@@ -61,7 +61,6 @@ func (p *ChunkProposer) TryProposeChunk() {
func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
if chunk == nil {
log.Warn("proposed chunk is nil, cannot update in DB")
return nil
}
@@ -86,7 +85,6 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
}
if len(blocks) == 0 {
log.Warn("no un-chunked blocks")
return nil, nil
}

View File

@@ -74,7 +74,7 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
}
l1BlockOrm := orm.NewL1Block(db)
savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight()
savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(ctx)
if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
savedL1BlockHeight = 0
@@ -149,9 +149,10 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
baseFee = block.BaseFee.Uint64()
}
blocks = append(blocks, orm.L1Block{
Number: uint64(height),
Hash: block.Hash().String(),
BaseFee: baseFee,
Number: uint64(height),
Hash: block.Hash().String(),
BaseFee: baseFee,
GasOracleStatus: int16(types.GasOraclePending),
})
}

View File

@@ -109,7 +109,7 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
}
// Fetch and store block traces for missing blocks
for from := uint64(heightInDB) + 1; from <= blockHeight; from += blockTracesFetchLimit {
for from := heightInDB + 1; from <= blockHeight; from += blockTracesFetchLimit {
to := from + blockTracesFetchLimit - 1
if to > blockHeight {

View File

@@ -88,7 +88,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
wc := prepareWatcherClient(l2Cli, db, address)
wc.TryFetchRunningMissingBlocks(latestHeight)
fetchedHeight, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
return err == nil && uint64(fetchedHeight) == latestHeight
return err == nil && fetchedHeight == latestHeight
})
assert.True(t, ok)
}

View File

@@ -33,12 +33,12 @@ type Batch struct {
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
// proof
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status"`
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// rollup
RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"`
@@ -71,6 +71,7 @@ func (*Batch) TableName() string {
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
for key, value := range fields {
db = db.Where(key, value)
@@ -88,46 +89,51 @@ func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, o
var batches []*Batch
if err := db.Find(&batches).Error; err != nil {
return nil, err
return nil, fmt.Errorf("Batch.GetBatches error: %w, fields: %v, orderByList: %v", err, fields, orderByList)
}
return batches, nil
}
// GetBatchCount retrieves the total number of batches in the database.
func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
var count int64
err := o.db.WithContext(ctx).Model(&Batch{}).Count(&count).Error
if err != nil {
return 0, err
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Batch.GetBatchCount error: %w", err)
}
return uint64(count), nil
}
// GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash.
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.AggProof, error) {
var batch Batch
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("proof")
db = db.Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified)
var batch Batch
if err := db.Find(&batch).Error; err != nil {
return nil, err
return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
}
var proof message.AggProof
if err := json.Unmarshal(batch.Proof, &proof); err != nil {
return nil, err
return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
}
return &proof, nil
}
// GetLatestBatch retrieves the latest batch from the database.
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Order("index desc")
var latestBatch Batch
err := o.db.WithContext(ctx).Order("index desc").First(&latestBatch).Error
if err != nil {
return nil, err
if err := db.First(&latestBatch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
}
return &latestBatch, nil
}
@@ -138,13 +144,14 @@ func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string)
return nil, nil
}
var batches []Batch
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("hash, rollup_status")
db = db.Where("hash IN ?", hashes)
var batches []Batch
if err := db.Find(&batches).Error; err != nil {
return nil, err
return nil, fmt.Errorf("Batch.GetRollupStatusByHashList error: %w, hashes: %v", err, hashes)
}
hashToStatusMap := make(map[string]types.RollupStatus)
@@ -156,7 +163,7 @@ func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string)
for _, hash := range hashes {
status, ok := hashToStatusMap[hash]
if !ok {
return nil, fmt.Errorf("hash not found in database: %s", hash)
return nil, fmt.Errorf("Batch.GetRollupStatusByHashList: hash not found in database: %s", hash)
}
statuses = append(statuses, status)
}
@@ -171,23 +178,28 @@ func (o *Batch) GetPendingBatches(ctx context.Context, limit int) ([]*Batch, err
return nil, errors.New("limit must be greater than zero")
}
var batches []*Batch
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("rollup_status = ?", types.RollupPending)
db = db.Order("index ASC")
db = db.Limit(limit)
db = db.Where("rollup_status = ?", types.RollupPending).Order("index ASC").Limit(limit)
var batches []*Batch
if err := db.Find(&batches).Error; err != nil {
return nil, err
return nil, fmt.Errorf("Batch.GetPendingBatches error: %w", err)
}
return batches, nil
}
// GetBatchByIndex retrieves the batch by the given index.
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("index = ?", index)
var batch Batch
err := o.db.WithContext(ctx).Where("index = ?", index).First(&batch).Error
if err != nil {
return nil, err
if err := db.First(&batch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index)
}
return &batch, nil
}
@@ -198,13 +210,8 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
return nil, errors.New("invalid args")
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
parentBatch, err := o.GetLatestBatch(ctx)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
log.Error("failed to get the latest batch", "err", err)
return nil, err
}
@@ -256,48 +263,41 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
}
if err := db.WithContext(ctx).Create(&newBatch).Error; err != nil {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db.WithContext(ctx)
db = db.Model(&Batch{})
if err := db.Create(&newBatch).Error; err != nil {
log.Error("failed to insert batch", "batch", newBatch, "err", err)
return nil, err
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
return &newBatch, nil
}
// UpdateSkippedBatches updates the skipped batches in the database.
func (o *Batch) UpdateSkippedBatches(ctx context.Context) (uint64, error) {
provingStatusList := []interface{}{
int(types.ProvingTaskSkipped),
int(types.ProvingTaskFailed),
}
result := o.db.WithContext(ctx).Model(&Batch{}).Where("rollup_status", int(types.RollupCommitted)).
Where("proving_status IN (?)", provingStatusList).Update("rollup_status", int(types.RollupFinalizationSkipped))
if result.Error != nil {
return 0, result.Error
}
return uint64(result.RowsAffected), nil
}
// UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch.
func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
updateFields := make(map[string]interface{})
updateFields["oracle_status"] = int(status)
updateFields["oracle_tx_hash"] = txHash
if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateL2GasOracleStatusAndOracleTxHash error: %w, batch hash: %v, status: %v, txHash: %v", err, hash, status.String(), txHash)
}
return nil
}
// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
@@ -310,19 +310,22 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["proved_at"] = time.Now()
}
if err := db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
}
return nil
}
// UpdateRollupStatus updates the rollup status of a batch.
func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{})
updateFields["rollup_status"] = int(status)
@@ -332,8 +335,17 @@ func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status type
case types.RollupFinalized:
updateFields["finalized_at"] = time.Now()
}
if err := db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateRollupStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
}
return nil
}
@@ -346,8 +358,13 @@ func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash stri
if status == types.RollupCommitted {
updateFields["committed_at"] = time.Now()
}
if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateCommitTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), commitTxHash)
}
return nil
}
@@ -360,8 +377,13 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
if status == types.RollupFinalized {
updateFields["finalized_at"] = time.Now()
}
if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), finalizeTxHash)
}
return nil
}
@@ -371,12 +393,19 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
}
updateFields := make(map[string]interface{})
updateFields["proof"] = proofBytes
updateFields["proof_time_sec"] = proofTimeSec
err = o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error
return err
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err = db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
}
return nil
}

View File

@@ -3,6 +3,7 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
"scroll-tech/common/types"
@@ -24,22 +25,22 @@ type Chunk struct {
EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"`
StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"`
TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
TotalL1MessagesPoppedInChunk uint64 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
TotalL1MessagesPoppedInChunk uint32 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
// proof
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"`
TotalL2TxNum uint32 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
TotalL1CommitCalldataSize uint32 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"`
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -61,19 +62,22 @@ func (*Chunk) TableName() string {
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endIndex uint64) ([]*Chunk, error) {
if startIndex > endIndex {
return nil, errors.New("start index should be less than or equal to end index")
return nil, fmt.Errorf("Chunk.GetChunksInRange: start index should be less than or equal to end index, start index: %v, end index: %v", startIndex, endIndex)
}
var chunks []*Chunk
db := o.db.WithContext(ctx).Where("index >= ? AND index <= ?", startIndex, endIndex)
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
db = db.Order("index ASC")
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, err
return nil, fmt.Errorf("Chunk.GetChunksInRange error: %w, start index: %v, end index: %v", err, startIndex, endIndex)
}
if startIndex+uint64(len(chunks)) != endIndex+1 {
return nil, errors.New("number of chunks not expected in the specified range")
// sanity check
if uint64(len(chunks)) != endIndex-startIndex+1 {
return nil, fmt.Errorf("Chunk.GetChunksInRange: incorrect number of chunks, expected: %v, got: %v, start index: %v, end index: %v", endIndex-startIndex+1, len(chunks), startIndex, endIndex)
}
return chunks, nil
@@ -81,25 +85,27 @@ func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endInde
// GetUnbatchedChunks retrieves unbatched chunks from the database.
func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash IS NULL")
db = db.Order("index asc")
var chunks []*Chunk
err := o.db.WithContext(ctx).
Where("batch_hash IS NULL").
Order("index asc").
Find(&chunks).Error
if err != nil {
return nil, err
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetUnbatchedChunks error: %w", err)
}
return chunks, nil
}
// GetLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Order("index desc")
var latestChunk Chunk
err := o.db.WithContext(ctx).
Order("index desc").
First(&latestChunk).Error
if err != nil {
return nil, err
if err := db.First(&latestChunk).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetLatestChunk error: %w", err)
}
return &latestChunk, nil
}
@@ -110,17 +116,12 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
return nil, errors.New("invalid args")
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
var chunkIndex uint64
var totalL1MessagePoppedBefore uint64
parentChunk, err := o.GetLatestChunk(ctx)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
log.Error("failed to get latest chunk", "err", err)
return nil, err
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
// if parentChunk==nil then err==gorm.ErrRecordNotFound, which means there's
@@ -128,13 +129,13 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
// if parentChunk!=nil then err=nil, then we fill the parentChunk-related data into the creating chunk
if parentChunk != nil {
chunkIndex = parentChunk.Index + 1
totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + parentChunk.TotalL1MessagesPoppedInChunk
totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + uint64(parentChunk.TotalL1MessagesPoppedInChunk)
}
hash, err := chunk.Hash(totalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to get chunk hash", "err", err)
return nil, err
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
var totalL2TxGas uint64
@@ -157,18 +158,24 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: totalL2TxGas,
TotalL2TxNum: totalL2TxNum,
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
TotalL2TxNum: uint32(totalL2TxNum),
TotalL1CommitCalldataSize: uint32(totalL1CommitCalldataSize),
TotalL1CommitGas: totalL1CommitGas,
StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),
TotalL1MessagesPoppedInChunk: uint32(chunk.NumL1Messages(totalL1MessagePoppedBefore)),
ProvingStatus: int16(types.ProvingTaskUnassigned),
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
if err := db.Create(&newChunk).Error; err != nil {
log.Error("failed to insert chunk", "hash", hash, "err", err)
return nil, err
return nil, fmt.Errorf("Chunk.InsertChunk error: %w, chunk hash: %v", err, newChunk.Hash)
}
return &newChunk, nil
@@ -176,11 +183,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
// UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
@@ -193,8 +195,16 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["proved_at"] = time.Now()
}
if err := db.Model(&Chunk{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatus error: %w, chunk hash: %v, status: %v", err, hash, status.String())
}
return nil
}
@@ -206,7 +216,12 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.Model(&Chunk{}).Where("index >= ? AND index <= ?", startIndex, endIndex)
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
return db.Update("batch_hash", batchHash).Error
if err := db.Update("batch_hash", batchHash).Error; err != nil {
return fmt.Errorf("Chunk.UpdateBatchHashInRange error: %w, start index: %v, end index: %v, batch hash: %v", err, startIndex, endIndex, batchHash)
}
return nil
}

View File

@@ -2,8 +2,9 @@ package orm
import (
"context"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
@@ -13,14 +14,19 @@ import (
type L1Block struct {
db *gorm.DB `gorm:"column:-"`
Number uint64 `json:"number" gorm:"column:number"`
Hash string `json:"hash" gorm:"column:hash"`
HeaderRLP string `json:"header_rlp" gorm:"column:header_rlp"`
BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"`
BlockStatus int `json:"block_status" gorm:"column:block_status;default:1"`
ImportTxHash string `json:"import_tx_hash" gorm:"column:import_tx_hash;default:NULL"`
GasOracleStatus int `json:"oracle_status" gorm:"column:oracle_status;default:1"`
// block
Number uint64 `json:"number" gorm:"column:number"`
Hash string `json:"hash" gorm:"column:hash"`
BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"`
// oracle
GasOracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewL1Block create an l1Block instance
@@ -34,54 +40,64 @@ func (*L1Block) TableName() string {
}
// GetLatestL1BlockHeight get the latest l1 block height
func (l *L1Block) GetLatestL1BlockHeight() (uint64, error) {
result := l.db.Model(&L1Block{}).Select("COALESCE(MAX(number), 0)").Row()
if result.Err() != nil {
return 0, result.Err()
}
func (o *L1Block) GetLatestL1BlockHeight(ctx context.Context) (uint64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})
db = db.Select("COALESCE(MAX(number), 0)")
var maxNumber uint64
if err := result.Scan(&maxNumber); err != nil {
return 0, err
if err := db.Row().Scan(&maxNumber); err != nil {
return 0, fmt.Errorf("L1Block.GetLatestL1BlockHeight error: %w", err)
}
return maxNumber, nil
}
// GetL1Blocks get the l1 blocks
func (l *L1Block) GetL1Blocks(fields map[string]interface{}) ([]L1Block, error) {
var l1Blocks []L1Block
db := l.db
func (o *L1Block) GetL1Blocks(ctx context.Context, fields map[string]interface{}) ([]L1Block, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})
for key, value := range fields {
db = db.Where(key, value)
}
db = db.Order("number ASC")
var l1Blocks []L1Block
if err := db.Find(&l1Blocks).Error; err != nil {
return nil, err
return nil, fmt.Errorf("L1Block.GetL1Blocks error: %w, fields: %v", err, fields)
}
return l1Blocks, nil
}
// InsertL1Blocks batch insert l1 blocks
func (l *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {
func (o *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {
if len(blocks) == 0 {
return nil
}
err := l.db.WithContext(ctx).Create(&blocks).Error
if err != nil {
log.Error("failed to insert L1 Blocks", "err", err)
db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})
if err := db.Create(&blocks).Error; err != nil {
return fmt.Errorf("L1Block.InsertL1Blocks error: %w", err)
}
return err
return nil
}
// UpdateL1GasOracleStatusAndOracleTxHash update l1 gas oracle status and oracle tx hash
func (l *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error {
func (o *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error {
updateFields := map[string]interface{}{
"oracle_status": int(status),
"oracle_tx_hash": txHash,
}
if err := l.db.WithContext(ctx).Model(&L1Block{}).Where("hash", blockHash).Updates(updateFields).Error; err != nil {
return err
db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})
db = db.Where("hash", blockHash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("L1Block.UpdateL1GasOracleStatusAndOracleTxHash error: %w, block hash: %v, status: %v, tx hash: %v", err, blockHash, status.String(), txHash)
}
return nil
}

View File

@@ -3,6 +3,7 @@ package orm
import (
"context"
"database/sql"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
@@ -25,6 +26,11 @@ type L1Message struct {
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash"`
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:NULL"`
Status int `json:"status" gorm:"column:status;default:1"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewL1Message create an L1MessageOrm instance

View File

@@ -3,8 +3,8 @@ package orm
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
@@ -18,16 +18,24 @@ import (
type L2Block struct {
db *gorm.DB `gorm:"column:-"`
// block
Number uint64 `json:"number" gorm:"number"`
Hash string `json:"hash" gorm:"hash"`
ParentHash string `json:"parent_hash" gorm:"parent_hash"`
Header string `json:"header" gorm:"header"`
Transactions string `json:"transactions" gorm:"transactions"`
WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"`
TxNum uint64 `json:"tx_num" gorm:"tx_num"`
TxNum uint32 `json:"tx_num" gorm:"tx_num"`
GasUsed uint64 `json:"gas_used" gorm:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
// chunk
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewL2Block creates a new L2Block instance
@@ -42,27 +50,30 @@ func (*L2Block) TableName() string {
// GetL2BlocksLatestHeight retrieves the height of the latest L2 block.
// If the l2_block table is empty, it returns 0 to represent the genesis block height.
// In case of an error, it returns -1 along with the error.
func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (int64, error) {
var maxNumber int64
if err := o.db.WithContext(ctx).Model(&L2Block{}).Select("COALESCE(MAX(number), 0)").Row().Scan(&maxNumber); err != nil {
return -1, err
}
func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("COALESCE(MAX(number), 0)")
var maxNumber uint64
if err := db.Row().Scan(&maxNumber); err != nil {
return 0, fmt.Errorf("L2Block.GetL2BlocksLatestHeight error: %w", err)
}
return maxNumber, nil
}
// GetUnchunkedBlocks get the l2 blocks that have not been put into a chunk.
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
var l2Blocks []L2Block
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Where("chunk_hash IS NULL")
db = db.Order("number ASC")
var l2Blocks []L2Block
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, err
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
}
var wrappedBlocks []*types.WrappedBlock
@@ -70,12 +81,12 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
var wrappedBlock types.WrappedBlock
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
return nil, err
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
}
wrappedBlock.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
return nil, err
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
}
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
@@ -89,6 +100,7 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
// The returned L2Blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*L2Block, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
for key, value := range fields {
db = db.Where(key, value)
@@ -106,7 +118,7 @@ func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}
var l2Blocks []*L2Block
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, err
return nil, fmt.Errorf("L2Block.GetL2Blocks error: %w, fields: %v, orderByList: %v", err, fields, orderByList)
}
return l2Blocks, nil
}
@@ -116,22 +128,23 @@ func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint64, endBlockNumber uint64) ([]*types.WrappedBlock, error) {
if startBlockNumber > endBlockNumber {
return nil, errors.New("start block number should be less than or equal to end block number")
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: start block number should be less than or equal to end block number, start block: %v, end block: %v", startBlockNumber, endBlockNumber)
}
var l2Blocks []L2Block
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
db = db.Order("number ASC")
var l2Blocks []L2Block
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, err
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
// sanity check
if uint64(len(l2Blocks)) != endBlockNumber-startBlockNumber+1 {
return nil, errors.New("number of blocks not expected in the specified range")
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: unexpected number of results, expected: %v, got: %v", endBlockNumber-startBlockNumber+1, len(l2Blocks))
}
var wrappedBlocks []*types.WrappedBlock
@@ -139,12 +152,12 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6
var wrappedBlock types.WrappedBlock
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
return nil, err
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
wrappedBlock.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
return nil, err
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
@@ -161,13 +174,13 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
header, err := json.Marshal(block.Header)
if err != nil {
log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err)
return err
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}
txs, err := json.Marshal(block.Transactions)
if err != nil {
log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err)
return err
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}
l2Block := L2Block{
@@ -176,7 +189,7 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
ParentHash: block.Header.ParentHash.String(),
Transactions: string(txs),
WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(),
TxNum: uint64(len(block.Transactions)),
TxNum: uint32(len(block.Transactions)),
GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time,
Header: string(header),
@@ -184,9 +197,11 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
l2Blocks = append(l2Blocks, l2Block)
}
if err := o.db.WithContext(ctx).Create(&l2Blocks).Error; err != nil {
log.Error("failed to insert l2Blocks", "err", err)
return err
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
if err := db.Create(&l2Blocks).Error; err != nil {
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}
return nil
}
@@ -200,13 +215,19 @@ func (o *L2Block) UpdateChunkHashInRange(ctx context.Context, startIndex uint64,
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Where("number >= ? AND number <= ?", startIndex, endIndex)
db = db.WithContext(ctx).Model(&L2Block{}).Where("number >= ? AND number <= ?", startIndex, endIndex)
tx := db.Update("chunk_hash", chunkHash)
if tx.RowsAffected != int64(endIndex-startIndex+1) {
return fmt.Errorf("expected %d rows to be updated, got %d", endIndex-startIndex+1, tx.RowsAffected)
if tx.Error != nil {
return fmt.Errorf("L2Block.UpdateChunkHashInRange error: %w, start index: %v, end index: %v, chunk hash: %v", tx.Error, startIndex, endIndex, chunkHash)
}
return tx.Error
// sanity check
if uint64(tx.RowsAffected) != endIndex-startIndex+1 {
return fmt.Errorf("L2Block.UpdateChunkHashInRange: incorrect number of rows affected, expected: %v, got: %v", endIndex-startIndex+1, tx.RowsAffected)
}
return nil
}

View File

@@ -99,7 +99,7 @@ func TestL2BlockOrm(t *testing.T) {
height, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
assert.NoError(t, err)
assert.Equal(t, int64(3), height)
assert.Equal(t, uint64(3), height)
blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background())
assert.NoError(t, err)
@@ -202,32 +202,11 @@ func TestBatchOrm(t *testing.T) {
assert.Equal(t, types.RollupPending, rollupStatus[0])
assert.Equal(t, types.RollupPending, rollupStatus[1])
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash1, types.ProvingTaskSkipped)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitted)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskFailed)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupCommitted)
assert.NoError(t, err)
count, err = batchOrm.UpdateSkippedBatches(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(2), count)
count, err = batchOrm.UpdateSkippedBatches(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(0), count)
batch, err := batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizationSkipped, types.RollupStatus(batch.RollupStatus))
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
assert.Error(t, err, gorm.ErrRecordNotFound)
assert.Error(t, err)
assert.Nil(t, dbProof)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)

View File

@@ -43,10 +43,10 @@ func testImportL1GasPrice(t *testing.T) {
l1BlockOrm := orm.NewL1Block(db)
// check db status
latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight()
latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
assert.NoError(t, err)
assert.Equal(t, number, latestBlockHeight)
blocks, err := l1BlockOrm.GetL1Blocks(map[string]interface{}{"number": latestBlockHeight})
blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
assert.NoError(t, err)
assert.Equal(t, len(blocks), 1)
assert.Empty(t, blocks[0].OracleTxHash)
@@ -54,7 +54,7 @@ func testImportL1GasPrice(t *testing.T) {
// relay gas price
l1Relayer.ProcessGasPriceOracle()
blocks, err = l1BlockOrm.GetL1Blocks(map[string]interface{}{"number": latestBlockHeight})
blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
assert.NoError(t, err)
assert.Equal(t, len(blocks), 1)
assert.NotEmpty(t, blocks[0].OracleTxHash)

View File

@@ -209,6 +209,13 @@ issues:
linters:
- errcheck
- gosec
# Exclude abi files in bridge-history-api
- path: backend_abi\.go
linters:
- errcheck
- gosec
- golint
# Exclude some staticcheck messages
- linters:

View File

@@ -2,33 +2,12 @@
package types
import (
"database/sql"
"fmt"
)
// L1BlockStatus represents current l1 block processing status
type L1BlockStatus int
// GasOracleStatus represents current gas oracle processing status
type GasOracleStatus int
const (
// L1BlockUndefined : undefined l1 block status
L1BlockUndefined L1BlockStatus = iota
// L1BlockPending represents the l1 block status is pending
L1BlockPending
// L1BlockImporting represents the l1 block status is importing
L1BlockImporting
// L1BlockImported represents the l1 block status is imported
L1BlockImported
// L1BlockFailed represents the l1 block status is failed
L1BlockFailed
)
const (
// GasOracleUndefined : undefined gas oracle status
GasOracleUndefined GasOracleStatus = iota
@@ -46,18 +25,21 @@ const (
GasOracleFailed
)
// L1BlockInfo is structure of stored l1 block
type L1BlockInfo struct {
Number uint64 `json:"number" db:"number"`
Hash string `json:"hash" db:"hash"`
HeaderRLP string `json:"header_rlp" db:"header_rlp"`
BaseFee uint64 `json:"base_fee" db:"base_fee"`
BlockStatus L1BlockStatus `json:"block_status" db:"block_status"`
GasOracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"`
ImportTxHash sql.NullString `json:"import_tx_hash" db:"import_tx_hash"`
OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"`
func (s GasOracleStatus) String() string {
switch s {
case GasOracleUndefined:
return "GasOracleUndefined"
case GasOraclePending:
return "GasOraclePending"
case GasOracleImporting:
return "GasOracleImporting"
case GasOracleImported:
return "GasOracleImported"
case GasOracleFailed:
return "GasOracleFailed"
default:
return fmt.Sprintf("Undefined (%d)", int32(s))
}
}
// MsgStatus represents current layer1 transaction processing status
@@ -86,50 +68,14 @@ const (
MsgRelayFailed
)
// L1Message is structure of stored layer1 bridge message
type L1Message struct {
QueueIndex uint64 `json:"queue_index" db:"queue_index"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Value string `json:"value" db:"value"`
Target string `json:"target" db:"target"`
Calldata string `json:"calldata" db:"calldata"`
GasLimit uint64 `json:"gas_limit" db:"gas_limit"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Status MsgStatus `json:"status" db:"status"`
}
// L2Message is structure of stored layer2 bridge message
type L2Message struct {
Nonce uint64 `json:"nonce" db:"nonce"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Value string `json:"value" db:"value"`
Target string `json:"target" db:"target"`
Calldata string `json:"calldata" db:"calldata"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Status MsgStatus `json:"status" db:"status"`
}
// BlockInfo is structure of stored `block_trace` without `trace`
type BlockInfo struct {
Number uint64 `json:"number" db:"number"`
Hash string `json:"hash" db:"hash"`
ParentHash string `json:"parent_hash" db:"parent_hash"`
BatchHash sql.NullString `json:"batch_hash" db:"batch_hash"`
TxNum uint64 `json:"tx_num" db:"tx_num"`
GasUsed uint64 `json:"gas_used" db:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" db:"block_timestamp"`
}
// RollerProveStatus is the roller prove status of a block batch (session)
type RollerProveStatus int32
const (
// RollerProveStatusUndefined indicates an unknown roller proving status
RollerProveStatusUndefined RollerProveStatus = iota
// RollerAssigned indicates roller assigned but has not submitted proof
RollerAssigned RollerProveStatus = iota
RollerAssigned
// RollerProofValid indicates roller has submitted valid proof
RollerProofValid
// RollerProofInvalid indicates roller has submitted invalid proof
@@ -149,11 +95,19 @@ func (s RollerProveStatus) String() string {
}
}
// RollerStatus is the roller name and roller prove status
type RollerStatus struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
Status RollerProveStatus `json:"status"`
// RollerFailureType is the type of a roller session's failure
type RollerFailureType int
const (
// RollerFailureTypeUndefined indicates an unknown roller failure type
RollerFailureTypeUndefined RollerFailureType = iota
)
func (s RollerFailureType) String() string {
switch s {
default:
return fmt.Sprintf("Undefined (%d)", int32(s))
}
}
// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
@@ -164,8 +118,6 @@ const (
ProvingStatusUndefined ProvingStatus = iota
// ProvingTaskUnassigned : proving_task is not assigned to be proved
ProvingTaskUnassigned
// ProvingTaskSkipped : proving_task is skipped for proof generation
ProvingTaskSkipped
// ProvingTaskAssigned : proving_task is assigned to be proved
ProvingTaskAssigned
// ProvingTaskProved : proof has been returned by prover
@@ -180,8 +132,6 @@ func (ps ProvingStatus) String() string {
switch ps {
case ProvingTaskUnassigned:
return "unassigned"
case ProvingTaskSkipped:
return "skipped"
case ProvingTaskAssigned:
return "assigned"
case ProvingTaskProved:
@@ -191,7 +141,7 @@ func (ps ProvingStatus) String() string {
case ProvingTaskFailed:
return "failed"
default:
return "undefined"
return fmt.Sprintf("Undefined (%d)", int32(ps))
}
}
@@ -209,6 +159,17 @@ const (
ChunkProofsStatusReady
)
func (s ChunkProofsStatus) String() string {
switch s {
case ChunkProofsStatusPending:
return "ChunkProofsStatusPending"
case ChunkProofsStatusReady:
return "ChunkProofsStatusReady"
default:
return fmt.Sprintf("Undefined (%d)", int32(s))
}
}
// RollupStatus block_batch rollup_status (pending, committing, committed, commit_failed, finalizing, finalized, finalize_skipped, finalize_failed)
type RollupStatus int
@@ -225,10 +186,29 @@ const (
RollupFinalizing
// RollupFinalized : finalize transaction is confirmed to layer1
RollupFinalized
// RollupFinalizationSkipped : batch finalization is skipped
RollupFinalizationSkipped
// RollupCommitFailed : rollup commit transaction confirmed but failed
RollupCommitFailed
// RollupFinalizeFailed : rollup finalize transaction is confirmed but failed
RollupFinalizeFailed
)
func (s RollupStatus) String() string {
switch s {
case RollupPending:
return "RollupPending"
case RollupCommitting:
return "RollupCommitting"
case RollupCommitted:
return "RollupCommitted"
case RollupFinalizing:
return "RollupFinalizing"
case RollupFinalized:
return "RollupFinalized"
case RollupCommitFailed:
return "RollupCommitFailed"
case RollupFinalizeFailed:
return "RollupFinalizeFailed"
default:
return fmt.Sprintf("Undefined (%d)", int32(s))
}
}

View File

@@ -52,11 +52,6 @@ func TestProvingStatus(t *testing.T) {
ProvingTaskUnassigned,
"unassigned",
},
{
"ProvingTaskSkipped",
ProvingTaskSkipped,
"skipped",
},
{
"ProvingTaskAssigned",
ProvingTaskAssigned,
@@ -80,7 +75,7 @@ func TestProvingStatus(t *testing.T) {
{
"Undefined",
ProvingStatus(999), // Invalid value.
"undefined",
"Undefined (999)",
},
}

View File

@@ -38,8 +38,10 @@ func (r ProofType) String() string {
}
const (
// ProofTypeUndefined is an unknown proof type
ProofTypeUndefined ProofType = iota
// ProofTypeChunk is default roller, it only generates zk proof from traces.
ProofTypeChunk ProofType = iota
ProofTypeChunk
// ProofTypeBatch generates zk proof from other zk proofs and aggregate them into one proof.
ProofTypeBatch
)
@@ -60,8 +62,6 @@ type Identity struct {
Name string `json:"name"`
// Roller RollerType
RollerType ProofType `json:"roller_type,omitempty"`
// Unverified Unix timestamp of message creation
Timestamp uint32 `json:"timestamp"`
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version.
// curl -sL https://api.github.com/repos/scroll-tech/scroll-prover/commits | jq -r ".[0].sha"
Version string `json:"version"`

View File

@@ -3,7 +3,6 @@ package message
import (
"encoding/hex"
"testing"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
@@ -16,10 +15,9 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
authMsg := &AuthMsg{
Identity: &Identity{
Name: "testName",
Timestamp: uint32(time.Now().Unix()),
Version: "testVersion",
Token: "testToken",
Name: "testName",
Version: "testVersion",
Token: "testToken",
},
}
assert.NoError(t, authMsg.SignWithKey(privkey))
@@ -50,14 +48,13 @@ func TestIdentityHash(t *testing.T) {
identity := &Identity{
Name: "testName",
RollerType: ProofTypeChunk,
Timestamp: uint32(1622428800),
Version: "testVersion",
Token: "testToken",
}
hash, err := identity.Hash()
assert.NoError(t, err)
expectedHash := "b3f152958dc881446fc131a250526139d909710c6b91b4d3281ceded28ce2e32"
expectedHash := "c0411a19531fb8c6133b2bae91f361c14e65f2d318aef72b83519e6061cad001"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}
@@ -109,15 +106,15 @@ func TestProofDetailHash(t *testing.T) {
}
hash, err := proofDetail.Hash()
assert.NoError(t, err)
expectedHash := "fdfaae752d6fd72a7fdd2ad034ef504d3acda9e691a799323cfa6e371684ba2b"
expectedHash := "8ad894c2047166a98b1a389b716b06b01dc1bd29e950e2687ffbcb3c328edda5"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}
func TestProveTypeString(t *testing.T) {
proofTypeChunk := ProofType(0)
proofTypeChunk := ProofType(1)
assert.Equal(t, "proof type chunk", proofTypeChunk.String())
proofTypeBatch := ProofType(1)
proofTypeBatch := ProofType(2)
assert.Equal(t, "proof type batch", proofTypeBatch.String())
illegalProof := ProofType(3)

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.0.11"
var tag = "v4.0.25"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -181,6 +181,22 @@ The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
|---|---|---|
| _0 | address | undefined |
### onDropMessage
```solidity
function onDropMessage(bytes _message) external payable
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _message | bytes | undefined |
### onERC1155BatchReceived
```solidity
@@ -258,7 +274,7 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### router
@@ -380,6 +396,25 @@ Emitted when the ERC1155 NFT is batch deposited to gateway in layer 1.
| _tokenIds | uint256[] | undefined |
| _amounts | uint256[] | undefined |
### BatchRefundERC1155
```solidity
event BatchRefundERC1155(address indexed token, address indexed recipient, uint256[] tokenIds, uint256[] amounts)
```
Emitted when some ERC1155 token is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| tokenIds | uint256[] | undefined |
| amounts | uint256[] | undefined |
### DepositERC1155
```solidity
@@ -443,6 +478,22 @@ Emitted when the ERC1155 NFT is transfered to recipient in layer 1.
| _tokenId | uint256 | undefined |
| _amount | uint256 | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity
@@ -460,6 +511,25 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### RefundERC1155
```solidity
event RefundERC1155(address indexed token, address indexed recipient, uint256 tokenId, uint256 amount)
```
Emitted when some ERC1155 token is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| tokenId | uint256 | undefined |
| amount | uint256 | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -175,6 +175,22 @@ The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
|---|---|---|
| _0 | address | undefined |
### onDropMessage
```solidity
function onDropMessage(bytes _message) external payable
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _message | bytes | undefined |
### onERC721Received
```solidity
@@ -225,7 +241,7 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### router
@@ -324,6 +340,24 @@ Emitted when the ERC721 NFT is batch deposited to gateway in layer 1.
| _to | address | undefined |
| _tokenIds | uint256[] | undefined |
### BatchRefundERC721
```solidity
event BatchRefundERC721(address indexed token, address indexed recipient, uint256[] tokenIds)
```
Emitted when a batch of ERC721 tokens are refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| tokenIds | uint256[] | undefined |
### DepositERC721
```solidity
@@ -384,6 +418,22 @@ Emitted when the ERC721 NFT is transfered to recipient in layer 1.
| _to | address | undefined |
| _tokenId | uint256 | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity
@@ -401,6 +451,24 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### RefundERC721
```solidity
event RefundERC721(address indexed token, address indexed recipient, uint256 tokenId)
```
Emitted when some ERC721 token is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| tokenId | uint256 | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -320,7 +320,7 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### requestERC20
@@ -496,6 +496,22 @@ Emitted when ETH is withdrawn from L2 to L1 and transfer to recipient.
| amount | uint256 | undefined |
| data | bytes | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity
@@ -513,6 +529,41 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### RefundERC20
```solidity
event RefundERC20(address indexed token, address indexed recipient, uint256 amount)
```
Emitted when some ERC20 token is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### RefundETH
```solidity
event RefundETH(address indexed recipient, uint256 amount)
```
Emitted when some ETH is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### SetDefaultERC20Gateway
```solidity

View File

@@ -27,6 +27,26 @@ The address of counterpart ScrollMessenger contract in L1/L2.
|---|---|---|
| _0 | address | undefined |
### dropMessage
```solidity
function dropMessage(address _from, address _to, uint256 _value, uint256 _messageNonce, bytes _message) external nonpayable
```
Drop a skipped message.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _from | address | undefined |
| _to | address | undefined |
| _value | uint256 | undefined |
| _messageNonce | uint256 | undefined |
| _message | bytes | undefined |
### feeVault
```solidity
@@ -63,6 +83,28 @@ Initialize the storage of L1ScrollMessenger.
| _rollup | address | The address of ScrollChain contract. |
| _messageQueue | address | The address of L1MessageQueue contract. |
### isL1MessageDropped
```solidity
function isL1MessageDropped(bytes32) external view returns (bool)
```
Mapping from L1 message hash to drop status.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### isL1MessageSent
```solidity
@@ -107,6 +149,23 @@ Mapping from L2 message hash to a boolean value indicating if the message has be
|---|---|---|
| _0 | bool | undefined |
### maxReplayTimes
```solidity
function maxReplayTimes() external view returns (uint256)
```
The maximum number of times each L1 message can be replayed.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### messageQueue
```solidity
@@ -158,6 +217,28 @@ function paused() external view returns (bool)
|---|---|---|
| _0 | bool | undefined |
### prevReplayIndex
```solidity
function prevReplayIndex(uint256) external view returns (uint256)
```
Mapping from queue index to previous replay queue index.
*If a message `x` was replayed 3 times with index `q1`, `q2` and `q3`, the value of `prevReplayIndex` and `replayStates` will be `replayStates[hash(x)].lastIndex = q3`, `replayStates[hash(x)].times = 3`, `prevReplayIndex[q3] = q2`, `prevReplayIndex[q2] = q1`, `prevReplayIndex[q1] = x` and `prevReplayIndex[x]=nil`.The index `x` that `prevReplayIndex[x]=nil` is used as the termination of the list. Usually we use `0` to represent `nil`, but we cannot distinguish it with the first message with index zero. So a nonzero offset `1` is added to the value of `prevReplayIndex[x]` to avoid such situation.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### relayMessageWithProof
```solidity
@@ -187,13 +268,13 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### replayMessage
```solidity
function replayMessage(address _from, address _to, uint256 _value, uint256 _queueIndex, bytes _message, uint32 _newGasLimit, address _refundAddress) external payable
function replayMessage(address _from, address _to, uint256 _value, uint256 _messageNonce, bytes _message, uint32 _newGasLimit, address _refundAddress) external payable
```
Replay an existing message.
@@ -207,11 +288,34 @@ Replay an existing message.
| _from | address | undefined |
| _to | address | undefined |
| _value | uint256 | undefined |
| _queueIndex | uint256 | undefined |
| _messageNonce | uint256 | undefined |
| _message | bytes | undefined |
| _newGasLimit | uint32 | undefined |
| _refundAddress | address | undefined |
### replayStates
```solidity
function replayStates(bytes32) external view returns (uint128 times, uint128 lastIndex)
```
Mapping from L1 message hash to replay state.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| times | uint128 | undefined |
| lastIndex | uint128 | undefined |
### rollup
```solidity
@@ -316,6 +420,22 @@ Update fee vault contract.
|---|---|---|
| _newFeeVault | address | The address of new fee vault contract. |
### updateMaxReplayTimes
```solidity
function updateMaxReplayTimes(uint256 _maxReplayTimes) external nonpayable
```
Update max replay times.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _maxReplayTimes | uint256 | The new max replay times. |
### xDomainMessageSender
```solidity
@@ -353,6 +473,22 @@ Emitted when a cross domain message is failed to relay.
|---|---|---|
| messageHash `indexed` | bytes32 | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity
@@ -456,5 +592,21 @@ Emitted when owner updates fee vault contract.
| _oldFeeVault | address | undefined |
| _newFeeVault | address | undefined |
### UpdateMaxReplayTimes
```solidity
event UpdateMaxReplayTimes(uint256 maxReplayTimes)
```
Emitted when the maximum number of times each message can be replayed is updated.
#### Parameters
| Name | Type | Description |
|---|---|---|
| maxReplayTimes | uint256 | undefined |

View File

@@ -198,6 +198,22 @@ The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
|---|---|---|
| _0 | address | undefined |
### onDropMessage
```solidity
function onDropMessage(bytes _message) external payable
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _message | bytes | undefined |
### router
```solidity
@@ -261,5 +277,39 @@ Emitted when ERC20 token is withdrawn from L2 to L1 and transfer to recipient.
| amount | uint256 | undefined |
| data | bytes | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### RefundERC20
```solidity
event RefundERC20(address indexed token, address indexed recipient, uint256 amount)
```
Emitted when some ERC20 token is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |

View File

@@ -196,6 +196,22 @@ The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
|---|---|---|
| _0 | address | undefined |
### onDropMessage
```solidity
function onDropMessage(bytes _message) external payable
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _message | bytes | undefined |
### router
```solidity
@@ -259,5 +275,39 @@ Emitted when ERC20 token is withdrawn from L2 to L1 and transfer to recipient.
| amount | uint256 | undefined |
| data | bytes | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### RefundERC20
```solidity
event RefundERC20(address indexed token, address indexed recipient, uint256 amount)
```
Emitted when some ERC20 token is refunded.
#### Parameters
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |

View File

@@ -219,7 +219,7 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### router
@@ -422,6 +422,22 @@ Emitted when the ERC1155 NFT is transfered to recipient in layer 2.
| tokenId | uint256 | undefined |
| amount | uint256 | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity

View File

@@ -188,7 +188,7 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### router
@@ -364,6 +364,22 @@ Emitted when the ERC721 NFT is transfered to recipient in layer 2.
| to | address | undefined |
| tokenId | uint256 | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity

View File

@@ -214,7 +214,7 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### setDefaultERC20Gateway
@@ -437,6 +437,22 @@ Emitted when ETH is deposited from L1 to L2 and transfer to recipient.
| amount | uint256 | undefined |
| data | bytes | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity

View File

@@ -257,7 +257,7 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### retryMessageWithProof
@@ -469,6 +469,22 @@ Emitted when a cross domain message is failed to relay.
|---|---|---|
| messageHash `indexed` | bytes32 | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity

View File

@@ -244,6 +244,22 @@ Emitted when ERC20 token is deposited from L1 to L2 and transfer to recipient.
| amount | uint256 | undefined |
| data | bytes | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### WithdrawERC20
```solidity

View File

@@ -260,6 +260,22 @@ Emitted when ERC20 token is deposited from L1 to L2 and transfer to recipient.
| amount | uint256 | undefined |
| data | bytes | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### WithdrawERC20
```solidity

View File

@@ -98,7 +98,7 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### transferOwnership
@@ -127,7 +127,7 @@ function transferOwnership(address newOwner) external nonpayable
event DeployToken(address indexed _l1Token, address indexed _l2Token)
```
Emitted when an l2 token is deployed.

View File

@@ -2,16 +2,7 @@
/* eslint-disable node/no-missing-import */
import { expect } from "chai";
import { BigNumber, constants } from "ethers";
import {
concat,
getAddress,
hexlify,
keccak256,
randomBytes,
RLP,
stripZeros,
TransactionTypes,
} from "ethers/lib/utils";
import { concat, getAddress, hexlify, keccak256, randomBytes, RLP, stripZeros } from "ethers/lib/utils";
import { ethers } from "hardhat";
import { L1MessageQueue, L2GasPriceOracle } from "../typechain";
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
@@ -294,4 +285,55 @@ describe("L1MessageQueue", async () => {
}
});
});
context("#dropCrossDomainMessage", async () => {
it("should revert, when non-messenger call", async () => {
await expect(queue.connect(signer).dropCrossDomainMessage(0)).to.revertedWith(
"Only callable by the L1ScrollMessenger"
);
});
it("should revert, when drop executed message", async () => {
// append 10 messages
for (let i = 0; i < 10; i++) {
await queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 1000000, "0x");
}
// pop 5 messages with no skip
await expect(queue.connect(scrollChain).popCrossDomainMessage(0, 5, 0))
.to.emit(queue, "DequeueTransaction")
.withArgs(0, 5, 0);
for (let i = 0; i < 5; i++) {
expect(await queue.getCrossDomainMessage(i)).to.eq(constants.HashZero);
}
expect(await queue.pendingQueueIndex()).to.eq(5);
for (let i = 0; i < 5; i++) {
await expect(queue.connect(messenger).dropCrossDomainMessage(i)).to.revertedWith(
"message already dropped or executed"
);
}
// drop pending message
for (let i = 6; i < 10; i++) {
await expect(queue.connect(messenger).dropCrossDomainMessage(i)).to.revertedWith("cannot drop pending message");
}
});
it("should succeed", async () => {
// append 10 messages
for (let i = 0; i < 10; i++) {
await queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 1000000, "0x");
}
// pop 10 messages, all skipped
await expect(queue.connect(scrollChain).popCrossDomainMessage(0, 10, 0x3ff))
.to.emit(queue, "DequeueTransaction")
.withArgs(0, 10, 0x3ff);
for (let i = 0; i < 10; i++) {
expect(BigNumber.from(await queue.getCrossDomainMessage(i))).to.gt(constants.Zero);
await expect(queue.connect(messenger).dropCrossDomainMessage(i)).to.emit(queue, "DropTransaction").withArgs(i);
expect(await queue.getCrossDomainMessage(i)).to.eq(constants.HashZero);
}
});
});
});

View File

@@ -88,6 +88,29 @@ const testcases: Array<ITestConfig> = [
"0xe19f36c2516eb411c7c89f75dcf98d8ff95555585215a5f6242b4f24adbcb7424901",
],
},
{
block: 17654287,
account: "0x0068cf6ef4fdf5a95d0e2546dab76f679969f3f5",
storage: "0x0000000000000000000000000000000000000000000000000000000000000000",
expectedRoot: "0x6e0dbf614f46a7d99f42bb19b8d077852fe794a040ae62a4468810e3f0948bca",
expectedValue: "0x0000000000000000000000000000000000000000000000000000000000000002",
accountProof: [
"0xf90211a01d16453323b30ed0475ac6e0d8498b3defee9b0a2a273208a37ddceb5e41f69ea09173547e094231c109ca45892dd38e73edeecbde5f89585c981ee7c6ac038a24a01980107b929796de0e9cc55bb7505c7adfd297758a257cb43b9af152d596856da00db006c2271b90391c0a1290bdba55f2b615eeb5307a8bbb4c8faeb15b8b7ce3a0b3ff04cb4685f72483f12c93de3354d621695c59a324e72e09f353af209888a9a0ba895b778ace696ec60b09f295ed885afaa027bd104e3fe58cf106471c71697ca0b8521df9c6de45143d29a6a1e1f855ea11d62a30ebf601914d3524bcdee2d6fda0515cd9a8cbcb46a715fb7ff67fcf72bb433a4c2bbe53c64d0d92d83d7e4bfb68a05a12f19c558e64b1cf5f404e8ec0fcebc0c3c6b05eb5b12bcd5366fe09107652a00e5b7619cff2463f4c063e6be5d511635cca7cff72c5ec8e2d6ab2feea7700e0a09181d13feeaa190bf4ae88de86b0adf2ad55a94864245c2d9f32ccba082e3d40a00c23af247cc206f2edaa46d98d290c8b49018909d6ed97b7346e7ef69e4fb5c4a03acdc865ba1b65c64f9c495fa01975869eb1472a9e87d321a8ffb8ea4dd53a33a00522f686e138ca2ae2b3063319e01b3caf8447d19a0a00affd567e673f57e3f1a048e346e25a5bba903ff0d73354f0289cd12adecef7a50f1a5e6b77fe840c3216a099820f0f4022552c5439640dfe42f18ac125ae827698341a20fc35fca1c9244680",
"0xf90211a01ec95bbb5cf4dc39f63683626c7b0a773d55160908ec99e59a5326380c73203ea0c5a50a01b607a8d5eef6ab0876a506c461d1c598bc4836516421ed1019d56bfaa01a976d9c6cc14e1f6047cbc492985d4812b7ca2f93cf63a2b34200a8212d7a54a040b68773658d419c60e169a84ff4b45594909c4a5daf2793d4d5356bbf8b0e2fa013167a747e7a5a34c9f207f50c62c44b717818ac25c3fcc0f1ff27d7487c8504a0640b788ab79273cb8b6785d078799e71cecd9c209e26c6f870e757ab9defde6ca0de36a43a3e775e7ee817093b16f423dc995f9444a29c9cd06e2f8232254cb95da03c4820d01e2a9e74f3e6da9db1566f5e94b3a933b9864e1702666fbae3175359a0f65de5b5290317566435b695d805ae983e659c93066b72fa09d34114d7dcbdd8a09b2c0648e974edef55f325dedd496139f35bc377ed3e9be17433ee7e8dba5643a0a720648153362da9c1e5ea94416af5f0a069b493ffacbd1def2efa256ced334aa0c5cb7b88ef6fb88d93301bc182581b5680551d1e6d9c518ccbb78f9da0dbe9e9a08070a83e329446fb9a61cbd358b1a14e8f633b5980ed39018a39e856e29fd7c3a055541ec7a175eb2b357bfc608c8c41a313c0e7432b5db3a45eb332bf39ffc705a074a59d2b10b98d80424671e0fed8b95c02c63f007279c77c8637201c806024b8a0a0dd938f251e9e58f33c56842e54e70f6edaac451cdd4ff908a4a845b8a094e180",
"0xf90211a05791255edb067e389d9230610cf6c0f2b5bdba04c787cc253b4408c0de058380a0f0e14c57af008db14bbd968eeeede1f66a277fbcf33d9d6dc7d7909269d31f69a0d63b643ad254d69b440f854ec3c29d0222093d6ee5c694ae130c9a2f30d750b0a0981b82957b34d3cdfcd9677c5c3e7e51f8907de9d62f4e2c1c7daabd53e98f18a0e1c921639a76503e3499702d6c7880c3dbc49e74e4f4e3145e897382819143dba06451b79e74690762041d77f207ae889e6d0379202f37591479e644dfe7e2ef3fa0d40d43938929bbc1656e2f5ad99476b953bb32ad8c81bc190650a34bad9b8befa0f2869cb5ace3192e29664e44073af68f0cee1c095baec010acba5eb710651841a0eb9cfad4c796fa13ad2692e3036e951480afb3b168fde14b59bd6ad5080a6922a0a7d8e8298485e3f6d2949c11ab2a5e2b5f685edf92e57fff7bbaec2d289fe150a08bb1562017cfe065095f8022e9c09e0e8dca556e37103f643f4ac7aa48ce6fa2a0801236ae1ab97c2ebd3d3d84a3649a70d947190a3bf03ff2e08c9566ccb48e96a0d677f601663075137b9a84af1134527bf8b1df1c7f02de4e6509be3892aad4e4a0b6c4e67c85d79a08878b349e1cce515d64fc28915ea3558053e1248ce70c5286a0d0a428122fb8105e813924660214896ec0851fd4f33e64851452476e817e2097a0727eeb8e9ea4e62815c0f46708a1527a9ff9fac9994c1442f9c118c0be8817b580",
"0xf90211a0969453ccd835ca415f87e7f63eb2a2c3d349616589c9bea36e6d9503b1418aa0a0a73b67797d24c75dd716fe6779f8a462a1671efb1a2d1433089eb0e2a17d105ca042a06440c9d52f7d6ffe8ab7c3c453a05a0243ba5aee609819bd5dacd8363be2a00a70d832a68a496c174ca785ba4bdddd5052ca3fb2410e4b667851fa7fb5272fa0af4e35e3f9b0617d7ff80cb79e1ef0921f21a41279292176d9e117f422595eb8a0a103553baefb54481ff0ea34d75edad63879182dd595e4aa8f85ca185cfa1abba081b203264801d3b0fdbefd7954900b4165dadb47a6ea210b92d8233429501a64a0f8e5f8ae8ac56d69b2e9ab881a0b853277e92833e7067b40651719d7bba31bfda0ebda38384f871cf603a4f5ad2a265e818aa91cbb68b4de1c2e1aadef084f4ceda08fa0b8481c834c6b4cb1a85d49b3509632ae55f5d194a69c821ad9753c3a5500a0aca036de251523a82f8188ef0525cc3d2e776aa786cf3457d8a483c4d1cf88a5a04f817b7b1e70e186f6f9ab7cea8f7437f7efb3aeeed22ba6a6ea380bfcf962a4a0cc6653c620e174f2d3e3e0756cbdeebc2cbcc1aa1c67cbf1c2ff8aaca84823c8a094e265b2c06fa02987213ade5598826a29e1a24bb04df2b102a646cfcede6c90a0f021a62413e90e1e8eaa49dc399a48a9d5ec3f84dfdbaa0a3b4a419dd1bde199a0e79d5f428c71f7435fa18cc01a6484a30dd43c781a3e5f14c95136dddd5901c880",
"0xf90211a05351d3596a31b5813aeed8088aec6f64b64ffb590a9b9a675e3c24917f1bd15ea00a098a769a5627b156bb55ef7d334a364443cbfe28431e2ff28165b44cb95542a01c7661008fe9ca32f0270d302d82979c1495343c9bf25e87dad3612929a5cb36a01f85cb40320b137c49e2ecb5aa3a3db85833c69747a86f2be7f4dc0d745742b2a03aa74296d38eb5c33bca5c94195da9f32a7217349e69575b84144268e6f1b2dca062ecf298a044531b19c8beea544f99197df6e8b54219a0f27dba4d4fa541babaa0b0cd63f95c67f0597165158d09c7c27d7d8202f9be608ae26dd5316433fe7b7ba0bf61c2af584a27b46bef9a14e527e7c0c2000e123d1bf39eccd59db47ecba01da0bf89e2fb3b1941518e2eb35824cdfa1245d6cb7bfbec99f6309a4d18fb9755a6a0a8b9e450a3d15bf70d4f9a6f6a3fd34dbd7eafcff3f0acb1d1ac6d162089b522a0250aa8ab557f89ffa83d661e8757f693b0d1315bca13d5bd1fc6897912a0ad45a0a962eba19068192509edcf9adf2b976ad84bd1bcd053bff075ec7ab214a80faaa0110825fe0f439d26bb6270e37fe89e8b9f9fab096420d5cbaddd04a349370209a0c7ff710535da560b7329183cd75aeeb34564ca01447eec54590bbdcfbdc8601ea09ff46c368e03322f356f1c5c863c4cf1aed1967bf39d020fb4bd0c281c68f0baa030a7ae32120f6c0a271f70f5323b5a4b7ec7c9b50eca8e62a9fe50b47b6fe09680",
"0xf90211a005d90b43639d8703b5b46e1a300ee59e21bc329ace56f5fd40a49f8c98c997c4a02634774fbdaf6c1380337541329edb575ca40bdf38f99d22f20c6599fb70a915a02b80f43a33554844dc3ad3b346db70a2edd74985b440c22fe70f51918c6e1464a0a685cffee53f88bfcfdede63aaf73becc1f5621c08390c13b02dc9de8cfc0521a0bbd2a32a4b54ebf626031f190e39c90724ffd91b9474d841b6eb9c0f76bc8ce7a0c860455b9da06b1e69e4aa952c787761acdb1603913b4965fae684cf016bcbdca067e8f57fae01bfed4e1add6a0b9ac5641be21ddf24ef18289e87035d75b871f5a0adabd78fddeaa6c42803f81a23e7991eb611e9ec1c9071a24022aac0a71a8ceea07307279fbc13f2f29a5f6c21add5106efa24f73ccb7ca02d460196e3810c37afa0392aa0794df746a43cb59907b892bf5aa78051b45c7ca1112c5b68a523623295a0a5e09e62e5cfff36c2dd8e738762140f943b6c2c85d8b5385f121f74ae01282ca00943c8def2f6f51ef2e99cbe99c50eb5f599bccb039529a874c6b3de3e5cd2e2a082affeb98e43b37d23515983324bc459bbf503ddcd51e9457acd9b1ee47ce4a1a0a3dc55ef190ed6ba960abae03c755928f46eafd5fec04db7b78345eddc1e16d0a0d8e9e0f89f9729526f4ba732797d9bb81e6eccfcf5778cde7e99545a313c8697a097c3d5a698982821f3354cf5a20a6673b5300019af36518f85e4759e554e4def80",
"0xf90171a0d346d1b6b8da1dfb0b97f26625ef03ef5636d748d022ce03898d32c8aeff119f80a0967b15d622a02ad2992db69e49b0460151757bf3b360df5fea408bea63c026cda09c6b26a5217537924d8783f57eaca4b17d0cf7bd2803ff372ca9e4c7c54bc29fa0cebf14dcf1027078e6c58277f932e1caa306aa29d12ebb690c54c5738124ae8880a05b45f21d20b4391a22b68e2d164a58c2a80d356968dfee8650c5e2516398ab8880a0e4b87f12645c511bcb763c404a18b754ee4644ae30097302385298a3f98aae10a056cf6e6079e53f30b464df39a4df44ef33d032009b46e60ad2204fdd5ea9033fa0fe4c7e738646adc940b306191f948c0c5cc7bd694d954c945d8c766d7ef34c92a0e8c65be6abb26a472102fc89faebc972b38d2badc20e7d7b16206d4b22f3462580a080b20dc002a994c21508d73e79464af914fa6d746bbba02477b512ad0eb7ea7f80a0c80aadfec76857d1cceeeec3cbd04f6923e36334fa394d20c0fa8e0da078e9e280",
"0xe58313f4a7a02023e131c23f3ffcfdc667ddfb152602a5584a3bc89d5fc721273d06280756c4",
"0xf851a0d19a48e137ace64957841715dac64aca2b13b67dcc281c40a5d0952ac474a61380808080808080808080808080a07028d519df1e68348cc0597fe6fa3335b181a3f0ddab251792916634c9d2ed9e8080",
"0xf8639a3252ed22ad4e036cb1f5cf3d262d5438ece2fc3097ed10fb737bb846f8448080a011de7f0b3b50bbe96191c2bb22b22933a18739348bc469ef3f995cfaf66c2352a0c2d83c5e1e5dcb9487d2b2b5689520b4377d503cc54d63b144f88cb21835595c",
],
storageProof: [
"0xf90111a0845a58d6992ac45fa0bcc607366bf7f88e0d03dc3dbce12a1cc4015c4d8abdeba0c26c8ac66961c484cbf850ff321be32b5ba99620fe480648db12d6783a17078aa04e918b76be51be2f02df0ac6191ec2765d401d2229e47291806815da755f5b5e80a06d7d944d988655e1fdc697223d3c3e115d005968e289f141c94f105d86c74196808080a0dc97b79c03ff591103b4961835f65de82a94c7166fc1e390f35deaa11b158c408080a057a2faf89e4b43c431600b5b6c687cdb6cbe5af08f9380158806b7e603da7b78a0f45cd6b758f905463cb8d116e78e191d983f7c26f98460306a7d6217c62da9778080a0a334cfbd70c0bfdfdce3d0aa560293be9cd496c2d47c8508c82f210b01dfc58680",
"0xe2a0390decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56302",
],
},
];
describe("PatriciaMerkleTrieVerifier", async () => {

View File

@@ -55,8 +55,8 @@
"typescript": "^4.5.2"
},
"dependencies": {
"@openzeppelin/contracts": "^4.5.0",
"@openzeppelin/contracts-upgradeable": "^4.5.2"
"@openzeppelin/contracts": "^v4.9.2",
"@openzeppelin/contracts-upgradeable": "^v4.9.2"
},
"lint-staged": {
"*.{js,ts}": "npx eslint --cache --fix",

View File

@@ -5,6 +5,14 @@ pragma solidity ^0.8.0;
import {IScrollMessenger} from "../libraries/IScrollMessenger.sol";
interface IL1ScrollMessenger is IScrollMessenger {
/**********
* Events *
**********/
/// @notice Emitted when the maximum number of times each message can be replayed is updated.
/// @param maxReplayTimes The new maximum number of times each message can be replayed.
event UpdateMaxReplayTimes(uint256 maxReplayTimes);
/***********
* Structs *
***********/
@@ -40,7 +48,7 @@ interface IL1ScrollMessenger is IScrollMessenger {
/// @param from The address of the sender of the message.
/// @param to The address of the recipient of the message.
/// @param value The msg.value passed to the message call.
/// @param queueIndex The queue index for the message to replay.
/// @param messageNonce The nonce for the message to replay.
/// @param message The content of the message.
/// @param newGasLimit New gas limit to be used for this message.
/// @param refundAddress The address of account who will receive the refunded fee.
@@ -48,9 +56,23 @@ interface IL1ScrollMessenger is IScrollMessenger {
address from,
address to,
uint256 value,
uint256 queueIndex,
uint256 messageNonce,
bytes memory message,
uint32 newGasLimit,
address refundAddress
) external payable;
/// @notice Drop a skipped message.
/// @param from The address of the sender of the message.
/// @param to The address of the recipient of the message.
/// @param value The msg.value passed to the message call.
/// @param messageNonce The nonce for the message to drop.
/// @param message The content of the message.
function dropMessage(
address from,
address to,
uint256 value,
uint256 messageNonce,
bytes memory message
) external;
}

View File

@@ -13,7 +13,10 @@ import {ScrollMessengerBase} from "../libraries/ScrollMessengerBase.sol";
import {AddressAliasHelper} from "../libraries/common/AddressAliasHelper.sol";
import {WithdrawTrieVerifier} from "../libraries/verifier/WithdrawTrieVerifier.sol";
import {IMessageDropCallback} from "../libraries/callbacks/IMessageDropCallback.sol";
// solhint-disable avoid-low-level-calls
// solhint-disable reason-string
/// @title L1ScrollMessenger
/// @notice The `L1ScrollMessenger` contract can:
@@ -26,6 +29,17 @@ import {WithdrawTrieVerifier} from "../libraries/verifier/WithdrawTrieVerifier.s
/// @dev All deposited Ether (including `WETH` deposited throng `L1WETHGateway`) will locked in
/// this contract.
contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1ScrollMessenger {
/***********
* Structs *
***********/
struct ReplayState {
// The number of replayed times.
uint128 times;
// The queue index of lastest replayed one. If it is zero, it means the message has not been replayed.
uint128 lastIndex;
}
/*************
* Variables *
*************/
@@ -36,12 +50,34 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
/// @notice Mapping from L2 message hash to a boolean value indicating if the message has been successfully executed.
mapping(bytes32 => bool) public isL2MessageExecuted;
/// @notice Mapping from L1 message hash to drop status.
mapping(bytes32 => bool) public isL1MessageDropped;
/// @notice The address of Rollup contract.
address public rollup;
/// @notice The address of L1MessageQueue contract.
address public messageQueue;
/// @notice The maximum number of times each L1 message can be replayed.
uint256 public maxReplayTimes;
/// @notice Mapping from L1 message hash to replay state.
mapping(bytes32 => ReplayState) public replayStates;
/// @notice Mapping from queue index to previous replay queue index.
///
/// @dev If a message `x` was replayed 3 times with index `q1`, `q2` and `q3`, the
/// value of `prevReplayIndex` and `replayStates` will be `replayStates[hash(x)].lastIndex = q3`,
/// `replayStates[hash(x)].times = 3`, `prevReplayIndex[q3] = q2`, `prevReplayIndex[q2] = q1`,
/// `prevReplayIndex[q1] = x` and `prevReplayIndex[x]=nil`.
///
/// @dev The index `x` that `prevReplayIndex[x]=nil` is used as the termination of the list.
/// Usually we use `0` to represent `nil`, but we cannot distinguish it with the first message
/// with index zero. So a nonzero offset `1` is added to the value of `prevReplayIndex[x]` to
/// avoid such situation.
mapping(uint256 => uint256) public prevReplayIndex;
/***************
* Constructor *
***************/
@@ -62,9 +98,6 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
rollup = _rollup;
messageQueue = _messageQueue;
// initialize to a nonzero value
xDomainMessageSender = ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER;
}
/*****************************
@@ -78,7 +111,7 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
bytes memory _message,
uint256 _gasLimit
) external payable override whenNotPaused {
_sendMessage(_to, _value, _message, _gasLimit, tx.origin);
_sendMessage(_to, _value, _message, _gasLimit, msg.sender);
}
/// @inheritdoc IScrollMessenger
@@ -100,12 +133,7 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
uint256 _nonce,
bytes memory _message,
L2MessageProof memory _proof
) external override whenNotPaused {
require(
xDomainMessageSender == ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER,
"Message is already in execution"
);
) external override whenNotPaused notInExecution {
bytes32 _xDomainCalldataHash = keccak256(_encodeXDomainCalldata(_from, _to, _value, _nonce, _message));
require(!isL2MessageExecuted[_xDomainCalldataHash], "Message was already successfully executed");
@@ -144,21 +172,23 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
address _from,
address _to,
uint256 _value,
uint256 _queueIndex,
uint256 _messageNonce,
bytes memory _message,
uint32 _newGasLimit,
address _refundAddress
) external payable override whenNotPaused {
) external payable override whenNotPaused notInExecution {
// We will use a different `queueIndex` for the replaced message. However, the original `queueIndex` or `nonce`
// is encoded in the `_message`. We will check the `xDomainCalldata` in layer 2 to avoid duplicated execution.
// So, only one message will succeed in layer 2. If one of the message is executed successfully, the other one
// will revert with "Message was already successfully executed".
address _messageQueue = messageQueue;
address _counterpart = counterpart;
bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _queueIndex, _message);
bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _messageNonce, _message);
bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);
require(isL1MessageSent[_xDomainCalldataHash], "Provided message has not been enqueued");
// cannot replay dropped message
require(!isL1MessageDropped[_xDomainCalldataHash], "Message already dropped");
// compute and deduct the messaging fee to fee vault.
uint256 _fee = IL1MessageQueue(_messageQueue).estimateCrossDomainMessageFee(_newGasLimit);
@@ -171,8 +201,28 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
}
// enqueue the new transaction
uint256 _nextQueueIndex = IL1MessageQueue(_messageQueue).nextCrossDomainMessageIndex();
IL1MessageQueue(_messageQueue).appendCrossDomainMessage(_counterpart, _newGasLimit, _xDomainCalldata);
ReplayState memory _replayState = replayStates[_xDomainCalldataHash];
// update the replayed message chain.
unchecked {
if (_replayState.lastIndex == 0) {
// the message has not been replayed before.
prevReplayIndex[_nextQueueIndex] = _messageNonce + 1;
} else {
prevReplayIndex[_nextQueueIndex] = _replayState.lastIndex + 1;
}
}
_replayState.lastIndex = uint128(_nextQueueIndex);
// update replay times
require(_replayState.times < maxReplayTimes, "Exceed maximum replay times");
unchecked {
_replayState.times += 1;
}
replayStates[_xDomainCalldataHash] = _replayState;
// refund fee to `_refundAddress`
unchecked {
uint256 _refund = msg.value - _fee;
@@ -183,6 +233,60 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
}
}
/// @inheritdoc IL1ScrollMessenger
function dropMessage(
address _from,
address _to,
uint256 _value,
uint256 _messageNonce,
bytes memory _message
) external override whenNotPaused notInExecution {
// The criteria for dropping a message:
// 1. The message is a L1 message.
// 2. The message has not been dropped before.
// 3. the message and all of its replacement are finalized in L1.
// 4. the message and all of its replacement are skipped.
//
// Possible denial of service attack:
// + replayMessage is called every time someone want to drop the message.
// + replayMessage is called so many times for a skipped message, thus results a long list.
//
// We limit the number of `replayMessage` calls of each message, which may solve the above problem.
address _messageQueue = messageQueue;
// check message exists
bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _messageNonce, _message);
bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);
require(isL1MessageSent[_xDomainCalldataHash], "Provided message has not been enqueued");
// check message not dropped
require(!isL1MessageDropped[_xDomainCalldataHash], "Message already dropped");
// check message is finalized
uint256 _lastIndex = replayStates[_xDomainCalldataHash].lastIndex;
if (_lastIndex == 0) _lastIndex = _messageNonce;
// check message is skipped and drop it.
// @note If the list is very long, the message may never be dropped.
while (true) {
IL1MessageQueue(_messageQueue).dropCrossDomainMessage(_lastIndex);
_lastIndex = prevReplayIndex[_lastIndex];
if (_lastIndex == 0) break;
unchecked {
_lastIndex = _lastIndex - 1;
}
}
isL1MessageDropped[_xDomainCalldataHash] = true;
// set execution context
xDomainMessageSender = ScrollConstants.DROP_XDOMAIN_MESSAGE_SENDER;
IMessageDropCallback(_from).onDropMessage{value: _value}(_message);
// clear execution context
xDomainMessageSender = ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER;
}
/************************
* Restricted Functions *
************************/
@@ -198,6 +302,15 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
}
}
/// @notice Update max replay times.
/// @dev This function can only called by contract owner.
/// @param _maxReplayTimes The new max replay times.
function updateMaxReplayTimes(uint256 _maxReplayTimes) external onlyOwner {
maxReplayTimes = _maxReplayTimes;
emit UpdateMaxReplayTimes(_maxReplayTimes);
}
/**********************
* Internal Functions *
**********************/

View File

@@ -72,6 +72,20 @@ interface IL1ERC1155Gateway {
uint256[] _amounts
);
/// @notice Emitted when some ERC1155 token is refunded.
/// @param token The address of the token in L1.
/// @param recipient The address of receiver in L1.
/// @param tokenId The id of token refunded.
/// @param amount The amount of token refunded.
event RefundERC1155(address indexed token, address indexed recipient, uint256 tokenId, uint256 amount);
/// @notice Emitted when some ERC1155 token is refunded.
/// @param token The address of the token in L1.
/// @param recipient The address of receiver in L1.
/// @param tokenIds The list of ids of token refunded.
/// @param amounts The list of amount of token refunded.
event BatchRefundERC1155(address indexed token, address indexed recipient, uint256[] tokenIds, uint256[] amounts);
/*************************
* Public View Functions *
*************************/

View File

@@ -39,6 +39,12 @@ interface IL1ERC20Gateway {
bytes data
);
/// @notice Emitted when some ERC20 token is refunded.
/// @param token The address of the token in L1.
/// @param recipient The address of receiver in L1.
/// @param amount The amount of token refunded to receiver.
event RefundERC20(address indexed token, address indexed recipient, uint256 amount);
/*************************
* Public View Functions *
*************************/

View File

@@ -64,6 +64,18 @@ interface IL1ERC721Gateway {
uint256[] _tokenIds
);
/// @notice Emitted when some ERC721 token is refunded.
/// @param token The address of the token in L1.
/// @param recipient The address of receiver in L1.
/// @param tokenId The id of token refunded.
event RefundERC721(address indexed token, address indexed recipient, uint256 tokenId);
/// @notice Emitted when a batch of ERC721 tokens are refunded.
/// @param token The address of the token in L1.
/// @param recipient The address of receiver in L1.
/// @param tokenIds The list of token ids of the ERC721 NFT refunded.
event BatchRefundERC721(address indexed token, address indexed recipient, uint256[] tokenIds);
/*****************************
* Public Mutating Functions *
*****************************/

View File

@@ -21,6 +21,11 @@ interface IL1ETHGateway {
/// @param data The optional calldata passed to recipient in L2.
event DepositETH(address indexed from, address indexed to, uint256 amount, bytes data);
/// @notice Emitted when some ETH is refunded.
/// @param recipient The address of receiver in L1.
/// @param amount The amount of ETH refunded to receiver.
event RefundETH(address indexed recipient, uint256 amount);
/*****************************
* Public Mutating Functions *
*****************************/

View File

@@ -65,32 +65,6 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
return tokenMapping[_l1Token];
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IL1ERC20Gateway
function finalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address _from,
address _to,
uint256 _amount,
bytes calldata _data
) external payable override onlyCallByCounterpart nonReentrant {
require(msg.value == 0, "nonzero msg.value");
require(_l2Token != address(0), "token address cannot be 0");
require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");
// @note can possible trigger reentrant call to this contract or messenger,
// but it seems not a big problem.
IERC20Upgradeable(_l1Token).safeTransfer(_to, _amount);
_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}
/************************
* Restricted Functions *
************************/
@@ -110,6 +84,29 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
* Internal Functions *
**********************/
/// @inheritdoc L1ERC20Gateway
function _beforeFinalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address,
address,
uint256,
bytes calldata
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
require(_l2Token != address(0), "token address cannot be 0");
require(_l2Token == tokenMapping[_l1Token], "l2 token mismatch");
}
/// @inheritdoc L1ERC20Gateway
function _beforeDropMessage(
address,
address,
uint256
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
}
/// @inheritdoc L1ERC20Gateway
function _deposit(
address _token,
@@ -137,7 +134,7 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
);
// 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, _from);
emit DepositERC20(_token, _l2Token, _from, _to, _amount, _data);
}

View File

@@ -10,6 +10,7 @@ import {IL2ERC1155Gateway} from "../../L2/gateways/IL2ERC1155Gateway.sol";
import {IL1ScrollMessenger} from "../IL1ScrollMessenger.sol";
import {IL1ERC1155Gateway} from "./IL1ERC1155Gateway.sol";
import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallback.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// @title L1ERC1155Gateway
@@ -19,7 +20,13 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// NFT will be transfer to the recipient directly.
///
/// This will be changed if we have more specific scenarios.
contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, ScrollGatewayBase, IL1ERC1155Gateway {
contract L1ERC1155Gateway is
OwnableUpgradeable,
ERC1155HolderUpgradeable,
ScrollGatewayBase,
IL1ERC1155Gateway,
IMessageDropCallback
{
/**********
* Events *
**********/
@@ -131,6 +138,31 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
emit FinalizeBatchWithdrawERC1155(_l1Token, _l2Token, _from, _to, _tokenIds, _amounts);
}
/// @inheritdoc IMessageDropCallback
function onDropMessage(bytes calldata _message) external payable virtual onlyInDropContext nonReentrant {
require(msg.value == 0, "nonzero msg.value");
if (bytes4(_message[0:4]) == IL2ERC1155Gateway.finalizeDepositERC1155.selector) {
(address _token, , address _sender, , uint256 _tokenId, uint256 _amount) = abi.decode(
_message[4:],
(address, address, address, address, uint256, uint256)
);
IERC1155Upgradeable(_token).safeTransferFrom(address(this), _sender, _tokenId, _amount, "");
emit RefundERC1155(_token, _sender, _tokenId, _amount);
} else if (bytes4(_message[0:4]) == IL2ERC1155Gateway.finalizeBatchDepositERC1155.selector) {
(address _token, , address _sender, , uint256[] memory _tokenIds, uint256[] memory _amounts) = abi.decode(
_message[4:],
(address, address, address, address, uint256[], uint256[])
);
IERC1155Upgradeable(_token).safeBatchTransferFrom(address(this), _sender, _tokenIds, _amounts, "");
emit BatchRefundERC1155(_token, _sender, _tokenIds, _amounts);
} else {
revert("invalid selector");
}
}
/************************
* Restricted Functions *
************************/
@@ -183,7 +215,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
);
// 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, msg.sender);
emit DepositERC1155(_token, _l2Token, msg.sender, _to, _tokenId, _amount);
}
@@ -226,7 +258,7 @@ contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
);
// 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, msg.sender);
emit BatchDepositERC1155(_token, _l2Token, msg.sender, _to, _tokenIds, _amounts);
}

View File

@@ -10,11 +10,24 @@ import {IL1GatewayRouter} from "./IL1GatewayRouter.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
import {IL2ERC20Gateway} from "../../L2/gateways/IL2ERC20Gateway.sol";
import {IScrollMessenger} from "../../libraries/IScrollMessenger.sol";
import {ScrollConstants} from "../../libraries/constants/ScrollConstants.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallback.sol";
// solhint-disable no-empty-blocks
abstract contract L1ERC20Gateway is ScrollGatewayBase, IL1ERC20Gateway {
abstract contract L1ERC20Gateway is IL1ERC20Gateway, IMessageDropCallback, ScrollGatewayBase {
using SafeERC20 for IERC20;
/*************
* Variables *
*************/
/// @dev The storage slots for future usage.
uint256[50] private __gap;
/*****************************
* Public Mutating Functions *
*****************************/
@@ -49,10 +62,75 @@ abstract contract L1ERC20Gateway is ScrollGatewayBase, IL1ERC20Gateway {
_deposit(_token, _to, _amount, _data, _gasLimit);
}
/// @inheritdoc IL1ERC20Gateway
function finalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address _from,
address _to,
uint256 _amount,
bytes calldata _data
) external payable override onlyCallByCounterpart nonReentrant {
_beforeFinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
// @note can possible trigger reentrant call to this contract or messenger,
// but it seems not a big problem.
IERC20(_l1Token).safeTransfer(_to, _amount);
_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}
/// @inheritdoc IMessageDropCallback
function onDropMessage(bytes calldata _message) external payable virtual onlyInDropContext nonReentrant {
// _message should start with 0x8431f5c1 => finalizeDepositERC20(address,address,address,address,uint256,bytes)
require(bytes4(_message[0:4]) == IL2ERC20Gateway.finalizeDepositERC20.selector, "invalid selector");
// decode (token, receiver, amount)
(address _token, , address _receiver, , uint256 _amount, ) = abi.decode(
_message[4:],
(address, address, address, address, uint256, bytes)
);
// do dome check for each custom gateway
_beforeDropMessage(_token, _receiver, _amount);
IERC20(_token).safeTransfer(_receiver, _amount);
emit RefundERC20(_token, _receiver, _amount);
}
/**********************
* Internal Functions *
**********************/
/// @dev Internal function hook to perform checks and actions before finalizing the withdrawal.
/// @param _l1Token The address of corresponding L1 token in L1.
/// @param _l2Token The address of corresponding L2 token in L2.
/// @param _from The address of account who withdraw the token in L2.
/// @param _to The address of recipient in L1 to receive the token.
/// @param _amount The amount of the token to withdraw.
/// @param _data Optional data to forward to recipient's account.
function _beforeFinalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address _from,
address _to,
uint256 _amount,
bytes calldata _data
) internal virtual;
/// @dev Internal function hook to perform checks and actions before dropping the message.
/// @param _token The L1 token address.
/// @param _receiver The recipient address on L1.
/// @param _amount The amount of token to refund.
function _beforeDropMessage(
address _token,
address _receiver,
uint256 _amount
) internal virtual;
/// @dev Internal function to transfer ERC20 token to this contract.
/// @param _token The address of token to transfer.
/// @param _amount The amount of token to transfer.

View File

@@ -10,6 +10,7 @@ import {IL2ERC721Gateway} from "../../L2/gateways/IL2ERC721Gateway.sol";
import {IL1ScrollMessenger} from "../IL1ScrollMessenger.sol";
import {IL1ERC721Gateway} from "./IL1ERC721Gateway.sol";
import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallback.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// @title L1ERC721Gateway
@@ -19,7 +20,13 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// NFT will be transfer to the recipient directly.
///
/// This will be changed if we have more specific scenarios.
contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollGatewayBase, IL1ERC721Gateway {
contract L1ERC721Gateway is
OwnableUpgradeable,
ERC721HolderUpgradeable,
ScrollGatewayBase,
IL1ERC721Gateway,
IMessageDropCallback
{
/**********
* Events *
**********/
@@ -126,6 +133,32 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
emit FinalizeBatchWithdrawERC721(_l1Token, _l2Token, _from, _to, _tokenIds);
}
/// @inheritdoc IMessageDropCallback
function onDropMessage(bytes calldata _message) external payable virtual onlyInDropContext nonReentrant {
require(msg.value == 0, "nonzero msg.value");
if (bytes4(_message[0:4]) == IL2ERC721Gateway.finalizeDepositERC721.selector) {
(address _token, , address _receiver, , uint256 _tokenId) = abi.decode(
_message[4:],
(address, address, address, address, uint256)
);
IERC721Upgradeable(_token).safeTransferFrom(address(this), _receiver, _tokenId);
emit RefundERC721(_token, _receiver, _tokenId);
} else if (bytes4(_message[0:4]) == IL2ERC721Gateway.finalizeBatchDepositERC721.selector) {
(address _token, , address _receiver, , uint256[] memory _tokenIds) = abi.decode(
_message[4:],
(address, address, address, address, uint256[])
);
for (uint256 i = 0; i < _tokenIds.length; i++) {
IERC721Upgradeable(_token).safeTransferFrom(address(this), _receiver, _tokenIds[i]);
}
emit BatchRefundERC721(_token, _receiver, _tokenIds);
} else {
revert("invalid selector");
}
}
/************************
* Restricted Functions *
************************/
@@ -173,7 +206,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
);
// 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, msg.sender);
emit DepositERC721(_token, _l2Token, msg.sender, _to, _tokenId);
}
@@ -210,7 +243,7 @@ contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
);
// 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, msg.sender);
emit BatchDepositERC721(_token, _l2Token, msg.sender, _to, _tokenIds);
}

View File

@@ -8,14 +8,17 @@ import {IL2ETHGateway} from "../../L2/gateways/IL2ETHGateway.sol";
import {IL1ScrollMessenger} from "../IL1ScrollMessenger.sol";
import {IL1ETHGateway} from "./IL1ETHGateway.sol";
import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallback.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
// solhint-disable avoid-low-level-calls
/// @title L1ETHGateway
/// @notice The `L1ETHGateway` is used to deposit ETH in layer 1 and
/// finalize withdraw ETH from layer 2.
/// @dev The deposited ETH tokens are held in this gateway. On finalizing withdraw, the corresponding
/// ETH will be transfer to the recipient directly.
contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway, IMessageDropCallback {
/***************
* Constructor *
***************/
@@ -72,7 +75,6 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
// @note can possible trigger reentrant call to messenger,
// but it seems not a big problem.
// solhint-disable-next-line avoid-low-level-calls
(bool _success, ) = _to.call{value: _amount}("");
require(_success, "ETH transfer failed");
@@ -81,6 +83,22 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
emit FinalizeWithdrawETH(_from, _to, _amount, _data);
}
/// @inheritdoc IMessageDropCallback
function onDropMessage(bytes calldata _message) external payable virtual onlyInDropContext nonReentrant {
// _message should start with 0x232e8748 => finalizeDepositETH(address,address,uint256,bytes)
require(bytes4(_message[0:4]) == IL2ETHGateway.finalizeDepositETH.selector, "invalid selector");
// decode (receiver, amount)
(address _receiver, , uint256 _amount, ) = abi.decode(_message[4:], (address, address, uint256, bytes));
require(_amount == msg.value, "msg.value mismatch");
(bool _success, ) = _receiver.call{value: _amount}("");
require(_success, "ETH transfer failed");
emit RefundETH(_receiver, _amount);
}
/**********************
* Internal Functions *
**********************/
@@ -113,7 +131,7 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
_data
);
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, _amount, _message, _gasLimit);
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, _amount, _message, _gasLimit, _from);
emit DepositETH(_from, _to, _amount, _data);
}

View File

@@ -80,35 +80,40 @@ contract L1StandardERC20Gateway is Initializable, ScrollGatewayBase, L1ERC20Gate
return Clones.predictDeterministicAddress(l2TokenImplementation, _salt, l2TokenFactory);
}
/*****************************
* Public Mutating Functions *
*****************************/
/**********************
* Internal Functions *
**********************/
/// @inheritdoc IL1ERC20Gateway
function finalizeWithdrawERC20(
/// @inheritdoc L1ERC20Gateway
function _beforeFinalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address _from,
address _to,
uint256 _amount,
bytes calldata _data
) external payable override onlyCallByCounterpart nonReentrant {
address,
address,
uint256,
bytes calldata
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
require(_l2Token != address(0), "token address cannot be 0");
require(getL2ERC20Address(_l1Token) == _l2Token, "l2 token mismatch");
// @note can possible trigger reentrant call to messenger,
// but it seems not a big problem.
IERC20(_l1Token).safeTransfer(_to, _amount);
_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
// update `tokenMapping` on first withdraw
address _storedL2Token = tokenMapping[_l1Token];
if (_storedL2Token == address(0)) {
tokenMapping[_l1Token] = _l2Token;
} else {
require(_storedL2Token == _l2Token, "l2 token mismatch");
}
}
/**********************
* Internal Functions *
**********************/
/// @inheritdoc L1ERC20Gateway
function _beforeDropMessage(
address,
address,
uint256
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
}
/// @inheritdoc L1ERC20Gateway
function _deposit(
@@ -126,17 +131,20 @@ contract L1StandardERC20Gateway is Initializable, ScrollGatewayBase, L1ERC20Gate
// 2. Generate message passed to L2StandardERC20Gateway.
address _l2Token = tokenMapping[_token];
bytes memory _l2Data = _data;
bytes memory _l2Data;
if (_l2Token == address(0)) {
// It is a new token, compute and store mapping in storage.
// @note we won't update `tokenMapping` here but update the `tokenMapping` on
// first successful withdraw. This will prevent user to set arbitrary token
// metadata by setting a very small `_gasLimit` on the first tx.
_l2Token = getL2ERC20Address(_token);
tokenMapping[_token] = _l2Token;
// passing symbol/name/decimal in order to deploy in L2.
string memory _symbol = IERC20Metadata(_token).symbol();
string memory _name = IERC20Metadata(_token).name();
uint8 _decimals = IERC20Metadata(_token).decimals();
_l2Data = abi.encode(_data, abi.encode(_symbol, _name, _decimals));
_l2Data = abi.encode(true, abi.encode(_data, abi.encode(_symbol, _name, _decimals)));
} else {
_l2Data = abi.encode(false, _data);
}
bytes memory _message = abi.encodeWithSelector(
IL2ERC20Gateway.finalizeDepositERC20.selector,
@@ -149,7 +157,7 @@ contract L1StandardERC20Gateway is Initializable, ScrollGatewayBase, L1ERC20Gate
);
// 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, _from);
emit DepositERC20(_token, _l2Token, _from, _to, _amount, _data);
}

View File

@@ -70,34 +70,37 @@ contract L1WETHGateway is Initializable, ScrollGatewayBase, L1ERC20Gateway {
return l2WETH;
}
/*****************************
* Public Mutating Functions *
*****************************/
/**********************
* Internal Functions *
**********************/
/// @inheritdoc IL1ERC20Gateway
function finalizeWithdrawERC20(
/// @inheritdoc L1ERC20Gateway
function _beforeFinalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address _from,
address _to,
address,
address,
uint256 _amount,
bytes calldata _data
) external payable override onlyCallByCounterpart nonReentrant {
bytes calldata
) internal virtual override {
require(_l1Token == WETH, "l1 token not WETH");
require(_l2Token == l2WETH, "l2 token not WETH");
require(_amount == msg.value, "msg.value mismatch");
IWETH(_l1Token).deposit{value: _amount}();
IERC20(_l1Token).safeTransfer(_to, _amount);
_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}
/**********************
* Internal Functions *
**********************/
/// @inheritdoc L1ERC20Gateway
function _beforeDropMessage(
address _token,
address,
uint256 _amount
) internal virtual override {
require(_token == WETH, "token not WETH");
require(_amount == msg.value, "msg.value mismatch");
IWETH(_token).deposit{value: _amount}();
}
/// @inheritdoc L1ERC20Gateway
function _deposit(
@@ -131,7 +134,8 @@ contract L1WETHGateway is Initializable, ScrollGatewayBase, L1ERC20Gateway {
counterpart,
_amount,
_message,
_gasLimit
_gasLimit,
_from
);
emit DepositERC20(_token, l2WETH, _from, _to, _amount, _data);

View File

@@ -29,6 +29,10 @@ interface IL1MessageQueue {
/// @param skippedBitmap A bitmap indicates whether a message is skipped.
event DequeueTransaction(uint256 startIndex, uint256 count, uint256 skippedBitmap);
/// @notice Emitted when a message is dropped from L1.
/// @param index The index of message dropped.
event DropTransaction(uint256 index);
/*************************
* Public View Functions *
*************************/
@@ -110,4 +114,7 @@ interface IL1MessageQueue {
uint256 count,
uint256 skippedBitmap
) external;
/// @notice Drop a skipped message from the queue.
function dropCrossDomainMessage(uint256 index) external;
}

View File

@@ -8,18 +8,21 @@ interface IScrollChain {
**********/
/// @notice Emitted when a new batch is committed.
/// @param batchIndex The index of the batch.
/// @param batchHash The hash of the batch.
event CommitBatch(bytes32 indexed batchHash);
event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash);
/// @notice revert a pending batch.
/// @param batchIndex The index of the batch.
/// @param batchHash The hash of the batch
event RevertBatch(bytes32 indexed batchHash);
event RevertBatch(uint256 indexed batchIndex, bytes32 indexed batchHash);
/// @notice Emitted when a batch is finalized.
/// @param batchIndex The index of the batch.
/// @param batchHash The hash of the batch
/// @param stateRoot The state root in layer 2 after this batch.
/// @param withdrawRoot The merkle root in layer2 after this batch.
event FinalizeBatch(bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
/*************************
* Public View Functions *

View File

@@ -9,6 +9,10 @@ import {IL1MessageQueue} from "./IL1MessageQueue.sol";
import {AddressAliasHelper} from "../../libraries/common/AddressAliasHelper.sol";
// solhint-disable no-empty-blocks
// solhint-disable no-inline-assembly
// solhint-disable reason-string
/// @title L1MessageQueue
/// @notice This contract will hold all L1 to L2 messages.
/// Each appended message is assigned with a unique and increasing `uint256` index.
@@ -57,6 +61,15 @@ contract L1MessageQueue is OwnableUpgradeable, IL1MessageQueue {
/// @notice The max gas limit of L1 transactions.
uint256 public maxGasLimit;
/**********************
* Function Modifiers *
**********************/
modifier onlyMessenger() {
require(msg.sender == messenger, "Only callable by the L1ScrollMessenger");
_;
}
/***************
* Constructor *
***************/
@@ -248,9 +261,7 @@ contract L1MessageQueue is OwnableUpgradeable, IL1MessageQueue {
address _target,
uint256 _gasLimit,
bytes calldata _data
) external override {
require(msg.sender == messenger, "Only callable by the L1ScrollMessenger");
) external override onlyMessenger {
// validate gas limit
_validateGasLimit(_gasLimit, _data);
@@ -302,6 +313,16 @@ contract L1MessageQueue is OwnableUpgradeable, IL1MessageQueue {
emit DequeueTransaction(_startIndex, _count, _skippedBitmap);
}
/// @inheritdoc IL1MessageQueue
function dropCrossDomainMessage(uint256 _index) external onlyMessenger {
require(_index < pendingQueueIndex, "cannot drop pending message");
require(messageQueue[_index] != bytes32(0), "message already dropped or executed");
messageQueue[_index] = bytes32(0);
emit DropTransaction(_index);
}
/************************
* Restricted Functions *
************************/

View File

@@ -153,8 +153,8 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
committedBatches[0] = _batchHash;
finalizedStateRoots[0] = _stateRoot;
emit CommitBatch(_batchHash);
emit FinalizeBatch(_batchHash, _stateRoot, bytes32(0));
emit CommitBatch(0, _batchHash);
emit FinalizeBatch(0, _batchHash, _stateRoot, bytes32(0));
}
/// @inheritdoc IScrollChain
@@ -248,10 +248,12 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, 89 + _skippedL1MessageBitmap.length);
committedBatches[_batchIndex] = _batchHash;
emit CommitBatch(_batchHash);
emit CommitBatch(_batchIndex, _batchHash);
}
/// @inheritdoc IScrollChain
/// @dev If the owner want to revert a sequence of batches by sending multiple transactions,
/// make sure to revert recent batches first.
function revertBatch(bytes calldata _batchHeader, uint256 _count) external onlyOwner {
require(_count > 0, "count must be nonzero");
@@ -260,19 +262,21 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
// check batch hash
uint256 _batchIndex = BatchHeaderV0Codec.batchIndex(memPtr);
require(committedBatches[_batchIndex] == _batchHash, "incorrect batch hash");
// make sure no gap is left when reverting from the ending to the beginning.
require(committedBatches[_batchIndex + _count] == bytes32(0), "reverting must start from the ending");
// check finalization
require(_batchIndex > lastFinalizedBatchIndex, "can only revert unfinalized batch");
while (_count > 0) {
emit RevertBatch(_batchIndex, _batchHash);
committedBatches[_batchIndex] = bytes32(0);
unchecked {
_batchIndex += 1;
_count -= 1;
}
emit RevertBatch(_batchHash);
_batchHash = committedBatches[_batchIndex];
if (_batchHash == bytes32(0)) break;
}
@@ -342,7 +346,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
}
}
emit FinalizeBatch(_batchHash, _postStateRoot, _withdrawRoot);
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
/************************

View File

@@ -81,9 +81,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, PausableUpgradeable, IL2Scrol
ScrollMessengerBase._initialize(_counterpart, _feeVault);
maxFailedExecutionTimes = 3;
// initialize to a nonzero value
xDomainMessageSender = ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER;
}
/*************************
@@ -104,16 +101,16 @@ contract L2ScrollMessenger is ScrollMessengerBase, PausableUpgradeable, IL2Scrol
require(_expectedStateRoot != bytes32(0), "Block is not imported");
bytes32 _storageKey;
// `mapping(bytes32 => bool) public isL1MessageSent` is the 155-th slot of contract `L1ScrollMessenger`.
// `mapping(bytes32 => bool) public isL1MessageSent` is the 201-th slot of contract `L1ScrollMessenger`.
// + 1 from `Initializable`
// + 50 from `OwnableUpgradeable`
// + 50 from `ContextUpgradeable`
// + 4 from `ScrollMessengerBase`
// + 50 from `PausableUpgradeable`
// + 50 from `OwnableUpgradeable`
// + 50 from `ScrollMessengerBase`
// + 1-st in `L1ScrollMessenger`
assembly {
mstore(0x00, _msgHash)
mstore(0x20, 155)
mstore(0x20, 201)
_storageKey := keccak256(0x00, 0x40)
}
@@ -141,16 +138,16 @@ contract L2ScrollMessenger is ScrollMessengerBase, PausableUpgradeable, IL2Scrol
require(_expectedStateRoot != bytes32(0), "Block not imported");
bytes32 _storageKey;
// `mapping(bytes32 => bool) public isL2MessageExecuted` is the 156-th slot of contract `L1ScrollMessenger`.
// `mapping(bytes32 => bool) public isL2MessageExecuted` is the 202-th slot of contract `L1ScrollMessenger`.
// + 1 from `Initializable`
// + 50 from `OwnableUpgradeable`
// + 50 from `ContextUpgradeable`
// + 4 from `ScrollMessengerBase`
// + 50 from `PausableUpgradeable`
// + 50 from `OwnableUpgradeable`
// + 50 from `ScrollMessengerBase`
// + 2-nd in `L1ScrollMessenger`
assembly {
mstore(0x00, _msgHash)
mstore(0x20, 156)
mstore(0x20, 202)
_storageKey := keccak256(0x00, 0x40)
}
@@ -214,10 +211,7 @@ contract L2ScrollMessenger is ScrollMessengerBase, PausableUpgradeable, IL2Scrol
uint256 _nonce,
bytes memory _message,
L1MessageProof calldata _proof
) external override whenNotPaused {
// anti reentrance
require(xDomainMessageSender == ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER, "Already in execution");
) external override notInExecution whenNotPaused {
// check message status
bytes32 _xDomainCalldataHash = keccak256(_encodeXDomainCalldata(_from, _to, _value, _nonce, _message));
require(!isL1MessageExecuted[_xDomainCalldataHash], "Message successfully executed");

View File

@@ -7,6 +7,13 @@ import {IL2ERC20Gateway} from "./IL2ERC20Gateway.sol";
// solhint-disable no-empty-blocks
abstract contract L2ERC20Gateway is IL2ERC20Gateway {
/*************
* Variables *
*************/
/// @dev The storage slots for future usage.
uint256[50] private __gap;
/*****************************
* Public Mutating Functions *
*****************************/

View File

@@ -77,7 +77,7 @@ contract L2StandardERC20Gateway is Initializable, ScrollGatewayBase, L2ERC20Gate
address _from,
address _to,
uint256 _amount,
bytes calldata _data
bytes memory _data
) external payable override onlyCallByCounterpart nonReentrant {
require(msg.value == 0, "nonzero msg.value");
require(_l1Token != address(0), "token address cannot be 0");
@@ -91,12 +91,13 @@ contract L2StandardERC20Gateway is Initializable, ScrollGatewayBase, L2ERC20Gate
require(_l2Token == _expectedL2Token, "l2 token mismatch");
}
bool _hasMetadata;
(_hasMetadata, _data) = abi.decode(_data, (bool, bytes));
bytes memory _deployData;
bytes memory _callData;
if (tokenMapping[_l2Token] == address(0)) {
// first deposit, update mapping
tokenMapping[_l2Token] = _l1Token;
if (_hasMetadata) {
(_callData, _deployData) = abi.decode(_data, (bytes, bytes));
} else {
require(tokenMapping[_l2Token] == _l1Token, "token mapping mismatch");
@@ -104,6 +105,9 @@ contract L2StandardERC20Gateway is Initializable, ScrollGatewayBase, L2ERC20Gate
}
if (!_l2Token.isContract()) {
// first deposit, update mapping
tokenMapping[_l2Token] = _l1Token;
_deployL2Token(_deployData, _l1Token);
}

View File

@@ -51,7 +51,7 @@ contract WETH9 {
balanceOf[msg.sender] -= wad;
}
(bool success, ) = msg.sender.call{value:wad}("");
(bool success, ) = msg.sender.call{value: wad}("");
require(success, "withdraw ETH failed");
emit Withdrawal(msg.sender, wad);

View File

@@ -28,10 +28,17 @@ pragma solidity ^0.8.0;
import {IL2ScrollMessenger} from "../L2/IL2ScrollMessenger.sol";
import {OwnableBase} from "./common/OwnableBase.sol";
// solhint-disable no-empty-blocks
// solhint-disable reason-string
/// @title FeeVault
/// @notice The FeeVault contract contains the basic logic for the various different vault contracts
/// used to hold fee revenue generated by the L2 system.
abstract contract FeeVault is OwnableBase {
/**********
* Events *
**********/
/// @notice Emits each time that a withdrawal occurs.
///
/// @param value Amount that was withdrawn (in wei).
@@ -39,6 +46,25 @@ abstract contract FeeVault is OwnableBase {
/// @param from Address that triggered the withdrawal.
event Withdrawal(uint256 value, address to, address from);
/// @notice Emits each time the owner updates the address of `messenger`.
/// @param oldMessenger The address of old messenger.
/// @param newMessenger The address of new messenger.
event UpdateMessenger(address indexed oldMessenger, address indexed newMessenger);
/// @notice Emits each time the owner updates the address of `recipient`.
/// @param oldRecipient The address of old recipient.
/// @param newRecipient The address of new recipient.
event UpdateRecipient(address indexed oldRecipient, address indexed newRecipient);
/// @notice Emits each time the owner updates the value of `minWithdrawAmount`.
/// @param oldMinWithdrawAmount The value of old `minWithdrawAmount`.
/// @param newMinWithdrawAmount The value of new `minWithdrawAmount`.
event UpdateMinWithdrawAmount(uint256 oldMinWithdrawAmount, uint256 newMinWithdrawAmount);
/*************
* Variables *
*************/
/// @notice Minimum balance before a withdrawal can be triggered.
uint256 public minWithdrawAmount;
@@ -51,6 +77,10 @@ abstract contract FeeVault is OwnableBase {
/// @notice Total amount of wei processed by the contract.
uint256 public totalProcessed;
/***************
* Constructor *
***************/
/// @param _owner The owner of the contract.
/// @param _recipient Wallet that will receive the fees on L1.
/// @param _minWithdrawalAmount Minimum balance before a withdrawal can be triggered.
@@ -65,6 +95,10 @@ abstract contract FeeVault is OwnableBase {
recipient = _recipient;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Allow the contract to receive ETH.
receive() external payable {}
@@ -92,21 +126,34 @@ abstract contract FeeVault is OwnableBase {
);
}
/************************
* Restricted Functions *
************************/
/// @notice Update the address of messenger.
/// @param _messenger The address of messenger to update.
function updateMessenger(address _messenger) external onlyOwner {
messenger = _messenger;
/// @param _newMessenger The address of messenger to update.
function updateMessenger(address _newMessenger) external onlyOwner {
address _oldMessenger = messenger;
messenger = _newMessenger;
emit UpdateMessenger(_oldMessenger, _newMessenger);
}
/// @notice Update the address of recipient.
/// @param _recipient The address of recipient to update.
function updateRecipient(address _recipient) external onlyOwner {
recipient = _recipient;
/// @param _newRecipient The address of recipient to update.
function updateRecipient(address _newRecipient) external onlyOwner {
address _oldRecipient = recipient;
recipient = _newRecipient;
emit UpdateRecipient(_oldRecipient, _newRecipient);
}
/// @notice Update the minimum withdraw amount.
/// @param _minWithdrawAmount The minimum withdraw amount to update.
function updateMinWithdrawAmount(uint256 _minWithdrawAmount) external onlyOwner {
minWithdrawAmount = _minWithdrawAmount;
/// @param _newMinWithdrawAmount The minimum withdraw amount to update.
function updateMinWithdrawAmount(uint256 _newMinWithdrawAmount) external onlyOwner {
uint256 _oldMinWithdrawAmount = minWithdrawAmount;
minWithdrawAmount = _newMinWithdrawAmount;
emit UpdateMinWithdrawAmount(_oldMinWithdrawAmount, _newMinWithdrawAmount);
}
}

View File

@@ -7,6 +7,8 @@ import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/Own
import {ScrollConstants} from "./constants/ScrollConstants.sol";
import {IScrollMessenger} from "./IScrollMessenger.sol";
// solhint-disable var-name-mixedcase
abstract contract ScrollMessengerBase is OwnableUpgradeable, IScrollMessenger {
/**********
* Events *
@@ -42,6 +44,9 @@ abstract contract ScrollMessengerBase is OwnableUpgradeable, IScrollMessenger {
/// @dev The status of for non-reentrant check.
uint256 private _lock_status;
/// @dev The storage slots for future usage.
uint256[46] private __gap;
/**********************
* Function Modifiers *
**********************/
@@ -60,6 +65,14 @@ abstract contract ScrollMessengerBase is OwnableUpgradeable, IScrollMessenger {
_lock_status = _NOT_ENTERED;
}
modifier notInExecution() {
require(
xDomainMessageSender == ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER,
"Message is already in execution"
);
_;
}
/***************
* Constructor *
***************/
@@ -74,8 +87,8 @@ abstract contract ScrollMessengerBase is OwnableUpgradeable, IScrollMessenger {
feeVault = _feeVault;
}
// allow others to send ether to messenger
receive() external payable {}
// make sure only owner can send ether to messenger to avoid possible user fund loss.
receive() external payable onlyOwner {}
/************************
* Restricted Functions *

View File

@@ -0,0 +1,7 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
interface IMessageDropCallback {
function onDropMessage(bytes memory message) external payable;
}

View File

@@ -5,4 +5,8 @@ pragma solidity ^0.8.0;
library ScrollConstants {
/// @notice The address of default cross chain message sender.
address internal constant DEFAULT_XDOMAIN_MESSAGE_SENDER = address(1);
/// @notice The address for dropping message.
/// @dev The first 20 bytes of keccak("drop")
address internal constant DROP_XDOMAIN_MESSAGE_SENDER = 0x6f297C61B5C92eF107fFD30CD56AFFE5A273e841;
}

View File

@@ -5,6 +5,7 @@ pragma solidity ^0.8.0;
import {IScrollGateway} from "./IScrollGateway.sol";
import {IScrollMessenger} from "../IScrollMessenger.sol";
import {IScrollGatewayCallback} from "../callbacks/IScrollGatewayCallback.sol";
import {ScrollConstants} from "../constants/ScrollConstants.sol";
abstract contract ScrollGatewayBase is IScrollGateway {
/*************
@@ -31,6 +32,9 @@ abstract contract ScrollGatewayBase is IScrollGateway {
/// @dev The status of for non-reentrant check.
uint256 private _status;
/// @dev The storage slots for future usage.
uint256[46] private __gap;
/**********************
* Function Modifiers *
**********************/
@@ -61,6 +65,16 @@ abstract contract ScrollGatewayBase is IScrollGateway {
_;
}
modifier onlyInDropContext() {
address _messenger = messenger; // gas saving
require(msg.sender == _messenger, "only messenger can call");
require(
ScrollConstants.DROP_XDOMAIN_MESSAGE_SENDER == IScrollMessenger(_messenger).xDomainMessageSender(),
"only called in drop context"
);
_;
}
/***************
* Constructor *
***************/

View File

@@ -3,59 +3,11 @@
pragma solidity ^0.8.0;
import {IERC1155} from "@openzeppelin/contracts/token/ERC1155/IERC1155.sol";
import {IScrollERC1155Extension} from "./IScrollERC1155Extension.sol";
interface IScrollERC1155 is IERC1155 {
/// @notice Return the address of Gateway the token belongs to.
function gateway() external view returns (address);
// The recommended ERC1155 implementation for bridge token.
// deployed in L2 when original token is on L1
// deployed in L1 when original token is on L2
interface IScrollERC1155 is IERC1155, IScrollERC1155Extension {
/// @notice Return the address of counterpart token.
function counterpart() external view returns (address);
/// @notice Mint some token to recipient's account.
/// @dev Gateway Utilities, only gateway contract can call
/// @param _to The address of recipient.
/// @param _tokenId The token id to mint.
/// @param _amount The amount of token to mint.
/// @param _data The data passed to recipient
function mint(
address _to,
uint256 _tokenId,
uint256 _amount,
bytes memory _data
) external;
/// @notice Burn some token from account.
/// @dev Gateway Utilities, only gateway contract can call
/// @param _from The address of account to burn token.
/// @param _tokenId The token id to burn.
/// @param _amount The amount of token to burn.
function burn(
address _from,
uint256 _tokenId,
uint256 _amount
) external;
/// @notice Batch mint some token to recipient's account.
/// @dev Gateway Utilities, only gateway contract can call
/// @param _to The address of recipient.
/// @param _tokenIds The token id to mint.
/// @param _amounts The list of corresponding amount of token to mint.
/// @param _data The data passed to recipient
function batchMint(
address _to,
uint256[] calldata _tokenIds,
uint256[] calldata _amounts,
bytes calldata _data
) external;
/// @notice Batch burn some token from account.
/// @dev Gateway Utilities, only gateway contract can call
/// @param _from The address of account to burn token.
/// @param _tokenIds The list of token ids to burn.
/// @param _amounts The list of corresponding amount of token to burn.
function batchBurn(
address _from,
uint256[] calldata _tokenIds,
uint256[] calldata _amounts
) external;
}

Some files were not shown because too many files have changed in this diff Show More