Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-11 23:18:07 -05:00

Compare commits: store_proo...v4.0.34 (24 Commits)
| Author | SHA1 | Date |
|---|---|---|
|  | eb9070e1ae |  |
|  | df54d518cf |  |
|  | 20e13445f8 |  |
|  | e780994146 |  |
|  | f9a0de0f16 |  |
|  | 3eb62880fe |  |
|  | e2612a3d88 |  |
|  | 530db9e2e1 |  |
|  | c12b1fd8f2 |  |
|  | bab1982c30 |  |
|  | b0ee9fa519 |  |
|  | d8cc69501e |  |
|  | 2eb458cf42 |  |
|  | 3832422bc9 |  |
|  | 4d96c12e7b |  |
|  | f56997bf15 |  |
|  | 2bd9694348 |  |
|  | 8b6c237d74 |  |
|  | 0fc6d2a5e5 |  |
|  | 0ce3b182a8 |  |
|  | 5c4f7c33fd |  |
|  | e8c66e4597 |  |
|  | de1d9b98ec |  |
|  | 58e07a7481 |  |
.github/workflows/bridge_history_api.yml (vendored, 1 change)
@@ -51,7 +51,6 @@ jobs:
uses: actions/checkout@v2
- name: Test
run: |
go get ./...
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3

@@ -1,4 +1,4 @@
name: Roller
name: Prover

on:
push:
@@ -8,8 +8,8 @@ on:
- develop
- alpha
paths:
- 'roller/**'
- '.github/workflows/roller.yml'
- 'prover/**'
- '.github/workflows/prover.yml'
pull_request:
types:
- opened
@@ -17,12 +17,12 @@ on:
- synchronize
- ready_for_review
paths:
- 'roller/**'
- '.github/workflows/roller.yml'
- 'prover/**'
- '.github/workflows/prover.yml'

defaults:
run:
working-directory: 'roller'
working-directory: 'prover'

jobs:
test:
@@ -43,7 +43,7 @@ jobs:
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: roller
flags: prover
compile:
if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
runs-on: ubuntu-latest
@@ -65,7 +65,7 @@ jobs:
workspaces: "common/libzkp/impl -> target"
- name: Test
run: |
make roller
make prover
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
@@ -92,7 +92,7 @@ jobs:
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/roller/ -w .
- run: goimports -local scroll-tech/prover/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy

Makefile (6 changes)
@@ -10,7 +10,7 @@ lint: ## The code's format and security checks.
make -C common lint
make -C coordinator lint
make -C database lint
make -C roller lint
make -C prover lint
make -C bridge-history-api lint

update: ## update dependencies
@@ -20,13 +20,13 @@ update: ## update dependencies
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/roller/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
goimports -local $(PWD)/database/ -w .
goimports -local $(PWD)/roller/ -w .
goimports -local $(PWD)/prover/ -w .

dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/

@@ -1,6 +1,7 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
@@ -9,20 +10,22 @@ import (
|
||||
"github.com/kataras/iris/v12"
|
||||
"github.com/kataras/iris/v12/mvc"
|
||||
"github.com/urfave/cli/v2"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/config"
|
||||
"bridge-history-api/controller"
|
||||
"bridge-history-api/db"
|
||||
"bridge-history-api/service"
|
||||
cutils "bridge-history-api/utils"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
)
|
||||
|
||||
var (
|
||||
app *cli.App
|
||||
app *cli.App
|
||||
db *gorm.DB
|
||||
subCtx context.Context
|
||||
)
|
||||
|
||||
var database db.OrmFactory
|
||||
|
||||
func pong(ctx iris.Context) {
|
||||
_, err := ctx.WriteString("pong")
|
||||
if err != nil {
|
||||
@@ -33,7 +36,8 @@ func pong(ctx iris.Context) {
|
||||
func setupQueryByAddressHandler(backendApp *mvc.Application) {
|
||||
// Register Dependencies.
|
||||
backendApp.Register(
|
||||
database,
|
||||
subCtx,
|
||||
db,
|
||||
service.NewHistoryService,
|
||||
)
|
||||
|
||||
@@ -44,7 +48,8 @@ func setupQueryByAddressHandler(backendApp *mvc.Application) {
|
||||
func setupQueryClaimableHandler(backendApp *mvc.Application) {
|
||||
// Register Dependencies.
|
||||
backendApp.Register(
|
||||
database,
|
||||
subCtx,
|
||||
db,
|
||||
service.NewHistoryService,
|
||||
)
|
||||
|
||||
@@ -54,7 +59,8 @@ func setupQueryClaimableHandler(backendApp *mvc.Application) {
|
||||
|
||||
func setupQueryByHashHandler(backendApp *mvc.Application) {
|
||||
backendApp.Register(
|
||||
database,
|
||||
subCtx,
|
||||
db,
|
||||
service.NewHistoryService,
|
||||
)
|
||||
backendApp.Handle(new(controller.QueryHashController))
|
||||
@@ -86,15 +92,22 @@ func action(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
|
||||
}
|
||||
database, err = db.NewOrmFactory(cfg)
|
||||
dbCfg := &database.Config{
|
||||
DriverName: cfg.DB.DriverName,
|
||||
DSN: cfg.DB.DSN,
|
||||
MaxOpenNum: cfg.DB.MaxOpenNum,
|
||||
MaxIdleNum: cfg.DB.MaxIdleNum,
|
||||
}
|
||||
db, err = database.InitDB(dbCfg)
|
||||
if err != nil {
|
||||
log.Crit("can not connect to database", "err", err)
|
||||
log.Crit("failed to init db", "err", err)
|
||||
}
|
||||
defer func() {
|
||||
if err = database.Close(); err != nil {
|
||||
log.Error("failed to close database", "err", err)
|
||||
if deferErr := database.CloseDB(db); deferErr != nil {
|
||||
log.Error("failed to close db", "err", err)
|
||||
}
|
||||
}()
|
||||
subCtx = ctx.Context
|
||||
bridgeApp := iris.New()
|
||||
bridgeApp.UseRouter(corsOptions)
|
||||
bridgeApp.Get("/ping", pong).Describe("healthcheck")
|
||||
|
||||
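Note on the hunks above: these commits drop the sqlx-based db.NewOrmFactory in favour of the shared scroll-tech/common/database helpers, which hand back a *gorm.DB. A minimal sketch of that flow, assuming only the InitDB/CloseDB signatures and Config fields that appear in this hunk; the helper name openDB and everything else here is illustrative, not code from the commits.

```go
// Sketch of the new initialization pattern, under the assumptions stated above.
package app

import (
	"log"

	"gorm.io/gorm"

	"scroll-tech/common/database"
)

func openDB(driverName, dsn string, maxOpen, maxIdle int) (*gorm.DB, func(), error) {
	dbCfg := &database.Config{
		DriverName: driverName,
		DSN:        dsn,
		MaxOpenNum: maxOpen,
		MaxIdleNum: maxIdle,
	}
	gormDB, err := database.InitDB(dbCfg)
	if err != nil {
		return nil, nil, err
	}
	// The caller defers this cleanup, matching the `defer func() { database.CloseDB(db) }()` hunk.
	cleanup := func() {
		if closeErr := database.CloseDB(gormDB); closeErr != nil {
			log.Println("failed to close db:", closeErr)
		}
	}
	return gormDB, cleanup, nil
}
```

The same open-then-deferred-close shape repeats in the fetcher entrypoint in the next hunks.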
@@ -14,8 +14,10 @@ import (
|
||||
"bridge-history-api/config"
|
||||
"bridge-history-api/crossmsg"
|
||||
"bridge-history-api/crossmsg/messageproof"
|
||||
"bridge-history-api/db"
|
||||
"bridge-history-api/orm"
|
||||
cutils "bridge-history-api/utils"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -55,9 +57,18 @@ func action(ctx *cli.Context) error {
|
||||
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
db, err := db.NewOrmFactory(cfg)
|
||||
dbCfg := &database.Config{
|
||||
DriverName: cfg.DB.DriverName,
|
||||
DSN: cfg.DB.DSN,
|
||||
MaxOpenNum: cfg.DB.MaxOpenNum,
|
||||
MaxIdleNum: cfg.DB.MaxIdleNum,
|
||||
}
|
||||
db, err := database.InitDB(dbCfg)
|
||||
if err != nil {
|
||||
log.Crit("failed to init db", "err", err)
|
||||
}
|
||||
defer func() {
|
||||
if deferErr := db.Close(); deferErr != nil {
|
||||
if deferErr := database.CloseDB(db); deferErr != nil {
|
||||
log.Error("failed to close db", "err", err)
|
||||
}
|
||||
}()
|
||||
@@ -105,12 +116,14 @@ func action(ctx *cli.Context) error {
|
||||
go l2crossMsgFetcher.Start()
|
||||
defer l2crossMsgFetcher.Stop()
|
||||
|
||||
CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
|
||||
// BlockTimestamp fetcher for l1 and l2
|
||||
l1BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db.UpdateL1BlockTimestamp, db.GetL1EarliestNoBlockTimestampHeight)
|
||||
l1BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, CrossMsgOrm.UpdateL1BlockTimestamp, CrossMsgOrm.GetL1EarliestNoBlockTimestampHeight)
|
||||
go l1BlockTimeFetcher.Start()
|
||||
defer l1BlockTimeFetcher.Stop()
|
||||
|
||||
l2BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, db.UpdateL2BlockTimestamp, db.GetL2EarliestNoBlockTimestampHeight)
|
||||
l2BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, CrossMsgOrm.UpdateL2BlockTimestamp, CrossMsgOrm.GetL2EarliestNoBlockTimestampHeight)
|
||||
go l2BlockTimeFetcher.Start()
|
||||
defer l2BlockTimeFetcher.Stop()
|
||||
|
||||
|
||||
@@ -2,13 +2,14 @@ package app
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/urfave/cli/v2"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/config"
|
||||
"bridge-history-api/db"
|
||||
"bridge-history-api/db/migrate"
|
||||
"bridge-history-api/orm/migrate"
|
||||
"bridge-history-api/utils"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
)
|
||||
|
||||
func getConfig(ctx *cli.Context) (*config.Config, error) {
|
||||
@@ -20,14 +21,14 @@ func getConfig(ctx *cli.Context) (*config.Config, error) {
|
||||
return dbCfg, nil
|
||||
}
|
||||
|
||||
func initDB(dbCfg *config.Config) (*sqlx.DB, error) {
|
||||
factory, err := db.NewOrmFactory(dbCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func initDB(dbCfg *config.DBConfig) (*gorm.DB, error) {
|
||||
cfg := &database.Config{
|
||||
DriverName: dbCfg.DriverName,
|
||||
DSN: dbCfg.DSN,
|
||||
MaxOpenNum: dbCfg.MaxOpenNum,
|
||||
MaxIdleNum: dbCfg.MaxIdleNum,
|
||||
}
|
||||
log.Debug("Got db config from env", "driver name", dbCfg.DB.DriverName, "dsn", dbCfg.DB.DSN)
|
||||
|
||||
return factory.GetDB(), nil
|
||||
return database.InitDB(cfg)
|
||||
}
|
||||
|
||||
// resetDB clean or reset database.
|
||||
@@ -36,11 +37,15 @@ func resetDB(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
db, err := initDB(cfg)
|
||||
gormDB, err := initDB(cfg.DB)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = migrate.ResetDB(db.DB)
|
||||
db, err := gormDB.DB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = migrate.ResetDB(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -54,12 +59,15 @@ func checkDBStatus(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
db, err := initDB(cfg)
|
||||
gormDB, err := initDB(cfg.DB)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return migrate.Status(db.DB)
|
||||
db, err := gormDB.DB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return migrate.Status(db)
|
||||
}
|
||||
|
||||
// dbVersion return the latest version
|
||||
@@ -68,12 +76,15 @@ func dbVersion(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
db, err := initDB(cfg)
|
||||
gormDB, err := initDB(cfg.DB)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
version, err := migrate.Current(db.DB)
|
||||
db, err := gormDB.DB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
version, err := migrate.Current(db)
|
||||
log.Info("show database version", "db version", version)
|
||||
|
||||
return err
|
||||
@@ -85,12 +96,15 @@ func migrateDB(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
db, err := initDB(cfg)
|
||||
gormDB, err := initDB(cfg.DB)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return migrate.Migrate(db.DB)
|
||||
db, err := gormDB.DB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return migrate.Migrate(db)
|
||||
}
|
||||
|
||||
// rollbackDB rollback db by version
|
||||
@@ -99,10 +113,14 @@ func rollbackDB(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
db, err := initDB(cfg)
|
||||
gormDB, err := initDB(cfg.DB)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
db, err := gormDB.DB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
version := ctx.Int64("version")
|
||||
return migrate.Rollback(db.DB, &version)
|
||||
return migrate.Rollback(db, &version)
|
||||
}
|
||||
|
||||
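Every db_cli subcommand in the hunks above now follows the same two-step shape: open GORM, then unwrap the underlying *sql.DB with gormDB.DB() before handing it to the migrate helpers. A small sketch of that step; it assumes only that migrate.ResetDB/Status/Migrate/Rollback accept a *sql.DB as shown, and the withSQLDB helper itself is hypothetical.

```go
// Sketch of the shared unwrap step, under the assumptions stated above.
package app

import (
	"database/sql"

	"gorm.io/gorm"
)

// withSQLDB captures the step every subcommand in this diff now repeats:
// initialize GORM, expose the underlying database/sql pool via gormDB.DB(),
// and pass it to a migrate helper such as migrate.Status(db) or migrate.Migrate(db).
func withSQLDB(gormDB *gorm.DB, fn func(*sql.DB) error) error {
	db, err := gormDB.DB() // *gorm.DB wraps a *sql.DB; DB() exposes it
	if err != nil {
		return err
	}
	return fn(db)
}
```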
@@ -24,7 +24,7 @@ type QueryClaimableController struct {
|
||||
|
||||
// Get defines the http get method behavior for QueryClaimableController
|
||||
func (c *QueryClaimableController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
|
||||
txs, total, err := c.Service.GetClaimableTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
|
||||
txs, total, err := c.Service.GetClaimableTxsByAddress(common.HexToAddress(req.Address), req.Offset, req.Limit)
|
||||
if err != nil {
|
||||
return &model.QueryByAddressResponse{Message: "500", Data: &model.Data{}}, err
|
||||
}
|
||||
@@ -38,7 +38,7 @@ func (c *QueryClaimableController) Get(req model.QueryByAddressRequest) (*model.
|
||||
|
||||
// Get defines the http get method behavior for QueryAddressController
|
||||
func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
|
||||
message, total, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
|
||||
message, total, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), req.Offset, req.Limit)
|
||||
if err != nil {
|
||||
return &model.QueryByAddressResponse{Message: "500", Data: &model.Data{}}, err
|
||||
}
|
||||
|
||||
@@ -7,9 +7,10 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/crossmsg/messageproof"
|
||||
"bridge-history-api/db"
|
||||
"bridge-history-api/orm"
|
||||
"bridge-history-api/utils"
|
||||
)
|
||||
|
||||
@@ -21,12 +22,13 @@ type BatchInfoFetcher struct {
|
||||
confirmation uint64
|
||||
blockTimeInSec int
|
||||
client *ethclient.Client
|
||||
db db.OrmFactory
|
||||
db *gorm.DB
|
||||
rollupOrm *orm.RollupBatch
|
||||
msgProofUpdater *messageproof.MsgProofUpdater
|
||||
}
|
||||
|
||||
// NewBatchInfoFetcher creates a new BatchInfoFetcher instance
|
||||
func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db db.OrmFactory, msgProofUpdater *messageproof.MsgProofUpdater) *BatchInfoFetcher {
|
||||
func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db *gorm.DB, msgProofUpdater *messageproof.MsgProofUpdater) *BatchInfoFetcher {
|
||||
return &BatchInfoFetcher{
|
||||
ctx: ctx,
|
||||
scrollChainAddr: scrollChainAddr,
|
||||
@@ -35,6 +37,7 @@ func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, ba
|
||||
blockTimeInSec: blockTimeInSec,
|
||||
client: client,
|
||||
db: db,
|
||||
rollupOrm: orm.NewRollupBatch(db),
|
||||
msgProofUpdater: msgProofUpdater,
|
||||
}
|
||||
}
|
||||
@@ -80,19 +83,20 @@ func (b *BatchInfoFetcher) fetchBatchInfo() error {
|
||||
log.Error("Can not get latest block number: ", "err", err)
|
||||
return err
|
||||
}
|
||||
latestBatch, err := b.db.GetLatestRollupBatch()
|
||||
latestBatchHeight, err := b.rollupOrm.GetLatestRollupBatchProcessedHeight(b.ctx)
|
||||
if err != nil {
|
||||
log.Error("Can not get latest BatchInfo: ", "err", err)
|
||||
return err
|
||||
}
|
||||
var startHeight uint64
|
||||
if latestBatch == nil {
|
||||
if latestBatchHeight == 0 {
|
||||
log.Info("no batch record in database, start from batchInfoStartNumber", "batchInfoStartNumber", b.batchInfoStartNumber)
|
||||
startHeight = b.batchInfoStartNumber
|
||||
} else {
|
||||
startHeight = latestBatch.CommitHeight + 1
|
||||
startHeight = latestBatchHeight + 1
|
||||
}
|
||||
for from := startHeight; number >= from; from += uint64(fetchLimit) {
|
||||
to := from + uint64(fetchLimit) - 1
|
||||
for from := startHeight; number >= from; from += fetchLimit {
|
||||
to := from + fetchLimit - 1
|
||||
// number - confirmation can never less than 0 since the for loop condition
|
||||
// but watch out the overflow
|
||||
if to > number {
|
||||
|
||||
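The loop above walks the chain in fetchLimit-sized windows, now in plain uint64 arithmetic. An illustrative reduction of that shape: fetchRange is a hypothetical stand-in for the real fetch call, the clamp on the final window mirrors the `if to > number` branch, and the overflow caution from the diff's comment still applies to `from + fetchLimit - 1` near the top of the uint64 range.

```go
// Chunked range fetch, reduced to its loop shape (assumptions stated above).
package crossmsg

func fetchInWindows(startHeight, number, fetchLimit uint64, fetchRange func(from, to uint64) error) error {
	for from := startHeight; from <= number; from += fetchLimit {
		to := from + fetchLimit - 1
		if to > number { // clamp the last, possibly partial, window
			to = number
		}
		if err := fetchRange(from, to); err != nil {
			return err
		}
	}
	return nil
}
```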
@@ -10,10 +10,10 @@ import (
|
||||
)
|
||||
|
||||
// GetEarliestNoBlockTimestampHeightFunc is a function type that gets the earliest record without block timestamp from database
|
||||
type GetEarliestNoBlockTimestampHeightFunc func() (uint64, error)
|
||||
type GetEarliestNoBlockTimestampHeightFunc func(ctx context.Context) (uint64, error)
|
||||
|
||||
// UpdateBlockTimestampFunc is a function type that updates block timestamp into database
|
||||
type UpdateBlockTimestampFunc func(height uint64, timestamp time.Time) error
|
||||
type UpdateBlockTimestampFunc func(ctx context.Context, height uint64, timestamp time.Time) error
|
||||
|
||||
// BlockTimestampFetcher fetches block timestamp from blockchain and saves them to database
|
||||
type BlockTimestampFetcher struct {
|
||||
@@ -52,7 +52,7 @@ func (b *BlockTimestampFetcher) Start() {
|
||||
log.Error("Can not get latest block number", "err", err)
|
||||
continue
|
||||
}
|
||||
startHeight, err := b.getEarliestNoBlockTimestampHeightFunc()
|
||||
startHeight, err := b.getEarliestNoBlockTimestampHeightFunc(b.ctx)
|
||||
if err != nil {
|
||||
log.Error("Can not get latest record without block timestamp", "err", err)
|
||||
continue
|
||||
@@ -63,12 +63,12 @@ func (b *BlockTimestampFetcher) Start() {
|
||||
log.Error("Can not get block by number", "err", err)
|
||||
break
|
||||
}
|
||||
err = b.updateBlockTimestampFunc(height, time.Unix(int64(block.Time), 0))
|
||||
err = b.updateBlockTimestampFunc(b.ctx, height, time.Unix(int64(block.Time), 0))
|
||||
if err != nil {
|
||||
log.Error("Can not update blockTimestamp into DB ", "err", err)
|
||||
break
|
||||
}
|
||||
height, err = b.getEarliestNoBlockTimestampHeightFunc()
|
||||
height, err = b.getEarliestNoBlockTimestampHeightFunc(b.ctx)
|
||||
if err != nil {
|
||||
log.Error("Can not get latest record without block timestamp", "err", err)
|
||||
break
|
||||
|
||||
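The two callback types above gain a context.Context parameter so that the gorm-backed ORM methods (CrossMsgOrm.UpdateL1BlockTimestamp and CrossMsgOrm.GetL1EarliestNoBlockTimestampHeight, as wired earlier in this diff) can be passed in directly as method values. A compilable sketch of that wiring; crossMsgStub is a stand-in, not the real orm package.

```go
// How method values satisfy the new context-aware callback types (sketch).
package crossmsg

import (
	"context"
	"time"
)

type GetEarliestNoBlockTimestampHeightFunc func(ctx context.Context) (uint64, error)
type UpdateBlockTimestampFunc func(ctx context.Context, height uint64, timestamp time.Time) error

type crossMsgStub struct{} // stands in for the gorm-backed orm.CrossMsg

func (c *crossMsgStub) GetL1EarliestNoBlockTimestampHeight(ctx context.Context) (uint64, error) {
	return 0, nil
}

func (c *crossMsgStub) UpdateL1BlockTimestamp(ctx context.Context, height uint64, ts time.Time) error {
	return nil
}

func wire() (GetEarliestNoBlockTimestampHeightFunc, UpdateBlockTimestampFunc) {
	c := &crossMsgStub{}
	// Method values convert directly to the named function types, which is
	// what the NewBlockTimestampFetcher call sites rely on after this change.
	return c.GetL1EarliestNoBlockTimestampHeight, c.UpdateL1BlockTimestamp
}
```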
@@ -12,9 +12,9 @@ import (
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/modern-go/reflect2"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/config"
|
||||
"bridge-history-api/db"
|
||||
"bridge-history-api/utils"
|
||||
)
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
type MsgFetcher struct {
|
||||
ctx context.Context
|
||||
config *config.LayerConfig
|
||||
db db.OrmFactory
|
||||
db *gorm.DB
|
||||
client *ethclient.Client
|
||||
worker *FetchEventWorker
|
||||
reorgHandling ReorgHandling
|
||||
@@ -34,7 +34,7 @@ type MsgFetcher struct {
|
||||
}
|
||||
|
||||
// NewMsgFetcher creates a new MsgFetcher instance
|
||||
func NewMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFactory, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*MsgFetcher, error) {
|
||||
func NewMsgFetcher(ctx context.Context, config *config.LayerConfig, db *gorm.DB, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*MsgFetcher, error) {
|
||||
msgFetcher := &MsgFetcher{
|
||||
ctx: ctx,
|
||||
config: config,
|
||||
@@ -120,22 +120,23 @@ func (c *MsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
|
||||
log.Error(fmt.Sprintf("%s: invalid get/fetch function", c.worker.Name))
|
||||
return
|
||||
}
|
||||
processedHeight, err := c.worker.G(c.db)
|
||||
processedHeight, err := c.worker.G(c.ctx, c.db)
|
||||
if err != nil {
|
||||
log.Error(fmt.Sprintf("%s: can not get latest processed block height", c.worker.Name))
|
||||
}
|
||||
log.Info(fmt.Sprintf("%s: ", c.worker.Name), "height", processedHeight)
|
||||
if processedHeight <= 0 || processedHeight < int64(c.config.StartHeight) {
|
||||
processedHeight = int64(c.config.StartHeight)
|
||||
if processedHeight <= 0 || processedHeight < c.config.StartHeight {
|
||||
processedHeight = c.config.StartHeight
|
||||
} else {
|
||||
processedHeight++
|
||||
}
|
||||
for from := processedHeight; from <= int64(number); from += fetchLimit {
|
||||
for from := processedHeight; from <= number; from += fetchLimit {
|
||||
to := from + fetchLimit - 1
|
||||
if to > int64(number) {
|
||||
to = int64(number)
|
||||
if to > number {
|
||||
to = number
|
||||
}
|
||||
err := c.worker.F(c.ctx, c.client, c.db, from, to, c.addressList)
|
||||
// watch for overflow here, tho its unlikely to happen
|
||||
err := c.worker.F(c.ctx, c.client, c.db, int64(from), int64(to), c.addressList)
|
||||
if err != nil {
|
||||
log.Error(fmt.Sprintf("%s: failed!", c.worker.Name), "err", err)
|
||||
break
|
||||
@@ -191,7 +192,7 @@ func (c *MsgFetcher) fetchMissingLatestHeaders() {
|
||||
log.Crit("Can not get safe number during reorg, quit the process", "err", err)
|
||||
}
|
||||
// clear all our saved data, because no data is safe now
|
||||
err = c.reorgHandling(c.ctx, int64(num), c.db)
|
||||
err = c.reorgHandling(c.ctx, num, c.db)
|
||||
// if handling success then we can update the cachedHeaders
|
||||
if err == nil {
|
||||
c.cachedHeaders = c.cachedHeaders[:0]
|
||||
@@ -200,7 +201,7 @@ func (c *MsgFetcher) fetchMissingLatestHeaders() {
|
||||
c.reorgEndCh <- struct{}{}
|
||||
return
|
||||
}
|
||||
err = c.reorgHandling(c.ctx, c.cachedHeaders[index].Number.Int64(), c.db)
|
||||
err = c.reorgHandling(c.ctx, c.cachedHeaders[index].Number.Uint64(), c.db)
|
||||
// if handling success then we can update the cachedHeaders
|
||||
if err == nil {
|
||||
c.cachedHeaders = c.cachedHeaders[:index+1]
|
||||
|
||||
@@ -8,23 +8,24 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
backendabi "bridge-history-api/abi"
|
||||
"bridge-history-api/db"
|
||||
"bridge-history-api/orm"
|
||||
"bridge-history-api/utils"
|
||||
)
|
||||
|
||||
// Todo : read from config
|
||||
var (
|
||||
// the number of blocks fetch per round
|
||||
fetchLimit = int64(3000)
|
||||
fetchLimit = uint64(3000)
|
||||
)
|
||||
|
||||
// FetchAndSave is a function type that fetches events from blockchain and saves them to database
|
||||
type FetchAndSave func(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addressList []common.Address) error
|
||||
type FetchAndSave func(ctx context.Context, client *ethclient.Client, database *gorm.DB, from int64, to int64, addressList []common.Address) error
|
||||
|
||||
// GetLatestProcessed is a function type that gets the latest processed block height from database
|
||||
type GetLatestProcessed func(db db.OrmFactory) (int64, error)
|
||||
type GetLatestProcessed func(ctx context.Context, db *gorm.DB) (uint64, error)
|
||||
|
||||
// FetchEventWorker defines worker with fetch and save function, processed number getter, and name
|
||||
type FetchEventWorker struct {
|
||||
@@ -34,13 +35,15 @@ type FetchEventWorker struct {
|
||||
}
|
||||
|
||||
// GetLatestL1ProcessedHeight get L1 the latest processed height
|
||||
func GetLatestL1ProcessedHeight(db db.OrmFactory) (int64, error) {
|
||||
crossHeight, err := db.GetLatestL1ProcessedHeight()
|
||||
func GetLatestL1ProcessedHeight(ctx context.Context, db *gorm.DB) (uint64, error) {
|
||||
l1CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
relayedOrm := orm.NewRelayedMsg(db)
|
||||
crossHeight, err := l1CrossMsgOrm.GetLatestL1ProcessedHeight(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get L1 cross message processed height: ", "err", err)
|
||||
return 0, err
|
||||
}
|
||||
relayedHeight, err := db.GetLatestRelayedHeightOnL1()
|
||||
relayedHeight, err := relayedOrm.GetLatestRelayedHeightOnL1(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get L1 relayed message processed height: ", "err", err)
|
||||
return 0, err
|
||||
@@ -52,18 +55,21 @@ func GetLatestL1ProcessedHeight(db db.OrmFactory) (int64, error) {
|
||||
}
|
||||
|
||||
// GetLatestL2ProcessedHeight get L2 latest processed height
|
||||
func GetLatestL2ProcessedHeight(db db.OrmFactory) (int64, error) {
|
||||
crossHeight, err := db.GetLatestL2ProcessedHeight()
|
||||
func GetLatestL2ProcessedHeight(ctx context.Context, db *gorm.DB) (uint64, error) {
|
||||
l2CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
relayedOrm := orm.NewRelayedMsg(db)
|
||||
l2SentMsgOrm := orm.NewL2SentMsg(db)
|
||||
crossHeight, err := l2CrossMsgOrm.GetLatestL2ProcessedHeight(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 cross message processed height", "err", err)
|
||||
return 0, err
|
||||
}
|
||||
relayedHeight, err := db.GetLatestRelayedHeightOnL2()
|
||||
relayedHeight, err := relayedOrm.GetLatestRelayedHeightOnL2(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 relayed message processed height", "err", err)
|
||||
return 0, err
|
||||
}
|
||||
l2SentHeight, err := db.GetLatestSentMsgHeightOnL2()
|
||||
l2SentHeight, err := l2SentMsgOrm.GetLatestSentMsgHeightOnL2(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 sent message processed height", "err", err)
|
||||
return 0, err
|
||||
@@ -79,7 +85,9 @@ func GetLatestL2ProcessedHeight(db db.OrmFactory) (int64, error) {
|
||||
}
|
||||
|
||||
// L1FetchAndSaveEvents fetch and save events on L1
|
||||
func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
|
||||
func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, db *gorm.DB, from int64, to int64, addrList []common.Address) error {
|
||||
l1CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
relayedOrm := orm.NewRelayedMsg(db)
|
||||
query := geth.FilterQuery{
|
||||
FromBlock: big.NewInt(from), // inclusive
|
||||
ToBlock: big.NewInt(to), // inclusive
|
||||
@@ -105,41 +113,28 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
|
||||
log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
|
||||
return err
|
||||
}
|
||||
dbTx, err := database.Beginx()
|
||||
if err != nil {
|
||||
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
|
||||
return err
|
||||
}
|
||||
err = database.BatchInsertL1CrossMsgDBTx(dbTx, depositL1CrossMsgs)
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
err = db.Transaction(func(tx *gorm.DB) error {
|
||||
if txErr := l1CrossMsgOrm.InsertL1CrossMsg(ctx, depositL1CrossMsgs, tx); txErr != nil {
|
||||
log.Error("l1FetchAndSaveEvents: Failed to insert cross msg event logs", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
log.Crit("l1FetchAndSaveEvents: Failed to insert cross msg event logs", "err", err)
|
||||
}
|
||||
|
||||
err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsg)
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
if txErr := relayedOrm.InsertRelayedMsg(ctx, relayedMsg, tx); txErr != nil {
|
||||
log.Error("l1FetchAndSaveEvents: Failed to insert relayed msg event logs", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
log.Crit("l1FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
|
||||
}
|
||||
err = dbTx.Commit()
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
// if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
}
|
||||
log.Error("l1FetchAndSaveEvents: Failed to commit db transaction", "err", err)
|
||||
return err
|
||||
log.Crit("l2FetchAndSaveEvents: Failed to finish transaction", "err", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
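The hunk above is the core of the ORM migration: manual Beginx/Rollback/Commit bookkeeping is replaced by gorm's Transaction helper, where a non-nil error returned from the closure rolls the whole transaction back and a nil return commits it. A minimal sketch of that shape; insertCrossMsgs and insertRelayedMsgs are hypothetical stand-ins for the InsertL1CrossMsg/InsertRelayedMsg calls in the diff.

```go
// gorm closure-style transaction, as used throughout these commits (sketch).
package crossmsg

import "gorm.io/gorm"

func saveEvents(db *gorm.DB, insertCrossMsgs, insertRelayedMsgs func(tx *gorm.DB) error) error {
	return db.Transaction(func(tx *gorm.DB) error {
		if err := insertCrossMsgs(tx); err != nil {
			return err // rolls back both inserts
		}
		if err := insertRelayedMsgs(tx); err != nil {
			return err // rolls back both inserts
		}
		return nil // commits
	})
}
```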
// L2FetchAndSaveEvents fetche and save events on L2
|
||||
func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
|
||||
func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, db *gorm.DB, from int64, to int64, addrList []common.Address) error {
|
||||
l2CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
relayedOrm := orm.NewRelayedMsg(db)
|
||||
l2SentMsgOrm := orm.NewL2SentMsg(db)
|
||||
query := geth.FilterQuery{
|
||||
FromBlock: big.NewInt(from), // inclusive
|
||||
ToBlock: big.NewInt(to), // inclusive
|
||||
@@ -166,50 +161,32 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
|
||||
return err
|
||||
}
|
||||
|
||||
dbTx, err := database.Beginx()
|
||||
if err != nil {
|
||||
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
|
||||
return err
|
||||
}
|
||||
err = database.BatchInsertL2CrossMsgDBTx(dbTx, depositL2CrossMsgs)
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
err = db.Transaction(func(tx *gorm.DB) error {
|
||||
if txErr := l2CrossMsgOrm.InsertL2CrossMsg(ctx, depositL2CrossMsgs, tx); txErr != nil {
|
||||
log.Error("l2FetchAndSaveEvents: Failed to insert cross msg event logs", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
log.Crit("l2FetchAndSaveEvents: Failed to insert cross msg event logs", "err", err)
|
||||
}
|
||||
|
||||
err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsg)
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
if txErr := relayedOrm.InsertRelayedMsg(ctx, relayedMsg, tx); txErr != nil {
|
||||
log.Error("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
|
||||
}
|
||||
|
||||
err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs)
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
if txErr := l2SentMsgOrm.InsertL2SentMsg(ctx, l2SentMsgs, tx); txErr != nil {
|
||||
log.Error("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
|
||||
}
|
||||
|
||||
err = dbTx.Commit()
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
// if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
}
|
||||
log.Error("l2FetchAndSaveEvents: Failed to commit db transaction", "err", err)
|
||||
return err
|
||||
log.Crit("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
// FetchAndSaveBatchIndex fetche and save batch index
|
||||
func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, scrollChainAddr common.Address) error {
|
||||
func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, db *gorm.DB, from int64, to int64, scrollChainAddr common.Address) error {
|
||||
rollupBatchOrm := orm.NewRollupBatch(db)
|
||||
query := geth.FilterQuery{
|
||||
FromBlock: big.NewInt(from), // inclusive
|
||||
ToBlock: big.NewInt(to), // inclusive
|
||||
@@ -228,26 +205,9 @@ func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, datab
|
||||
log.Error("FetchAndSaveBatchIndex: Failed to parse batch commit msg event logs", "err", err)
|
||||
return err
|
||||
}
|
||||
dbTx, err := database.Beginx()
|
||||
if err != nil {
|
||||
log.Error("FetchAndSaveBatchIndex: Failed to begin db transaction", "err", err)
|
||||
return err
|
||||
}
|
||||
err = database.BatchInsertRollupBatchDBTx(dbTx, rollupBatches)
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
}
|
||||
log.Crit("FetchAndSaveBatchIndex: Failed to insert batch commit msg event logs", "err", err)
|
||||
}
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
// if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
}
|
||||
log.Error("FetchAndSaveBatchIndex: Failed to commit db transaction", "err", err)
|
||||
return err
|
||||
if txErr := rollupBatchOrm.InsertRollupBatch(ctx, rollupBatches); txErr != nil {
|
||||
log.Crit("FetchAndSaveBatchIndex: Failed to insert batch commit msg event logs", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,30 +2,32 @@ package messageproof
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/db"
|
||||
"bridge-history-api/db/orm"
|
||||
"bridge-history-api/orm"
|
||||
)
|
||||
|
||||
// MsgProofUpdater is used to update message proof in db
|
||||
type MsgProofUpdater struct {
|
||||
ctx context.Context
|
||||
db db.OrmFactory
|
||||
db *gorm.DB
|
||||
l2SentMsgOrm *orm.L2SentMsg
|
||||
rollupOrm *orm.RollupBatch
|
||||
withdrawTrie *WithdrawTrie
|
||||
}
|
||||
|
||||
// NewMsgProofUpdater new MsgProofUpdater instance
|
||||
func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock uint64, db db.OrmFactory) *MsgProofUpdater {
|
||||
func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock uint64, db *gorm.DB) *MsgProofUpdater {
|
||||
return &MsgProofUpdater{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
l2SentMsgOrm: orm.NewL2SentMsg(db),
|
||||
rollupOrm: orm.NewRollupBatch(db),
|
||||
withdrawTrie: NewWithdrawTrie(),
|
||||
}
|
||||
}
|
||||
@@ -42,7 +44,7 @@ func (m *MsgProofUpdater) Start() {
|
||||
tick.Stop()
|
||||
return
|
||||
case <-tick.C:
|
||||
latestBatch, err := m.db.GetLatestRollupBatch()
|
||||
latestBatch, err := m.rollupOrm.GetLatestRollupBatch(m.ctx)
|
||||
if err != nil {
|
||||
log.Warn("MsgProofUpdater: Can not get latest RollupBatch: ", "err", err)
|
||||
continue
|
||||
@@ -50,7 +52,7 @@ func (m *MsgProofUpdater) Start() {
|
||||
if latestBatch == nil {
|
||||
continue
|
||||
}
|
||||
latestBatchIndexWithProof, err := m.db.GetLatestL2SentMsgBatchIndex()
|
||||
latestBatchIndexWithProof, err := m.l2SentMsgOrm.GetLatestL2SentMsgBatchIndex(m.ctx)
|
||||
if err != nil {
|
||||
log.Error("MsgProofUpdater: Can not get latest L2SentMsgBatchIndex: ", "err", err)
|
||||
continue
|
||||
@@ -62,7 +64,7 @@ func (m *MsgProofUpdater) Start() {
|
||||
start = uint64(latestBatchIndexWithProof) + 1
|
||||
}
|
||||
for i := start; i <= latestBatch.BatchIndex; i++ {
|
||||
batch, err := m.db.GetRollupBatchByIndex(i)
|
||||
batch, err := m.rollupOrm.GetRollupBatchByIndex(m.ctx, i)
|
||||
if err != nil {
|
||||
log.Error("MsgProofUpdater: Can not get RollupBatch: ", "err", err, "index", i)
|
||||
break
|
||||
@@ -111,8 +113,8 @@ func (m *MsgProofUpdater) initialize(ctx context.Context) {
|
||||
|
||||
func (m *MsgProofUpdater) initializeWithdrawTrie() error {
|
||||
var batch *orm.RollupBatch
|
||||
firstMsg, err := m.db.GetL2SentMessageByNonce(0)
|
||||
if err != nil && !errors.Is(err, sql.ErrNoRows) {
|
||||
firstMsg, err := m.l2SentMsgOrm.GetL2SentMessageByNonce(m.ctx, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get first l2 message: %v", err)
|
||||
}
|
||||
// no l2 message
|
||||
@@ -123,7 +125,7 @@ func (m *MsgProofUpdater) initializeWithdrawTrie() error {
|
||||
}
|
||||
|
||||
// if no batch, return and wait for next try round
|
||||
batch, err = m.db.GetLatestRollupBatch()
|
||||
batch, err = m.rollupOrm.GetLatestRollupBatch(m.ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get latest batch: %v", err)
|
||||
}
|
||||
@@ -135,7 +137,7 @@ func (m *MsgProofUpdater) initializeWithdrawTrie() error {
|
||||
batchIndex := batch.BatchIndex
|
||||
for {
|
||||
var msg *orm.L2SentMsg
|
||||
msg, err = m.db.GetLatestL2SentMsgLEHeight(batch.EndBlockNumber)
|
||||
msg, err = m.l2SentMsgOrm.GetLatestL2SentMsgLEHeight(m.ctx, batch.EndBlockNumber)
|
||||
if err != nil {
|
||||
log.Warn("failed to get l2 sent message less than height", "endBlocknum", batch.EndBlockNumber, "err", err)
|
||||
}
|
||||
@@ -159,8 +161,8 @@ func (m *MsgProofUpdater) initializeWithdrawTrie() error {
|
||||
// iterate for next batch
|
||||
batchIndex--
|
||||
|
||||
batch, err = m.db.GetRollupBatchByIndex(batchIndex)
|
||||
if err != nil {
|
||||
batch, err = m.rollupOrm.GetRollupBatchByIndex(m.ctx, batchIndex)
|
||||
if err != nil || batch == nil {
|
||||
return fmt.Errorf("failed to get block batch %v: %v", batchIndex, err)
|
||||
}
|
||||
}
|
||||
@@ -191,36 +193,26 @@ func (m *MsgProofUpdater) updateMsgProof(msgs []*orm.L2SentMsg, proofs [][]byte,
|
||||
if len(msgs) != len(proofs) {
|
||||
return fmt.Errorf("illegal state: len(msgs) != len(proofs)")
|
||||
}
|
||||
dbTx, err := m.db.Beginx()
|
||||
err := m.db.Transaction(func(tx *gorm.DB) error {
|
||||
for i, msg := range msgs {
|
||||
proofHex := common.Bytes2Hex(proofs[i])
|
||||
log.Debug("updateMsgProof", "msgHash", msg.MsgHash, "batchIndex", batchIndex, "proof", proofHex)
|
||||
if err := m.l2SentMsgOrm.UpdateL2MessageProof(m.ctx, msg.MsgHash, proofHex, batchIndex, tx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i, msg := range msgs {
|
||||
proofHex := common.Bytes2Hex(proofs[i])
|
||||
log.Debug("updateMsgProof", "msgHash", msg.MsgHash, "batchIndex", batchIndex, "proof", proofHex)
|
||||
if dbTxErr := m.db.UpdateL2MessageProofInDBTx(m.ctx, dbTx, msg.MsgHash, proofHex, batchIndex); dbTxErr != nil {
|
||||
if err := dbTx.Rollback(); err != nil {
|
||||
log.Error("dbTx.Rollback()", "err", err)
|
||||
}
|
||||
return dbTxErr
|
||||
}
|
||||
}
|
||||
|
||||
if dbTxErr := dbTx.Commit(); dbTxErr != nil {
|
||||
if err := dbTx.Rollback(); err != nil {
|
||||
log.Error("dbTx.Rollback()", "err", err)
|
||||
}
|
||||
return dbTxErr
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// appendL2Messages will append all messages between firstBlock and lastBlock (both inclusive) to withdrawTrie and compute corresponding merkle proof of each message.
|
||||
func (m *MsgProofUpdater) appendL2Messages(firstBlock, lastBlock uint64) ([]*orm.L2SentMsg, [][]byte, error) {
|
||||
var msgProofs [][]byte
|
||||
messages, err := m.db.GetL2SentMsgMsgHashByHeightRange(firstBlock, lastBlock)
|
||||
messages, err := m.l2SentMsgOrm.GetL2SentMsgMsgHashByHeightRange(m.ctx, firstBlock, lastBlock)
|
||||
if err != nil {
|
||||
log.Error("GetL2SentMsgMsgHashByHeightRange failed", "error", err, "firstBlock", firstBlock, "lastBlock", lastBlock)
|
||||
return messages, msgProofs, err
|
||||
|
||||
@@ -6,12 +6,13 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/db"
|
||||
"bridge-history-api/orm"
|
||||
)
|
||||
|
||||
// ReorgHandling handles reorg function type
|
||||
type ReorgHandling func(ctx context.Context, reorgHeight int64, db db.OrmFactory) error
|
||||
type ReorgHandling func(ctx context.Context, reorgHeight uint64, db *gorm.DB) error
|
||||
|
||||
func reverseArray(arr []*types.Header) []*types.Header {
|
||||
for i := 0; i < len(arr)/2; i++ {
|
||||
@@ -60,73 +61,48 @@ func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client
|
||||
}
|
||||
|
||||
// L1ReorgHandling handles l1 reorg
|
||||
func L1ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) error {
|
||||
dbTx, err := db.Beginx()
|
||||
if err != nil {
|
||||
log.Crit("begin db tx failed", "err", err)
|
||||
}
|
||||
err = db.DeleteL1CrossMsgAfterHeightDBTx(dbTx, reorgHeight)
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
func L1ReorgHandling(ctx context.Context, reorgHeight uint64, db *gorm.DB) error {
|
||||
l1CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
relayedOrm := orm.NewRelayedMsg(db)
|
||||
err := db.Transaction(func(tx *gorm.DB) error {
|
||||
if err := l1CrossMsgOrm.DeleteL1CrossMsgAfterHeight(ctx, reorgHeight, tx); err != nil {
|
||||
log.Error("delete l1 cross msg from height", "height", reorgHeight, "err", err)
|
||||
return err
|
||||
}
|
||||
log.Crit("delete l1 cross msg from height", "height", reorgHeight, "err", err)
|
||||
}
|
||||
err = db.DeleteL1RelayedHashAfterHeightDBTx(dbTx, reorgHeight)
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
if err := relayedOrm.DeleteL1RelayedHashAfterHeight(ctx, reorgHeight, tx); err != nil {
|
||||
log.Error("delete l1 relayed msg from height", "height", reorgHeight, "err", err)
|
||||
return err
|
||||
}
|
||||
log.Crit("delete l1 relayed hash from height", "height", reorgHeight, "err", err)
|
||||
}
|
||||
err = dbTx.Commit()
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
}
|
||||
log.Error("commit tx failed", "err", err)
|
||||
return err
|
||||
log.Crit("l1 reorg handling failed", "err", err)
|
||||
}
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
// L2ReorgHandling handles l2 reorg
|
||||
func L2ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) error {
|
||||
dbTx, err := db.Beginx()
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
func L2ReorgHandling(ctx context.Context, reorgHeight uint64, db *gorm.DB) error {
|
||||
l2CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
relayedOrm := orm.NewRelayedMsg(db)
|
||||
l2SentMsgOrm := orm.NewL2SentMsg(db)
|
||||
err := db.Transaction(func(tx *gorm.DB) error {
|
||||
if err := l2CrossMsgOrm.DeleteL2CrossMsgFromHeight(ctx, reorgHeight, tx); err != nil {
|
||||
log.Error("delete l2 cross msg from height", "height", reorgHeight, "err", err)
|
||||
return err
|
||||
}
|
||||
log.Crit("begin db tx failed", "err", err)
|
||||
}
|
||||
err = db.DeleteL2CrossMsgFromHeightDBTx(dbTx, reorgHeight)
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
if err := relayedOrm.DeleteL2RelayedHashAfterHeight(ctx, reorgHeight, tx); err != nil {
|
||||
log.Error("delete l2 relayed msg from height", "height", reorgHeight, "err", err)
|
||||
return err
|
||||
}
|
||||
log.Crit("delete l2 cross msg from height", "height", reorgHeight, "err", err)
|
||||
}
|
||||
err = db.DeleteL2RelayedHashAfterHeightDBTx(dbTx, reorgHeight)
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
if err := l2SentMsgOrm.DeleteL2SentMsgAfterHeight(ctx, reorgHeight, tx); err != nil {
|
||||
log.Error("delete l2 sent msg from height", "height", reorgHeight, "err", err)
|
||||
return err
|
||||
}
|
||||
log.Crit("delete l2 relayed hash from height", "height", reorgHeight, "err", err)
|
||||
}
|
||||
err = db.DeleteL2SentMsgAfterHeightDBTx(dbTx, reorgHeight)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
}
|
||||
log.Crit("delete l2 sent msg from height", "height", reorgHeight, "err", err)
|
||||
log.Crit("l2 reorg handling failed", "err", err)
|
||||
}
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
|
||||
log.Error("dbTx Rollback failed", "err", rollBackErr)
|
||||
}
|
||||
log.Error("commit tx failed", "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,71 +0,0 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
type rollupBatchOrm struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
// RollupBatch is the struct for rollup_batch table
|
||||
type RollupBatch struct {
|
||||
ID uint64 `json:"id" db:"id"`
|
||||
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
|
||||
BatchHash string `json:"batch_hash" db:"batch_hash"`
|
||||
CommitHeight uint64 `json:"commit_height" db:"commit_height"`
|
||||
StartBlockNumber uint64 `json:"start_block_number" db:"start_block_number"`
|
||||
EndBlockNumber uint64 `json:"end_block_number" db:"end_block_number"`
|
||||
}
|
||||
|
||||
// NewRollupBatchOrm create an NewRollupBatchOrm instance
|
||||
func NewRollupBatchOrm(db *sqlx.DB) RollupBatchOrm {
|
||||
return &rollupBatchOrm{db: db}
|
||||
}
|
||||
|
||||
func (b *rollupBatchOrm) BatchInsertRollupBatchDBTx(dbTx *sqlx.Tx, batches []*RollupBatch) error {
|
||||
if len(batches) == 0 {
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
batchMaps := make([]map[string]interface{}, len(batches))
|
||||
for i, batch := range batches {
|
||||
batchMaps[i] = map[string]interface{}{
|
||||
"commit_height": batch.CommitHeight,
|
||||
"batch_index": batch.BatchIndex,
|
||||
"batch_hash": batch.BatchHash,
|
||||
"start_block_number": batch.StartBlockNumber,
|
||||
"end_block_number": batch.EndBlockNumber,
|
||||
}
|
||||
}
|
||||
_, err = dbTx.NamedExec(`insert into rollup_batch(commit_height, batch_index, batch_hash, start_block_number, end_block_number) values(:commit_height, :batch_index, :batch_hash, :start_block_number, :end_block_number);`, batchMaps)
|
||||
if err != nil {
|
||||
log.Error("BatchInsertRollupBatchDBTx: failed to insert batch event msgs", "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *rollupBatchOrm) GetLatestRollupBatch() (*RollupBatch, error) {
|
||||
result := &RollupBatch{}
|
||||
row := b.db.QueryRowx(`SELECT id, batch_index, commit_height, batch_hash, start_block_number, end_block_number FROM rollup_batch ORDER BY batch_index DESC LIMIT 1;`)
|
||||
if err := row.StructScan(result); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (b *rollupBatchOrm) GetRollupBatchByIndex(index uint64) (*RollupBatch, error) {
|
||||
result := &RollupBatch{}
|
||||
row := b.db.QueryRowx(`SELECT id, batch_index, batch_hash, commit_height, start_block_number, end_block_number FROM rollup_batch WHERE batch_index = $1;`, index)
|
||||
if err := row.StructScan(result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
@@ -1,132 +0,0 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
// AssetType can be ETH/ERC20/ERC1155/ERC721
|
||||
type AssetType int
|
||||
|
||||
// MsgType can be layer1/layer2 msg
|
||||
type MsgType int
|
||||
|
||||
func (a AssetType) String() string {
|
||||
switch a {
|
||||
case ETH:
|
||||
return "ETH"
|
||||
case ERC20:
|
||||
return "ERC20"
|
||||
case ERC1155:
|
||||
return "ERC1155"
|
||||
case ERC721:
|
||||
return "ERC721"
|
||||
}
|
||||
return "Unknown Asset Type"
|
||||
}
|
||||
|
||||
const (
|
||||
// ETH = 0
|
||||
ETH AssetType = iota
|
||||
// ERC20 = 1
|
||||
ERC20
|
||||
// ERC721 = 2
|
||||
ERC721
|
||||
// ERC1155 = 3
|
||||
ERC1155
|
||||
)
|
||||
|
||||
const (
|
||||
// UnknownMsg = 0
|
||||
UnknownMsg MsgType = iota
|
||||
// Layer1Msg = 1
|
||||
Layer1Msg
|
||||
// Layer2Msg = 2
|
||||
Layer2Msg
|
||||
)
|
||||
|
||||
// CrossMsg represents a cross message from layer 1 to layer 2
|
||||
type CrossMsg struct {
|
||||
ID uint64 `json:"id" db:"id"`
|
||||
MsgHash string `json:"msg_hash" db:"msg_hash"`
|
||||
Height uint64 `json:"height" db:"height"`
|
||||
Sender string `json:"sender" db:"sender"`
|
||||
Target string `json:"target" db:"target"`
|
||||
Amount string `json:"amount" db:"amount"`
|
||||
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
|
||||
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
|
||||
Layer1Token string `json:"layer1_token" db:"layer1_token"`
|
||||
Layer2Token string `json:"layer2_token" db:"layer2_token"`
|
||||
TokenIDs string `json:"token_ids" db:"token_ids"`
|
||||
TokenAmounts string `json:"token_amounts" db:"token_amounts"`
|
||||
Asset int `json:"asset" db:"asset"`
|
||||
MsgType int `json:"msg_type" db:"msg_type"`
|
||||
Timestamp *time.Time `json:"timestamp" db:"block_timestamp"`
|
||||
CreatedAt *time.Time `json:"created_at" db:"created_at"`
|
||||
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
|
||||
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
|
||||
}
|
||||
|
||||
// L1CrossMsgOrm provides operations on l1_cross_message table
|
||||
type L1CrossMsgOrm interface {
|
||||
GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, error)
|
||||
GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error)
|
||||
BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error
|
||||
// UpdateL1CrossMsgHashDBTx invoked when SentMessage event is received
|
||||
UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l1Hash, msgHash common.Hash) error
|
||||
UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error
|
||||
GetLatestL1ProcessedHeight() (int64, error)
|
||||
DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
|
||||
UpdateL1BlockTimestamp(height uint64, timestamp time.Time) error
|
||||
GetL1EarliestNoBlockTimestampHeight() (uint64, error)
|
||||
}
|
||||
|
||||
// L2CrossMsgOrm provides operations on cross_message table
|
||||
type L2CrossMsgOrm interface {
|
||||
GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error)
|
||||
GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error)
|
||||
BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error
|
||||
// UpdateL2CrossMsgHashDBTx invoked when SentMessage event is received
|
||||
UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error
|
||||
UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error
|
||||
GetLatestL2ProcessedHeight() (int64, error)
|
||||
DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error
|
||||
UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error
|
||||
GetL2EarliestNoBlockTimestampHeight() (uint64, error)
|
||||
GetL2CrossMsgByMsgHashList(msgHashList []string) ([]*CrossMsg, error)
|
||||
}
|
||||
|
||||
// RelayedMsgOrm provides operations on relayed_msg table
|
||||
type RelayedMsgOrm interface {
|
||||
BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*RelayedMsg) error
|
||||
GetRelayedMsgByHash(msgHash string) (*RelayedMsg, error)
|
||||
GetLatestRelayedHeightOnL1() (int64, error)
|
||||
GetLatestRelayedHeightOnL2() (int64, error)
|
||||
DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
|
||||
DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
|
||||
}
|
||||
|
||||
// L2SentMsgOrm provides operations on l2_sent_msg table
|
||||
type L2SentMsgOrm interface {
|
||||
BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2SentMsg) error
|
||||
GetL2SentMsgByHash(l2Hash string) (*L2SentMsg, error)
|
||||
GetLatestSentMsgHeightOnL2() (int64, error)
|
||||
GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error)
|
||||
GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error)
|
||||
GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error)
|
||||
UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batchIndex uint64) error
|
||||
GetLatestL2SentMsgBatchIndex() (int64, error)
|
||||
GetClaimableL2SentMsgByAddressWithOffset(address string, offset int64, limit int64) ([]*L2SentMsg, error)
|
||||
GetClaimableL2SentMsgByAddressTotalNum(address string) (uint64, error)
|
||||
DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
|
||||
}
|
||||
|
||||
// RollupBatchOrm provides operations on rollup_batch table
|
||||
type RollupBatchOrm interface {
|
||||
GetLatestRollupBatch() (*RollupBatch, error)
|
||||
GetRollupBatchByIndex(index uint64) (*RollupBatch, error)
|
||||
BatchInsertRollupBatchDBTx(dbTx *sqlx.Tx, messages []*RollupBatch) error
|
||||
}
|
||||
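The sqlx interfaces deleted above are replaced elsewhere in this change-set by a gorm-backed bridge-history-api/orm package, used throughout the diff as orm.NewCrossMsg, orm.NewRelayedMsg, orm.NewL2SentMsg and orm.NewRollupBatch. That package is not part of this excerpt, so the model below is only a hedged guess at its shape: gorm struct tags in place of the old `db` tags, and gorm.DeletedAt so the hand-written `deleted_at IS NULL` / `SET deleted_at = current_timestamp` SQL becomes gorm's built-in soft delete. Column names come from the removed code; everything else is an assumption.

```go
// Hypothetical gorm model mirroring the removed CrossMsg struct (assumptions stated above).
package orm

import (
	"time"

	"gorm.io/gorm"
)

type CrossMsg struct {
	ID           uint64         `gorm:"column:id;primaryKey"`
	MsgHash      string         `gorm:"column:msg_hash"`
	Height       uint64         `gorm:"column:height"`
	Sender       string         `gorm:"column:sender"`
	Target       string         `gorm:"column:target"`
	Amount       string         `gorm:"column:amount"`
	Layer1Hash   string         `gorm:"column:layer1_hash"`
	Layer2Hash   string         `gorm:"column:layer2_hash"`
	Layer1Token  string         `gorm:"column:layer1_token"`
	Layer2Token  string         `gorm:"column:layer2_token"`
	TokenIDs     string         `gorm:"column:token_ids"`
	TokenAmounts string         `gorm:"column:token_amounts"`
	Asset        int            `gorm:"column:asset"`
	MsgType      int            `gorm:"column:msg_type"`
	Timestamp    *time.Time     `gorm:"column:block_timestamp"`
	CreatedAt    time.Time      `gorm:"column:created_at"`
	UpdatedAt    time.Time      `gorm:"column:updated_at"`
	DeletedAt    gorm.DeletedAt `gorm:"column:deleted_at"` // enables gorm soft delete
}

// TableName keeps the original cross_message table.
func (CrossMsg) TableName() string { return "cross_message" }
```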
@@ -1,148 +0,0 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
type l1CrossMsgOrm struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
// NewL1CrossMsgOrm create an NewL1CrossMsgOrm instance
|
||||
func NewL1CrossMsgOrm(db *sqlx.DB) L1CrossMsgOrm {
|
||||
return &l1CrossMsgOrm{db: db}
|
||||
}
|
||||
|
||||
func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, error) {
|
||||
result := &CrossMsg{}
|
||||
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer1_hash = $1 AND msg_type = $2 AND deleted_at IS NULL;`, l1Hash.String(), Layer1Msg)
|
||||
if err := row.StructScan(result); err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetL1CrossMsgsByAddress returns all layer1 cross messages under given address
|
||||
// Warning: return empty slice if no data found
|
||||
func (l *l1CrossMsgOrm) GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error) {
|
||||
var results []*CrossMsg
|
||||
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = 1 AND deleted_at IS NULL;`, sender.String(), Layer1Msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err = rows.Close(); err != nil {
|
||||
log.Error("failed to close rows", "err", err)
|
||||
}
|
||||
}()
|
||||
for rows.Next() {
|
||||
msg := &CrossMsg{}
|
||||
if err = rows.StructScan(msg); err != nil {
|
||||
break
|
||||
}
|
||||
results = append(results, msg)
|
||||
}
|
||||
if len(results) == 0 && errors.Is(err, sql.ErrNoRows) {
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
messageMaps := make([]map[string]interface{}, len(messages))
|
||||
for i, msg := range messages {
|
||||
messageMaps[i] = map[string]interface{}{
|
||||
"height": msg.Height,
|
||||
"sender": msg.Sender,
|
||||
"target": msg.Target,
|
||||
"amount": msg.Amount,
|
||||
"asset": msg.Asset,
|
||||
"msg_hash": msg.MsgHash,
|
||||
"layer1_hash": msg.Layer1Hash,
|
||||
"layer1_token": msg.Layer1Token,
|
||||
"layer2_token": msg.Layer2Token,
|
||||
"token_ids": msg.TokenIDs,
|
||||
"msg_type": Layer1Msg,
|
||||
}
|
||||
}
|
||||
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, amount, asset, msg_hash, layer1_hash, layer1_token, layer2_token, token_ids, msg_type) values(:height, :sender, :target, :amount, :asset, :msg_hash, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :msg_type);`, messageMaps)
|
||||
if err != nil {
|
||||
log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateL1CrossMsgHashDBTx update l1 cross msg hash in db, no need to check msg_type since layer1_hash wont be empty if its layer1 msg
|
||||
func (l *l1CrossMsgOrm) UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l1Hash, msgHash common.Hash) error {
|
||||
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l1Hash.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (l *l1CrossMsgOrm) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error {
|
||||
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.l1_cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l1Hash.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (l *l1CrossMsgOrm) GetLatestL1ProcessedHeight() (int64, error) {
|
||||
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND deleted_at IS NULL ORDER BY id DESC LIMIT 1;`, Layer1Msg)
|
||||
var result sql.NullInt64
|
||||
if err := row.Scan(&result); err != nil {
|
||||
if err == sql.ErrNoRows || !result.Valid {
|
||||
return -1, nil
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
if result.Valid {
|
||||
return result.Int64, nil
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (l *l1CrossMsgOrm) DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
|
||||
if _, err := l.db.Exec(`UPDATE cross_message SET deleted_at = current_timestamp WHERE height > $1 AND msg_type = $2;`, height, Layer1Msg); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *l1CrossMsgOrm) UpdateL1BlockTimestamp(height uint64, timestamp time.Time) error {
|
||||
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND deleted_at IS NULL`, timestamp, height, Layer1Msg); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *l1CrossMsgOrm) GetL1EarliestNoBlockTimestampHeight() (uint64, error) {
|
||||
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND deleted_at IS NULL ORDER BY height ASC LIMIT 1;`, Layer1Msg)
|
||||
var result uint64
|
||||
if err := row.Scan(&result); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
@@ -1,177 +0,0 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
type l2CrossMsgOrm struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
// NewL2CrossMsgOrm create an NewL2CrossMsgOrm instance
|
||||
func NewL2CrossMsgOrm(db *sqlx.DB) L2CrossMsgOrm {
|
||||
return &l2CrossMsgOrm{db: db}
|
||||
}
|
||||
|
||||
func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error) {
|
||||
result := &CrossMsg{}
|
||||
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND deleted_at IS NULL;`, l2Hash.String())
|
||||
if err := row.StructScan(result); err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetL2CrossMsgByAddress returns all layer2 cross messages under given address
|
||||
// Warning: return empty slice if no data found
|
||||
func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error) {
|
||||
var results []*CrossMsg
|
||||
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND deleted_at IS NULL;`, sender.String(), Layer2Msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err = rows.Close(); err != nil {
|
||||
log.Error("failed to close rows", "err", err)
|
||||
}
|
||||
}()
|
||||
for rows.Next() {
|
||||
msg := &CrossMsg{}
|
||||
if err = rows.StructScan(msg); err != nil {
|
||||
break
|
||||
}
|
||||
results = append(results, msg)
|
||||
}
|
||||
if len(results) == 0 && errors.Is(err, sql.ErrNoRows) {
|
||||
// log.Warn("no unprocessed layer1 messages in db", "err", err)
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return results, nil
|
||||
|
||||
}
|
||||
|
||||
func (l *l2CrossMsgOrm) DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error {
|
||||
_, err := dbTx.Exec(`UPDATE cross_message SET deleted_at = current_timestamp where height > $1 AND msg_type = $2 ;`, height, Layer2Msg)
|
||||
if err != nil {
|
||||
log.Error("DeleteL1CrossMsgAfterHeightDBTx: failed to delete", "height", height, "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
messageMaps := make([]map[string]interface{}, len(messages))
|
||||
for i, msg := range messages {
|
||||
messageMaps[i] = map[string]interface{}{
|
||||
"height": msg.Height,
|
||||
"sender": msg.Sender,
|
||||
"target": msg.Target,
|
||||
"asset": msg.Asset,
|
||||
"msg_hash": msg.MsgHash,
|
||||
"layer2_hash": msg.Layer2Hash,
|
||||
"layer1_token": msg.Layer1Token,
|
||||
"layer2_token": msg.Layer2Token,
|
||||
"token_ids": msg.TokenIDs,
|
||||
"amount": msg.Amount,
|
||||
"msg_type": Layer2Msg,
|
||||
}
|
||||
}
|
||||
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, msg_hash, layer2_hash, layer1_token, layer2_token, token_ids, amount, msg_type) values(:height, :sender, :target, :asset, :msg_hash, :layer2_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type);`, messageMaps)
|
||||
if err != nil {
|
||||
log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *l2CrossMsgOrm) UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error {
|
||||
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update cross_message set msg_hash = ? where layer2_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l2Hash.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *l2CrossMsgOrm) UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error {
|
||||
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update cross_message set msg_hash = ? where layer2_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l2Hash.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
|
||||
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND deleted_at IS NULL ORDER BY id DESC LIMIT 1;`, Layer2Msg)
|
||||
var result sql.NullInt64
|
||||
if err := row.Scan(&result); err != nil {
|
||||
if err == sql.ErrNoRows || !result.Valid {
|
||||
return -1, nil
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
if result.Valid {
|
||||
return result.Int64, nil
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (l *l2CrossMsgOrm) UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error {
|
||||
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND deleted_at IS NULL`, timestamp, height, Layer2Msg); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *l2CrossMsgOrm) GetL2EarliestNoBlockTimestampHeight() (uint64, error) {
|
||||
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND deleted_at IS NULL ORDER BY height ASC LIMIT 1;`, Layer2Msg)
|
||||
var result uint64
|
||||
if err := row.Scan(&result); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (l *l2CrossMsgOrm) GetL2CrossMsgByMsgHashList(msgHashList []string) ([]*CrossMsg, error) {
|
||||
var results []*CrossMsg
|
||||
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE msg_hash = ANY($1) AND msg_type = $2 AND deleted_at IS NULL;`, pq.Array(msgHashList), Layer2Msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err = rows.Close(); err != nil {
|
||||
log.Error("failed to close rows", "err", err)
|
||||
}
|
||||
}()
|
||||
for rows.Next() {
|
||||
msg := &CrossMsg{}
|
||||
if err = rows.StructScan(msg); err != nil {
|
||||
break
|
||||
}
|
||||
results = append(results, msg)
|
||||
}
|
||||
if err != nil && !errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, err
|
||||
}
|
||||
if len(results) == 0 {
|
||||
log.Debug("no L2CrossMsg under given msg hashes", "msg hash list", msgHashList)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
@@ -1,189 +0,0 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
// L2SentMsg defines the struct for l2_sent_msg table record
|
||||
type L2SentMsg struct {
|
||||
ID uint64 `json:"id" db:"id"`
|
||||
OriginalSender string `json:"original_sender" db:"original_sender"`
|
||||
TxHash string `json:"tx_hash" db:"tx_hash"`
|
||||
MsgHash string `json:"msg_hash" db:"msg_hash"`
|
||||
Sender string `json:"sender" db:"sender"`
|
||||
Target string `json:"target" db:"target"`
|
||||
Value string `json:"value" db:"value"`
|
||||
Height uint64 `json:"height" db:"height"`
|
||||
Nonce uint64 `json:"nonce" db:"nonce"`
|
||||
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
|
||||
MsgProof string `json:"msg_proof" db:"msg_proof"`
|
||||
MsgData string `json:"msg_data" db:"msg_data"`
|
||||
CreatedAt *time.Time `json:"created_at" db:"created_at"`
|
||||
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
|
||||
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
|
||||
}
|
||||
|
||||
type l2SentMsgOrm struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
// NewL2SentMsgOrm create an NewRollupBatchOrm instance
|
||||
func NewL2SentMsgOrm(db *sqlx.DB) L2SentMsgOrm {
|
||||
return &l2SentMsgOrm{db: db}
|
||||
}
|
||||
|
||||
func (l *l2SentMsgOrm) GetL2SentMsgByHash(msgHash string) (*L2SentMsg, error) {
|
||||
result := &L2SentMsg{}
|
||||
row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msgHash)
|
||||
if err := row.StructScan(result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2SentMsg) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
messageMaps := make([]map[string]interface{}, len(messages))
|
||||
for i, msg := range messages {
|
||||
messageMaps[i] = map[string]interface{}{
|
||||
"original_sender": msg.OriginalSender,
|
||||
"tx_hash": msg.TxHash,
|
||||
"sender": msg.Sender,
|
||||
"target": msg.Target,
|
||||
"value": msg.Value,
|
||||
"msg_hash": msg.MsgHash,
|
||||
"height": msg.Height,
|
||||
"nonce": msg.Nonce,
|
||||
"batch_index": msg.BatchIndex,
|
||||
"msg_proof": msg.MsgProof,
|
||||
"msg_data": msg.MsgData,
|
||||
}
|
||||
}
|
||||
_, err = dbTx.NamedExec(`insert into l2_sent_msg(original_sender, tx_hash, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:original_sender, :tx_hash, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
|
||||
if err != nil {
|
||||
log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err)
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (l *l2SentMsgOrm) GetLatestSentMsgHeightOnL2() (int64, error) {
|
||||
row := l.db.QueryRow(`SELECT height FROM l2_sent_msg WHERE deleted_at IS NULL ORDER BY nonce DESC LIMIT 1;`)
|
||||
var result sql.NullInt64
|
||||
if err := row.Scan(&result); err != nil {
|
||||
if err == sql.ErrNoRows || !result.Valid {
|
||||
return -1, nil
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
if result.Valid {
|
||||
return result.Int64, nil
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batchIndex uint64) error {
|
||||
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND deleted_at IS NULL;"), proof, batchIndex, msgHash); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {
|
||||
row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE batch_index != 0 AND deleted_at IS NULL ORDER BY batch_index DESC LIMIT 1;`)
|
||||
var result sql.NullInt64
|
||||
if err := row.Scan(&result); err != nil {
|
||||
if err == sql.ErrNoRows || !result.Valid {
|
||||
return -1, nil
|
||||
}
|
||||
return -1, err
|
||||
}
|
||||
if result.Valid {
|
||||
return result.Int64, nil
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func (l *l2SentMsgOrm) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error) {
|
||||
var results []*L2SentMsg
|
||||
rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE height >= $1 AND height <= $2 AND deleted_at IS NULL ORDER BY nonce ASC;`, startHeight, endHeight)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err = rows.Close(); err != nil {
|
||||
log.Error("failed to close rows", "err", err)
|
||||
}
|
||||
}()
|
||||
for rows.Next() {
|
||||
msg := &L2SentMsg{}
|
||||
if err = rows.StructScan(msg); err != nil {
|
||||
break
|
||||
}
|
||||
results = append(results, msg)
|
||||
}
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (l *l2SentMsgOrm) GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error) {
|
||||
result := &L2SentMsg{}
|
||||
row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE nonce = $1 AND deleted_at IS NULL;`, nonce)
|
||||
err := row.StructScan(result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (l *l2SentMsgOrm) GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error) {
|
||||
result := &L2SentMsg{}
|
||||
row := l.db.QueryRowx(`select * from l2_sent_msg where height <= $1 AND deleted_at IS NULL order by nonce desc limit 1`, endBlockNumber)
|
||||
err := row.StructScan(result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (l *l2SentMsgOrm) DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
|
||||
_, err := dbTx.Exec(`UPDATE l2_sent_msg SET deleted_at = current_timestamp WHERE height > $1;`, height)
|
||||
return err
|
||||
}
|
||||
|
||||
func (l *l2SentMsgOrm) GetClaimableL2SentMsgByAddressWithOffset(address string, offset int64, limit int64) ([]*L2SentMsg, error) {
|
||||
var results []*L2SentMsg
|
||||
rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) ORDER BY id DESC LIMIT $2 OFFSET $3;`, address, limit, offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err = rows.Close(); err != nil {
|
||||
log.Error("failed to close rows", "err", err)
|
||||
}
|
||||
}()
|
||||
for rows.Next() {
|
||||
msg := &L2SentMsg{}
|
||||
if err = rows.StructScan(msg); err != nil {
|
||||
break
|
||||
}
|
||||
results = append(results, msg)
|
||||
}
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (l *l2SentMsgOrm) GetClaimableL2SentMsgByAddressTotalNum(address string) (uint64, error) {
|
||||
var count uint64
|
||||
row := l.db.QueryRowx(`SELECT COUNT(*) FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1);`, address)
|
||||
if err := row.Scan(&count); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
@@ -1,100 +0,0 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
// RelayedMsg is the struct for relayed_msg table
|
||||
type RelayedMsg struct {
|
||||
MsgHash string `json:"msg_hash" db:"msg_hash"`
|
||||
Height uint64 `json:"height" db:"height"`
|
||||
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
|
||||
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
|
||||
}
|
||||
|
||||
type relayedMsgOrm struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
// NewRelayedMsgOrm create an NewRelayedMsgOrm instance
|
||||
func NewRelayedMsgOrm(db *sqlx.DB) RelayedMsgOrm {
|
||||
return &relayedMsgOrm{db: db}
|
||||
}
|
||||
|
||||
func (l *relayedMsgOrm) BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*RelayedMsg) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
messageMaps := make([]map[string]interface{}, len(messages))
|
||||
for i, msg := range messages {
|
||||
messageMaps[i] = map[string]interface{}{
|
||||
"msg_hash": msg.MsgHash,
|
||||
"height": msg.Height,
|
||||
"layer1_hash": msg.Layer1Hash,
|
||||
"layer2_hash": msg.Layer2Hash,
|
||||
}
|
||||
}
|
||||
_, err = dbTx.NamedExec(`insert into relayed_msg(msg_hash, height, layer1_hash, layer2_hash) values(:msg_hash, :height, :layer1_hash, :layer2_hash);`, messageMaps)
|
||||
if err != nil {
|
||||
log.Error("BatchInsertRelayedMsgDBTx: failed to insert relayed msgs", "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *relayedMsgOrm) GetRelayedMsgByHash(msgHash string) (*RelayedMsg, error) {
|
||||
result := &RelayedMsg{}
|
||||
row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msgHash)
|
||||
if err := row.StructScan(result); err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (l *relayedMsgOrm) GetLatestRelayedHeightOnL1() (int64, error) {
|
||||
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer1_hash != '' AND deleted_at IS NULL ORDER BY height DESC LIMIT 1;`)
|
||||
var result sql.NullInt64
|
||||
if err := row.Scan(&result); err != nil {
|
||||
if err == sql.ErrNoRows || !result.Valid {
|
||||
return -1, nil
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
if result.Valid {
|
||||
return result.Int64, nil
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (l *relayedMsgOrm) GetLatestRelayedHeightOnL2() (int64, error) {
|
||||
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer2_hash != '' AND deleted_at IS NULL ORDER BY height DESC LIMIT 1;`)
|
||||
var result sql.NullInt64
|
||||
if err := row.Scan(&result); err != nil {
|
||||
if err == sql.ErrNoRows || !result.Valid {
|
||||
return -1, nil
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
if result.Valid {
|
||||
return result.Int64, nil
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (l *relayedMsgOrm) DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
|
||||
_, err := dbTx.Exec(`UPDATE relayed_msg SET deleted_at = current_timestamp WHERE height > $1 AND layer1_hash != '';`, height)
|
||||
return err
|
||||
}
|
||||
|
||||
func (l *relayedMsgOrm) DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
|
||||
_, err := dbTx.Exec(`UPDATE relayed_msg SET deleted_at = current_timestamp WHERE height > $1 AND layer2_hash != '';`, height)
|
||||
return err
|
||||
}
|
||||
@@ -1,103 +0,0 @@
package db

import (
    "database/sql"
    "errors"

    "github.com/ethereum/go-ethereum/log"
    "github.com/jmoiron/sqlx"
    _ "github.com/lib/pq" //nolint:golint

    "bridge-history-api/config"
    "bridge-history-api/db/orm"
)

// OrmFactory includes all orm interfaces
type OrmFactory interface {
    orm.L1CrossMsgOrm
    orm.L2CrossMsgOrm
    orm.RelayedMsgOrm
    orm.L2SentMsgOrm
    orm.RollupBatchOrm
    GetTotalCrossMsgCountByAddress(sender string) (uint64, error)
    GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error)
    GetDB() *sqlx.DB
    Beginx() (*sqlx.Tx, error)
    Close() error
}

type ormFactory struct {
    orm.L1CrossMsgOrm
    orm.L2CrossMsgOrm
    orm.RelayedMsgOrm
    orm.L2SentMsgOrm
    orm.RollupBatchOrm
    *sqlx.DB
}

// NewOrmFactory creates an ormFactory instance that implements the OrmFactory interface
func NewOrmFactory(cfg *config.Config) (OrmFactory, error) {
    // Initialize sql/sqlx
    db, err := sqlx.Open(cfg.DB.DriverName, cfg.DB.DSN)
    if err != nil {
        return nil, err
    }

    db.SetMaxOpenConns(cfg.DB.MaxOpenNum)
    db.SetMaxIdleConns(cfg.DB.MaxIdleNum)
    if err = db.Ping(); err != nil {
        return nil, err
    }

    return &ormFactory{
        L1CrossMsgOrm:  orm.NewL1CrossMsgOrm(db),
        L2CrossMsgOrm:  orm.NewL2CrossMsgOrm(db),
        RelayedMsgOrm:  orm.NewRelayedMsgOrm(db),
        L2SentMsgOrm:   orm.NewL2SentMsgOrm(db),
        RollupBatchOrm: orm.NewRollupBatchOrm(db),
        DB:             db,
    }, nil
}

func (o *ormFactory) GetDB() *sqlx.DB {
    return o.DB
}

func (o *ormFactory) Beginx() (*sqlx.Tx, error) {
    return o.DB.Beginx()
}

func (o *ormFactory) GetTotalCrossMsgCountByAddress(sender string) (uint64, error) {
    var count uint64
    row := o.DB.QueryRowx(`SELECT COUNT(*) FROM cross_message WHERE sender = $1 AND deleted_at IS NULL;`, sender)
    if err := row.Scan(&count); err != nil {
        return 0, err
    }
    return count, nil
}

func (o *ormFactory) GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error) {
    para := sender
    var results []*orm.CrossMsg
    rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND deleted_at IS NULL ORDER BY block_timestamp DESC NULLS FIRST, id DESC LIMIT $2 OFFSET $3;`, para, limit, offset)
    if err != nil || rows == nil {
        return nil, err
    }
    defer func() {
        if err = rows.Close(); err != nil {
            log.Error("failed to close rows", "err", err)
        }
    }()
    for rows.Next() {
        msg := &orm.CrossMsg{}
        if err = rows.StructScan(msg); err != nil {
            break
        }
        results = append(results, msg)
    }
    if len(results) == 0 && errors.Is(err, sql.ErrNoRows) {
    } else if err != nil {
        return nil, err
    }
    return results, nil
}

@@ -5,15 +5,14 @@ go 1.19
|
||||
require (
|
||||
github.com/ethereum/go-ethereum v1.12.0
|
||||
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/kataras/iris/v12 v12.2.0
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/mattn/go-colorable v0.1.13
|
||||
github.com/mattn/go-isatty v0.0.19
|
||||
github.com/modern-go/reflect2 v1.0.2
|
||||
github.com/pressly/goose/v3 v3.7.0
|
||||
github.com/stretchr/testify v1.8.3
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
gorm.io/gorm v1.25.2
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -54,7 +53,6 @@ require (
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/getsentry/sentry-go v0.18.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-sql-driver/mysql v1.7.1 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/gobwas/httphead v0.1.0 // indirect
|
||||
github.com/gobwas/pool v0.2.1 // indirect
|
||||
@@ -78,6 +76,8 @@ require (
|
||||
github.com/iris-contrib/go.uuid v2.0.0+incompatible // indirect
|
||||
github.com/iris-contrib/schema v0.0.6 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/kataras/blocks v0.0.7 // indirect
|
||||
github.com/kataras/golog v0.1.8 // indirect
|
||||
@@ -91,10 +91,9 @@ require (
|
||||
github.com/mailgun/raymond/v2 v2.0.48 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.16 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mediocregopher/radix/v3 v3.8.1 // indirect
|
||||
github.com/microcosm-cc/bluemonday v1.0.23 // indirect
|
||||
github.com/microcosm-cc/bluemonday v1.0.25 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mitchellh/pointerstructure v1.2.0 // indirect
|
||||
github.com/mmcloughlin/addchain v0.4.0 // indirect
|
||||
|
||||
@@ -143,9 +143,6 @@ github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
|
||||
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
@@ -251,8 +248,10 @@ github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
|
||||
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
|
||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
@@ -302,9 +301,6 @@ github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4F
|
||||
github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
|
||||
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailgun/raymond/v2 v2.0.48 h1:5dmlB680ZkFG2RN/0lvTAghrSxIESeu9/2aeDqACtjw=
|
||||
github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
|
||||
@@ -332,9 +328,6 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
@@ -342,8 +335,8 @@ github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i
|
||||
github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M=
|
||||
github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
|
||||
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
|
||||
github.com/microcosm-cc/bluemonday v1.0.23 h1:SMZe2IGa0NuHvnVNAZ+6B38gsTbi5e4sViiWJyDDqFY=
|
||||
github.com/microcosm-cc/bluemonday v1.0.23/go.mod h1:mN70sk7UkkF8TUr2IGBpNN0jAgStuPzlK76QuruE/z4=
|
||||
github.com/microcosm-cc/bluemonday v1.0.25 h1:4NEwSfiJ+Wva0VxN5B8OwMicaJvD8r9tlJWm9rtloEg=
|
||||
github.com/microcosm-cc/bluemonday v1.0.25/go.mod h1:ZIOjCQp1OrzBBPIJmfX4qDYFuhU02nx4bn030ixfHLE=
|
||||
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
|
||||
@@ -720,6 +713,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
|
||||
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
|
||||
|
||||
94  bridge-history-api/orm/batch.go  Normal file
@@ -0,0 +1,94 @@
package orm

import (
    "context"
    "fmt"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "gorm.io/gorm"
)

// RollupBatch is the struct for rollup_batch table
type RollupBatch struct {
    db *gorm.DB `gorm:"column:-"`

    ID               uint64         `json:"id" gorm:"column:id"`
    BatchIndex       uint64         `json:"batch_index" gorm:"column:batch_index"`
    BatchHash        string         `json:"batch_hash" gorm:"column:batch_hash"`
    CommitHeight     uint64         `json:"commit_height" gorm:"column:commit_height"`
    StartBlockNumber uint64         `json:"start_block_number" gorm:"column:start_block_number"`
    EndBlockNumber   uint64         `json:"end_block_number" gorm:"column:end_block_number"`
    CreatedAt        *time.Time     `json:"created_at" gorm:"column:created_at"`
    UpdatedAt        *time.Time     `json:"updated_at" gorm:"column:updated_at"`
    DeletedAt        gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewRollupBatch creates a new RollupBatch instance
func NewRollupBatch(db *gorm.DB) *RollupBatch {
    return &RollupBatch{db: db}
}

// TableName returns the table name for the RollupBatch model.
func (*RollupBatch) TableName() string {
    return "rollup_batch"
}

// GetLatestRollupBatchProcessedHeight returns the latest processed commit height from the rollup_batch table
func (r *RollupBatch) GetLatestRollupBatchProcessedHeight(ctx context.Context) (uint64, error) {
    var result RollupBatch
    err := r.db.WithContext(ctx).Unscoped().Select("commit_height").Order("id desc").First(&result).Error
    if err != nil {
        if err == gorm.ErrRecordNotFound {
            return 0, nil
        }
        return 0, fmt.Errorf("RollupBatch.GetLatestRollupBatchProcessedHeight error: %w", err)
    }
    return result.CommitHeight, nil
}

// GetLatestRollupBatch returns the latest rollup batch in db
func (r *RollupBatch) GetLatestRollupBatch(ctx context.Context) (*RollupBatch, error) {
    var result RollupBatch
    err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_hash is not NULL").Order("batch_index desc").First(&result).Error
    if err != nil {
        if err == gorm.ErrRecordNotFound {
            return nil, nil
        }
        return nil, fmt.Errorf("RollupBatch.GetLatestRollupBatch error: %w", err)
    }
    return &result, nil
}

// GetRollupBatchByIndex returns the rollup batch with the given batch index
func (r *RollupBatch) GetRollupBatchByIndex(ctx context.Context, index uint64) (*RollupBatch, error) {
    var result RollupBatch
    err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_index = ?", index).First(&result).Error
    if err != nil {
        return nil, fmt.Errorf("RollupBatch.GetRollupBatchByIndex error: %w", err)
    }
    return &result, nil
}

// InsertRollupBatch batch-inserts rollup batches into db, optionally inside the given transaction
func (r *RollupBatch) InsertRollupBatch(ctx context.Context, batches []*RollupBatch, dbTx ...*gorm.DB) error {
    if len(batches) == 0 {
        return nil
    }
    db := r.db
    if len(dbTx) > 0 && dbTx[0] != nil {
        db = dbTx[0]
    }
    err := db.WithContext(ctx).Model(&RollupBatch{}).Create(&batches).Error
    if err != nil {
        batchIndexes := make([]uint64, 0, len(batches))
        heights := make([]uint64, 0, len(batches))
        for _, batch := range batches {
            batchIndexes = append(batchIndexes, batch.BatchIndex)
            heights = append(heights, batch.CommitHeight)
        }
        log.Error("failed to insert rollup batch", "batchIndexes", batchIndexes, "heights", heights, "err", err)
        return fmt.Errorf("RollupBatch.InsertRollupBatch error: %w", err)
    }
    return nil
}

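For orientation, here is a minimal usage sketch of the new GORM-based RollupBatch ORM above. It is not part of the diff: the Postgres driver (gorm.io/driver/postgres), the import path, and the DSN are assumptions for illustration only.

package main

import (
    "context"
    "fmt"

    "gorm.io/driver/postgres" // assumed driver; any GORM dialector would work
    "gorm.io/gorm"

    "bridge-history-api/orm" // assumed module import path
)

func main() {
    // Assumption: illustrative DSN, replace with a real connection string.
    db, err := gorm.Open(postgres.Open("postgres://user:pass@localhost:5432/bridge_history?sslmode=disable"), &gorm.Config{})
    if err != nil {
        panic(err)
    }
    batchOrm := orm.NewRollupBatch(db)
    latest, err := batchOrm.GetLatestRollupBatch(context.Background())
    if err != nil {
        panic(err)
    }
    if latest != nil {
        fmt.Println("latest committed batch index:", latest.BatchIndex)
    }
}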
370  bridge-history-api/orm/cross_msg.go  Normal file
@@ -0,0 +1,370 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// AssetType can be ETH/ERC20/ERC1155/ERC721
|
||||
type AssetType int
|
||||
|
||||
// MsgType can be layer1/layer2 msg
|
||||
type MsgType int
|
||||
|
||||
func (a AssetType) String() string {
|
||||
switch a {
|
||||
case ETH:
|
||||
return "ETH"
|
||||
case ERC20:
|
||||
return "ERC20"
|
||||
case ERC1155:
|
||||
return "ERC1155"
|
||||
case ERC721:
|
||||
return "ERC721"
|
||||
}
|
||||
return "Unknown Asset Type"
|
||||
}
|
||||
|
||||
const (
|
||||
// ETH = 0
|
||||
ETH AssetType = iota
|
||||
// ERC20 = 1
|
||||
ERC20
|
||||
// ERC721 = 2
|
||||
ERC721
|
||||
// ERC1155 = 3
|
||||
ERC1155
|
||||
)
|
||||
|
||||
const (
|
||||
// UnknownMsg = 0
|
||||
UnknownMsg MsgType = iota
|
||||
// Layer1Msg = 1
|
||||
Layer1Msg
|
||||
// Layer2Msg = 2
|
||||
Layer2Msg
|
||||
)
|
||||
|
||||
// CrossMsg represents a cross message from layer 1 to layer 2
|
||||
type CrossMsg struct {
|
||||
db *gorm.DB `gorm:"column:-"`
|
||||
|
||||
ID uint64 `json:"id" gorm:"column:id"`
|
||||
MsgHash string `json:"msg_hash" gorm:"column:msg_hash"`
|
||||
Height uint64 `json:"height" gorm:"column:height"`
|
||||
Sender string `json:"sender" gorm:"column:sender"`
|
||||
Target string `json:"target" gorm:"column:target"`
|
||||
Amount string `json:"amount" gorm:"column:amount"`
|
||||
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash;default:''"`
|
||||
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:''"`
|
||||
Layer1Token string `json:"layer1_token" gorm:"column:layer1_token;default:''"`
|
||||
Layer2Token string `json:"layer2_token" gorm:"column:layer2_token;default:''"`
|
||||
TokenIDs string `json:"token_ids" gorm:"column:token_ids;default:''"`
|
||||
TokenAmounts string `json:"token_amounts" gorm:"column:token_amounts;default:''"`
|
||||
Asset int `json:"asset" gorm:"column:asset"`
|
||||
MsgType int `json:"msg_type" gorm:"column:msg_type"`
|
||||
Timestamp *time.Time `json:"timestamp" gorm:"column:block_timestamp;default;NULL"`
|
||||
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
|
||||
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
|
||||
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
|
||||
}
|
||||
|
||||
// TableName returns the table name for the CrossMsg model.
|
||||
func (*CrossMsg) TableName() string {
|
||||
return "cross_message"
|
||||
}
|
||||
|
||||
// NewCrossMsg returns a new instance of CrossMsg.
|
||||
func NewCrossMsg(db *gorm.DB) *CrossMsg {
|
||||
return &CrossMsg{db: db}
|
||||
}
|
||||
|
||||
// L1 Cross Msgs Operations
|
||||
|
||||
// GetL1CrossMsgByHash returns layer1 cross message by given hash
|
||||
func (c *CrossMsg) GetL1CrossMsgByHash(ctx context.Context, l1Hash common.Hash) (*CrossMsg, error) {
|
||||
var result CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("layer1_hash = ? AND msg_type = ?", l1Hash.String(), Layer1Msg).First(&result).Error
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("CrossMsg.GetL1CrossMsgByHash error: %w", err)
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// GetLatestL1ProcessedHeight returns the latest processed height of layer1 cross messages
|
||||
func (c *CrossMsg) GetLatestL1ProcessedHeight(ctx context.Context) (uint64, error) {
|
||||
var result CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("msg_type = ?", Layer1Msg).
|
||||
Select("height").
|
||||
Order("id DESC").
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("CrossMsg.GetLatestL1ProcessedHeight error: %w", err)
|
||||
}
|
||||
return result.Height, nil
|
||||
}
|
||||
|
||||
// GetL1EarliestNoBlockTimestampHeight returns the earliest layer1 cross message height which has no block timestamp
|
||||
func (c *CrossMsg) GetL1EarliestNoBlockTimestampHeight(ctx context.Context) (uint64, error) {
|
||||
var result CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("block_timestamp IS NULL AND msg_type = ?", Layer1Msg).
|
||||
Select("height").
|
||||
Order("height ASC").
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("CrossMsg.GetL1EarliestNoBlockTimestampHeight error: %w", err)
|
||||
}
|
||||
return result.Height, nil
|
||||
}
|
||||
|
||||
// InsertL1CrossMsg batch insert layer1 cross messages into db
|
||||
func (c *CrossMsg) InsertL1CrossMsg(ctx context.Context, messages []*CrossMsg, dbTx ...*gorm.DB) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&CrossMsg{}).Create(&messages).Error
|
||||
if err != nil {
|
||||
l1hashes := make([]string, 0, len(messages))
|
||||
heights := make([]uint64, 0, len(messages))
|
||||
for _, msg := range messages {
|
||||
l1hashes = append(l1hashes, msg.Layer1Hash)
|
||||
heights = append(heights, msg.Height)
|
||||
}
|
||||
log.Error("failed to insert l1 cross messages", "l1hashes", l1hashes, "heights", heights, "err", err)
|
||||
return fmt.Errorf("CrossMsg.InsertL1CrossMsg error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateL1CrossMsgHash update l1 cross msg hash in db, no need to check msg_type since layer1_hash wont be empty if its layer1 msg
|
||||
func (c *CrossMsg) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash, dbTx ...*gorm.DB) error {
|
||||
db := c.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := c.db.Model(&CrossMsg{}).Where("layer1_hash = ?", l1Hash.Hex()).Update("msg_hash", msgHash.Hex()).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("CrossMsg.UpdateL1CrossMsgHash error: %w", err)
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// UpdateL1BlockTimestamp update layer1 block timestamp
|
||||
func (c *CrossMsg) UpdateL1BlockTimestamp(ctx context.Context, height uint64, timestamp time.Time) error {
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("height = ? AND msg_type = ?", height, Layer1Msg).
|
||||
Update("block_timestamp", timestamp).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("CrossMsg.UpdateL1BlockTimestamp error: %w", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteL1CrossMsgAfterHeight soft delete layer1 cross messages after given height
|
||||
func (c *CrossMsg) DeleteL1CrossMsgAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
|
||||
db := c.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Delete(&CrossMsg{}, "height > ? AND msg_type = ?", height, Layer1Msg).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("CrossMsg.DeleteL1CrossMsgAfterHeight error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// L2 Cross Msgs Operations
|
||||
|
||||
// GetL2CrossMsgByHash returns layer2 cross message by given hash
|
||||
func (c *CrossMsg) GetL2CrossMsgByHash(ctx context.Context, l2Hash common.Hash) (*CrossMsg, error) {
|
||||
var result CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("layer2_hash = ? AND msg_type = ?", l2Hash.String(), Layer1Msg).First(&result).Error
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("CrossMsg.GetL2CrossMsgByHash error: %w", err)
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// GetLatestL2ProcessedHeight returns the latest processed height of layer2 cross messages
|
||||
func (c *CrossMsg) GetLatestL2ProcessedHeight(ctx context.Context) (uint64, error) {
|
||||
var result CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Select("height").
|
||||
Where("msg_type = ?", Layer2Msg).
|
||||
Order("id DESC").
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("CrossMsg.GetLatestL2ProcessedHeight error: %w", err)
|
||||
}
|
||||
return result.Height, nil
|
||||
}
|
||||
|
||||
// GetL2CrossMsgByMsgHashList returns layer2 cross messages under given msg hashes
|
||||
func (c *CrossMsg) GetL2CrossMsgByMsgHashList(ctx context.Context, msgHashList []string) ([]*CrossMsg, error) {
|
||||
var results []*CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("msg_hash IN (?) AND msg_type = ?", msgHashList, Layer2Msg).
|
||||
Find(&results).
|
||||
Error
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("CrossMsg.GetL2CrossMsgByMsgHashList error: %w", err)
|
||||
}
|
||||
if len(results) == 0 {
|
||||
log.Debug("no CrossMsg under given msg hashes", "msg hash list", msgHashList)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// GetL2EarliestNoBlockTimestampHeight returns the earliest layer2 cross message height which has no block timestamp
|
||||
func (c *CrossMsg) GetL2EarliestNoBlockTimestampHeight(ctx context.Context) (uint64, error) {
|
||||
var result CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("block_timestamp IS NULL AND msg_type = ?", Layer2Msg).
|
||||
Select("height").
|
||||
Order("height ASC").
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("CrossMsg.GetL2EarliestNoBlockTimestampHeight error: %w", err)
|
||||
}
|
||||
return result.Height, nil
|
||||
}
|
||||
|
||||
// InsertL2CrossMsg batch insert layer2 cross messages
|
||||
func (c *CrossMsg) InsertL2CrossMsg(ctx context.Context, messages []*CrossMsg, dbTx ...*gorm.DB) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&CrossMsg{}).Create(&messages).Error
|
||||
if err != nil {
|
||||
l2hashes := make([]string, 0, len(messages))
|
||||
heights := make([]uint64, 0, len(messages))
|
||||
for _, msg := range messages {
|
||||
l2hashes = append(l2hashes, msg.Layer2Hash)
|
||||
heights = append(heights, msg.Height)
|
||||
}
|
||||
log.Error("failed to insert l2 cross messages", "l2hashes", l2hashes, "heights", heights, "err", err)
|
||||
return fmt.Errorf("CrossMsg.InsertL2CrossMsg error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateL2CrossMsgHash update layer2 cross message hash
|
||||
func (c *CrossMsg) UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash, dbTx ...*gorm.DB) error {
|
||||
db := c.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&CrossMsg{}).
|
||||
Where("layer2_hash = ?", l2Hash.String()).
|
||||
Update("msg_hash", msgHash.String()).
|
||||
Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("CrossMsg.UpdateL2CrossMsgHash error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateL2BlockTimestamp update layer2 cross message block timestamp
|
||||
func (c *CrossMsg) UpdateL2BlockTimestamp(ctx context.Context, height uint64, timestamp time.Time) error {
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("height = ? AND msg_type = ?", height, Layer2Msg).
|
||||
Update("block_timestamp", timestamp).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("CrossMsg.UpdateL2BlockTimestamp error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteL2CrossMsgFromHeight delete layer2 cross messages from given height
|
||||
func (c *CrossMsg) DeleteL2CrossMsgFromHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
|
||||
db := c.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&CrossMsg{}).Delete("height > ? AND msg_type = ?", height, Layer2Msg).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("CrossMsg.DeleteL2CrossMsgFromHeight error: %w", err)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// General Operations
|
||||
|
||||
// GetTotalCrossMsgCountByAddress get total cross msg count by address
|
||||
func (c *CrossMsg) GetTotalCrossMsgCountByAddress(ctx context.Context, sender string) (uint64, error) {
|
||||
var count int64
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("sender = ?", sender).
|
||||
Count(&count).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("CrossMsg.GetTotalCrossMsgCountByAddress error: %w", err)
|
||||
|
||||
}
|
||||
return uint64(count), nil
|
||||
}
|
||||
|
||||
// GetCrossMsgsByAddressWithOffset get cross msgs by address with offset
|
||||
func (c *CrossMsg) GetCrossMsgsByAddressWithOffset(ctx context.Context, sender string, offset int, limit int) ([]CrossMsg, error) {
|
||||
var messages []CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("sender = ?", sender).
|
||||
Order("block_timestamp DESC NULLS FIRST, id DESC").
|
||||
Limit(limit).
|
||||
Offset(offset).
|
||||
Find(&messages).
|
||||
Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("CrossMsg.GetCrossMsgsByAddressWithOffset error: %w", err)
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
215  bridge-history-api/orm/l2_sent_msg.go  Normal file
@@ -0,0 +1,215 @@
package orm

import (
    "context"
    "fmt"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "gorm.io/gorm"
)

// L2SentMsg defines the struct for l2_sent_msg table record
type L2SentMsg struct {
    db *gorm.DB `gorm:"column:-"`

    ID             uint64         `json:"id" gorm:"column:id"`
    OriginalSender string         `json:"original_sender" gorm:"column:original_sender;default:''"`
    TxHash         string         `json:"tx_hash" gorm:"column:tx_hash"`
    MsgHash        string         `json:"msg_hash" gorm:"column:msg_hash"`
    Sender         string         `json:"sender" gorm:"column:sender"`
    Target         string         `json:"target" gorm:"column:target"`
    Value          string         `json:"value" gorm:"column:value"`
    Height         uint64         `json:"height" gorm:"column:height"`
    Nonce          uint64         `json:"nonce" gorm:"column:nonce"`
    BatchIndex     uint64         `json:"batch_index" gorm:"column:batch_index;default:0"`
    MsgProof       string         `json:"msg_proof" gorm:"column:msg_proof;default:''"`
    MsgData        string         `json:"msg_data" gorm:"column:msg_data;default:''"`
    CreatedAt      *time.Time     `json:"created_at" gorm:"column:created_at"`
    UpdatedAt      *time.Time     `json:"updated_at" gorm:"column:updated_at"`
    DeletedAt      gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewL2SentMsg creates a new L2SentMsg instance
func NewL2SentMsg(db *gorm.DB) *L2SentMsg {
    return &L2SentMsg{db: db}
}

// TableName returns the table name for the L2SentMsg model.
func (*L2SentMsg) TableName() string {
    return "l2_sent_msg"
}

// GetL2SentMsgByHash gets the l2 sent msg by msg hash
func (l *L2SentMsg) GetL2SentMsgByHash(ctx context.Context, msgHash string) (*L2SentMsg, error) {
    var result L2SentMsg
    err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
        Where("msg_hash = ?", msgHash).
        First(&result).
        Error
    if err != nil {
        return nil, fmt.Errorf("L2SentMsg.GetL2SentMsgByHash error: %w", err)
    }
    return &result, nil
}

// GetLatestSentMsgHeightOnL2 gets the latest sent msg height on l2
func (l *L2SentMsg) GetLatestSentMsgHeightOnL2(ctx context.Context) (uint64, error) {
    var result L2SentMsg
    err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
        Select("height").
        Order("nonce DESC").
        First(&result).Error

    if err != nil {
        if err == gorm.ErrRecordNotFound {
            return 0, nil
        }
        return 0, fmt.Errorf("L2SentMsg.GetLatestSentMsgHeightOnL2 error: %w", err)
    }
    return result.Height, nil
}

// GetClaimableL2SentMsgByAddressWithOffset gets claimable l2 sent msgs by address with offset
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressWithOffset(ctx context.Context, address string, offset int, limit int) ([]*L2SentMsg, error) {
    var results []*L2SentMsg
    err := l.db.WithContext(ctx).Raw(`SELECT * FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='' ORDER BY id DESC LIMIT $2 OFFSET $3;`, address, limit, offset).
        Scan(&results).Error
    if err != nil {
        return nil, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressWithOffset error: %w", err)
    }
    return results, nil
}

// GetClaimableL2SentMsgByAddressTotalNum gets the total number of claimable l2 sent msgs by address
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressTotalNum(ctx context.Context, address string) (uint64, error) {
    var count uint64
    err := l.db.WithContext(ctx).Raw(`SELECT COUNT(*) FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='';`, address).
        Scan(&count).Error
    if err != nil {
        return 0, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressTotalNum error: %w", err)
    }
    return count, nil
}

// GetLatestL2SentMsgBatchIndex gets the latest l2 sent msg batch index
func (l *L2SentMsg) GetLatestL2SentMsgBatchIndex(ctx context.Context) (int64, error) {
    var result L2SentMsg
    err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
        Where("batch_index != 0").
        Order("batch_index DESC").
        Select("batch_index").
        First(&result).
        Error
    if err != nil {
        return -1, fmt.Errorf("L2SentMsg.GetLatestL2SentMsgBatchIndex error: %w", err)
    }
    // Watch for overflow, though it's not likely to happen.
    // Only batch_index is selected above, so return it rather than the unpopulated height.
    return int64(result.BatchIndex), nil
}

// GetL2SentMsgMsgHashByHeightRange gets l2 sent msgs within the given height range
func (l *L2SentMsg) GetL2SentMsgMsgHashByHeightRange(ctx context.Context, startHeight, endHeight uint64) ([]*L2SentMsg, error) {
    var results []*L2SentMsg
    err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
        Where("height >= ? AND height <= ?", startHeight, endHeight).
        Order("nonce ASC").
        Find(&results).
        Error
    if err != nil {
        return nil, fmt.Errorf("L2SentMsg.GetL2SentMsgMsgHashByHeightRange error: %w", err)
    }
    return results, nil
}

// GetL2SentMessageByNonce gets the l2 sent message with the given nonce
func (l *L2SentMsg) GetL2SentMessageByNonce(ctx context.Context, nonce uint64) (*L2SentMsg, error) {
    var result L2SentMsg
    err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
        Where("nonce = ?", nonce).
        First(&result).
        Error
    if err != nil {
        if err == gorm.ErrRecordNotFound {
            return nil, nil
        }
        return nil, fmt.Errorf("L2SentMsg.GetL2SentMessageByNonce error: %w", err)
    }
    return &result, nil
}

// GetLatestL2SentMsgLEHeight gets the latest l2 sent msg at a height less than or equal to endBlockNumber
func (l *L2SentMsg) GetLatestL2SentMsgLEHeight(ctx context.Context, endBlockNumber uint64) (*L2SentMsg, error) {
    var result L2SentMsg
    err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
        Where("height <= ?", endBlockNumber).
        Order("nonce DESC").
        First(&result).
        Error
    if err != nil {
        if err == gorm.ErrRecordNotFound {
            return nil, nil
        }
        return nil, fmt.Errorf("L2SentMsg.GetLatestL2SentMsgLEHeight error: %w", err)
    }
    return &result, nil
}

// InsertL2SentMsg batch-inserts l2 sent msgs, optionally inside the given transaction
func (l *L2SentMsg) InsertL2SentMsg(ctx context.Context, messages []*L2SentMsg, dbTx ...*gorm.DB) error {
    if len(messages) == 0 {
        return nil
    }
    db := l.db
    if len(dbTx) > 0 && dbTx[0] != nil {
        db = dbTx[0]
    }
    db = db.WithContext(ctx)
    err := db.Model(&L2SentMsg{}).Create(&messages).Error
    if err != nil {
        l2hashes := make([]string, 0, len(messages))
        heights := make([]uint64, 0, len(messages))
        for _, msg := range messages {
            l2hashes = append(l2hashes, msg.TxHash)
            heights = append(heights, msg.Height)
        }
        log.Error("failed to insert l2 sent messages", "l2hashes", l2hashes, "heights", heights, "err", err)
        return fmt.Errorf("L2SentMsg.InsertL2SentMsg error: %w", err)
    }
    return nil
}

// UpdateL2MessageProof updates the proof and batch index of an l2 sent msg, optionally inside the given transaction
func (l *L2SentMsg) UpdateL2MessageProof(ctx context.Context, msgHash string, proof string, batchIndex uint64, dbTx ...*gorm.DB) error {
    db := l.db
    if len(dbTx) > 0 && dbTx[0] != nil {
        db = dbTx[0]
    }
    db = db.WithContext(ctx)
    err := db.Model(&L2SentMsg{}).
        Where("msg_hash = ?", msgHash).
        Updates(map[string]interface{}{
            "msg_proof":   proof,
            "batch_index": batchIndex,
        }).Error
    if err != nil {
        return fmt.Errorf("L2SentMsg.UpdateL2MessageProof error: %w", err)
    }
    return nil
}

// DeleteL2SentMsgAfterHeight deletes l2 sent msgs after the given height
func (l *L2SentMsg) DeleteL2SentMsgAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
    db := l.db
    if len(dbTx) > 0 && dbTx[0] != nil {
        db = dbTx[0]
    }
    err := db.WithContext(ctx).Model(&L2SentMsg{}).Delete("height > ?", height).Error
    if err != nil {
        return fmt.Errorf("L2SentMsg.DeleteL2SentMsgAfterHeight error: %w", err)
    }
    return nil
}

142
bridge-history-api/orm/relayed_msg.go
Normal file
@@ -0,0 +1,142 @@
package orm

import (
    "context"
    "fmt"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "gorm.io/gorm"
)

// RelayedMsg is the struct for relayed_msg table
type RelayedMsg struct {
    db *gorm.DB `gorm:"column:-"`

    ID         uint64         `json:"id" gorm:"column:id"`
    MsgHash    string         `json:"msg_hash" gorm:"column:msg_hash"`
    Height     uint64         `json:"height" gorm:"column:height"`
    Layer1Hash string         `json:"layer1_hash" gorm:"column:layer1_hash;default:''"`
    Layer2Hash string         `json:"layer2_hash" gorm:"column:layer2_hash;default:''"`
    CreatedAt  *time.Time     `json:"created_at" gorm:"column:created_at"`
    UpdatedAt  *time.Time     `json:"updated_at" gorm:"column:updated_at"`
    DeletedAt  gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewRelayedMsg creates a new RelayedMsg instance
func NewRelayedMsg(db *gorm.DB) *RelayedMsg {
    return &RelayedMsg{db: db}
}

// TableName returns the table name for the RelayedMsg model.
func (*RelayedMsg) TableName() string {
    return "relayed_msg"
}

// GetRelayedMsgByHash get relayed msg by hash
func (r *RelayedMsg) GetRelayedMsgByHash(ctx context.Context, msgHash string) (*RelayedMsg, error) {
    var result RelayedMsg
    err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
        Where("msg_hash = ?", msgHash).
        First(&result).
        Error
    if err != nil {
        if err == gorm.ErrRecordNotFound {
            return nil, nil
        }
        return nil, fmt.Errorf("RelayedMsg.GetRelayedMsgByHash error: %w", err)
    }
    return &result, nil
}

// GetLatestRelayedHeightOnL1 get latest relayed height on l1
func (r *RelayedMsg) GetLatestRelayedHeightOnL1(ctx context.Context) (uint64, error) {
    var result RelayedMsg
    err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
        Select("height").
        Where("layer1_hash != ''").
        Order("height DESC").
        First(&result).
        Error
    if err != nil {
        if err == gorm.ErrRecordNotFound {
            return 0, nil
        }
        return 0, fmt.Errorf("RelayedMsg.GetLatestRelayedHeightOnL1 error: %w", err)
    }
    return result.Height, err
}

// GetLatestRelayedHeightOnL2 get latest relayed height on l2
func (r *RelayedMsg) GetLatestRelayedHeightOnL2(ctx context.Context) (uint64, error) {
    var result RelayedMsg
    err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
        Select("height").
        Where("layer2_hash != ''").
        Order("height DESC").
        First(&result).
        Error
    if err != nil {
        if err == gorm.ErrRecordNotFound {
            return 0, nil
        }
        return 0, fmt.Errorf("RelayedMsg.GetLatestRelayedHeightOnL2 error: %w", err)
    }
    return result.Height, nil
}

// InsertRelayedMsg batch inserts relayed messages into the db
func (r *RelayedMsg) InsertRelayedMsg(ctx context.Context, messages []*RelayedMsg, dbTx ...*gorm.DB) error {
    if len(messages) == 0 {
        return nil
    }
    db := r.db
    if len(dbTx) > 0 && dbTx[0] != nil {
        db = dbTx[0]
    }
    db.WithContext(ctx)
    err := db.Model(&RelayedMsg{}).Create(&messages).Error
    if err != nil {
        l2hashes := make([]string, 0, len(messages))
        l1hashes := make([]string, 0, len(messages))
        heights := make([]uint64, 0, len(messages))
        for _, msg := range messages {
            l2hashes = append(l2hashes, msg.Layer2Hash)
            l1hashes = append(l1hashes, msg.Layer1Hash)
            heights = append(heights, msg.Height)
        }
        log.Error("failed to insert relayed messages", "l2hashes", l2hashes, "l1hashes", l1hashes, "heights", heights, "err", err)
        return fmt.Errorf("RelayedMsg.InsertRelayedMsg error: %w", err)
    }
    return nil
}

// DeleteL1RelayedHashAfterHeight delete l1 relayed hash after height
func (r *RelayedMsg) DeleteL1RelayedHashAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
    db := r.db
    if len(dbTx) > 0 && dbTx[0] != nil {
        db = dbTx[0]
    }
    db.WithContext(ctx)
    err := db.Model(&RelayedMsg{}).
        Delete("height > ? AND layer1_hash != ''", height).Error
    if err != nil {
        return fmt.Errorf("RelayedMsg.DeleteL1RelayedHashAfterHeight error: %w", err)
    }
    return nil
}

// DeleteL2RelayedHashAfterHeight delete l2 relayed hash after height
func (r *RelayedMsg) DeleteL2RelayedHashAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
    db := r.db
    if len(dbTx) > 0 && dbTx[0] != nil {
        db = dbTx[0]
    }
    db.WithContext(ctx)
    err := db.Model(&RelayedMsg{}).
        Delete("height > ? AND layer2_hash != ''", height).Error
    if err != nil {
        return fmt.Errorf("RelayedMsg.DeleteL2RelayedHashAfterHeight error: %w", err)
    }
    return nil
}
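All of the write paths in this file (and in l2_sent_msg.go above) accept an optional *gorm.DB so a caller can group several writes into one transaction. A hedged sketch of that pattern; the wrapper function itself is an assumption, not part of this changeset.

// saveBlockData is a hypothetical caller that batches two ORM writes into a single transaction.
func saveBlockData(ctx context.Context, db *gorm.DB, sent []*L2SentMsg, relayed []*RelayedMsg) error {
    return db.Transaction(func(tx *gorm.DB) error {
        if err := NewL2SentMsg(tx).InsertL2SentMsg(ctx, sent, tx); err != nil {
            return err
        }
        return NewRelayedMsg(tx).InsertRelayedMsg(ctx, relayed, tx)
    })
}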
@@ -1,14 +1,15 @@
package service

import (
    "context"
    "strconv"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/log"
    "gorm.io/gorm"

    "bridge-history-api/db"
    "bridge-history-api/db/orm"
    "bridge-history-api/orm"
)

// Finalized the schema of tx finalized infos
@@ -48,30 +49,33 @@ type TxHistoryInfo struct {

// HistoryService example service.
type HistoryService interface {
    GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
    GetTxsByAddress(address common.Address, offset int, limit int) ([]*TxHistoryInfo, uint64, error)
    GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error)
    GetClaimableTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
    GetClaimableTxsByAddress(address common.Address, offset int, limit int) ([]*TxHistoryInfo, uint64, error)
}

// NewHistoryService returns a service backed with a "db"
func NewHistoryService(db db.OrmFactory) HistoryService {
    service := &historyBackend{db: db, prefix: "Scroll-Bridge-History-Server"}
func NewHistoryService(ctx context.Context, db *gorm.DB) HistoryService {
    service := &historyBackend{ctx: ctx, db: db, prefix: "Scroll-Bridge-History-Server"}
    return service
}

type historyBackend struct {
    prefix string
    db     db.OrmFactory
    ctx    context.Context
    db     *gorm.DB
}

// GetCrossTxClaimInfo get UserClaimInfos by address
func GetCrossTxClaimInfo(msgHash string, db db.OrmFactory) *UserClaimInfo {
    l2sentMsg, err := db.GetL2SentMsgByHash(msgHash)
    if err != nil {
func GetCrossTxClaimInfo(ctx context.Context, msgHash string, db *gorm.DB) *UserClaimInfo {
    l2SentMsgOrm := orm.NewL2SentMsg(db)
    rollupOrm := orm.NewRollupBatch(db)
    l2sentMsg, err := l2SentMsgOrm.GetL2SentMsgByHash(ctx, msgHash)
    if err != nil || l2sentMsg == nil {
        log.Debug("GetCrossTxClaimInfo failed", "error", err)
        return &UserClaimInfo{}
    }
    batch, err := db.GetRollupBatchByIndex(l2sentMsg.BatchIndex)
    batch, err := rollupOrm.GetRollupBatchByIndex(ctx, l2sentMsg.BatchIndex)
    if err != nil {
        log.Debug("GetCrossTxClaimInfo failed", "error", err)
        return &UserClaimInfo{}
@@ -89,10 +93,11 @@ func GetCrossTxClaimInfo(msgHash string, db db.OrmFactory) *UserClaimInfo {

}

func updateCrossTxHash(msgHash string, txInfo *TxHistoryInfo, db db.OrmFactory) {
    relayed, err := db.GetRelayedMsgByHash(msgHash)
func updateCrossTxHash(ctx context.Context, msgHash string, txInfo *TxHistoryInfo, db *gorm.DB) {
    relayed := orm.NewRelayedMsg(db)
    relayed, err := relayed.GetRelayedMsgByHash(ctx, msgHash)
    if err != nil {
        log.Error("updateCrossTxHash failed", "error", err)
        log.Debug("updateCrossTxHash failed", "error", err)
        return
    }
    if relayed == nil {
@@ -112,13 +117,15 @@ func updateCrossTxHash(msgHash string, txInfo *TxHistoryInfo, db db.OrmFactory)
}

// GetClaimableTxsByAddress get all claimable txs under given address
func (h *historyBackend) GetClaimableTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
func (h *historyBackend) GetClaimableTxsByAddress(address common.Address, offset int, limit int) ([]*TxHistoryInfo, uint64, error) {
    var txHistories []*TxHistoryInfo
    total, err := h.db.GetClaimableL2SentMsgByAddressTotalNum(address.Hex())
    l2SentMsgOrm := orm.NewL2SentMsg(h.db)
    l2CrossMsgOrm := orm.NewCrossMsg(h.db)
    total, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressTotalNum(h.ctx, address.Hex())
    if err != nil || total == 0 {
        return txHistories, 0, err
    }
    results, err := h.db.GetClaimableL2SentMsgByAddressWithOffset(address.Hex(), offset, limit)
    results, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressWithOffset(h.ctx, address.Hex(), offset, limit)
    if err != nil || len(results) == 0 {
        return txHistories, 0, err
    }
@@ -126,7 +133,7 @@ func (h *historyBackend) GetClaimableTxsByAddress(address common.Address, offset
    for _, result := range results {
        msgHashList = append(msgHashList, result.MsgHash)
    }
    crossMsgs, err := h.db.GetL2CrossMsgByMsgHashList(msgHashList)
    crossMsgs, err := l2CrossMsgOrm.GetL2CrossMsgByMsgHashList(h.ctx, msgHashList)
    // crossMsgs can be empty, because they can be emitted by user directly call contract
    if err != nil {
        return txHistories, 0, err
@@ -141,7 +148,7 @@ func (h *historyBackend) GetClaimableTxsByAddress(address common.Address, offset
            IsL1:        false,
            BlockNumber: result.Height,
            FinalizeTx:  &Finalized{},
            ClaimInfo:   GetCrossTxClaimInfo(result.MsgHash, h.db),
            ClaimInfo:   GetCrossTxClaimInfo(h.ctx, result.MsgHash, h.db),
        }
        if crossMsg, exist := crossMsgMap[result.MsgHash]; exist {
            txInfo.Amount = crossMsg.Amount
@@ -155,13 +162,14 @@ func (h *historyBackend) GetClaimableTxsByAddress(address common.Address, offset
}

// GetTxsByAddress get all txs under given address
func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
func (h *historyBackend) GetTxsByAddress(address common.Address, offset int, limit int) ([]*TxHistoryInfo, uint64, error) {
    var txHistories []*TxHistoryInfo
    total, err := h.db.GetTotalCrossMsgCountByAddress(address.String())
    utilOrm := orm.NewCrossMsg(h.db)
    total, err := utilOrm.GetTotalCrossMsgCountByAddress(h.ctx, address.String())
    if err != nil || total == 0 {
        return txHistories, 0, err
    }
    result, err := h.db.GetCrossMsgsByAddressWithOffset(address.String(), offset, limit)
    result, err := utilOrm.GetCrossMsgsByAddressWithOffset(h.ctx, address.String(), offset, limit)

    if err != nil {
        return nil, 0, err
@@ -178,9 +186,9 @@ func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, l
            FinalizeTx: &Finalized{
                Hash: "",
            },
            ClaimInfo: GetCrossTxClaimInfo(msg.MsgHash, h.db),
            ClaimInfo: GetCrossTxClaimInfo(h.ctx, msg.MsgHash, h.db),
        }
        updateCrossTxHash(msg.MsgHash, txHistory, h.db)
        updateCrossTxHash(h.ctx, msg.MsgHash, txHistory, h.db)
        txHistories = append(txHistories, txHistory)
    }
    return txHistories, total, nil
@@ -189,8 +197,9 @@ func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, l
// GetTxsByHashes get tx infos under given tx hashes
func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error) {
    txHistories := make([]*TxHistoryInfo, 0)
    CrossMsgOrm := orm.NewCrossMsg(h.db)
    for _, hash := range hashes {
        l1result, err := h.db.GetL1CrossMsgByHash(common.HexToHash(hash))
        l1result, err := CrossMsgOrm.GetL1CrossMsgByHash(h.ctx, common.HexToHash(hash))
        if err != nil {
            return nil, err
        }
@@ -207,11 +216,11 @@ func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, erro
                    Hash: "",
                },
            }
            updateCrossTxHash(l1result.MsgHash, txHistory, h.db)
            updateCrossTxHash(h.ctx, l1result.MsgHash, txHistory, h.db)
            txHistories = append(txHistories, txHistory)
            continue
        }
        l2result, err := h.db.GetL2CrossMsgByHash(common.HexToHash(hash))
        l2result, err := CrossMsgOrm.GetL2CrossMsgByHash(h.ctx, common.HexToHash(hash))
        if err != nil {
            return nil, err
        }
@@ -227,9 +236,9 @@ func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, erro
                FinalizeTx: &Finalized{
                    Hash: "",
                },
                ClaimInfo: GetCrossTxClaimInfo(l2result.MsgHash, h.db),
                ClaimInfo: GetCrossTxClaimInfo(h.ctx, l2result.MsgHash, h.db),
            }
            updateCrossTxHash(l2result.MsgHash, txHistory, h.db)
            updateCrossTxHash(h.ctx, l2result.MsgHash, txHistory, h.db)
            txHistories = append(txHistories, txHistory)
            continue
        }

@@ -11,7 +11,7 @@ import (
    "github.com/ethereum/go-ethereum/log"

    backendabi "bridge-history-api/abi"
    "bridge-history-api/db/orm"
    "bridge-history-api/orm"
)

// CachedParsedTxCalldata store parsed batch infos
@@ -46,6 +46,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
                Amount:     event.Amount.String(),
                Asset:      int(orm.ETH),
                Layer1Hash: vlog.TxHash.Hex(),
                MsgType:    int(orm.Layer1Msg),
                MsgHash:    msgHash,
            })
        case backendabi.L1DepositERC20Sig:
@@ -64,6 +65,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
                Layer1Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
                MsgType:     int(orm.Layer1Msg),
                MsgHash:     msgHash,
            })
        case backendabi.L1DepositERC721Sig:
@@ -82,6 +84,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
                TokenIDs:    event.TokenID.String(),
                MsgType:     int(orm.Layer1Msg),
                MsgHash:     msgHash,
            })
        case backendabi.L1DepositERC1155Sig:
@@ -101,6 +104,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
                Layer2Token: event.L2Token.Hex(),
                TokenIDs:    event.TokenID.String(),
                Amount:      event.Amount.String(),
                MsgType:     int(orm.Layer1Msg),
                MsgHash:     msgHash,
            })
        case backendabi.L1SentMessageEventSignature:
@@ -128,6 +132,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
                TokenIDs:    convertBigIntArrayToString(event.TokenIDs),
                MsgType:     int(orm.Layer1Msg),
                MsgHash:     msgHash,
            })
        case backendabi.L1BatchDepositERC1155Sig:
@@ -147,6 +152,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
                Layer2Token:  event.L2Token.Hex(),
                TokenIDs:     convertBigIntArrayToString(event.TokenIDs),
                TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
                MsgType:      int(orm.Layer1Msg),
                MsgHash:      msgHash,
            })
        case backendabi.L1RelayedMessageEventSignature:
@@ -194,6 +200,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
                Amount:     event.Amount.String(),
                Asset:      int(orm.ETH),
                Layer2Hash: vlog.TxHash.Hex(),
                MsgType:    int(orm.Layer2Msg),
                MsgHash:    l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
            })
        case backendabi.L2WithdrawERC20Sig:
@@ -213,6 +220,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
                Layer2Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
                MsgType:     int(orm.Layer2Msg),
                MsgHash:     l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
            })
        case backendabi.L2WithdrawERC721Sig:
@@ -232,6 +240,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
                TokenIDs:    event.TokenID.String(),
                MsgType:     int(orm.Layer2Msg),
                MsgHash:     l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
            })
        case backendabi.L2WithdrawERC1155Sig:
@@ -252,6 +261,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
                Layer2Token: event.L2Token.Hex(),
                TokenIDs:    event.TokenID.String(),
                Amount:      event.Amount.String(),
                MsgType:     int(orm.Layer2Msg),
                MsgHash:     l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
            })
        case backendabi.L2BatchWithdrawERC721Sig:
@@ -270,6 +280,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
                Layer1Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
                MsgType:     int(orm.Layer2Msg),
                TokenIDs:    convertBigIntArrayToString(event.TokenIDs),
                MsgHash:     l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
            })
@@ -289,6 +300,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
                Layer1Hash:   vlog.TxHash.Hex(),
                Layer1Token:  event.L1Token.Hex(),
                Layer2Token:  event.L2Token.Hex(),
                MsgType:      int(orm.Layer2Msg),
                TokenIDs:     convertBigIntArrayToString(event.TokenIDs),
                TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
                MsgHash:      l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
@@ -334,7 +346,6 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
// ParseBatchInfoFromScrollChain parses ScrollChain events
func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
    var rollupBatches []*orm.RollupBatch
    cache := make(map[string]CachedParsedTxCalldata)
    for _, vlog := range logs {
        switch vlog.Topics[0] {
        case backendabi.L1CommitBatchEventSignature:
@@ -344,42 +355,22 @@ func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client
                log.Warn("Failed to unpack CommitBatch event", "err", err)
                return rollupBatches, err
            }
            if _, ok := cache[vlog.TxHash.Hex()]; ok {
                c := cache[vlog.TxHash.Hex()]
                c.CallDataIndex++
                rollupBatches = append(rollupBatches, &orm.RollupBatch{
                    CommitHeight:     vlog.BlockNumber,
                    BatchIndex:       c.BatchIndices[c.CallDataIndex],
                    BatchHash:        event.BatchHash.Hex(),
                    StartBlockNumber: c.StartBlocks[c.CallDataIndex],
                    EndBlockNumber:   c.EndBlocks[c.CallDataIndex],
                })
                cache[vlog.TxHash.Hex()] = c
                continue
            }

            commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
            if err != nil || isPending {
                log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
                return rollupBatches, err
            }
            indices, startBlocks, endBlocks, err := GetBatchRangeFromCalldataV1(commitTx.Data())
            index, startBlock, endBlock, err := GetBatchRangeFromCalldataV2(commitTx.Data())
            if err != nil {
                log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().Hex(), "height", vlog.BlockNumber)
                return rollupBatches, err
            }
            cache[vlog.TxHash.Hex()] = CachedParsedTxCalldata{
                CallDataIndex: 0,
                BatchIndices:  indices,
                StartBlocks:   startBlocks,
                EndBlocks:     endBlocks,
            }
            rollupBatches = append(rollupBatches, &orm.RollupBatch{
                CommitHeight:     vlog.BlockNumber,
                BatchIndex:       indices[0],
                BatchIndex:       index,
                BatchHash:        event.BatchHash.Hex(),
                StartBlockNumber: startBlocks[0],
                EndBlockNumber:   endBlocks[0],
                StartBlockNumber: startBlock,
                EndBlockNumber:   endBlock,
            })

        default:

@@ -78,14 +78,16 @@
      "max_l1_commit_gas_per_chunk": 11234567,
      "max_l1_commit_calldata_size_per_chunk": 112345,
      "min_l1_commit_calldata_size_per_chunk": 11234,
      "chunk_timeout_sec": 300
      "chunk_timeout_sec": 300,
      "gas_cost_increase_multiplier": 1.2
    },
    "batch_proposer_config": {
      "max_chunk_num_per_batch": 112,
      "max_l1_commit_gas_per_batch": 11234567,
      "max_l1_commit_calldata_size_per_batch": 112345,
      "min_chunk_num_per_batch": 11,
      "batch_timeout_sec": 300
      "batch_timeout_sec": 300,
      "gas_cost_increase_multiplier": 1.2
    }
  },
  "db_config": {

@@ -28,19 +28,21 @@ type L2Config struct {

// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
    MaxTxGasPerChunk                uint64 `json:"max_tx_gas_per_chunk"`
    MaxL2TxNumPerChunk              uint64 `json:"max_l2_tx_num_per_chunk"`
    MaxL1CommitGasPerChunk          uint64 `json:"max_l1_commit_gas_per_chunk"`
    MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
    MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
    ChunkTimeoutSec                 uint64 `json:"chunk_timeout_sec"`
    MaxTxGasPerChunk                uint64  `json:"max_tx_gas_per_chunk"`
    MaxL2TxNumPerChunk              uint64  `json:"max_l2_tx_num_per_chunk"`
    MaxL1CommitGasPerChunk          uint64  `json:"max_l1_commit_gas_per_chunk"`
    MaxL1CommitCalldataSizePerChunk uint64  `json:"max_l1_commit_calldata_size_per_chunk"`
    MinL1CommitCalldataSizePerChunk uint64  `json:"min_l1_commit_calldata_size_per_chunk"`
    ChunkTimeoutSec                 uint64  `json:"chunk_timeout_sec"`
    GasCostIncreaseMultiplier       float64 `json:"gas_cost_increase_multiplier"`
}

// BatchProposerConfig loads batch_proposer configuration items.
type BatchProposerConfig struct {
    MaxChunkNumPerBatch             uint64 `json:"max_chunk_num_per_batch"`
    MaxL1CommitGasPerBatch          uint64 `json:"max_l1_commit_gas_per_batch"`
    MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
    MinChunkNumPerBatch             uint64 `json:"min_chunk_num_per_batch"`
    BatchTimeoutSec                 uint64 `json:"batch_timeout_sec"`
    MaxChunkNumPerBatch             uint64  `json:"max_chunk_num_per_batch"`
    MaxL1CommitGasPerBatch          uint64  `json:"max_l1_commit_gas_per_batch"`
    MaxL1CommitCalldataSizePerBatch uint32  `json:"max_l1_commit_calldata_size_per_batch"`
    MinChunkNumPerBatch             uint64  `json:"min_chunk_num_per_batch"`
    BatchTimeoutSec                 uint64  `json:"batch_timeout_sec"`
    GasCostIncreaseMultiplier       float64 `json:"gas_cost_increase_multiplier"`
}
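The new gas_cost_increase_multiplier is applied as a safety margin on the estimated commit gas before comparing against the per-chunk or per-batch cap, as the proposer hunks below show. A reduced sketch of that check; the function name is illustrative, not part of this changeset.

// exceedsGasCap reports whether a padded gas estimate is over the configured cap.
func exceedsGasCap(estimatedGas uint64, multiplier float64, maxGas uint64) bool {
    return multiplier*float64(estimatedGas) > float64(maxGas)
}

// With multiplier 1.2 and cap 11234567, an estimate of 10000000 is rejected (12000000 > 11234567).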

@@ -109,7 +109,7 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
    for _, privStr := range jsonConfig.RollupSenderPrivateKeys {
        priv, err := crypto.ToECDSA(common.FromHex(privStr))
        if err != nil {
            return fmt.Errorf("incorrect roller_private_key format, err: %v", err)
            return fmt.Errorf("incorrect prover_private_key format, err: %v", err)
        }
        r.RollupSenderPrivateKeys = append(r.RollupSenderPrivateKeys, priv)
    }

@@ -171,9 +171,9 @@ func (r *Layer2Relayer) initializeGenesis() error {

    chunk := &types.Chunk{
        Blocks: []*types.WrappedBlock{{
            Header:           genesis,
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
            Header:       genesis,
            Transactions: nil,
            WithdrawRoot: common.Hash{},
        }},
    }

@@ -417,7 +417,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
        // The proof for this block is not ready yet.
        return
    case types.ProvingTaskProved:
        // It's an intermediate state. The roller manager received the proof but has not verified
        // It's an intermediate state. The prover manager received the proof but has not verified
        // the proof yet. We don't roll up the proof until it's verified.
        return
    case types.ProvingTaskVerified:

@@ -94,7 +94,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {

    err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
    assert.NoError(t, err)
    proof := &message.AggProof{
    proof := &message.BatchProof{
        Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
        FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
    }

@@ -28,6 +28,7 @@ type BatchProposer struct {
    maxL1CommitCalldataSizePerBatch uint32
    minChunkNumPerBatch             uint64
    batchTimeoutSec                 uint64
    gasCostIncreaseMultiplier       float64
}

// NewBatchProposer creates a new BatchProposer instance.
@@ -43,6 +44,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
        maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
        minChunkNumPerBatch:             cfg.MinChunkNumPerBatch,
        batchTimeoutSec:                 cfg.BatchTimeoutSec,
        gasCostIncreaseMultiplier:       cfg.GasCostIncreaseMultiplier,
    }
}

@@ -99,11 +101,40 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
    firstChunk := dbChunks[0]
    totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
    totalL1CommitGas := firstChunk.TotalL1CommitGas
    var totalChunks uint64 = 1
    totalChunks := uint64(1)
    totalL1MessagePopped := firstChunk.TotalL1MessagesPoppedBefore + uint64(firstChunk.TotalL1MessagesPoppedInChunk)

    parentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
    if err != nil {
        return nil, err
    }

    getKeccakGas := func(size uint64) uint64 {
        return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
    }

    // Add extra gas costs
    totalL1CommitGas += 4 * 2100                                       // 4 one-time cold sload for commitBatch
    totalL1CommitGas += 20000                                          // 1 time sstore
    totalL1CommitGas += 16                                             // version in calldata
    totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256) // _skippedL1MessageBitmap in calldata

    // adjusting gas:
    // add 1 time cold sload (2100 gas) for L1MessageQueue
    // add 1 time cold address access (2600 gas) for L1MessageQueue
    // minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
    totalL1CommitGas += (2100 + 2600 - 100 - 100)
    totalL1CommitGas += getKeccakGas(32 * totalChunks) // batch data hash
    if parentBatch != nil {
        totalL1CommitGas += getKeccakGas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
        totalL1CommitGas += 16 * uint64(len(parentBatch.BatchHeader))          // parent batch header in calldata
    }
    // batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
    totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)

    // Check if the first chunk breaks hard limits.
    // If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
    if totalL1CommitGas > p.maxL1CommitGasPerBatch {
    if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
        return nil, fmt.Errorf(
            "the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
            firstChunk.StartBlockNumber,
@@ -124,12 +155,21 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
    }

    for i, chunk := range dbChunks[1:] {
        totalChunks++
        totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
        totalL1CommitGas += chunk.TotalL1CommitGas
        // adjust batch data hash gas cost
        totalL1CommitGas -= getKeccakGas(32 * totalChunks)
        totalChunks++
        totalL1CommitGas += getKeccakGas(32 * totalChunks)
        // adjust batch header hash gas cost
        totalL1CommitGas -= getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
        totalL1CommitGas -= 16 * (32 * (totalL1MessagePopped + 255) / 256)
        totalL1MessagePopped += uint64(chunk.TotalL1MessagesPoppedInChunk)
        totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256)
        totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
        if totalChunks > p.maxChunkNumPerBatch ||
            totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
            totalL1CommitGas > p.maxL1CommitGasPerBatch {
            p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
            return dbChunks[:i+1], nil
        }
    }
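The keccak helper used in the batch gas accounting above charges a 30-gas base plus 6 gas per 32-byte word. A standalone check of the formula; the sizes are chosen only for illustration.

package main

import "fmt"

// getKeccakGas mirrors the helper above: 30 + 6 * ceil(size/32).
func getKeccakGas(size uint64) uint64 {
    return 30 + 6*((size+31)/32)
}

func main() {
    fmt.Println(getKeccakGas(32))  // 36: one 32-byte word
    fmt.Println(getKeccakGas(89))  // 48: minimal batch header, ceil(89/32) = 3 words
    fmt.Println(getKeccakGas(121)) // 54: header plus one 32-byte skipped-message bitmap word
}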

@@ -28,6 +28,7 @@ type ChunkProposer struct {
    maxL1CommitCalldataSizePerChunk uint64
    minL1CommitCalldataSizePerChunk uint64
    chunkTimeoutSec                 uint64
    gasCostIncreaseMultiplier       float64
}

// NewChunkProposer creates a new ChunkProposer instance.
@@ -43,6 +44,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
        maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
        minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
        chunkTimeoutSec:                 cfg.ChunkTimeoutSec,
        gasCostIncreaseMultiplier:       cfg.GasCostIncreaseMultiplier,
    }
}

@@ -88,11 +90,12 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
        return nil, nil
    }

    firstBlock := blocks[0]
    chunk := &types.Chunk{Blocks: blocks[:1]}
    firstBlock := chunk.Blocks[0]
    totalTxGasUsed := firstBlock.Header.GasUsed
    totalL2TxNum := firstBlock.L2TxsNum()
    totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
    totalL1CommitGas := firstBlock.EstimateL1CommitGas()
    totalL1CommitGas := chunk.EstimateL1CommitGas()

    // Check if the first block breaks hard limits.
    // If so, it indicates there are bugs in sequencer, manual fix is needed.
@@ -105,7 +108,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
        )
    }

    if totalL1CommitGas > p.maxL1CommitGasPerChunk {
    if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
        return nil, fmt.Errorf(
            "the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
            firstBlock.Header.Number,
@@ -133,16 +136,17 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
        )
    }

    for i, block := range blocks[1:] {
    for _, block := range blocks[1:] {
        chunk.Blocks = append(chunk.Blocks, block)
        totalTxGasUsed += block.Header.GasUsed
        totalL2TxNum += block.L2TxsNum()
        totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
        totalL1CommitGas += block.EstimateL1CommitGas()
        totalL1CommitGas = chunk.EstimateL1CommitGas()
        if totalTxGasUsed > p.maxTxGasPerChunk ||
            totalL2TxNum > p.maxL2TxNumPerChunk ||
            totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
            totalL1CommitGas > p.maxL1CommitGasPerChunk {
            blocks = blocks[:i+1]
            p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
            chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // remove the last block from chunk
            break
        }
    }
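The rewritten loop above grows the chunk block by block, re-estimates the commit gas over the whole chunk, and drops the last block once a limit trips. A simplified sketch of that append-then-trim pattern; the types and the helper name are illustrative, not taken from this changeset.

// fillUpTo grows a slice until the running cost exceeds the limit, then undoes the last append.
func fillUpTo(items []int, limit int, cost func([]int) int) []int {
    if len(items) == 0 {
        return nil
    }
    out := items[:1]
    for _, it := range items[1:] {
        out = append(out, it)
        if cost(out) > limit {
            out = out[:len(out)-1] // remove the element that broke the limit
            break
        }
    }
    return out
}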
@@ -165,5 +169,5 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
        )
        return nil, nil
    }
    return &types.Chunk{Blocks: blocks}, nil
    return chunk, nil
}

@@ -169,15 +169,15 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u

        log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String())

        withdrawTrieRoot, err3 := w.StorageAt(ctx, w.messageQueueAddress, w.withdrawTrieRootSlot, big.NewInt(int64(number)))
        withdrawRoot, err3 := w.StorageAt(ctx, w.messageQueueAddress, w.withdrawTrieRootSlot, big.NewInt(int64(number)))
        if err3 != nil {
            return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number)
            return fmt.Errorf("failed to get withdrawRoot: %v. number: %v", err3, number)
        }

        blocks = append(blocks, &types.WrappedBlock{
            Header:           block.Header(),
            Transactions:     txsToTxsData(block.Transactions()),
            WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot),
            Header:       block.Header(),
            Transactions: txsToTxsData(block.Transactions()),
            WithdrawRoot: common.BytesToHash(withdrawRoot),
        })
    }

@@ -30,6 +30,7 @@ type Batch struct {
    EndChunkHash    string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
    StateRoot       string `json:"state_root" gorm:"column:state_root"`
    WithdrawRoot    string `json:"withdraw_root" gorm:"column:withdraw_root"`
    ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
    BatchHeader     []byte `json:"batch_header" gorm:"column:batch_header"`

    // proof
@@ -107,7 +108,7 @@ func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) {
}

// GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash.
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.AggProof, error) {
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.BatchProof, error) {
    db := o.db.WithContext(ctx)
    db = db.Model(&Batch{})
    db = db.Select("proof")
@@ -118,7 +119,7 @@ func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*messa
        return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
    }

    var proof message.AggProof
    var proof message.BatchProof
    if err := json.Unmarshal(batch.Proof, &proof); err != nil {
        return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
    }
@@ -133,6 +134,9 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {

    var latestBatch Batch
    if err := db.First(&latestBatch).Error; err != nil {
        if errors.Is(err, gorm.ErrRecordNotFound) {
            return nil, nil
        }
        return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
    }
    return &latestBatch, nil
@@ -211,7 +215,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
    }

    parentBatch, err := o.GetLatestBatch(ctx)
    if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
    if err != nil {
        log.Error("failed to get the latest batch", "err", err)
        return nil, err
    }
@@ -258,7 +262,8 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
        EndChunkHash:      endChunkHash,
        EndChunkIndex:     endChunkIndex,
        StateRoot:         chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
        WithdrawRoot:      chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
        WithdrawRoot:      chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawRoot.Hex(),
        ParentBatchHash:   parentBatchHash.Hex(),
        BatchHeader:       batchHeader.Encode(),
        ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
        ProvingStatus:     int16(types.ProvingTaskUnassigned),
@@ -390,7 +395,7 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st

// UpdateProofByHash updates the batch proof by hash.
// for unit test.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.BatchProof, proofTimeSec uint64) error {
    proofBytes, err := json.Marshal(proof)
    if err != nil {
        return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)

@@ -26,6 +26,10 @@ type Chunk struct {
    StartBlockTime               uint64 `json:"start_block_time" gorm:"column:start_block_time"`
    TotalL1MessagesPoppedBefore  uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
    TotalL1MessagesPoppedInChunk uint32 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
    ParentChunkHash              string `json:"parent_chunk_hash" gorm:"column:parent_chunk_hash"`
    StateRoot                    string `json:"state_root" gorm:"column:state_root"`
    ParentChunkStateRoot         string `json:"parent_chunk_state_root" gorm:"column:parent_chunk_state_root"`
    WithdrawRoot                 string `json:"withdraw_root" gorm:"column:withdraw_root"`

    // proof
    ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
@@ -118,6 +122,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go

    var chunkIndex uint64
    var totalL1MessagePoppedBefore uint64
    var parentChunkHash string
    var parentChunkStateRoot string
    parentChunk, err := o.GetLatestChunk(ctx)
    if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
        log.Error("failed to get latest chunk", "err", err)
@@ -130,6 +136,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
    if parentChunk != nil {
        chunkIndex = parentChunk.Index + 1
        totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + uint64(parentChunk.TotalL1MessagesPoppedInChunk)
        parentChunkHash = parentChunk.Hash
        parentChunkStateRoot = parentChunk.StateRoot
    }

    hash, err := chunk.Hash(totalL1MessagePoppedBefore)
@@ -141,12 +149,10 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
    var totalL2TxGas uint64
    var totalL2TxNum uint64
    var totalL1CommitCalldataSize uint64
    var totalL1CommitGas uint64
    for _, block := range chunk.Blocks {
        totalL2TxGas += block.Header.GasUsed
        totalL2TxNum += block.L2TxsNum()
        totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
        totalL1CommitGas += block.EstimateL1CommitGas()
    }

    numBlocks := len(chunk.Blocks)
@@ -160,10 +166,14 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
        TotalL2TxGas:                 totalL2TxGas,
        TotalL2TxNum:                 uint32(totalL2TxNum),
        TotalL1CommitCalldataSize:    uint32(totalL1CommitCalldataSize),
        TotalL1CommitGas:             totalL1CommitGas,
        TotalL1CommitGas:             chunk.EstimateL1CommitGas(),
        StartBlockTime:               chunk.Blocks[0].Header.Time,
        TotalL1MessagesPoppedBefore:  totalL1MessagePoppedBefore,
        TotalL1MessagesPoppedInChunk: uint32(chunk.NumL1Messages(totalL1MessagePoppedBefore)),
        ParentChunkHash:              parentChunkHash,
        StateRoot:                    chunk.Blocks[numBlocks-1].Header.Root.Hex(),
        ParentChunkStateRoot:         parentChunkStateRoot,
        WithdrawRoot:                 chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
        ProvingStatus:                int16(types.ProvingTaskUnassigned),
    }

@@ -19,15 +19,16 @@ type L2Block struct {
    db *gorm.DB `gorm:"column:-"`

    // block
    Number           uint64 `json:"number" gorm:"number"`
    Hash             string `json:"hash" gorm:"hash"`
    ParentHash       string `json:"parent_hash" gorm:"parent_hash"`
    Header           string `json:"header" gorm:"header"`
    Transactions     string `json:"transactions" gorm:"transactions"`
    WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"`
    TxNum            uint32 `json:"tx_num" gorm:"tx_num"`
    GasUsed          uint64 `json:"gas_used" gorm:"gas_used"`
    BlockTimestamp   uint64 `json:"block_timestamp" gorm:"block_timestamp"`
    Number         uint64 `json:"number" gorm:"number"`
    Hash           string `json:"hash" gorm:"hash"`
    ParentHash     string `json:"parent_hash" gorm:"parent_hash"`
    Header         string `json:"header" gorm:"header"`
    Transactions   string `json:"transactions" gorm:"transactions"`
    WithdrawRoot   string `json:"withdraw_root" gorm:"withdraw_root"`
    StateRoot      string `json:"state_root" gorm:"state_root"`
    TxNum          uint32 `json:"tx_num" gorm:"tx_num"`
    GasUsed        uint64 `json:"gas_used" gorm:"gas_used"`
    BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`

    // chunk
    ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
@@ -67,7 +68,7 @@ func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) {
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
    db := o.db.WithContext(ctx)
    db = db.Model(&L2Block{})
    db = db.Select("header, transactions, withdraw_trie_root")
    db = db.Select("header, transactions, withdraw_root")
    db = db.Where("chunk_hash IS NULL")
    db = db.Order("number ASC")

@@ -89,7 +90,7 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
            return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
        }

        wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
        wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
        wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
    }

@@ -133,7 +134,7 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6

    db := o.db.WithContext(ctx)
    db = db.Model(&L2Block{})
    db = db.Select("header, transactions, withdraw_trie_root")
    db = db.Select("header, transactions, withdraw_root")
    db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
    db = db.Order("number ASC")

@@ -160,7 +161,7 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6
            return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
        }

        wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
        wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
        wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
    }

@@ -184,15 +185,16 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
        }

        l2Block := L2Block{
            Number:           block.Header.Number.Uint64(),
            Hash:             block.Header.Hash().String(),
            ParentHash:       block.Header.ParentHash.String(),
            Transactions:     string(txs),
            WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(),
            TxNum:            uint32(len(block.Transactions)),
            GasUsed:          block.Header.GasUsed,
            BlockTimestamp:   block.Header.Time,
            Header:           string(header),
            Number:         block.Header.Number.Uint64(),
            Hash:           block.Header.Hash().String(),
            ParentHash:     block.Header.ParentHash.String(),
            Transactions:   string(txs),
            WithdrawRoot:   block.WithdrawRoot.Hex(),
            StateRoot:      block.Header.Root.Hex(),
            TxNum:          uint32(len(block.Transactions)),
            GasUsed:        block.Header.GasUsed,
            BlockTimestamp: block.Header.Time,
            Header:         string(header),
        }
        l2Blocks = append(l2Blocks, l2Block)
    }

@@ -60,8 +60,8 @@ contract MockBridgeL1 {

  /// @notice Emitted when a batch is finalized.
  /// @param batchHash The hash of the batch
  /// @param stateRoot The state root in layer 2 after this batch.
  /// @param withdrawRoot The merkle root in layer2 after this batch.
  /// @param stateRoot The state root on layer 2 after this batch.
  /// @param withdrawRoot The merkle root on layer2 after this batch.
  event FinalizeBatch(bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);

  /***********

@@ -80,8 +80,8 @@ func testImportL2GasPrice(t *testing.T) {
                Difficulty: big.NewInt(0),
                BaseFee:    big.NewInt(0),
            },
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
            Transactions: nil,
            WithdrawRoot: common.Hash{},
        },
    },
}

@@ -45,9 +45,9 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
            BaseFee:    big.NewInt(0),
        }
        wrappedBlocks = append(wrappedBlocks, &types.WrappedBlock{
            Header:           &header,
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
            Header:       &header,
            Transactions: nil,
            WithdrawRoot: common.Hash{},
        })
    }

@@ -104,7 +104,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
    assert.Equal(t, types.RollupCommitted, statuses[0])

    // add dummy proof
    proof := &message.AggProof{
    proof := &message.BatchProof{
        Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
        FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
    }

@@ -24,7 +24,8 @@ COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -33,17 +34,17 @@ RUN go mod download -x
# Build coordinator
FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/verifier/lib/
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/verifier/lib/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv verifier/lib /bin/
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv internal/logic/verifier/lib /bin/

# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/verifier/lib
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
# ENV CHAIN_ID=534353
RUN mkdir -p /src/coordinator/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/verifier/lib
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/coordinator /bin/

@@ -7,7 +7,8 @@ COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

@@ -7,7 +7,8 @@ COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

@@ -7,7 +7,8 @@ COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

@@ -7,7 +7,8 @@ COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

@@ -7,7 +7,8 @@ COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

@@ -3,7 +3,7 @@ set -uex

profile_name=$1

exclude_dirs=("scroll-tech/bridge/cmd" "scroll-tech/bridge/tests" "scroll-tech/bridge/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/verifier")
exclude_dirs=("scroll-tech/bridge/cmd" "scroll-tech/bridge/tests" "scroll-tech/bridge/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/internal/logic/verifier")

all_packages=$(go list ./... | grep -v "^scroll-tech/${profile_name}$")
coverpkg="scroll-tech/${profile_name}"

@@ -31,7 +31,7 @@ flag_management:
        - type: project
          target: auto
          threshold: 1%
    - name: roller
    - name: prover
      statuses:
        - type: project
          target: auto

@@ -10,6 +10,8 @@ import (
    "gorm.io/gorm"
    "gorm.io/gorm/logger"
    "gorm.io/gorm/utils"

    cutils "scroll-tech/common/utils"
)

type gormLogger struct {
@@ -50,8 +52,12 @@ func InitDB(config *Config) (*gorm.DB, error) {
    db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
        Logger: &tmpGormLogger,
        NowFunc: func() time.Time {
            utc, _ := time.LoadLocation("")
            return time.Now().In(utc)
            // Why pin NowFunc to UTC: without this, inserted timestamps are generated in the local
            // timezone (e.g. 2023-07-18 18:24:00 CST+8) but stored in Postgres as 2023-07-18 18:24:00 UTC+0,
            // so the stored timezone is wrong. A MySQL DSN like user:pass@tcp(127.0.0.1:3306)/dbname?charset=utf8mb4&parseTime=True&loc=Local
            // can set the timezone via loc=Local, but the Postgres DSN has no such option, so we set it through this gorm option instead.
            return cutils.NowUTC()
        },
    })
    if err != nil {
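The hunk above pins gorm's NowFunc to UTC because the Postgres DSN has no loc parameter, so timestamps must be normalized before insert. A minimal sketch of the same idea outside this repository; the function name is an assumption, and the imports needed are time, gorm.io/gorm, and gorm.io/driver/postgres.

// newUTCDB opens a gorm connection whose auto-generated timestamps are produced in UTC.
func newUTCDB(dsn string) (*gorm.DB, error) {
    return gorm.Open(postgres.Open(dsn), &gorm.Config{
        NowFunc: func() time.Time { return time.Now().UTC() },
    })
}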
|
||||
|
||||
@@ -10,15 +10,12 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
)

const nonZeroByteGas uint64 = 16
const zeroByteGas uint64 = 4

// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used for recover types.Transactions, the from of types.TransactionData field is missing.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
Transactions []*types.TransactionData `json:"transactions"`
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}

// NumL1Messages returns the number of L1 messages in this block.

@@ -78,16 +75,20 @@ func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
return size
}

// EstimateL1CommitGas calculates the calldata gas in l1 commit approximately.
// TODO: This will need to be adjusted.
// The part added here is only the calldata cost,
// but we have execution cost for verifying blocks / chunks / batches and storing the batch hash.
// EstimateL1CommitGas calculates the total L1 commit gas for this block approximately.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}

var total uint64
var numL1Messages uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
numL1Messages++
continue
}

data, _ := hexutil.Decode(txData.Data)
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,

@@ -101,26 +102,19 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()

for _, b := range rlpTxData {
if b == 0 {
total += zeroByteGas
} else {
total += nonZeroByteGas
}
}

var txLen [4]byte
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))

for _, b := range txLen {
if b == 0 {
total += zeroByteGas
} else {
total += nonZeroByteGas
}
}
txPayloadLength := uint64(len(rlpTxData))
total += 16 * txPayloadLength // an over-estimate: treat each byte as non-zero
total += 16 * 4 // size of a uint32 field
total += getKeccakGas(txPayloadLength) // l2 tx hash
}

// sload
total += 2100 * numL1Messages // numL1Messages times cold sload in L1MessageQueue

// staticcall
total += 100 * numL1Messages // numL1Messages times call to L1MessageQueue
total += 100 * numL1Messages // numL1Messages times warm address access to L1MessageQueue

return total
}
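As a quick sanity check on the new per-transaction accounting above, here is a worked example with a hypothetical 200-byte RLP-encoded L2 transaction and one L1 message in the block (the numbers are illustrative only):

```go
package main

import "fmt"

// keccakGas mirrors the getKeccakGas helper above: 30 + 6 * ceil(size/32).
func keccakGas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

func main() {
	rlpLen := uint64(200) // hypothetical RLP-encoded L2 tx size
	var total uint64
	total += 16 * rlpLen       // calldata, every byte charged as non-zero (over-estimate): 3200
	total += 16 * 4            // uint32 length prefix of the tx payload: 64
	total += keccakGas(rlpLen) // hashing the L2 tx: 30 + 6*7 = 72

	numL1Messages := uint64(1)
	total += 2100 * numL1Messages // cold SLOAD in L1MessageQueue
	total += 100 * numL1Messages  // STATICCALL to L1MessageQueue
	total += 100 * numL1Messages  // warm address access to L1MessageQueue

	fmt.Println(total) // 3336 + 2300 = 5636
}
```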
@@ -135,3 +135,25 @@ func (c *Chunk) Hash(totalL1MessagePoppedBefore uint64) (common.Hash, error) {
hash := crypto.Keccak256Hash(dataBytes)
return hash, nil
}

// EstimateL1CommitGas calculates the total L1 commit gas for this chunk approximately
func (c *Chunk) EstimateL1CommitGas() uint64 {
var totalTxNum uint64
var totalL1CommitGas uint64
for _, block := range c.Blocks {
totalTxNum += uint64(len(block.Transactions))
totalL1CommitGas += block.EstimateL1CommitGas()
}

numBlocks := uint64(len(c.Blocks))
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += 16 // numBlocks field of chunk encoding in calldata
totalL1CommitGas += 16 * 60 * numBlocks // BlockContext in chunk

getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}

totalL1CommitGas += getKeccakGas(58*numBlocks + 32*totalTxNum) // chunk hash
return totalL1CommitGas
}
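A worked example of the chunk-level terms just added, with a hypothetical chunk of 2 blocks and 10 transactions in total; the per-block EstimateL1CommitGas contributions are left out here:

```go
package main

import "fmt"

func keccakGas(size uint64) uint64 { // 30 + 6 * ceil(size/32), as in the hunk above
	return 30 + 6*((size+31)/32)
}

func main() {
	numBlocks, totalTxNum := uint64(2), uint64(10) // hypothetical chunk shape
	var gas uint64
	gas += 100 * numBlocks                         // warm SLOAD per block: 200
	gas += 16                                      // numBlocks field of the chunk encoding
	gas += 16 * 60 * numBlocks                     // 60-byte BlockContext per block as calldata: 1920
	gas += keccakGas(58*numBlocks + 32*totalTxNum) // chunk hash over 436 bytes: 30 + 6*14 = 114
	fmt.Println(gas)                               // 2250, before adding each block's own estimate
}
```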
@@ -68,28 +68,28 @@ const (
MsgRelayFailed
)

// RollerProveStatus is the roller prove status of a block batch (session)
type RollerProveStatus int32
// ProverProveStatus is the prover prove status of a block batch (session)
type ProverProveStatus int32

const (
// RollerProveStatusUndefined indicates an unknown roller proving status
RollerProveStatusUndefined RollerProveStatus = iota
// RollerAssigned indicates roller assigned but has not submitted proof
RollerAssigned
// RollerProofValid indicates roller has submitted valid proof
RollerProofValid
// RollerProofInvalid indicates roller has submitted invalid proof
RollerProofInvalid
// ProverProveStatusUndefined indicates an unknown prover proving status
ProverProveStatusUndefined ProverProveStatus = iota
// ProverAssigned indicates prover assigned but has not submitted proof
ProverAssigned
// ProverProofValid indicates prover has submitted valid proof
ProverProofValid
// ProverProofInvalid indicates prover has submitted invalid proof
ProverProofInvalid
)

func (s RollerProveStatus) String() string {
func (s ProverProveStatus) String() string {
switch s {
case RollerAssigned:
return "RollerAssigned"
case RollerProofValid:
return "RollerProofValid"
case RollerProofInvalid:
return "RollerProofInvalid"
case ProverAssigned:
return "ProverAssigned"
case ProverProofValid:
return "ProverProofValid"
case ProverProofInvalid:
return "ProverProofInvalid"
default:
return fmt.Sprintf("Bad Value: %d", int32(s))
}

@@ -99,7 +99,7 @@ func (s RollerProveStatus) String() string {
type ProverTaskFailureType int

const (
// ProverTaskFailureTypeUndefined indicates an unknown roller failure type
// ProverTaskFailureTypeUndefined indicates an unknown prover failure type
ProverTaskFailureTypeUndefined ProverTaskFailureType = iota
// ProverTaskFailureTypeTimeout prover task failure of timeout
ProverTaskFailureTypeTimeout
@@ -6,30 +6,30 @@ import (
"github.com/stretchr/testify/assert"
)

func TestRollerProveStatus(t *testing.T) {
func TestProverProveStatus(t *testing.T) {
tests := []struct {
name string
s RollerProveStatus
s ProverProveStatus
want string
}{
{
"RollerAssigned",
RollerAssigned,
"RollerAssigned",
"ProverAssigned",
ProverAssigned,
"ProverAssigned",
},
{
"RollerProofValid",
RollerProofValid,
"RollerProofValid",
"ProverProofValid",
ProverProofValid,
"ProverProofValid",
},
{
"RollerProofInvalid",
RollerProofInvalid,
"RollerProofInvalid",
"ProverProofInvalid",
ProverProofInvalid,
"ProverProofInvalid",
},
{
"Bad Value",
RollerProveStatus(999), // Invalid value.
ProverProveStatus(999), // Invalid value.
"Bad Value: 999",
},
}
@@ -13,7 +13,7 @@ import (
"github.com/scroll-tech/go-ethereum/rlp"
)

// RespStatus represents status code from roller to scroll
// RespStatus represents status code from prover to scroll
type RespStatus uint32

const (

@@ -23,7 +23,7 @@ const (
StatusProofError
)

// ProofType represents the type of roller.
// ProofType represents the type of prover.
type ProofType uint8

func (r ProofType) String() string {

@@ -40,28 +40,28 @@ func (r ProofType) String() string {
const (
// ProofTypeUndefined is an unknown proof type
ProofTypeUndefined ProofType = iota
// ProofTypeChunk is default roller, it only generates zk proof from traces.
// ProofTypeChunk is default prover, it only generates zk proof from traces.
ProofTypeChunk
// ProofTypeBatch generates zk proof from other zk proofs and aggregate them into one proof.
ProofTypeBatch
)

// AuthMsg is the first message exchanged from the Roller to the Sequencer.
// It effectively acts as a registration, and makes the Roller identification
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
// Message fields
Identity *Identity `json:"message"`
// Roller signature
// Prover signature
Signature string `json:"signature"`
}

// Identity contains all the fields to be signed by the roller.
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// Roller name
// Prover name
Name string `json:"name"`
// Roller RollerType
RollerType ProofType `json:"roller_type,omitempty"`
// Prover ProverType
ProverType ProofType `json:"prover_type,omitempty"`
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version.
// curl -sL https://api.github.com/repos/scroll-tech/scroll-prover/commits | jq -r ".[0].sha"
Version string `json:"version"`
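A minimal sketch of what a prover registration message looks like after this rename, using only fields visible in this diff and in the tests further down; every value is a placeholder, and the `token` JSON tag is an assumption:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type ProofType uint8

const (
	ProofTypeUndefined ProofType = iota
	ProofTypeChunk
	ProofTypeBatch
)

type Identity struct {
	Name       string    `json:"name"`
	ProverType ProofType `json:"prover_type,omitempty"`
	Version    string    `json:"version"`
	Token      string    `json:"token"` // tag assumed for illustration
}

type AuthMsg struct {
	Identity  *Identity `json:"message"`
	Signature string    `json:"signature"`
}

func main() {
	msg := AuthMsg{
		Identity: &Identity{
			Name:       "example-prover",                // placeholder
			ProverType: ProofTypeChunk,                  // was Identity.RollerType
			Version:    "v4.0.34-<commit>-<zk_version>", // placeholder
			Token:      "<token>",                       // placeholder
		},
		Signature: "<hex signature>", // normally derived by signing Identity.Hash()
	}
	out, _ := json.MarshalIndent(msg, "", "  ")
	fmt.Println(string(out)) // note "prover_type" replacing the old "roller_type" key
}
```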
@@ -140,10 +140,10 @@ func (i *Identity) Hash() ([]byte, error) {
// ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct {
*ProofDetail `json:"zkProof"`
// Roller signature
// Prover signature
Signature string `json:"signature"`

// Roller public key
// Prover public key
publicKey string
}

@@ -204,20 +204,21 @@ func (a *ProofMsg) PublicKey() (string, error) {
type TaskMsg struct {
ID string `json:"id"`
Type ProofType `json:"type,omitempty"`
// For decentralization, basic rollers will get block hashes from the coordinator. So that they can refer to the block hashes and fetch traces locally. Only applicable for basic rollers.
// For decentralization, basic provers will get block hashes from the coordinator. So that they can refer to the block hashes and fetch traces locally. Only applicable for basic provers.
BlockHashes []common.Hash `json:"block_hashes,omitempty"`
// Only applicable for aggregator rollers.
SubProofs []*AggProof `json:"sub_proofs,omitempty"`
// Only applicable for aggregator provers.
SubProofs []*ChunkProof `json:"sub_proofs,omitempty"`
}

// ProofDetail is the message received from rollers that contains zk proof, the status of
// ProofDetail is the message received from provers that contains zk proof, the status of
// the proof generation succeeded, and an error message if proof generation failed.
type ProofDetail struct {
ID string `json:"id"`
Type ProofType `json:"type,omitempty"`
Status RespStatus `json:"status"`
Proof *AggProof `json:"proof"`
Error string `json:"error,omitempty"`
ID string `json:"id"`
Type ProofType `json:"type,omitempty"`
Status RespStatus `json:"status"`
ChunkProof *ChunkProof `json:"chunk_proof,omitempty"`
BatchProof *BatchProof `json:"batch_proof,omitempty"`
Error string `json:"error,omitempty"`
}

// Hash return proofMsg content hash.

@@ -231,8 +232,19 @@ func (z *ProofDetail) Hash() ([]byte, error) {
return hash[:], nil
}

// AggProof includes the proof and public input that are required to verification and rollup.
type AggProof struct {
// ChunkProof includes the proof info that are required for chunk verification and rollup.
type ChunkProof struct {
StorageTrace []byte `json:"storage_trace"`
Protocol []byte `json:"protocol"`
Proof []byte `json:"proof"`
Instance []byte `json:"instance"`
FinalPair []byte `json:"final_pair"`
Vk []byte `json:"vk"`
BlockCount uint `json:"block_count"`
}

// BatchProof includes the proof info that are required for batch verification and rollup.
type BatchProof struct {
Proof []byte `json:"proof"`
Instance []byte `json:"instance"`
FinalPair []byte `json:"final_pair"`

@@ -240,9 +252,9 @@ type AggProof struct {
BlockCount uint `json:"block_count"`
}

// SanityCheck checks whether an AggProof is in a legal format
// SanityCheck checks whether an BatchProof is in a legal format
// TODO: change to check Proof&Instance when upgrading to snark verifier v0.4
func (ap *AggProof) SanityCheck() error {
func (ap *BatchProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
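To make the new split concrete, here is a short sketch of how a chunk proof now travels inside ProofDetail, using trimmed copies of the types above (field values are placeholders, and Status is shown as a bare uint32 for brevity):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of the renamed ChunkProof type above.
type ChunkProof struct {
	StorageTrace []byte `json:"storage_trace"`
	Protocol     []byte `json:"protocol"`
	Proof        []byte `json:"proof"`
	Instance     []byte `json:"instance"`
	FinalPair    []byte `json:"final_pair"`
	Vk           []byte `json:"vk"`
	BlockCount   uint   `json:"block_count"`
}

// Trimmed copy of ProofDetail: the proof now nests under chunk_proof / batch_proof.
type ProofDetail struct {
	ID         string      `json:"id"`
	Status     uint32      `json:"status"` // RespStatus in the real type
	ChunkProof *ChunkProof `json:"chunk_proof,omitempty"`
	Error      string      `json:"error,omitempty"`
}

func main() {
	detail := ProofDetail{
		ID:     "testID",
		Status: 1, // placeholder standing in for StatusOk
		ChunkProof: &ChunkProof{
			Proof:     []byte("testProof"),
			Instance:  []byte("testInstance"),
			FinalPair: []byte("testFinalPair"),
		},
	}
	out, _ := json.Marshal(detail)
	fmt.Println(string(out)) // the proof payload appears under "chunk_proof" instead of the old "proof"
}
```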
@@ -47,7 +47,7 @@ func TestGenerateToken(t *testing.T) {
func TestIdentityHash(t *testing.T) {
identity := &Identity{
Name: "testName",
RollerType: ProofTypeChunk,
ProverType: ProofTypeChunk,
Version: "testVersion",
Token: "testToken",
}

@@ -67,7 +67,7 @@ func TestProofMessageSignVerifyPublicKey(t *testing.T) {
ID: "testID",
Type: ProofTypeChunk,
Status: StatusOk,
Proof: &AggProof{
ChunkProof: &ChunkProof{
Proof: []byte("testProof"),
Instance: []byte("testInstance"),
FinalPair: []byte("testFinalPair"),

@@ -95,7 +95,7 @@ func TestProofDetailHash(t *testing.T) {
ID: "testID",
Type: ProofTypeChunk,
Status: StatusOk,
Proof: &AggProof{
ChunkProof: &ChunkProof{
Proof: []byte("testProof"),
Instance: []byte("testInstance"),
FinalPair: []byte("testFinalPair"),

@@ -106,7 +106,7 @@ func TestProofDetailHash(t *testing.T) {
}
hash, err := proofDetail.Hash()
assert.NoError(t, err)
expectedHash := "8ad894c2047166a98b1a389b716b06b01dc1bd29e950e2687ffbcb3c328edda5"
expectedHash := "f7c02bb8b9ff58a5ba61bb76a8b1fb76a023df9da2982d073d20e67f5e72df47"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}

@@ -130,7 +130,7 @@ func TestProofMsgPublicKey(t *testing.T) {
ID: "testID",
Type: ProofTypeChunk,
Status: StatusOk,
Proof: &AggProof{
ChunkProof: &ChunkProof{
Proof: []byte("testProof"),
Instance: []byte("testInstance"),
FinalPair: []byte("testFinalPair"),

@@ -24,8 +24,8 @@ var (
CoordinatorApp MockAppName = "coordinator-test"
// DBCliApp the name of mock database app.
DBCliApp MockAppName = "db_cli-test"
// RollerApp the name of mock roller app.
RollerApp MockAppName = "roller-test"
// ProverApp the name of mock prover app.
ProverApp MockAppName = "prover-test"
)

// RegisterSimulation register initializer function for integration-test.
9
common/utils/timezone.go
Normal file

@@ -0,0 +1,9 @@
package utils

import "time"

// NowUTC get the utc time.Now
func NowUTC() time.Time {
utc, _ := time.LoadLocation("")
return time.Now().In(utc)
}
@@ -5,7 +5,7 @@ import (
"runtime/debug"
)

var tag = "v4.0.25"
var tag = "v4.0.34"

var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

@@ -25,5 +25,5 @@ var commit = func() string {
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover
var ZkVersion string

// Version denote the version of scroll protocol, including the l2geth, relayer, coordinator, roller, contracts and etc.
// Version denote the version of scroll protocol, including the l2geth, relayer, coordinator, prover, contracts and etc.
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)
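For illustration, the composed version string under the new tag has the shape below; the commit and ZkVersion parts are build-time values and are shown here only as placeholders:

```go
package main

import "fmt"

func main() {
	tag := "v4.0.34"
	commit := "0000000"         // placeholder: vcs revision picked up via debug.ReadBuildInfo
	zkVersion := "<zk_version>" // placeholder: scroll-prover commit id from libzkp's cargo.lock
	fmt.Printf("%s-%s-%s\n", tag, commit, zkVersion) // e.g. v4.0.34-0000000-<zk_version>
}
```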
@@ -24,7 +24,7 @@ The execution in layer 2 may be failed due to out of gas problem. In such case,

### Send Message from L2 to L1

Similar to sending message from L1 to L2, you should call `L2ScrollMessenger.sendMessage` first in layer 2. The `L2ScrollMessenger` contract will emit a `SentMessage` event, which will be notified by the Sequencer. Unlike above, the Sequencer will first batch submit layer 2 transactions (or block) to `ZKRollup` contract in layer 1. Then the Sequencer will wait the proof generated by roller and submit the proof to `ZKRollup` contract in layer 1 again. Finally, anyone can call `L1ScrollMessenger.relayMessageWithProof` with correct proof to execute the message in layer 1.
Similar to sending message from L1 to L2, you should call `L2ScrollMessenger.sendMessage` first in layer 2. The `L2ScrollMessenger` contract will emit a `SentMessage` event, which will be notified by the Sequencer. Unlike above, the Sequencer will first batch submit layer 2 transactions (or block) to `ZKRollup` contract in layer 1. Then the Sequencer will wait the proof generated by prover and submit the proof to `ZKRollup` contract in layer 1 again. Finally, anyone can call `L1ScrollMessenger.relayMessageWithProof` with correct proof to execute the message in layer 1.

Currently, for the safety reason, we only allow privileged contracts to send cross domain messages. And only privileged accounts can call `L2ScrollMessenger.relayMessage`.
@@ -4,7 +4,7 @@
|
||||
|
||||
> L1ERC1155Gateway
|
||||
|
||||
The `L1ERC1155Gateway` is used to deposit ERC1155 compatible NFT in layer 1 and finalize withdraw the NFTs from layer 2.
|
||||
The `L1ERC1155Gateway` is used to deposit ERC1155 compatible NFT on layer 1 and finalize withdraw the NFTs from layer 2.
|
||||
|
||||
*The deposited NFTs are held in this gateway. On finalizing withdraw, the corresponding NFT will be transfer to the recipient directly. This will be changed if we have more specific scenarios.*
|
||||
|
||||
@@ -24,7 +24,7 @@ Deposit a list of some ERC1155 NFT to caller's account on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _token | address | The address of ERC1155 NFT in layer 1. |
|
||||
| _token | address | The address of ERC1155 NFT on layer 1. |
|
||||
| _tokenIds | uint256[] | The list of token ids to deposit. |
|
||||
| _amounts | uint256[] | The list of corresponding number of token to deposit. |
|
||||
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
|
||||
@@ -43,8 +43,8 @@ Deposit a list of some ERC1155 NFT to a recipient's account on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _token | address | The address of ERC1155 NFT in layer 1. |
|
||||
| _to | address | The address of recipient in layer 2. |
|
||||
| _token | address | The address of ERC1155 NFT on layer 1. |
|
||||
| _to | address | The address of recipient on layer 2. |
|
||||
| _tokenIds | uint256[] | The list of token ids to deposit. |
|
||||
| _amounts | uint256[] | The list of corresponding number of token to deposit. |
|
||||
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
|
||||
@@ -80,8 +80,8 @@ Deposit some ERC1155 NFT to a recipient's account on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _token | address | The address of ERC1155 NFT in layer 1. |
|
||||
| _to | address | The address of recipient in layer 2. |
|
||||
| _token | address | The address of ERC1155 NFT on layer 1. |
|
||||
| _to | address | The address of recipient on layer 2. |
|
||||
| _tokenId | uint256 | The token id to deposit. |
|
||||
| _amount | uint256 | The amount of token to deposit. |
|
||||
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
|
||||
@@ -100,7 +100,7 @@ Deposit some ERC1155 NFT to caller's account on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _token | address | The address of ERC1155 NFT in layer 1. |
|
||||
| _token | address | The address of ERC1155 NFT on layer 1. |
|
||||
| _tokenId | uint256 | The token id to deposit. |
|
||||
| _amount | uint256 | The amount of token to deposit. |
|
||||
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
|
||||
@@ -111,7 +111,7 @@ Deposit some ERC1155 NFT to caller's account on layer 2.
|
||||
function finalizeBatchWithdrawERC1155(address _l1Token, address _l2Token, address _from, address _to, uint256[] _tokenIds, uint256[] _amounts) external nonpayable
|
||||
```
|
||||
|
||||
Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipient's account in layer 1. The function should only be called by L1ScrollMessenger. The function should also only be called by L2ERC1155Gateway in layer 2.
|
||||
Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipient's account on layer 1. The function should only be called by L1ScrollMessenger. The function should also only be called by L2ERC1155Gateway on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -121,8 +121,8 @@ Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipie
|
||||
|---|---|---|
|
||||
| _l1Token | address | The address of corresponding layer 1 token. |
|
||||
| _l2Token | address | The address of corresponding layer 2 token. |
|
||||
| _from | address | The address of account who withdraw the token in layer 2. |
|
||||
| _to | address | The address of recipient in layer 1 to receive the token. |
|
||||
| _from | address | The address of account who withdraw the token on layer 2. |
|
||||
| _to | address | The address of recipient on layer 1 to receive the token. |
|
||||
| _tokenIds | uint256[] | The list of token ids to withdraw. |
|
||||
| _amounts | uint256[] | The list of corresponding number of token to withdraw. |
|
||||
|
||||
@@ -132,7 +132,7 @@ Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipie
|
||||
function finalizeWithdrawERC1155(address _l1Token, address _l2Token, address _from, address _to, uint256 _tokenId, uint256 _amount) external nonpayable
|
||||
```
|
||||
|
||||
Complete ERC1155 withdraw from layer 2 to layer 1 and send fund to recipient's account in layer 1. The function should only be called by L1ScrollMessenger. The function should also only be called by L2ERC1155Gateway in layer 2.
|
||||
Complete ERC1155 withdraw from layer 2 to layer 1 and send fund to recipient's account on layer 1. The function should only be called by L1ScrollMessenger. The function should also only be called by L2ERC1155Gateway on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -142,8 +142,8 @@ Complete ERC1155 withdraw from layer 2 to layer 1 and send fund to recipient'
|
||||
|---|---|---|
|
||||
| _l1Token | address | The address of corresponding layer 1 token. |
|
||||
| _l2Token | address | The address of corresponding layer 2 token. |
|
||||
| _from | address | The address of account who withdraw the token in layer 2. |
|
||||
| _to | address | The address of recipient in layer 1 to receive the token. |
|
||||
| _from | address | The address of account who withdraw the token on layer 2. |
|
||||
| _to | address | The address of recipient on layer 1 to receive the token. |
|
||||
| _tokenId | uint256 | The token id to withdraw. |
|
||||
| _amount | uint256 | The amount of token to withdraw. |
|
||||
|
||||
@@ -368,8 +368,8 @@ Update layer 2 to layer 2 token mapping.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token | address | The address of corresponding ERC1155 token in layer 2. |
|
||||
| _l2Token | address | undefined |
|
||||
| _l1Token | address | The address of ERC1155 token on layer 1. |
|
||||
| _l2Token | address | The address of corresponding ERC1155 token on layer 2. |
|
||||
|
||||
|
||||
|
||||
@@ -381,7 +381,7 @@ Update layer 2 to layer 2 token mapping.
|
||||
event BatchDepositERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds, uint256[] _amounts)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is batch deposited to gateway in layer 1.
|
||||
Emitted when the ERC1155 NFT is batch deposited to gateway on layer 1.
|
||||
|
||||
|
||||
|
||||
@@ -421,7 +421,7 @@ Emitted when some ERC1155 token is refunded.
|
||||
event DepositERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId, uint256 _amount)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is deposited to gateway in layer 1.
|
||||
Emitted when the ERC1155 NFT is deposited to gateway on layer 1.
|
||||
|
||||
|
||||
|
||||
@@ -442,7 +442,7 @@ Emitted when the ERC1155 NFT is deposited to gateway in layer 1.
|
||||
event FinalizeBatchWithdrawERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds, uint256[] _amounts)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is batch transfered to recipient in layer 1.
|
||||
Emitted when the ERC1155 NFT is batch transfered to recipient on layer 1.
|
||||
|
||||
|
||||
|
||||
@@ -463,7 +463,7 @@ Emitted when the ERC1155 NFT is batch transfered to recipient in layer 1.
|
||||
event FinalizeWithdrawERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId, uint256 _amount)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is transfered to recipient in layer 1.
|
||||
Emitted when the ERC1155 NFT is transfered to recipient on layer 1.
|
||||
|
||||
|
||||
|
||||
@@ -544,8 +544,8 @@ Emitted when token mapping for ERC1155 token is updated.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token | address | The address of corresponding ERC1155 token in layer 2. |
|
||||
| _l2Token | address | undefined |
|
||||
| _l1Token | address | The address of ERC1155 token on layer 1. |
|
||||
| _l2Token | address | The address of corresponding ERC1155 token on layer 2. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
> L1ERC721Gateway
|
||||
|
||||
The `L1ERC721Gateway` is used to deposit ERC721 compatible NFT in layer 1 and finalize withdraw the NFTs from layer 2.
|
||||
The `L1ERC721Gateway` is used to deposit ERC721 compatible NFT on layer 1 and finalize withdraw the NFTs from layer 2.
|
||||
|
||||
*The deposited NFTs are held in this gateway. On finalizing withdraw, the corresponding NFT will be transfer to the recipient directly. This will be changed if we have more specific scenarios.*
|
||||
|
||||
@@ -24,8 +24,8 @@ Deposit a list of some ERC721 NFT to a recipient's account on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _token | address | The address of ERC721 NFT in layer 1. |
|
||||
| _to | address | The address of recipient in layer 2. |
|
||||
| _token | address | The address of ERC721 NFT on layer 1. |
|
||||
| _to | address | The address of recipient on layer 2. |
|
||||
| _tokenIds | uint256[] | The list of token ids to deposit. |
|
||||
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
|
||||
|
||||
@@ -43,7 +43,7 @@ Deposit a list of some ERC721 NFT to caller's account on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _token | address | The address of ERC721 NFT in layer 1. |
|
||||
| _token | address | The address of ERC721 NFT on layer 1. |
|
||||
| _tokenIds | uint256[] | The list of token ids to deposit. |
|
||||
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
|
||||
|
||||
@@ -78,8 +78,8 @@ Deposit some ERC721 NFT to a recipient's account on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _token | address | The address of ERC721 NFT in layer 1. |
|
||||
| _to | address | The address of recipient in layer 2. |
|
||||
| _token | address | The address of ERC721 NFT on layer 1. |
|
||||
| _to | address | The address of recipient on layer 2. |
|
||||
| _tokenId | uint256 | The token id to deposit. |
|
||||
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
|
||||
|
||||
@@ -97,7 +97,7 @@ Deposit some ERC721 NFT to caller's account on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _token | address | The address of ERC721 NFT in layer 1. |
|
||||
| _token | address | The address of ERC721 NFT on layer 1. |
|
||||
| _tokenId | uint256 | The token id to deposit. |
|
||||
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
|
||||
|
||||
@@ -107,9 +107,9 @@ Deposit some ERC721 NFT to caller's account on layer 2.
|
||||
function finalizeBatchWithdrawERC721(address _l1Token, address _l2Token, address _from, address _to, uint256[] _tokenIds) external nonpayable
|
||||
```
|
||||
|
||||
Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient's account in layer 1.
|
||||
Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient's account on layer 1.
|
||||
|
||||
*Requirements: - The function should only be called by L1ScrollMessenger. - The function should also only be called by L2ERC721Gateway in layer 2.*
|
||||
*Requirements: - The function should only be called by L1ScrollMessenger. - The function should also only be called by L2ERC721Gateway on layer 2.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -117,8 +117,8 @@ Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient
|
||||
|---|---|---|
|
||||
| _l1Token | address | The address of corresponding layer 1 token. |
|
||||
| _l2Token | address | The address of corresponding layer 2 token. |
|
||||
| _from | address | The address of account who withdraw the token in layer 2. |
|
||||
| _to | address | The address of recipient in layer 1 to receive the token. |
|
||||
| _from | address | The address of account who withdraw the token on layer 2. |
|
||||
| _to | address | The address of recipient on layer 1 to receive the token. |
|
||||
| _tokenIds | uint256[] | The list of token ids to withdraw. |
|
||||
|
||||
### finalizeWithdrawERC721
|
||||
@@ -127,9 +127,9 @@ Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient
|
||||
function finalizeWithdrawERC721(address _l1Token, address _l2Token, address _from, address _to, uint256 _tokenId) external nonpayable
|
||||
```
|
||||
|
||||
Complete ERC721 withdraw from layer 2 to layer 1 and send NFT to recipient's account in layer 1.
|
||||
Complete ERC721 withdraw from layer 2 to layer 1 and send NFT to recipient's account on layer 1.
|
||||
|
||||
*Requirements: - The function should only be called by L1ScrollMessenger. - The function should also only be called by L2ERC721Gateway in layer 2.*
|
||||
*Requirements: - The function should only be called by L1ScrollMessenger. - The function should also only be called by L2ERC721Gateway on layer 2.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -137,8 +137,8 @@ Complete ERC721 withdraw from layer 2 to layer 1 and send NFT to recipient's
|
||||
|---|---|---|
|
||||
| _l1Token | address | The address of corresponding layer 1 token. |
|
||||
| _l2Token | address | The address of corresponding layer 2 token. |
|
||||
| _from | address | The address of account who withdraw the token in layer 2. |
|
||||
| _to | address | The address of recipient in layer 1 to receive the token. |
|
||||
| _from | address | The address of account who withdraw the token on layer 2. |
|
||||
| _to | address | The address of recipient on layer 1 to receive the token. |
|
||||
| _tokenId | uint256 | The token id to withdraw. |
|
||||
|
||||
### initialize
|
||||
@@ -313,8 +313,8 @@ Update layer 2 to layer 2 token mapping.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token | address | The address of corresponding ERC721 token in layer 2. |
|
||||
| _l2Token | address | undefined |
|
||||
| _l1Token | address | The address of ERC721 token on layer 1. |
|
||||
| _l2Token | address | The address of corresponding ERC721 token on layer 2. |
|
||||
|
||||
|
||||
|
||||
@@ -326,7 +326,7 @@ Update layer 2 to layer 2 token mapping.
|
||||
event BatchDepositERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is batch deposited to gateway in layer 1.
|
||||
Emitted when the ERC721 NFT is batch deposited to gateway on layer 1.
|
||||
|
||||
|
||||
|
||||
@@ -364,7 +364,7 @@ Emitted when a batch of ERC721 tokens are refunded.
|
||||
event DepositERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is deposited to gateway in layer 1.
|
||||
Emitted when the ERC721 NFT is deposited to gateway on layer 1.
|
||||
|
||||
|
||||
|
||||
@@ -384,7 +384,7 @@ Emitted when the ERC721 NFT is deposited to gateway in layer 1.
|
||||
event FinalizeBatchWithdrawERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is batch transfered to recipient in layer 1.
|
||||
Emitted when the ERC721 NFT is batch transfered to recipient on layer 1.
|
||||
|
||||
|
||||
|
||||
@@ -404,7 +404,7 @@ Emitted when the ERC721 NFT is batch transfered to recipient in layer 1.
|
||||
event FinalizeWithdrawERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is transfered to recipient in layer 1.
|
||||
Emitted when the ERC721 NFT is transfered to recipient on layer 1.
|
||||
|
||||
|
||||
|
||||
@@ -483,8 +483,8 @@ Emitted when token mapping for ERC721 token is updated.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token | address | The address of corresponding ERC721 token in layer 2. |
|
||||
| _l2Token | address | undefined |
|
||||
| _l1Token | address | The address of ERC721 token on layer 1. |
|
||||
| _l2Token | address | The address of corresponding ERC721 token on layer 2. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
> L1StandardERC20Gateway
|
||||
|
||||
The `L1StandardERC20Gateway` is used to deposit standard ERC20 tokens in layer 1 and finalize withdraw the tokens from layer 2.
|
||||
The `L1StandardERC20Gateway` is used to deposit standard ERC20 tokens on layer 1 and finalize withdraw the tokens from layer 2.
|
||||
|
||||
*The deposited ERC20 tokens are held in this gateway. On finalizing withdraw, the corresponding token will be transfer to the recipient directly. Any ERC20 that requires non-standard functionality should use a separate gateway.*
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
> L1WETHGateway
|
||||
|
||||
The `L1WETHGateway` contract is used to deposit `WETH` token in layer 1 and finalize withdraw `WETH` from layer 2.
|
||||
The `L1WETHGateway` contract is used to deposit `WETH` token on layer 1 and finalize withdraw `WETH` from layer 2.
|
||||
|
||||
*The deposited WETH tokens are not held in the gateway. It will first be unwrapped as Ether and then the Ether will be sent to the `L1ScrollMessenger` contract. On finalizing withdraw, the Ether will be transfered from `L1ScrollMessenger`, then wrapped as WETH and finally transfer to recipient.*
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
> L2ERC1155Gateway
|
||||
|
||||
The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs in layer 2 and finalize deposit the NFTs from layer 1.
|
||||
The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs on layer 2 and finalize deposit the NFTs from layer 1.
|
||||
|
||||
*The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding NFT will be minted and transfered to the recipient. This will be changed if we have more specific scenarios.*
|
||||
|
||||
@@ -72,9 +72,9 @@ The address of corresponding L1/L2 Gateway contract.
|
||||
function finalizeBatchDepositERC1155(address _l1Token, address _l2Token, address _from, address _to, uint256[] _tokenIds, uint256[] _amounts) external nonpayable
|
||||
```
|
||||
|
||||
Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
|
||||
Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account on layer 2.
|
||||
|
||||
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC1155Gateway in layer 1.*
|
||||
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC1155Gateway on layer 1.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -93,9 +93,9 @@ Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's
|
||||
function finalizeDepositERC1155(address _l1Token, address _l2Token, address _from, address _to, uint256 _tokenId, uint256 _amount) external nonpayable
|
||||
```
|
||||
|
||||
Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
|
||||
Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account on layer 2.
|
||||
|
||||
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC1155Gateway in layer 1.*
|
||||
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC1155Gateway on layer 1.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -313,8 +313,8 @@ Update layer 2 to layer 1 token mapping.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l2Token | address | undefined |
|
||||
| _l1Token | address | The address of ERC1155 token in layer 1. |
|
||||
| _l2Token | address | The address of corresponding ERC1155 token on layer 2. |
|
||||
| _l1Token | address | The address of ERC1155 token on layer 1. |
|
||||
|
||||
### withdrawERC1155
|
||||
|
||||
@@ -365,7 +365,7 @@ Withdraw some ERC1155 NFT to caller's account on layer 1.
|
||||
event BatchWithdrawERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds, uint256[] amounts)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is batch transfered to gateway in layer 2.
|
||||
Emitted when the ERC1155 NFT is batch transfered to gateway on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -386,7 +386,7 @@ Emitted when the ERC1155 NFT is batch transfered to gateway in layer 2.
|
||||
event FinalizeBatchDepositERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds, uint256[] amounts)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is batch transfered to recipient in layer 2.
|
||||
Emitted when the ERC1155 NFT is batch transfered to recipient on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -407,7 +407,7 @@ Emitted when the ERC1155 NFT is batch transfered to recipient in layer 2.
|
||||
event FinalizeDepositERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId, uint256 amount)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is transfered to recipient in layer 2.
|
||||
Emitted when the ERC1155 NFT is transfered to recipient on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -469,8 +469,8 @@ Emitted when token mapping for ERC1155 token is updated.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l2Token | address | undefined |
|
||||
| _l1Token | address | The address of ERC1155 token in layer 1. |
|
||||
| _l2Token | address | The address of corresponding ERC1155 token on layer 2. |
|
||||
| _l1Token | address | The address of ERC1155 token on layer 1. |
|
||||
|
||||
### WithdrawERC1155
|
||||
|
||||
@@ -478,7 +478,7 @@ Emitted when token mapping for ERC1155 token is updated.
|
||||
event WithdrawERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId, uint256 amount)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is transfered to gateway in layer 2.
|
||||
Emitted when the ERC1155 NFT is transfered to gateway on layer 2.
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
> L2ERC721Gateway
|
||||
|
||||
The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs in layer 2 and finalize deposit the NFTs from layer 1.
|
||||
The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs on layer 2 and finalize deposit the NFTs from layer 1.
|
||||
|
||||
*The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding NFT will be minted and transfered to the recipient. This will be changed if we have more specific scenarios.*
|
||||
|
||||
@@ -70,9 +70,9 @@ The address of corresponding L1/L2 Gateway contract.
|
||||
function finalizeBatchDepositERC721(address _l1Token, address _l2Token, address _from, address _to, uint256[] _tokenIds) external nonpayable
|
||||
```
|
||||
|
||||
Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
|
||||
Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account on layer 2.
|
||||
|
||||
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC721Gateway in layer 1.*
|
||||
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC721Gateway on layer 1.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -90,9 +90,9 @@ Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's
|
||||
function finalizeDepositERC721(address _l1Token, address _l2Token, address _from, address _to, uint256 _tokenId) external nonpayable
|
||||
```
|
||||
|
||||
Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
|
||||
Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account on layer 2.
|
||||
|
||||
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC721Gateway in layer 1.*
|
||||
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC721Gateway on layer 1.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -260,8 +260,8 @@ Update layer 2 to layer 1 token mapping.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l2Token | address | undefined |
|
||||
| _l1Token | address | The address of ERC721 token in layer 1. |
|
||||
| _l2Token | address | The address of corresponding ERC721 token on layer 2. |
|
||||
| _l1Token | address | The address of ERC721 token on layer 1. |
|
||||
|
||||
### withdrawERC721
|
||||
|
||||
@@ -310,7 +310,7 @@ Withdraw some ERC721 NFT to caller's account on layer 1.
|
||||
event BatchWithdrawERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is batch transfered to gateway in layer 2.
|
||||
Emitted when the ERC721 NFT is batch transfered to gateway on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -330,7 +330,7 @@ Emitted when the ERC721 NFT is batch transfered to gateway in layer 2.
|
||||
event FinalizeBatchDepositERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is batch transfered to recipient in layer 2.
|
||||
Emitted when the ERC721 NFT is batch transfered to recipient on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -350,7 +350,7 @@ Emitted when the ERC721 NFT is batch transfered to recipient in layer 2.
|
||||
event FinalizeDepositERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is transfered to recipient in layer 2.
|
||||
Emitted when the ERC721 NFT is transfered to recipient on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -411,8 +411,8 @@ Emitted when token mapping for ERC721 token is updated.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l2Token | address | undefined |
|
||||
| _l1Token | address | The address of ERC721 token in layer 1. |
|
||||
| _l2Token | address | The address of corresponding ERC721 token on layer 2. |
|
||||
| _l1Token | address | The address of ERC721 token on layer 1. |
|
||||
|
||||
### WithdrawERC721
|
||||
|
||||
@@ -420,7 +420,7 @@ Emitted when token mapping for ERC721 token is updated.
|
||||
event WithdrawERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is transfered to gateway in layer 2.
|
||||
Emitted when the ERC721 NFT is transfered to gateway on layer 2.
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
The `L2ScrollMessenger` contract can: 1. send messages from layer 2 to layer 1; 2. relay messages from layer 1 layer 2; 3. drop expired message due to sequencer problems.
|
||||
|
||||
*It should be a predeployed contract in layer 2 and should hold infinite amount of Ether (Specifically, `uint256(-1)`), which can be initialized in Genesis Block.*
|
||||
*It should be a predeployed contract on layer 2 and should hold infinite amount of Ether (Specifically, `uint256(-1)`), which can be initialized in Genesis Block.*
|
||||
|
||||
## Methods
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
> L2StandardERC20Gateway
|
||||
|
||||
The `L2StandardERC20Gateway` is used to withdraw standard ERC20 tokens in layer 2 and finalize deposit the tokens from layer 1.
|
||||
The `L2StandardERC20Gateway` is used to withdraw standard ERC20 tokens on layer 2 and finalize deposit the tokens from layer 1.
|
||||
|
||||
*The withdrawn ERC20 tokens will be burned directly. On finalizing deposit, the corresponding token will be minted and transfered to the recipient. Any ERC20 that requires non-standard functionality should use a separate gateway.*
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
> L2WETHGateway
|
||||
|
||||
The `L2WETHGateway` contract is used to withdraw `WETH` token in layer 2 and finalize deposit `WETH` from layer 1.
|
||||
The `L2WETHGateway` contract is used to withdraw `WETH` token on layer 2 and finalize deposit `WETH` from layer 1.
|
||||
|
||||
*The WETH tokens are not held in the gateway. It will first be unwrapped as Ether and then the Ether will be sent to the `L2ScrollMessenger` contract. On finalizing deposit, the Ether will be transfered from `L2ScrollMessenger`, then wrapped as WETH and finally transfer to recipient.*
|
||||
|
||||
|
||||
@@ -116,7 +116,7 @@ const config: HardhatUserConfig = {
|
||||
"IL2ERC1155Gateway",
|
||||
"IScrollStandardERC20Factory",
|
||||
"IZKRollup",
|
||||
"WETH9",
|
||||
"WrappedEther",
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
@@ -95,16 +95,16 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
});
|
||||
});
|
||||
|
||||
context("#setPaused", async () => {
|
||||
context("#setPause", async () => {
|
||||
it("should revert, when non-owner call", async () => {
|
||||
await expect(gateway.connect(signer).setPaused(false)).to.revertedWith("Ownable: caller is not the owner");
|
||||
await expect(gateway.connect(signer).setPause(false)).to.revertedWith("Ownable: caller is not the owner");
|
||||
});
|
||||
|
||||
it("should succeed", async () => {
|
||||
expect(await gateway.paused()).to.eq(false);
|
||||
await expect(gateway.setPaused(true)).to.emit(gateway, "Paused").withArgs(deployer.address);
|
||||
await expect(gateway.setPause(true)).to.emit(gateway, "Paused").withArgs(deployer.address);
|
||||
expect(await gateway.paused()).to.eq(true);
|
||||
await expect(gateway.setPaused(false)).to.emit(gateway, "Unpaused").withArgs(deployer.address);
|
||||
await expect(gateway.setPause(false)).to.emit(gateway, "Unpaused").withArgs(deployer.address);
|
||||
expect(await gateway.paused()).to.eq(false);
|
||||
});
|
||||
});
|
||||
@@ -112,7 +112,7 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
|
||||
context("#sendTransaction, by EOA", async () => {
|
||||
it("should revert, when contract is paused", async () => {
|
||||
await gateway.setPaused(true);
|
||||
await gateway.setPause(true);
|
||||
await expect(
|
||||
gateway.connect(signer)["sendTransaction(address,uint256,uint256,bytes)"](signer.address, 0, 0, "0x")
|
||||
).to.revertedWith("Pausable: paused");
|
||||
@@ -246,7 +246,7 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
};
|
||||
|
||||
it("should revert, when contract is paused", async () => {
|
||||
await gateway.setPaused(true);
|
||||
await gateway.setPause(true);
|
||||
await expect(
|
||||
gateway
|
||||
.connect(deployer)
|
||||
|
||||
@@ -119,10 +119,16 @@ describe("L1BlockContainer", async () => {
|
||||
const [deployer] = await ethers.getSigners();
|
||||
const L1BlockContainer = await ethers.getContractFactory("L1BlockContainer", deployer);
|
||||
container = await L1BlockContainer.deploy(deployer.address);
|
||||
|
||||
const Whitelist = await ethers.getContractFactory("Whitelist", deployer);
|
||||
const whitelist = await Whitelist.deploy(deployer.address);
|
||||
await whitelist.updateWhitelistStatus([deployer.address], true);
|
||||
|
||||
await container.updateWhitelist(whitelist.address);
|
||||
});
|
||||
|
||||
it("should revert, when sender not allowed", async () => {
|
||||
const [deployer] = await ethers.getSigners();
|
||||
const [, signer] = await ethers.getSigners();
|
||||
await container.initialize(
|
||||
test.parentHash,
|
||||
test.blockHeight - 1,
|
||||
@@ -130,11 +136,8 @@ describe("L1BlockContainer", async () => {
|
||||
test.baseFee,
|
||||
test.stateRoot
|
||||
);
|
||||
const Whitelist = await ethers.getContractFactory("Whitelist", deployer);
|
||||
const whitelist = await Whitelist.deploy(deployer.address);
|
||||
await container.updateWhitelist(whitelist.address);
|
||||
|
||||
await expect(container.importBlockHeader(constants.HashZero, [], false)).to.revertedWith(
|
||||
await expect(container.connect(signer).importBlockHeader(constants.HashZero, [], false)).to.revertedWith(
|
||||
"Not whitelisted sender"
|
||||
);
|
||||
});
|
||||
|
||||
@@ -10,8 +10,8 @@ async function main() {
|
||||
|
||||
if (!addressFile.get("WETH")) {
|
||||
console.log(">> Deploy WETH");
|
||||
const WETH9 = await ethers.getContractFactory("WETH9", deployer);
|
||||
const weth = await WETH9.deploy();
|
||||
const WrappedEther = await ethers.getContractFactory("WrappedEther", deployer);
|
||||
const weth = await WrappedEther.deploy();
|
||||
console.log(`>> waiting for transaction: ${weth.deployTransaction.hash}`);
|
||||
await weth.deployed();
|
||||
console.log(`✅ WETH deployed at ${weth.address}`);
|
||||
|
||||
@@ -15,7 +15,6 @@ import {L2GatewayRouter} from "../../src/L2/gateways/L2GatewayRouter.sol";
|
||||
import {L2ScrollMessenger} from "../../src/L2/L2ScrollMessenger.sol";
|
||||
import {L2StandardERC20Gateway} from "../../src/L2/gateways/L2StandardERC20Gateway.sol";
|
||||
import {L2WETHGateway} from "../../src/L2/gateways/L2WETHGateway.sol";
|
||||
import {L1BlockContainer} from "../../src/L2/predeploys/L1BlockContainer.sol";
|
||||
import {L1GasPriceOracle} from "../../src/L2/predeploys/L1GasPriceOracle.sol";
|
||||
import {L2MessageQueue} from "../../src/L2/predeploys/L2MessageQueue.sol";
|
||||
import {L2TxFeeVault} from "../../src/L2/predeploys/L2TxFeeVault.sol";
|
||||
@@ -31,12 +30,10 @@ contract DeployL2BridgeContracts is Script {
|
||||
address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
|
||||
|
||||
L1GasPriceOracle oracle;
|
||||
L1BlockContainer container;
|
||||
L2MessageQueue queue;
|
||||
ProxyAdmin proxyAdmin;
|
||||
|
||||
// predeploy contracts
|
||||
address L1_BLOCK_CONTAINER_PREDEPLOY_ADDR = vm.envOr("L1_BLOCK_CONTAINER_PREDEPLOY_ADDR", address(0));
|
||||
address L1_GAS_PRICE_ORACLE_PREDEPLOY_ADDR = vm.envOr("L1_GAS_PRICE_ORACLE_PREDEPLOY_ADDR", address(0));
|
||||
address L2_MESSAGE_QUEUE_PREDEPLOY_ADDR = vm.envOr("L2_MESSAGE_QUEUE_PREDEPLOY_ADDR", address(0));
|
||||
address L2_TX_FEE_VAULT_PREDEPLOY_ADDR = vm.envOr("L2_TX_FEE_VAULT_PREDEPLOY_ADDR", address(0));
|
||||
@@ -47,7 +44,6 @@ contract DeployL2BridgeContracts is Script {
|
||||
|
||||
// predeploys
|
||||
deployL1GasPriceOracle();
|
||||
deployL1BlockContainer();
|
||||
deployL2MessageQueue();
|
||||
deployTxFeeVault();
|
||||
deployL2Whitelist();
|
||||
@@ -80,19 +76,6 @@ contract DeployL2BridgeContracts is Script {
|
||||
logAddress("L1_GAS_PRICE_ORACLE_ADDR", address(oracle));
|
||||
}
|
||||
|
||||
function deployL1BlockContainer() internal {
|
||||
if (L1_BLOCK_CONTAINER_PREDEPLOY_ADDR != address(0)) {
|
||||
container = L1BlockContainer(L1_BLOCK_CONTAINER_PREDEPLOY_ADDR);
|
||||
logAddress("L1_BLOCK_CONTAINER_ADDR", address(L1_BLOCK_CONTAINER_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
address owner = vm.addr(L2_DEPLOYER_PRIVATE_KEY);
|
||||
container = new L1BlockContainer(owner);
|
||||
|
||||
logAddress("L1_BLOCK_CONTAINER_ADDR", address(container));
|
||||
}
|
||||
|
||||
function deployL2MessageQueue() internal {
|
||||
if (L2_MESSAGE_QUEUE_PREDEPLOY_ADDR != address(0)) {
|
||||
queue = L2MessageQueue(L2_MESSAGE_QUEUE_PREDEPLOY_ADDR);
|
||||
|
||||
@@ -1,30 +1,30 @@
|
||||
// SPDX-License-Identifier: UNLICENSED
|
||||
pragma solidity ^0.8.10;
|
||||
|
||||
import { Script } from "forge-std/Script.sol";
|
||||
import { console } from "forge-std/console.sol";
|
||||
import {Script} from "forge-std/Script.sol";
|
||||
import {console} from "forge-std/console.sol";
|
||||
|
||||
import { WETH9 } from "../../src/L2/predeploys/WETH9.sol";
|
||||
import {WrappedEther} from "../../src/L2/predeploys/WrappedEther.sol";
|
||||
|
||||
contract DeployWeth is Script {
|
||||
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
|
||||
address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
|
||||
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
|
||||
address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
|
||||
|
||||
function run() external {
|
||||
// deploy weth only if we're running a private L1 network
|
||||
if (L1_WETH_ADDR == address(0)) {
|
||||
uint256 L1_WETH_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_WETH_DEPLOYER_PRIVATE_KEY");
|
||||
vm.startBroadcast(L1_WETH_DEPLOYER_PRIVATE_KEY);
|
||||
WETH9 weth = new WETH9();
|
||||
L1_WETH_ADDR = address(weth);
|
||||
vm.stopBroadcast();
|
||||
function run() external {
|
||||
// deploy weth only if we're running a private L1 network
|
||||
if (L1_WETH_ADDR == address(0)) {
|
||||
uint256 L1_WETH_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_WETH_DEPLOYER_PRIVATE_KEY");
|
||||
vm.startBroadcast(L1_WETH_DEPLOYER_PRIVATE_KEY);
|
||||
WrappedEther weth = new WrappedEther();
|
||||
L1_WETH_ADDR = address(weth);
|
||||
vm.stopBroadcast();
|
||||
}
|
||||
|
||||
logAddress("L1_WETH_ADDR", L1_WETH_ADDR);
|
||||
logAddress("L2_WETH_ADDR", L2_WETH_ADDR);
|
||||
}
|
||||
|
||||
logAddress("L1_WETH_ADDR", L1_WETH_ADDR);
|
||||
logAddress("L2_WETH_ADDR", L2_WETH_ADDR);
|
||||
}
|
||||
|
||||
function logAddress(string memory name, address addr) internal view {
|
||||
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
|
||||
}
|
||||
function logAddress(string memory name, address addr) internal view {
|
||||
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,8 +22,8 @@ contract InitializeL1BridgeContracts is Script {
uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");
uint256 MAX_L2_TX_IN_CHUNK = vm.envUint("MAX_L2_TX_IN_CHUNK");
address L1_ROLLUP_OPERATOR_ADDR = vm.envAddress("L1_ROLLUP_OPERATOR_ADDR");

address L1_FEE_VAULT_ADDR = vm.envAddress("L1_FEE_VAULT_ADDR");
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");

address L1_WHITELIST_ADDR = vm.envAddress("L1_WHITELIST_ADDR");
address L1_SCROLL_CHAIN_PROXY_ADDR = vm.envAddress("L1_SCROLL_CHAIN_PROXY_ADDR");

@@ -61,9 +61,15 @@ contract InitializeL1BridgeContracts is Script {
MAX_L2_TX_IN_CHUNK
);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).updateSequencer(L1_ROLLUP_OPERATOR_ADDR, true);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).updateProver(L1_ROLLUP_OPERATOR_ADDR, true);

// initialize L2GasPriceOracle
L2GasPriceOracle(L2_GAS_PRICE_ORACLE_PROXY_ADDR).initialize(0, 0, 0, 0);
L2GasPriceOracle(L2_GAS_PRICE_ORACLE_PROXY_ADDR).initialize(
21000, // _txGas
53000, // _txGasContractCreation
4, // _zeroGas
16 // _nonZeroGas
);
L2GasPriceOracle(L2_GAS_PRICE_ORACLE_PROXY_ADDR).updateWhitelist(L1_WHITELIST_ADDR);

// initialize L1MessageQueue
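The four values passed to `initialize` mirror Ethereum's intrinsic gas parameters. A small sketch of the fee formula they imply; the oracle's exact Solidity implementation is not shown in this diff, so treat the formula itself as an assumption:

```go
package main

import "fmt"

// Intrinsic calldata gas with the parameters from the hunk above:
// txGas = 21000, txGasContractCreation = 53000, zeroGas = 4, nonZeroGas = 16.
func intrinsicGas(data []byte, isContractCreation bool) uint64 {
	gas := uint64(21000)
	if isContractCreation {
		gas = 53000
	}
	for _, b := range data {
		if b == 0 {
			gas += 4
		} else {
			gas += 16
		}
	}
	return gas
}

func main() {
	data := []byte{0x00, 0x00, 0x01, 0x02} // 2 zero bytes, 2 non-zero bytes
	fmt.Println(intrinsicGas(data, false)) // 21000 + 2*4 + 2*16 = 21040
}
```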
@@ -137,6 +143,15 @@ contract InitializeL1BridgeContracts is Script {
L1_SCROLL_MESSENGER_PROXY_ADDR
);

// set WETH gateway in router
{
address[] memory _tokens = new address[](1);
_tokens[0] = L1_WETH_ADDR;
address[] memory _gateways = new address[](1);
_gateways[0] = L1_WETH_GATEWAY_PROXY_ADDR;
L1GatewayRouter(L1_GATEWAY_ROUTER_PROXY_ADDR).setERC20Gateway(_tokens, _gateways);
}

vm.stopBroadcast();
}
}
@@ -13,7 +13,6 @@ import {L2StandardERC20Gateway} from "../../src/L2/gateways/L2StandardERC20Gatew
|
||||
import {L2WETHGateway} from "../../src/L2/gateways/L2WETHGateway.sol";
|
||||
import {L2MessageQueue} from "../../src/L2/predeploys/L2MessageQueue.sol";
|
||||
import {L2TxFeeVault} from "../../src/L2/predeploys/L2TxFeeVault.sol";
|
||||
import {L1BlockContainer} from "../../src/L2/predeploys/L1BlockContainer.sol";
|
||||
import {L1GasPriceOracle} from "../../src/L2/predeploys/L1GasPriceOracle.sol";
|
||||
import {Whitelist} from "../../src/L2/predeploys/Whitelist.sol";
|
||||
import {ScrollStandardERC20Factory} from "../../src/libraries/token/ScrollStandardERC20Factory.sol";
|
||||
@@ -21,6 +20,8 @@ import {ScrollStandardERC20Factory} from "../../src/libraries/token/ScrollStanda
|
||||
contract InitializeL2BridgeContracts is Script {
|
||||
uint256 deployerPrivateKey = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
|
||||
|
||||
address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
|
||||
|
||||
address L1_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L1_SCROLL_MESSENGER_PROXY_ADDR");
|
||||
address L1_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L1_GATEWAY_ROUTER_PROXY_ADDR");
|
||||
address L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR");
|
||||
@@ -31,7 +32,6 @@ contract InitializeL2BridgeContracts is Script {
|
||||
address L1_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_WETH_GATEWAY_PROXY_ADDR");
|
||||
|
||||
address L2_TX_FEE_VAULT_ADDR = vm.envAddress("L2_TX_FEE_VAULT_ADDR");
|
||||
address L1_BLOCK_CONTAINER_ADDR = vm.envAddress("L1_BLOCK_CONTAINER_ADDR");
|
||||
address L1_GAS_PRICE_ORACLE_ADDR = vm.envAddress("L1_GAS_PRICE_ORACLE_ADDR");
|
||||
address L2_WHITELIST_ADDR = vm.envAddress("L2_WHITELIST_ADDR");
|
||||
address L2_MESSAGE_QUEUE_ADDR = vm.envAddress("L2_MESSAGE_QUEUE_ADDR");
|
||||
@@ -50,15 +50,11 @@ contract InitializeL2BridgeContracts is Script {
|
||||
vm.startBroadcast(deployerPrivateKey);
|
||||
|
||||
// initialize L2MessageQueue
|
||||
L2MessageQueue(L2_MESSAGE_QUEUE_ADDR).initialize();
|
||||
L2MessageQueue(L2_MESSAGE_QUEUE_ADDR).updateMessenger(L2_SCROLL_MESSENGER_PROXY_ADDR);
|
||||
L2MessageQueue(L2_MESSAGE_QUEUE_ADDR).initialize(L2_SCROLL_MESSENGER_PROXY_ADDR);
|
||||
|
||||
// initialize L2TxFeeVault
|
||||
L2TxFeeVault(payable(L2_TX_FEE_VAULT_ADDR)).updateMessenger(L2_SCROLL_MESSENGER_PROXY_ADDR);
|
||||
|
||||
// initialize L1BlockContainer
|
||||
L1BlockContainer(L1_BLOCK_CONTAINER_ADDR).updateWhitelist(L2_WHITELIST_ADDR);
|
||||
|
||||
// initialize L1GasPriceOracle
|
||||
L1GasPriceOracle(L1_GAS_PRICE_ORACLE_ADDR).updateWhitelist(L2_WHITELIST_ADDR);
|
||||
|
||||
@@ -115,6 +111,15 @@ contract InitializeL2BridgeContracts is Script {
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// set WETH gateway in router
|
||||
{
|
||||
address[] memory _tokens = new address[](1);
|
||||
_tokens[0] = L2_WETH_ADDR;
|
||||
address[] memory _gateways = new address[](1);
|
||||
_gateways[0] = L2_WETH_GATEWAY_PROXY_ADDR;
|
||||
L2GatewayRouter(L2_GATEWAY_ROUTER_PROXY_ADDR).setERC20Gateway(_tokens, _gateways);
|
||||
}
|
||||
|
||||
// initialize ScrollStandardERC20Factory
|
||||
ScrollStandardERC20Factory(L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR).transferOwnership(
|
||||
L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR
|
||||
|
||||
@@ -182,8 +182,8 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
|
||||
address _refundAddress
|
||||
) external payable override whenNotPaused notInExecution {
|
||||
// We will use a different `queueIndex` for the replaced message. However, the original `queueIndex` or `nonce`
|
||||
// is encoded in the `_message`. We will check the `xDomainCalldata` in layer 2 to avoid duplicated execution.
|
||||
// So, only one message will succeed in layer 2. If one of the message is executed successfully, the other one
|
||||
// is encoded in the `_message`. We will check the `xDomainCalldata` on layer 2 to avoid duplicated execution.
|
||||
// So, only one message will succeed on layer 2. If one of the message is executed successfully, the other one
|
||||
// will revert with "Message was already successfully executed".
|
||||
address _messageQueue = messageQueue;
|
||||
address _counterpart = counterpart;
|
||||
|
||||
@@ -152,7 +152,7 @@ contract EnforcedTxGateway is OwnableUpgradeable, ReentrancyGuardUpgradeable, Pa
|
||||
|
||||
/// @notice Pause or unpause this contract.
|
||||
/// @param _status Pause this contract if it is true, otherwise unpause this contract.
|
||||
function setPaused(bool _status) external onlyOwner {
|
||||
function setPause(bool _status) external onlyOwner {
|
||||
if (_status) {
|
||||
_pause();
|
||||
} else {
|
||||
|
||||
@@ -2,17 +2,17 @@
|
||||
|
||||
pragma solidity ^0.8.16;
|
||||
|
||||
/// @title The interface for the ERC1155 cross chain gateway in layer 1.
|
||||
/// @title The interface for the ERC1155 cross chain gateway on layer 1.
|
||||
interface IL1ERC1155Gateway {
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when the ERC1155 NFT is transfered to recipient in layer 1.
|
||||
/// @param _l1Token The address of ERC1155 NFT in layer 1.
|
||||
/// @param _l2Token The address of ERC1155 NFT in layer 2.
|
||||
/// @param _from The address of sender in layer 2.
|
||||
/// @param _to The address of recipient in layer 1.
|
||||
/// @notice Emitted when the ERC1155 NFT is transfered to recipient on layer 1.
|
||||
/// @param _l1Token The address of ERC1155 NFT on layer 1.
|
||||
/// @param _l2Token The address of ERC1155 NFT on layer 2.
|
||||
/// @param _from The address of sender on layer 2.
|
||||
/// @param _to The address of recipient on layer 1.
|
||||
/// @param _tokenId The token id of the ERC1155 NFT to withdraw from layer 2.
|
||||
/// @param _amount The number of token to withdraw from layer 2.
|
||||
event FinalizeWithdrawERC1155(
|
||||
@@ -24,11 +24,11 @@ interface IL1ERC1155Gateway {
|
||||
uint256 _amount
|
||||
);
|
||||
|
||||
/// @notice Emitted when the ERC1155 NFT is batch transfered to recipient in layer 1.
|
||||
/// @param _l1Token The address of ERC1155 NFT in layer 1.
|
||||
/// @param _l2Token The address of ERC1155 NFT in layer 2.
|
||||
/// @param _from The address of sender in layer 2.
|
||||
/// @param _to The address of recipient in layer 1.
|
||||
/// @notice Emitted when the ERC1155 NFT is batch transfered to recipient on layer 1.
|
||||
/// @param _l1Token The address of ERC1155 NFT on layer 1.
|
||||
/// @param _l2Token The address of ERC1155 NFT on layer 2.
|
||||
/// @param _from The address of sender on layer 2.
|
||||
/// @param _to The address of recipient on layer 1.
|
||||
/// @param _tokenIds The list of token ids of the ERC1155 NFT to withdraw from layer 2.
|
||||
/// @param _amounts The list of corresponding number of token to withdraw from layer 2.
|
||||
event FinalizeBatchWithdrawERC1155(
|
||||
@@ -40,13 +40,13 @@ interface IL1ERC1155Gateway {
|
||||
uint256[] _amounts
|
||||
);
|
||||
|
||||
/// @notice Emitted when the ERC1155 NFT is deposited to gateway in layer 1.
|
||||
/// @param _l1Token The address of ERC1155 NFT in layer 1.
|
||||
/// @param _l2Token The address of ERC1155 NFT in layer 2.
|
||||
/// @param _from The address of sender in layer 1.
|
||||
/// @param _to The address of recipient in layer 2.
|
||||
/// @param _tokenId The token id of the ERC1155 NFT to deposit in layer 1.
|
||||
/// @param _amount The number of token to deposit in layer 1.
|
||||
/// @notice Emitted when the ERC1155 NFT is deposited to gateway on layer 1.
|
||||
/// @param _l1Token The address of ERC1155 NFT on layer 1.
|
||||
/// @param _l2Token The address of ERC1155 NFT on layer 2.
|
||||
/// @param _from The address of sender on layer 1.
|
||||
/// @param _to The address of recipient on layer 2.
|
||||
/// @param _tokenId The token id of the ERC1155 NFT to deposit on layer 1.
|
||||
/// @param _amount The number of token to deposit on layer 1.
|
||||
event DepositERC1155(
|
||||
address indexed _l1Token,
|
||||
address indexed _l2Token,
|
||||
@@ -56,13 +56,13 @@ interface IL1ERC1155Gateway {
|
||||
uint256 _amount
|
||||
);
|
||||
|
||||
/// @notice Emitted when the ERC1155 NFT is batch deposited to gateway in layer 1.
|
||||
/// @param _l1Token The address of ERC1155 NFT in layer 1.
|
||||
/// @param _l2Token The address of ERC1155 NFT in layer 2.
|
||||
/// @param _from The address of sender in layer 1.
|
||||
/// @param _to The address of recipient in layer 2.
|
||||
/// @param _tokenIds The list of token ids of the ERC1155 NFT to deposit in layer 1.
|
||||
/// @param _amounts The list of corresponding number of token to deposit in layer 1.
|
||||
/// @notice Emitted when the ERC1155 NFT is batch deposited to gateway on layer 1.
|
||||
/// @param _l1Token The address of ERC1155 NFT on layer 1.
|
||||
/// @param _l2Token The address of ERC1155 NFT on layer 2.
|
||||
/// @param _from The address of sender on layer 1.
|
||||
/// @param _to The address of recipient on layer 2.
|
||||
/// @param _tokenIds The list of token ids of the ERC1155 NFT to deposit on layer 1.
|
||||
/// @param _amounts The list of corresponding number of token to deposit on layer 1.
|
||||
event BatchDepositERC1155(
|
||||
address indexed _l1Token,
|
||||
address indexed _l2Token,
|
||||
@@ -91,7 +91,7 @@ interface IL1ERC1155Gateway {
|
||||
*************************/
|
||||
|
||||
/// @notice Deposit some ERC1155 NFT to caller's account on layer 2.
|
||||
/// @param _token The address of ERC1155 NFT in layer 1.
|
||||
/// @param _token The address of ERC1155 NFT on layer 1.
|
||||
/// @param _tokenId The token id to deposit.
|
||||
/// @param _amount The amount of token to deposit.
|
||||
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
|
||||
@@ -103,8 +103,8 @@ interface IL1ERC1155Gateway {
|
||||
) external payable;
|
||||
|
||||
/// @notice Deposit some ERC1155 NFT to a recipient's account on layer 2.
|
||||
/// @param _token The address of ERC1155 NFT in layer 1.
|
||||
/// @param _to The address of recipient in layer 2.
|
||||
/// @param _token The address of ERC1155 NFT on layer 1.
|
||||
/// @param _to The address of recipient on layer 2.
|
||||
/// @param _tokenId The token id to deposit.
|
||||
/// @param _amount The amount of token to deposit.
|
||||
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
|
||||
@@ -117,7 +117,7 @@ interface IL1ERC1155Gateway {
|
||||
) external payable;
|
||||
|
||||
/// @notice Deposit a list of some ERC1155 NFT to caller's account on layer 2.
|
||||
/// @param _token The address of ERC1155 NFT in layer 1.
|
||||
/// @param _token The address of ERC1155 NFT on layer 1.
|
||||
/// @param _tokenIds The list of token ids to deposit.
|
||||
/// @param _amounts The list of corresponding number of token to deposit.
|
||||
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
|
||||
@@ -129,8 +129,8 @@ interface IL1ERC1155Gateway {
|
||||
) external payable;
|
||||
|
||||
/// @notice Deposit a list of some ERC1155 NFT to a recipient's account on layer 2.
|
||||
/// @param _token The address of ERC1155 NFT in layer 1.
|
||||
/// @param _to The address of recipient in layer 2.
|
||||
/// @param _token The address of ERC1155 NFT on layer 1.
|
||||
/// @param _to The address of recipient on layer 2.
|
||||
/// @param _tokenIds The list of token ids to deposit.
|
||||
/// @param _amounts The list of corresponding number of token to deposit.
|
||||
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
|
||||
@@ -142,13 +142,13 @@ interface IL1ERC1155Gateway {
|
||||
uint256 _gasLimit
|
||||
) external payable;
|
||||
|
||||
/// @notice Complete ERC1155 withdraw from layer 2 to layer 1 and send fund to recipient's account in layer 1.
|
||||
/// @notice Complete ERC1155 withdraw from layer 2 to layer 1 and send fund to recipient's account on layer 1.
|
||||
/// The function should only be called by L1ScrollMessenger.
|
||||
/// The function should also only be called by L2ERC1155Gateway in layer 2.
|
||||
/// The function should also only be called by L2ERC1155Gateway on layer 2.
|
||||
/// @param _l1Token The address of corresponding layer 1 token.
|
||||
/// @param _l2Token The address of corresponding layer 2 token.
|
||||
/// @param _from The address of account who withdraw the token in layer 2.
|
||||
/// @param _to The address of recipient in layer 1 to receive the token.
|
||||
/// @param _from The address of account who withdraw the token on layer 2.
|
||||
/// @param _to The address of recipient on layer 1 to receive the token.
|
||||
/// @param _tokenId The token id to withdraw.
|
||||
/// @param _amount The amount of token to withdraw.
|
||||
function finalizeWithdrawERC1155(
|
||||
@@ -160,13 +160,13 @@ interface IL1ERC1155Gateway {
|
||||
uint256 _amount
|
||||
) external;
|
||||
|
||||
/// @notice Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipient's account in layer 1.
|
||||
/// @notice Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipient's account on layer 1.
|
||||
/// The function should only be called by L1ScrollMessenger.
|
||||
/// The function should also only be called by L2ERC1155Gateway in layer 2.
|
||||
/// The function should also only be called by L2ERC1155Gateway on layer 2.
|
||||
/// @param _l1Token The address of corresponding layer 1 token.
|
||||
/// @param _l2Token The address of corresponding layer 2 token.
|
||||
/// @param _from The address of account who withdraw the token in layer 2.
|
||||
/// @param _to The address of recipient in layer 1 to receive the token.
|
||||
/// @param _from The address of account who withdraw the token on layer 2.
|
||||
/// @param _to The address of recipient on layer 1 to receive the token.
|
||||
/// @param _tokenIds The list of token ids to withdraw.
|
||||
/// @param _amounts The list of corresponding number of token to withdraw.
|
||||
function finalizeBatchWithdrawERC1155(
|
||||
|
||||
@@ -2,17 +2,17 @@
|
||||
|
||||
pragma solidity ^0.8.16;
|
||||
|
||||
/// @title The interface for the ERC721 cross chain gateway in layer 1.
|
||||
/// @title The interface for the ERC721 cross chain gateway on layer 1.
|
||||
interface IL1ERC721Gateway {
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when the ERC721 NFT is transfered to recipient in layer 1.
|
||||
/// @param _l1Token The address of ERC721 NFT in layer 1.
|
||||
/// @param _l2Token The address of ERC721 NFT in layer 2.
|
||||
/// @param _from The address of sender in layer 2.
|
||||
/// @param _to The address of recipient in layer 1.
|
||||
/// @notice Emitted when the ERC721 NFT is transfered to recipient on layer 1.
|
||||
/// @param _l1Token The address of ERC721 NFT on layer 1.
|
||||
/// @param _l2Token The address of ERC721 NFT on layer 2.
|
||||
/// @param _from The address of sender on layer 2.
|
||||
/// @param _to The address of recipient on layer 1.
|
||||
/// @param _tokenId The token id of the ERC721 NFT to withdraw from layer 2.
|
||||
event FinalizeWithdrawERC721(
|
||||
address indexed _l1Token,
|
||||
@@ -22,11 +22,11 @@ interface IL1ERC721Gateway {
|
||||
uint256 _tokenId
|
||||
);
|
||||
|
||||
/// @notice Emitted when the ERC721 NFT is batch transfered to recipient in layer 1.
|
||||
/// @param _l1Token The address of ERC721 NFT in layer 1.
|
||||
/// @param _l2Token The address of ERC721 NFT in layer 2.
|
||||
/// @param _from The address of sender in layer 2.
|
||||
/// @param _to The address of recipient in layer 1.
|
||||
/// @notice Emitted when the ERC721 NFT is batch transfered to recipient on layer 1.
|
||||
/// @param _l1Token The address of ERC721 NFT on layer 1.
|
||||
/// @param _l2Token The address of ERC721 NFT on layer 2.
|
||||
/// @param _from The address of sender on layer 2.
|
||||
/// @param _to The address of recipient on layer 1.
|
||||
/// @param _tokenIds The list of token ids of the ERC721 NFT to withdraw from layer 2.
|
||||
event FinalizeBatchWithdrawERC721(
|
||||
address indexed _l1Token,
|
||||
@@ -36,12 +36,12 @@ interface IL1ERC721Gateway {
|
||||
uint256[] _tokenIds
|
||||
);
|
||||
|
||||
/// @notice Emitted when the ERC721 NFT is deposited to gateway in layer 1.
|
||||
/// @param _l1Token The address of ERC721 NFT in layer 1.
|
||||
/// @param _l2Token The address of ERC721 NFT in layer 2.
|
||||
/// @param _from The address of sender in layer 1.
|
||||
/// @param _to The address of recipient in layer 2.
|
||||
/// @param _tokenId The token id of the ERC721 NFT to deposit in layer 1.
|
||||
/// @notice Emitted when the ERC721 NFT is deposited to gateway on layer 1.
|
||||
/// @param _l1Token The address of ERC721 NFT on layer 1.
|
||||
/// @param _l2Token The address of ERC721 NFT on layer 2.
|
||||
/// @param _from The address of sender on layer 1.
|
||||
/// @param _to The address of recipient on layer 2.
|
||||
/// @param _tokenId The token id of the ERC721 NFT to deposit on layer 1.
|
||||
event DepositERC721(
|
||||
address indexed _l1Token,
|
||||
address indexed _l2Token,
|
||||
@@ -50,12 +50,12 @@ interface IL1ERC721Gateway {
|
||||
uint256 _tokenId
|
||||
);
|
||||
|
||||
/// @notice Emitted when the ERC721 NFT is batch deposited to gateway in layer 1.
|
||||
/// @param _l1Token The address of ERC721 NFT in layer 1.
|
||||
/// @param _l2Token The address of ERC721 NFT in layer 2.
|
||||
/// @param _from The address of sender in layer 1.
|
||||
/// @param _to The address of recipient in layer 2.
|
||||
/// @param _tokenIds The list of token ids of the ERC721 NFT to deposit in layer 1.
|
||||
/// @notice Emitted when the ERC721 NFT is batch deposited to gateway on layer 1.
|
||||
/// @param _l1Token The address of ERC721 NFT on layer 1.
|
||||
/// @param _l2Token The address of ERC721 NFT on layer 2.
|
||||
/// @param _from The address of sender on layer 1.
|
||||
/// @param _to The address of recipient on layer 2.
|
||||
/// @param _tokenIds The list of token ids of the ERC721 NFT to deposit on layer 1.
|
||||
event BatchDepositERC721(
|
||||
address indexed _l1Token,
|
||||
address indexed _l2Token,
|
||||
@@ -81,7 +81,7 @@ interface IL1ERC721Gateway {
|
||||
*****************************/
|
||||
|
||||
/// @notice Deposit some ERC721 NFT to caller's account on layer 2.
|
||||
/// @param _token The address of ERC721 NFT in layer 1.
|
||||
/// @param _token The address of ERC721 NFT on layer 1.
|
||||
/// @param _tokenId The token id to deposit.
|
||||
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
|
||||
function depositERC721(
|
||||
@@ -91,8 +91,8 @@ interface IL1ERC721Gateway {
|
||||
) external payable;
|
||||
|
||||
/// @notice Deposit some ERC721 NFT to a recipient's account on layer 2.
|
||||
/// @param _token The address of ERC721 NFT in layer 1.
|
||||
/// @param _to The address of recipient in layer 2.
|
||||
/// @param _token The address of ERC721 NFT on layer 1.
|
||||
/// @param _to The address of recipient on layer 2.
|
||||
/// @param _tokenId The token id to deposit.
|
||||
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
|
||||
function depositERC721(
|
||||
@@ -103,7 +103,7 @@ interface IL1ERC721Gateway {
|
||||
) external payable;
|
||||
|
||||
/// @notice Deposit a list of some ERC721 NFT to caller's account on layer 2.
|
||||
/// @param _token The address of ERC721 NFT in layer 1.
|
||||
/// @param _token The address of ERC721 NFT on layer 1.
|
||||
/// @param _tokenIds The list of token ids to deposit.
|
||||
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
|
||||
function batchDepositERC721(
|
||||
@@ -113,8 +113,8 @@ interface IL1ERC721Gateway {
|
||||
) external payable;
|
||||
|
||||
/// @notice Deposit a list of some ERC721 NFT to a recipient's account on layer 2.
|
||||
/// @param _token The address of ERC721 NFT in layer 1.
|
||||
/// @param _to The address of recipient in layer 2.
|
||||
/// @param _token The address of ERC721 NFT on layer 1.
|
||||
/// @param _to The address of recipient on layer 2.
|
||||
/// @param _tokenIds The list of token ids to deposit.
|
||||
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
|
||||
function batchDepositERC721(
|
||||
@@ -124,14 +124,14 @@ interface IL1ERC721Gateway {
|
||||
uint256 _gasLimit
|
||||
) external payable;
|
||||
|
||||
/// @notice Complete ERC721 withdraw from layer 2 to layer 1 and send NFT to recipient's account in layer 1.
|
||||
/// @notice Complete ERC721 withdraw from layer 2 to layer 1 and send NFT to recipient's account on layer 1.
|
||||
/// @dev Requirements:
|
||||
/// - The function should only be called by L1ScrollMessenger.
|
||||
/// - The function should also only be called by L2ERC721Gateway in layer 2.
|
||||
/// - The function should also only be called by L2ERC721Gateway on layer 2.
|
||||
/// @param _l1Token The address of corresponding layer 1 token.
|
||||
/// @param _l2Token The address of corresponding layer 2 token.
|
||||
/// @param _from The address of account who withdraw the token in layer 2.
|
||||
/// @param _to The address of recipient in layer 1 to receive the token.
|
||||
/// @param _from The address of account who withdraw the token on layer 2.
|
||||
/// @param _to The address of recipient on layer 1 to receive the token.
|
||||
/// @param _tokenId The token id to withdraw.
|
||||
function finalizeWithdrawERC721(
|
||||
address _l1Token,
|
||||
@@ -141,14 +141,14 @@ interface IL1ERC721Gateway {
|
||||
uint256 _tokenId
|
||||
) external;
|
||||
|
||||
/// @notice Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient's account in layer 1.
|
||||
/// @notice Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient's account on layer 1.
|
||||
/// @dev Requirements:
|
||||
/// - The function should only be called by L1ScrollMessenger.
|
||||
/// - The function should also only be called by L2ERC721Gateway in layer 2.
|
||||
/// - The function should also only be called by L2ERC721Gateway on layer 2.
|
||||
/// @param _l1Token The address of corresponding layer 1 token.
|
||||
/// @param _l2Token The address of corresponding layer 2 token.
|
||||
/// @param _from The address of account who withdraw the token in layer 2.
|
||||
/// @param _to The address of recipient in layer 1 to receive the token.
|
||||
/// @param _from The address of account who withdraw the token on layer 2.
|
||||
/// @param _to The address of recipient on layer 1 to receive the token.
|
||||
/// @param _tokenIds The list of token ids to withdraw.
|
||||
function finalizeBatchWithdrawERC721(
|
||||
address _l1Token,
|
||||
|
||||
@@ -14,7 +14,7 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
import {L1ERC20Gateway} from "./L1ERC20Gateway.sol";
|
||||
|
||||
/// @title L1CustomERC20Gateway
|
||||
/// @notice The `L1CustomERC20Gateway` is used to deposit custom ERC20 compatible tokens in layer 1 and
|
||||
/// @notice The `L1CustomERC20Gateway` is used to deposit custom ERC20 compatible tokens on layer 1 and
|
||||
/// finalize withdraw the tokens from layer 2.
|
||||
/// @dev The deposited tokens are held in this gateway. On finalizing withdraw, the corresponding
|
||||
/// tokens will be transfer to the recipient directly.
|
||||
@@ -26,8 +26,8 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when token mapping for ERC20 token is updated.
|
||||
/// @param _l1Token The address of ERC20 token in layer 1.
|
||||
/// @param _l2Token The address of corresponding ERC20 token in layer 2.
|
||||
/// @param _l1Token The address of ERC20 token on layer 1.
|
||||
/// @param _l2Token The address of corresponding ERC20 token on layer 2.
|
||||
event UpdateTokenMapping(address _l1Token, address _l2Token);
|
||||
|
||||
/*************
|
||||
@@ -74,8 +74,8 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
|
||||
************************/
|
||||
|
||||
/// @notice Update layer 1 to layer 2 token mapping.
|
||||
/// @param _l1Token The address of ERC20 token in layer 1.
|
||||
/// @param _l2Token The address of corresponding ERC20 token in layer 2.
|
||||
/// @param _l1Token The address of ERC20 token on layer 1.
|
||||
/// @param _l2Token The address of corresponding ERC20 token on layer 2.
|
||||
function updateTokenMapping(address _l1Token, address _l2Token) external onlyOwner {
|
||||
require(_l2Token != address(0), "token address cannot be 0");
|
||||
|
||||
@@ -127,14 +127,9 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
|
||||
(_from, _amount, _data) = _transferERC20In(_token, _amount, _data);
|
||||
|
||||
// 2. Generate message passed to L2CustomERC20Gateway.
|
||||
bytes memory _message = abi.encodeWithSelector(
|
||||
IL2ERC20Gateway.finalizeDepositERC20.selector,
|
||||
_token,
|
||||
_l2Token,
|
||||
_from,
|
||||
_to,
|
||||
_amount,
|
||||
_data
|
||||
bytes memory _message = abi.encodeCall(
|
||||
IL2ERC20Gateway.finalizeDepositERC20,
|
||||
(_token, _l2Token, _from, _to, _amount, _data)
|
||||
);
|
||||
|
||||
// 3. Send message to L1ScrollMessenger.
|
||||
|
||||
@@ -14,7 +14,7 @@ import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallba
|
||||
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
|
||||
/// @title L1ERC1155Gateway
|
||||
/// @notice The `L1ERC1155Gateway` is used to deposit ERC1155 compatible NFT in layer 1 and
|
||||
/// @notice The `L1ERC1155Gateway` is used to deposit ERC1155 compatible NFT on layer 1 and
|
||||
/// finalize withdraw the NFTs from layer 2.
|
||||
/// @dev The deposited NFTs are held in this gateway. On finalizing withdraw, the corresponding
|
||||
/// NFT will be transfer to the recipient directly.
|
||||
@@ -32,8 +32,8 @@ contract L1ERC1155Gateway is
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when token mapping for ERC1155 token is updated.
|
||||
/// @param _l1Token The address of ERC1155 token in layer 1.
|
||||
/// @param _l1Token The address of corresponding ERC1155 token in layer 2.
|
||||
/// @param _l1Token The address of ERC1155 token on layer 1.
|
||||
/// @param _l2Token The address of corresponding ERC1155 token on layer 2.
|
||||
event UpdateTokenMapping(address _l1Token, address _l2Token);
|
||||
|
||||
/*************
|
||||
@@ -172,8 +172,8 @@ contract L1ERC1155Gateway is
|
||||
************************/
|
||||
|
||||
/// @notice Update layer 2 to layer 2 token mapping.
|
||||
/// @param _l1Token The address of ERC1155 token in layer 1.
|
||||
/// @param _l1Token The address of corresponding ERC1155 token in layer 2.
|
||||
/// @param _l1Token The address of ERC1155 token on layer 1.
|
||||
/// @param _l2Token The address of corresponding ERC1155 token on layer 2.
|
||||
function updateTokenMapping(address _l1Token, address _l2Token) external onlyOwner {
|
||||
require(_l2Token != address(0), "token address cannot be 0");
|
||||
|
||||
@@ -187,8 +187,8 @@ contract L1ERC1155Gateway is
|
||||
**********************/
|
||||
|
||||
/// @dev Internal function to deposit ERC1155 NFT to layer 2.
|
||||
/// @param _token The address of ERC1155 NFT in layer 1.
|
||||
/// @param _to The address of recipient in layer 2.
|
||||
/// @param _token The address of ERC1155 NFT on layer 1.
|
||||
/// @param _to The address of recipient on layer 2.
|
||||
/// @param _tokenId The token id to deposit.
|
||||
/// @param _amount The amount of token to deposit.
|
||||
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
|
||||
@@ -208,14 +208,9 @@ contract L1ERC1155Gateway is
|
||||
IERC1155Upgradeable(_token).safeTransferFrom(msg.sender, address(this), _tokenId, _amount, "");
|
||||
|
||||
// 2. Generate message passed to L2ERC1155Gateway.
|
||||
bytes memory _message = abi.encodeWithSelector(
|
||||
IL2ERC1155Gateway.finalizeDepositERC1155.selector,
|
||||
_token,
|
||||
_l2Token,
|
||||
msg.sender,
|
||||
_to,
|
||||
_tokenId,
|
||||
_amount
|
||||
bytes memory _message = abi.encodeCall(
|
||||
IL2ERC1155Gateway.finalizeDepositERC1155,
|
||||
(_token, _l2Token, msg.sender, _to, _tokenId, _amount)
|
||||
);
|
||||
|
||||
// 3. Send message to L1ScrollMessenger.
|
||||
@@ -225,8 +220,8 @@ contract L1ERC1155Gateway is
|
||||
}
|
||||
|
||||
/// @dev Internal function to batch deposit ERC1155 NFT to layer 2.
|
||||
/// @param _token The address of ERC1155 NFT in layer 1.
|
||||
/// @param _to The address of recipient in layer 2.
|
||||
/// @param _token The address of ERC1155 NFT on layer 1.
|
||||
/// @param _to The address of recipient on layer 2.
|
||||
/// @param _tokenIds The list of token ids to deposit.
|
||||
/// @param _amounts The list of corresponding number of token to deposit.
|
||||
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
|
||||
@@ -251,14 +246,9 @@ contract L1ERC1155Gateway is
|
||||
IERC1155Upgradeable(_token).safeBatchTransferFrom(msg.sender, address(this), _tokenIds, _amounts, "");
|
||||
|
||||
// 2. Generate message passed to L2ERC1155Gateway.
|
||||
bytes memory _message = abi.encodeWithSelector(
|
||||
IL2ERC1155Gateway.finalizeBatchDepositERC1155.selector,
|
||||
_token,
|
||||
_l2Token,
|
||||
msg.sender,
|
||||
_to,
|
||||
_tokenIds,
|
||||
_amounts
|
||||
bytes memory _message = abi.encodeCall(
|
||||
IL2ERC1155Gateway.finalizeBatchDepositERC1155,
|
||||
(_token, _l2Token, msg.sender, _to, _tokenIds, _amounts)
|
||||
);
|
||||
|
||||
// 3. Send message to L1ScrollMessenger.
|
||||
|
||||
@@ -14,7 +14,7 @@ import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallba
|
||||
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
|
||||
/// @title L1ERC721Gateway
|
||||
/// @notice The `L1ERC721Gateway` is used to deposit ERC721 compatible NFT in layer 1 and
|
||||
/// @notice The `L1ERC721Gateway` is used to deposit ERC721 compatible NFT on layer 1 and
|
||||
/// finalize withdraw the NFTs from layer 2.
|
||||
/// @dev The deposited NFTs are held in this gateway. On finalizing withdraw, the corresponding
|
||||
/// NFT will be transfer to the recipient directly.
|
||||
@@ -32,8 +32,8 @@ contract L1ERC721Gateway is
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when token mapping for ERC721 token is updated.
|
||||
/// @param _l1Token The address of ERC721 token in layer 1.
|
||||
/// @param _l1Token The address of corresponding ERC721 token in layer 2.
|
||||
/// @param _l1Token The address of ERC721 token on layer 1.
|
||||
/// @param _l2Token The address of corresponding ERC721 token on layer 2.
|
||||
event UpdateTokenMapping(address _l1Token, address _l2Token);
|
||||
|
||||
/*************
|
||||
@@ -168,8 +168,8 @@ contract L1ERC721Gateway is
|
||||
************************/
|
||||
|
||||
/// @notice Update layer 2 to layer 2 token mapping.
|
||||
/// @param _l1Token The address of ERC721 token in layer 1.
|
||||
/// @param _l1Token The address of corresponding ERC721 token in layer 2.
|
||||
/// @param _l1Token The address of ERC721 token on layer 1.
|
||||
/// @param _l2Token The address of corresponding ERC721 token on layer 2.
|
||||
function updateTokenMapping(address _l1Token, address _l2Token) external onlyOwner {
|
||||
require(_l2Token != address(0), "token address cannot be 0");
|
||||
|
||||
@@ -183,8 +183,8 @@ contract L1ERC721Gateway is
|
||||
**********************/
|
||||
|
||||
/// @dev Internal function to deposit ERC721 NFT to layer 2.
|
||||
/// @param _token The address of ERC721 NFT in layer 1.
|
||||
/// @param _to The address of recipient in layer 2.
|
||||
/// @param _token The address of ERC721 NFT on layer 1.
|
||||
/// @param _to The address of recipient on layer 2.
|
||||
/// @param _tokenId The token id to deposit.
|
||||
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
|
||||
function _depositERC721(
|
||||
@@ -200,13 +200,9 @@ contract L1ERC721Gateway is
|
||||
IERC721Upgradeable(_token).safeTransferFrom(msg.sender, address(this), _tokenId);
|
||||
|
||||
// 2. Generate message passed to L2ERC721Gateway.
|
||||
bytes memory _message = abi.encodeWithSelector(
|
||||
IL2ERC721Gateway.finalizeDepositERC721.selector,
|
||||
_token,
|
||||
_l2Token,
|
||||
msg.sender,
|
||||
_to,
|
||||
_tokenId
|
||||
bytes memory _message = abi.encodeCall(
|
||||
IL2ERC721Gateway.finalizeDepositERC721,
|
||||
(_token, _l2Token, msg.sender, _to, _tokenId)
|
||||
);
|
||||
|
||||
// 3. Send message to L1ScrollMessenger.
|
||||
@@ -216,8 +212,8 @@ contract L1ERC721Gateway is
|
||||
}
|
||||
|
||||
/// @dev Internal function to batch deposit ERC721 NFT to layer 2.
|
||||
/// @param _token The address of ERC721 NFT in layer 1.
|
||||
/// @param _to The address of recipient in layer 2.
|
||||
/// @param _token The address of ERC721 NFT on layer 1.
|
||||
/// @param _to The address of recipient on layer 2.
|
||||
/// @param _tokenIds The list of token ids to deposit.
|
||||
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
|
||||
function _batchDepositERC721(
|
||||
@@ -237,13 +233,9 @@ contract L1ERC721Gateway is
|
||||
}
|
||||
|
||||
// 2. Generate message passed to L2ERC721Gateway.
|
||||
bytes memory _message = abi.encodeWithSelector(
|
||||
IL2ERC721Gateway.finalizeBatchDepositERC721.selector,
|
||||
_token,
|
||||
_l2Token,
|
||||
msg.sender,
|
||||
_to,
|
||||
_tokenIds
|
||||
bytes memory _message = abi.encodeCall(
|
||||
IL2ERC721Gateway.finalizeBatchDepositERC721,
|
||||
(_token, _l2Token, msg.sender, _to, _tokenIds)
|
||||
);
|
||||
|
||||
// 3. Send message to L1ScrollMessenger.
|
||||
|
||||
@@ -14,7 +14,7 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
// solhint-disable avoid-low-level-calls
|
||||
|
||||
/// @title L1ETHGateway
|
||||
/// @notice The `L1ETHGateway` is used to deposit ETH in layer 1 and
|
||||
/// @notice The `L1ETHGateway` is used to deposit ETH on layer 1 and
|
||||
/// finalize withdraw ETH from layer 2.
|
||||
/// @dev The deposited ETH tokens are held in this gateway. On finalizing withdraw, the corresponding
|
||||
/// ETH will be transfer to the recipient directly.
|
||||
@@ -127,13 +127,7 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway, IMessa
|
||||
}
|
||||
|
||||
// 2. Generate message passed to L1ScrollMessenger.
|
||||
bytes memory _message = abi.encodeWithSelector(
|
||||
IL2ETHGateway.finalizeDepositETH.selector,
|
||||
_from,
|
||||
_to,
|
||||
_amount,
|
||||
_data
|
||||
);
|
||||
bytes memory _message = abi.encodeCall(IL2ETHGateway.finalizeDepositETH, (_from, _to, _amount, _data));
|
||||
|
||||
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, _amount, _message, _gasLimit, _from);
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
import {L1ERC20Gateway} from "./L1ERC20Gateway.sol";
|
||||
|
||||
/// @title L1StandardERC20Gateway
|
||||
/// @notice The `L1StandardERC20Gateway` is used to deposit standard ERC20 tokens in layer 1 and
|
||||
/// @notice The `L1StandardERC20Gateway` is used to deposit standard ERC20 tokens on layer 1 and
|
||||
/// finalize withdraw the tokens from layer 2.
|
||||
/// @dev The deposited ERC20 tokens are held in this gateway. On finalizing withdraw, the corresponding
|
||||
/// token will be transfer to the recipient directly. Any ERC20 that requires non-standard functionality
|
||||
@@ -150,14 +150,9 @@ contract L1StandardERC20Gateway is Initializable, ScrollGatewayBase, L1ERC20Gate
|
||||
} else {
|
||||
_l2Data = abi.encode(false, _data);
|
||||
}
|
||||
bytes memory _message = abi.encodeWithSelector(
|
||||
IL2ERC20Gateway.finalizeDepositERC20.selector,
|
||||
_token,
|
||||
_l2Token,
|
||||
_from,
|
||||
_to,
|
||||
_amount,
|
||||
_l2Data
|
||||
bytes memory _message = abi.encodeCall(
|
||||
IL2ERC20Gateway.finalizeDepositERC20,
|
||||
(_token, _l2Token, _from, _to, _amount, _l2Data)
|
||||
);
|
||||
|
||||
// 3. Send message to L1ScrollMessenger.
|
||||
|
||||
@@ -15,7 +15,7 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
import {L1ERC20Gateway} from "./L1ERC20Gateway.sol";
|
||||
|
||||
/// @title L1WETHGateway
|
||||
/// @notice The `L1WETHGateway` contract is used to deposit `WETH` token in layer 1 and
|
||||
/// @notice The `L1WETHGateway` contract is used to deposit `WETH` token on layer 1 and
|
||||
/// finalize withdraw `WETH` from layer 2.
|
||||
/// @dev The deposited WETH tokens are not held in the gateway. It will first be unwrapped
|
||||
/// as Ether and then the Ether will be sent to the `L1ScrollMessenger` contract.
|
||||
@@ -121,14 +121,9 @@ contract L1WETHGateway is Initializable, ScrollGatewayBase, L1ERC20Gateway {
|
||||
IWETH(_token).withdraw(_amount);
|
||||
|
||||
// 2. Generate message passed to L2WETHGateway.
|
||||
bytes memory _message = abi.encodeWithSelector(
|
||||
IL2ERC20Gateway.finalizeDepositERC20.selector,
|
||||
_token,
|
||||
l2WETH,
|
||||
_from,
|
||||
_to,
|
||||
_amount,
|
||||
_data
|
||||
bytes memory _message = abi.encodeCall(
|
||||
IL2ERC20Gateway.finalizeDepositERC20,
|
||||
(_token, l2WETH, _from, _to, _amount, _data)
|
||||
);
|
||||
|
||||
// 3. Send message to L1ScrollMessenger.
|
||||
|
||||
@@ -20,8 +20,8 @@ interface IScrollChain {
|
||||
/// @notice Emitted when a batch is finalized.
|
||||
/// @param batchIndex The index of the batch.
|
||||
/// @param batchHash The hash of the batch
|
||||
/// @param stateRoot The state root in layer 2 after this batch.
|
||||
/// @param withdrawRoot The merkle root in layer2 after this batch.
|
||||
/// @param stateRoot The state root on layer 2 after this batch.
|
||||
/// @param withdrawRoot The merkle root on layer2 after this batch.
|
||||
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
|
||||
|
||||
/*************************
|
||||
|
||||
@@ -15,6 +15,8 @@ import {AddressAliasHelper} from "../libraries/common/AddressAliasHelper.sol";
|
||||
import {IScrollMessenger} from "../libraries/IScrollMessenger.sol";
|
||||
import {ScrollMessengerBase} from "../libraries/ScrollMessengerBase.sol";
|
||||
|
||||
// solhint-disable reason-string
|
||||
|
||||
/// @title L2ScrollMessenger
|
||||
/// @notice The `L2ScrollMessenger` contract can:
|
||||
///
|
||||
@@ -22,7 +24,7 @@ import {ScrollMessengerBase} from "../libraries/ScrollMessengerBase.sol";
|
||||
/// 2. relay messages from layer 1 layer 2;
|
||||
/// 3. drop expired message due to sequencer problems.
|
||||
///
|
||||
/// @dev It should be a predeployed contract in layer 2 and should hold infinite amount
|
||||
/// @dev It should be a predeployed contract on layer 2 and should hold infinite amount
|
||||
/// of Ether (Specifically, `uint256(-1)`), which can be initialized in Genesis Block.
|
||||
contract L2ScrollMessenger is ScrollMessengerBase, PausableUpgradeable, IL2ScrollMessenger {
|
||||
/**********
|
||||
@@ -76,6 +78,7 @@ contract L2ScrollMessenger is ScrollMessengerBase, PausableUpgradeable, IL2Scrol
|
||||
/*****************************
|
||||
* Public Mutating Functions *
|
||||
*****************************/
|
||||
|
||||
/// @inheritdoc IScrollMessenger
|
||||
function sendMessage(
|
||||
address _to,
|
||||
@@ -134,6 +137,8 @@ contract L2ScrollMessenger is ScrollMessengerBase, PausableUpgradeable, IL2Scrol
|
||||
/// @dev This function can only called by contract owner.
|
||||
/// @param _maxFailedExecutionTimes The new max failed execution times.
|
||||
function updateMaxFailedExecutionTimes(uint256 _maxFailedExecutionTimes) external onlyOwner {
|
||||
require(_maxFailedExecutionTimes > 0, "maxFailedExecutionTimes cannot be zero");
|
||||
|
||||
maxFailedExecutionTimes = _maxFailedExecutionTimes;
|
||||
|
||||
emit UpdateMaxFailedExecutionTimes(_maxFailedExecutionTimes);
|
||||
|
||||
@@ -2,18 +2,18 @@
|
||||
|
||||
pragma solidity ^0.8.16;
|
||||
|
||||
/// @title The interface for the ERC1155 cross chain gateway in layer 2.
|
||||
/// @title The interface for the ERC1155 cross chain gateway on layer 2.
|
||||
interface IL2ERC1155Gateway {
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when the ERC1155 NFT is transfered to recipient in layer 2.
|
||||
/// @param l1Token The address of ERC1155 NFT in layer 1.
|
||||
/// @param l2Token The address of ERC1155 NFT in layer 2.
|
||||
/// @param from The address of sender in layer 1.
|
||||
/// @param to The address of recipient in layer 2.
|
||||
/// @param tokenId The token id of the ERC1155 NFT deposited in layer 1.
|
||||
/// @notice Emitted when the ERC1155 NFT is transfered to recipient on layer 2.
|
||||
/// @param l1Token The address of ERC1155 NFT on layer 1.
|
||||
/// @param l2Token The address of ERC1155 NFT on layer 2.
|
||||
/// @param from The address of sender on layer 1.
|
||||
/// @param to The address of recipient on layer 2.
|
||||
/// @param tokenId The token id of the ERC1155 NFT deposited on layer 1.
|
||||
/// @param amount The amount of token deposited.
|
||||
event FinalizeDepositERC1155(
|
||||
address indexed l1Token,
|
||||
@@ -24,12 +24,12 @@ interface IL2ERC1155Gateway {
|
||||
uint256 amount
|
||||
);
|
||||
|
||||
/// @notice Emitted when the ERC1155 NFT is batch transfered to recipient in layer 2.
|
||||
/// @param l1Token The address of ERC1155 NFT in layer 1.
|
||||
/// @param l2Token The address of ERC1155 NFT in layer 2.
|
||||
/// @param from The address of sender in layer 1.
|
||||
/// @param to The address of recipient in layer 2.
|
||||
/// @param tokenIds The list of token ids of the ERC1155 NFT deposited in layer 1.
|
||||
/// @notice Emitted when the ERC1155 NFT is batch transfered to recipient on layer 2.
|
||||
/// @param l1Token The address of ERC1155 NFT on layer 1.
|
||||
/// @param l2Token The address of ERC1155 NFT on layer 2.
|
||||
/// @param from The address of sender on layer 1.
|
||||
/// @param to The address of recipient on layer 2.
|
||||
/// @param tokenIds The list of token ids of the ERC1155 NFT deposited on layer 1.
|
||||
/// @param amounts The list of corresponding amounts deposited.
|
||||
event FinalizeBatchDepositERC1155(
|
||||
address indexed l1Token,
|
||||
@@ -40,12 +40,12 @@ interface IL2ERC1155Gateway {
|
||||
uint256[] amounts
|
||||
);
|
||||
|
||||
/// @notice Emitted when the ERC1155 NFT is transfered to gateway in layer 2.
|
||||
/// @param l1Token The address of ERC1155 NFT in layer 1.
|
||||
/// @param l2Token The address of ERC1155 NFT in layer 2.
|
||||
/// @param from The address of sender in layer 2.
|
||||
/// @param to The address of recipient in layer 1.
|
||||
/// @param tokenId The token id of the ERC1155 NFT to withdraw in layer 2.
|
||||
/// @notice Emitted when the ERC1155 NFT is transfered to gateway on layer 2.
|
||||
/// @param l1Token The address of ERC1155 NFT on layer 1.
|
||||
/// @param l2Token The address of ERC1155 NFT on layer 2.
|
||||
/// @param from The address of sender on layer 2.
|
||||
/// @param to The address of recipient on layer 1.
|
||||
/// @param tokenId The token id of the ERC1155 NFT to withdraw on layer 2.
|
||||
/// @param amount The amount of token to withdraw.
|
||||
event WithdrawERC1155(
|
||||
address indexed l1Token,
|
||||
@@ -56,12 +56,12 @@ interface IL2ERC1155Gateway {
|
||||
uint256 amount
|
||||
);
|
||||
|
||||
/// @notice Emitted when the ERC1155 NFT is batch transfered to gateway in layer 2.
|
||||
/// @param l1Token The address of ERC1155 NFT in layer 1.
|
||||
/// @param l2Token The address of ERC1155 NFT in layer 2.
|
||||
/// @param from The address of sender in layer 2.
|
||||
/// @param to The address of recipient in layer 1.
|
||||
/// @param tokenIds The list of token ids of the ERC1155 NFT to withdraw in layer 2.
|
||||
/// @notice Emitted when the ERC1155 NFT is batch transfered to gateway on layer 2.
|
||||
/// @param l1Token The address of ERC1155 NFT on layer 1.
|
||||
/// @param l2Token The address of ERC1155 NFT on layer 2.
|
||||
/// @param from The address of sender on layer 2.
|
||||
/// @param to The address of recipient on layer 1.
|
||||
/// @param tokenIds The list of token ids of the ERC1155 NFT to withdraw on layer 2.
|
||||
/// @param amounts The list of corresponding amounts to withdraw.
|
||||
event BatchWithdrawERC1155(
|
||||
address indexed l1Token,
|
||||
@@ -77,7 +77,7 @@ interface IL2ERC1155Gateway {
|
||||
*****************************/
|
||||
|
||||
/// @notice Withdraw some ERC1155 NFT to caller's account on layer 1.
|
||||
/// @param token The address of ERC1155 NFT in layer 2.
|
||||
/// @param token The address of ERC1155 NFT on layer 2.
|
||||
/// @param tokenId The token id to withdraw.
|
||||
/// @param amount The amount of token to withdraw.
|
||||
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
|
||||
@@ -89,8 +89,8 @@ interface IL2ERC1155Gateway {
|
||||
) external payable;
|
||||
|
||||
/// @notice Withdraw some ERC1155 NFT to caller's account on layer 1.
|
||||
/// @param token The address of ERC1155 NFT in layer 2.
|
||||
/// @param to The address of recipient in layer 1.
|
||||
/// @param token The address of ERC1155 NFT on layer 2.
|
||||
/// @param to The address of recipient on layer 1.
|
||||
/// @param tokenId The token id to withdraw.
|
||||
/// @param amount The amount of token to withdraw.
|
||||
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
|
||||
@@ -103,7 +103,7 @@ interface IL2ERC1155Gateway {
|
||||
) external payable;
|
||||
|
||||
/// @notice Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
|
||||
/// @param token The address of ERC1155 NFT in layer 2.
|
||||
/// @param token The address of ERC1155 NFT on layer 2.
|
||||
/// @param tokenIds The list of token ids to withdraw.
|
||||
/// @param amounts The list of corresponding amounts to withdraw.
|
||||
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
|
||||
@@ -115,8 +115,8 @@ interface IL2ERC1155Gateway {
|
||||
) external payable;
|
||||
|
||||
/// @notice Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
|
||||
/// @param token The address of ERC1155 NFT in layer 2.
|
||||
/// @param to The address of recipient in layer 1.
|
||||
/// @param token The address of ERC1155 NFT on layer 2.
|
||||
/// @param to The address of recipient on layer 1.
|
||||
/// @param tokenIds The list of token ids to withdraw.
|
||||
/// @param amounts The list of corresponding amounts to withdraw.
|
||||
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
|
||||
@@ -128,14 +128,14 @@ interface IL2ERC1155Gateway {
|
||||
uint256 gasLimit
|
||||
) external payable;
|
||||
|
||||
/// @notice Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
|
||||
/// @notice Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account on layer 2.
|
||||
/// @dev Requirements:
|
||||
/// - The function should only be called by L2ScrollMessenger.
|
||||
/// - The function should also only be called by L1ERC1155Gateway in layer 1.
|
||||
/// - The function should also only be called by L1ERC1155Gateway on layer 1.
|
||||
/// @param l1Token The address of corresponding layer 1 token.
|
||||
/// @param l2Token The address of corresponding layer 2 token.
|
||||
/// @param from The address of account who deposits the token in layer 1.
|
||||
/// @param to The address of recipient in layer 2 to receive the token.
|
||||
/// @param from The address of account who deposits the token on layer 1.
|
||||
/// @param to The address of recipient on layer 2 to receive the token.
|
||||
/// @param tokenId The token id to deposit.
|
||||
/// @param amount The amount of token to deposit.
|
||||
function finalizeDepositERC1155(
|
||||
@@ -147,14 +147,14 @@ interface IL2ERC1155Gateway {
|
||||
uint256 amount
|
||||
) external;
|
||||
|
||||
/// @notice Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
|
||||
/// @notice Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account on layer 2.
|
||||
/// @dev Requirements:
|
||||
/// - The function should only be called by L2ScrollMessenger.
|
||||
/// - The function should also only be called by L1ERC1155Gateway in layer 1.
|
||||
/// - The function should also only be called by L1ERC1155Gateway on layer 1.
|
||||
/// @param l1Token The address of corresponding layer 1 token.
|
||||
/// @param l2Token The address of corresponding layer 2 token.
|
||||
/// @param from The address of account who deposits the token in layer 1.
|
||||
/// @param to The address of recipient in layer 2 to receive the token.
|
||||
/// @param from The address of account who deposits the token on layer 1.
|
||||
/// @param to The address of recipient on layer 2 to receive the token.
|
||||
/// @param tokenIds The list of token ids to deposit.
|
||||
/// @param amounts The list of corresponding amounts to deposit.
|
||||
function finalizeBatchDepositERC1155(
|
||||
|
||||
@@ -2,18 +2,18 @@
|
||||
|
||||
pragma solidity ^0.8.16;
|
||||
|
||||
/// @title The interface for the ERC721 cross chain gateway in layer 2.
|
||||
/// @title The interface for the ERC721 cross chain gateway on layer 2.
|
||||
interface IL2ERC721Gateway {
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when the ERC721 NFT is transfered to recipient in layer 2.
|
||||
/// @param l1Token The address of ERC721 NFT in layer 1.
|
||||
/// @param l2Token The address of ERC721 NFT in layer 2.
|
||||
/// @param from The address of sender in layer 1.
|
||||
/// @param to The address of recipient in layer 2.
|
||||
/// @param tokenId The token id of the ERC721 NFT deposited in layer 1.
|
||||
/// @notice Emitted when the ERC721 NFT is transfered to recipient on layer 2.
|
||||
/// @param l1Token The address of ERC721 NFT on layer 1.
|
||||
/// @param l2Token The address of ERC721 NFT on layer 2.
|
||||
/// @param from The address of sender on layer 1.
|
||||
/// @param to The address of recipient on layer 2.
|
||||
/// @param tokenId The token id of the ERC721 NFT deposited on layer 1.
|
||||
event FinalizeDepositERC721(
|
||||
address indexed l1Token,
|
||||
address indexed l2Token,
|
||||
@@ -22,12 +22,12 @@ interface IL2ERC721Gateway {
|
||||
uint256 tokenId
|
||||
);
|
||||
|
||||
/// @notice Emitted when the ERC721 NFT is batch transfered to recipient in layer 2.
|
||||
/// @param l1Token The address of ERC721 NFT in layer 1.
|
||||
/// @param l2Token The address of ERC721 NFT in layer 2.
|
||||
/// @param from The address of sender in layer 1.
|
||||
/// @param to The address of recipient in layer 2.
|
||||
/// @param tokenIds The list of token ids of the ERC721 NFT deposited in layer 1.
|
||||
/// @notice Emitted when the ERC721 NFT is batch transfered to recipient on layer 2.
|
||||
/// @param l1Token The address of ERC721 NFT on layer 1.
|
||||
/// @param l2Token The address of ERC721 NFT on layer 2.
|
||||
/// @param from The address of sender on layer 1.
|
||||
/// @param to The address of recipient on layer 2.
|
||||
/// @param tokenIds The list of token ids of the ERC721 NFT deposited on layer 1.
|
||||
event FinalizeBatchDepositERC721(
|
||||
address indexed l1Token,
|
||||
address indexed l2Token,
|
||||
@@ -36,12 +36,12 @@ interface IL2ERC721Gateway {
|
||||
uint256[] tokenIds
|
||||
);
|
||||
|
||||
/// @notice Emitted when the ERC721 NFT is transfered to gateway in layer 2.
|
||||
/// @param l1Token The address of ERC721 NFT in layer 1.
|
||||
/// @param l2Token The address of ERC721 NFT in layer 2.
|
||||
/// @param from The address of sender in layer 2.
|
||||
/// @param to The address of recipient in layer 1.
|
||||
/// @param tokenId The token id of the ERC721 NFT to withdraw in layer 2.
|
||||
/// @notice Emitted when the ERC721 NFT is transfered to gateway on layer 2.
|
||||
/// @param l1Token The address of ERC721 NFT on layer 1.
|
||||
/// @param l2Token The address of ERC721 NFT on layer 2.
|
||||
/// @param from The address of sender on layer 2.
|
||||
/// @param to The address of recipient on layer 1.
|
||||
/// @param tokenId The token id of the ERC721 NFT to withdraw on layer 2.
|
||||
event WithdrawERC721(
|
||||
address indexed l1Token,
|
||||
address indexed l2Token,
|
||||
@@ -50,12 +50,12 @@ interface IL2ERC721Gateway {
|
||||
uint256 tokenId
|
||||
);
|
||||
|
||||
/// @notice Emitted when the ERC721 NFT is batch transfered to gateway in layer 2.
|
||||
/// @param l1Token The address of ERC721 NFT in layer 1.
|
||||
/// @param l2Token The address of ERC721 NFT in layer 2.
|
||||
/// @param from The address of sender in layer 2.
|
||||
/// @param to The address of recipient in layer 1.
|
||||
/// @param tokenIds The list of token ids of the ERC721 NFT to withdraw in layer 2.
|
||||
/// @notice Emitted when the ERC721 NFT is batch transfered to gateway on layer 2.
/// @param l1Token The address of ERC721 NFT on layer 1.
/// @param l2Token The address of ERC721 NFT on layer 2.
/// @param from The address of sender on layer 2.
/// @param to The address of recipient on layer 1.
/// @param tokenIds The list of token ids of the ERC721 NFT to withdraw on layer 2.
event BatchWithdrawERC721(
address indexed l1Token,
address indexed l2Token,
@@ -69,7 +69,7 @@ interface IL2ERC721Gateway {
*****************************/

/// @notice Withdraw some ERC721 NFT to caller's account on layer 1.
/// @param token The address of ERC721 NFT in layer 2.
/// @param token The address of ERC721 NFT on layer 2.
/// @param tokenId The token id to withdraw.
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
function withdrawERC721(
@@ -79,8 +79,8 @@ interface IL2ERC721Gateway {
) external payable;

/// @notice Withdraw some ERC721 NFT to caller's account on layer 1.
/// @param token The address of ERC721 NFT in layer 2.
/// @param to The address of recipient in layer 1.
/// @param token The address of ERC721 NFT on layer 2.
/// @param to The address of recipient on layer 1.
/// @param tokenId The token id to withdraw.
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
function withdrawERC721(
@@ -91,7 +91,7 @@ interface IL2ERC721Gateway {
) external payable;

/// @notice Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
/// @param token The address of ERC721 NFT in layer 2.
/// @param token The address of ERC721 NFT on layer 2.
/// @param tokenIds The list of token ids to withdraw.
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
function batchWithdrawERC721(
@@ -101,8 +101,8 @@ interface IL2ERC721Gateway {
) external payable;

/// @notice Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
/// @param token The address of ERC721 NFT in layer 2.
/// @param to The address of recipient in layer 1.
/// @param token The address of ERC721 NFT on layer 2.
/// @param to The address of recipient on layer 1.
/// @param tokenIds The list of token ids to withdraw.
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
function batchWithdrawERC721(
@@ -112,14 +112,14 @@ interface IL2ERC721Gateway {
uint256 gasLimit
) external payable;

/// @notice Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
/// @notice Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account on layer 2.
/// @dev Requirements:
/// - The function should only be called by L2ScrollMessenger.
/// - The function should also only be called by L1ERC721Gateway in layer 1.
/// - The function should also only be called by L1ERC721Gateway on layer 1.
/// @param l1Token The address of corresponding layer 1 token.
/// @param l2Token The address of corresponding layer 2 token.
/// @param from The address of account who withdraw the token in layer 1.
/// @param to The address of recipient in layer 2 to receive the token.
/// @param from The address of account who withdraw the token on layer 1.
/// @param to The address of recipient on layer 2 to receive the token.
/// @param tokenId The token id to withdraw.
function finalizeDepositERC721(
address l1Token,
@@ -129,14 +129,14 @@ interface IL2ERC721Gateway {
uint256 tokenId
) external;

/// @notice Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
/// @notice Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account on layer 2.
/// @dev Requirements:
/// - The function should only be called by L2ScrollMessenger.
/// - The function should also only be called by L1ERC721Gateway in layer 1.
/// - The function should also only be called by L1ERC721Gateway on layer 1.
/// @param l1Token The address of corresponding layer 1 token.
/// @param l2Token The address of corresponding layer 2 token.
/// @param from The address of account who withdraw the token in layer 1.
/// @param to The address of recipient in layer 2 to receive the token.
/// @param from The address of account who withdraw the token on layer 1.
/// @param to The address of recipient on layer 2 to receive the token.
/// @param tokenIds The list of token ids to withdraw.
function finalizeBatchDepositERC721(
address l1Token,
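Note: the IL2ERC721Gateway hunks above document single and batch withdrawal entry points whose trailing gasLimit parameter is unused but retained for forward compatibility. Below is a minimal caller sketch; the IL2ERC721GatewayLike interface, the exact three-argument signatures, and the ExampleNFTWithdrawer contract are assumptions inferred from the documented parameters, not code from this diff.

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;

// Hypothetical, trimmed-down interface inferred from the documented parameters
// above (token, tokenId/tokenIds, unused gasLimit).
interface IL2ERC721GatewayLike {
    function withdrawERC721(address token, uint256 tokenId, uint256 gasLimit) external payable;
    function batchWithdrawERC721(address token, uint256[] calldata tokenIds, uint256 gasLimit) external payable;
}

// Illustrative caller. From the gateway's point of view the caller is this
// contract, so the withdrawn NFTs are credited to this contract's address on layer 1.
contract ExampleNFTWithdrawer {
    IL2ERC721GatewayLike public immutable gateway; // placeholder gateway address, set at deployment

    constructor(IL2ERC721GatewayLike _gateway) {
        gateway = _gateway;
    }

    function withdrawOne(address token, uint256 tokenId) external payable {
        // gasLimit is documented as unused, so 0 is passed here;
        // msg.value forwards any cross-domain message fee.
        gateway.withdrawERC721{value: msg.value}(token, tokenId, 0);
    }

    function withdrawMany(address token, uint256[] calldata tokenIds) external payable {
        gateway.batchWithdrawERC721{value: msg.value}(token, tokenIds, 0);
    }
}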
@@ -12,7 +12,7 @@ import {ScrollGatewayBase, IScrollGateway} from "../../libraries/gateway/ScrollG
import {IScrollERC20} from "../../libraries/token/IScrollERC20.sol";

/// @title L2ERC20Gateway
/// @notice The `L2ERC20Gateway` is used to withdraw custom ERC20 compatible tokens in layer 2 and
/// @notice The `L2ERC20Gateway` is used to withdraw custom ERC20 compatible tokens on layer 2 and
/// finalize deposit the tokens from layer 1.
/// @dev The withdrawn tokens tokens will be burned directly. On finalizing deposit, the corresponding
/// tokens will be minted and transfered to the recipient.
@@ -22,8 +22,8 @@ contract L2CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L2ERC20G
**********/

/// @notice Emitted when token mapping for ERC20 token is updated.
/// @param _l2Token The address of corresponding ERC20 token in layer 2.
/// @param _l1Token The address of ERC20 token in layer 1.
/// @param _l2Token The address of corresponding ERC20 token on layer 2.
/// @param _l1Token The address of ERC20 token on layer 1.
event UpdateTokenMapping(address _l2Token, address _l1Token);

/*************
@@ -95,8 +95,8 @@ contract L2CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L2ERC20G
************************/

/// @notice Update layer 2 to layer 1 token mapping.
/// @param _l2Token The address of corresponding ERC20 token in layer 2.
/// @param _l1Token The address of ERC20 token in layer 1.
/// @param _l2Token The address of corresponding ERC20 token on layer 2.
/// @param _l1Token The address of ERC20 token on layer 1.
function updateTokenMapping(address _l2Token, address _l1Token) external onlyOwner {
require(_l1Token != address(0), "token address cannot be 0");

@@ -132,14 +132,9 @@ contract L2CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L2ERC20G
IScrollERC20(_token).burn(_from, _amount);

// 3. Generate message passed to L1StandardERC20Gateway.
bytes memory _message = abi.encodeWithSelector(
IL1ERC20Gateway.finalizeWithdrawERC20.selector,
_l1Token,
_token,
_from,
_to,
_amount,
_data
bytes memory _message = abi.encodeCall(
IL1ERC20Gateway.finalizeWithdrawERC20,
(_l1Token, _token, _from, _to, _amount, _data)
);

// 4. send message to L2ScrollMessenger
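Note: the hunk at line 132 swaps abi.encodeWithSelector for abi.encodeCall when building the cross-domain message. Both produce the same calldata, but abi.encodeCall (available since Solidity 0.8.11) type-checks the arguments against the referenced function at compile time, whereas encodeWithSelector silently accepts mismatched types. The sketch below illustrates the difference; the six-argument finalizeWithdrawERC20 signature is an assumption based on the argument list in the diff, and the EncodeComparison contract is purely illustrative. The same substitution is applied to the ERC1155 and ERC721 gateways further down.

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;

// Assumed counterpart interface; the parameter list mirrors the six arguments
// passed in the diff (_l1Token, _token, _from, _to, _amount, _data).
interface IL1ERC20Gateway {
    function finalizeWithdrawERC20(
        address l1Token,
        address l2Token,
        address from,
        address to,
        uint256 amount,
        bytes calldata data
    ) external payable;
}

contract EncodeComparison {
    // Pre-change style: selector plus loosely typed arguments. A wrong argument
    // type or order still compiles and only misbehaves at runtime.
    function oldEncoding(
        address l1Token, address l2Token, address from, address to, uint256 amount, bytes memory data
    ) external pure returns (bytes memory) {
        return abi.encodeWithSelector(
            IL1ERC20Gateway.finalizeWithdrawERC20.selector,
            l1Token, l2Token, from, to, amount, data
        );
    }

    // Post-change style: abi.encodeCall verifies the tuple against the function's
    // signature at compile time and yields byte-identical calldata.
    function newEncoding(
        address l1Token, address l2Token, address from, address to, uint256 amount, bytes memory data
    ) external pure returns (bytes memory) {
        return abi.encodeCall(
            IL1ERC20Gateway.finalizeWithdrawERC20,
            (l1Token, l2Token, from, to, amount, data)
        );
    }
}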
@@ -13,7 +13,7 @@ import {ScrollGatewayBase, IScrollGateway} from "../../libraries/gateway/ScrollG
import {IScrollERC1155} from "../../libraries/token/IScrollERC1155.sol";

/// @title L2ERC1155Gateway
/// @notice The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs in layer 2 and
/// @notice The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs on layer 2 and
/// finalize deposit the NFTs from layer 1.
/// @dev The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding
/// NFT will be minted and transfered to the recipient.
@@ -25,8 +25,8 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
**********/

/// @notice Emitted when token mapping for ERC1155 token is updated.
/// @param _l1Token The address of corresponding ERC1155 token in layer 2.
/// @param _l1Token The address of ERC1155 token in layer 1.
/// @param _l2Token The address of corresponding ERC1155 token on layer 2.
/// @param _l1Token The address of ERC1155 token on layer 1.
event UpdateTokenMapping(address _l2Token, address _l1Token);

/*************
@@ -137,8 +137,8 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
************************/

/// @notice Update layer 2 to layer 1 token mapping.
/// @param _l1Token The address of corresponding ERC1155 token in layer 2.
/// @param _l1Token The address of ERC1155 token in layer 1.
/// @param _l2Token The address of corresponding ERC1155 token on layer 2.
/// @param _l1Token The address of ERC1155 token on layer 1.
function updateTokenMapping(address _l2Token, address _l1Token) external onlyOwner {
require(_l1Token != address(0), "token address cannot be 0");

@@ -152,8 +152,8 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
**********************/

/// @dev Internal function to withdraw ERC1155 NFT to layer 2.
/// @param _token The address of ERC1155 NFT in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _token The address of ERC1155 NFT on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenId The token id to withdraw.
/// @param _amount The amount of token to withdraw.
/// @param _gasLimit Estimated gas limit required to complete the withdraw on layer 2.
@@ -173,14 +173,9 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
IScrollERC1155(_token).burn(msg.sender, _tokenId, _amount);

// 2. Generate message passed to L1ERC1155Gateway.
bytes memory _message = abi.encodeWithSelector(
IL1ERC1155Gateway.finalizeWithdrawERC1155.selector,
_l1Token,
_token,
msg.sender,
_to,
_tokenId,
_amount
bytes memory _message = abi.encodeCall(
IL1ERC1155Gateway.finalizeWithdrawERC1155,
(_l1Token, _token, msg.sender, _to, _tokenId, _amount)
);

// 3. Send message to L2ScrollMessenger.
@@ -190,8 +185,8 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
}

/// @dev Internal function to batch withdraw ERC1155 NFT to layer 2.
/// @param _token The address of ERC1155 NFT in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _token The address of ERC1155 NFT on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenIds The list of token ids to withdraw.
/// @param _amounts The list of corresponding number of token to withdraw.
/// @param _gasLimit Estimated gas limit required to complete the withdraw on layer 1.
@@ -216,14 +211,9 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
IScrollERC1155(_token).batchBurn(msg.sender, _tokenIds, _amounts);

// 2. Generate message passed to L1ERC1155Gateway.
bytes memory _message = abi.encodeWithSelector(
IL1ERC1155Gateway.finalizeBatchWithdrawERC1155.selector,
_l1Token,
_token,
msg.sender,
_to,
_tokenIds,
_amounts
bytes memory _message = abi.encodeCall(
IL1ERC1155Gateway.finalizeBatchWithdrawERC1155,
(_l1Token, _token, msg.sender, _to, _tokenIds, _amounts)
);

// 3. Send message to L2ScrollMessenger.
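Note: the ERC20 and ERC1155 gateways above share the same owner-guarded updateTokenMapping entry point with a zero-address check and an UpdateTokenMapping event. The fragment below sketches that pattern in isolation; the TokenMappingSketch contract, its tokenMapping storage variable, and the hand-rolled owner check (standing in for the OwnableUpgradeable base used by the actual contracts) are assumptions for illustration, not the repository's implementation.

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;

// Minimal, self-contained sketch of the owner-guarded token-mapping pattern
// shown in the diff (event + zero-address check).
contract TokenMappingSketch {
    /// @notice Emitted when the layer 2 to layer 1 token mapping is updated.
    event UpdateTokenMapping(address _l2Token, address _l1Token);

    address public owner;

    /// @notice Assumed storage: layer 2 token address => layer 1 counterpart.
    mapping(address => address) public tokenMapping;

    constructor() {
        owner = msg.sender;
    }

    modifier onlyOwner() {
        require(msg.sender == owner, "caller is not the owner");
        _;
    }

    /// @notice Update layer 2 to layer 1 token mapping.
    function updateTokenMapping(address _l2Token, address _l1Token) external onlyOwner {
        require(_l1Token != address(0), "token address cannot be 0");
        tokenMapping[_l2Token] = _l1Token;
        emit UpdateTokenMapping(_l2Token, _l1Token);
    }
}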
@@ -13,7 +13,7 @@ import {ScrollGatewayBase, IScrollGateway} from "../../libraries/gateway/ScrollG
import {IScrollERC721} from "../../libraries/token/IScrollERC721.sol";

/// @title L2ERC721Gateway
/// @notice The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs in layer 2 and
/// @notice The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs on layer 2 and
/// finalize deposit the NFTs from layer 1.
/// @dev The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding
/// NFT will be minted and transfered to the recipient.
@@ -25,8 +25,8 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
**********/

/// @notice Emitted when token mapping for ERC721 token is updated.
/// @param _l1Token The address of corresponding ERC721 token in layer 2.
/// @param _l1Token The address of ERC721 token in layer 1.
/// @param _l2Token The address of corresponding ERC721 token on layer 2.
/// @param _l1Token The address of ERC721 token on layer 1.
event UpdateTokenMapping(address _l2Token, address _l1Token);

/*************
@@ -132,8 +132,8 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
************************/

/// @notice Update layer 2 to layer 1 token mapping.
/// @param _l1Token The address of corresponding ERC721 token in layer 2.
/// @param _l1Token The address of ERC721 token in layer 1.
/// @param _l2Token The address of corresponding ERC721 token on layer 2.
/// @param _l1Token The address of ERC721 token on layer 1.
function updateTokenMapping(address _l2Token, address _l1Token) external onlyOwner {
require(_l1Token != address(0), "token address cannot be 0");

@@ -147,8 +147,8 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
**********************/

/// @dev Internal function to withdraw ERC721 NFT to layer 1.
/// @param _token The address of ERC721 NFT in layer 2.
/// @param _to The address of recipient in layer 1.
/// @param _token The address of ERC721 NFT on layer 2.
/// @param _to The address of recipient on layer 1.
/// @param _tokenId The token id to withdraw.
/// @param _gasLimit Estimated gas limit required to complete the withdraw on layer 1.
function _withdrawERC721(
@@ -166,13 +166,9 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
IScrollERC721(_token).burn(_tokenId);

// 2. Generate message passed to L1ERC721Gateway.
bytes memory _message = abi.encodeWithSelector(
IL1ERC721Gateway.finalizeWithdrawERC721.selector,
_l1Token,
_token,
msg.sender,
_to,
_tokenId
bytes memory _message = abi.encodeCall(
IL1ERC721Gateway.finalizeWithdrawERC721,
(_l1Token, _token, msg.sender, _to, _tokenId)
);

// 3. Send message to L2ScrollMessenger.
@@ -182,8 +178,8 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
}

/// @dev Internal function to batch withdraw ERC721 NFT to layer 1.
/// @param _token The address of ERC721 NFT in layer 2.
/// @param _to The address of recipient in layer 1.
/// @param _token The address of ERC721 NFT on layer 2.
/// @param _to The address of recipient on layer 1.
/// @param _tokenIds The list of token ids to withdraw.
/// @param _gasLimit Estimated gas limit required to complete the withdraw on layer 1.
function _batchWithdrawERC721(
@@ -205,13 +201,9 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
}

// 2. Generate message passed to L1ERC721Gateway.
bytes memory _message = abi.encodeWithSelector(
IL1ERC721Gateway.finalizeBatchWithdrawERC721.selector,
_l1Token,
_token,
msg.sender,
_to,
_tokenIds
bytes memory _message = abi.encodeCall(
IL1ERC721Gateway.finalizeBatchWithdrawERC721,
(_l1Token, _token, msg.sender, _to, _tokenIds)
);

// 3. Send message to L2ScrollMessenger.
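Note: the finalizeDeposit* requirements quoted in the IL2ERC721Gateway hunks above (callable only by L2ScrollMessenger, and only when initiated by L1ERC721Gateway on layer 1) are typically enforced by checking both the direct caller and the cross-domain sender reported by the messenger. The modifier below sketches one such check; the IScrollMessengerLike interface, its xDomainMessageSender accessor, and the CounterpartGuard contract are illustrative assumptions rather than the repository's actual code.

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;

// Assumed messenger interface: exposes the original layer 1 sender of the
// message currently being relayed.
interface IScrollMessengerLike {
    function xDomainMessageSender() external view returns (address);
}

// Illustrative base showing how "only L2ScrollMessenger, only from the L1
// counterpart gateway" could be enforced for finalizeDepositERC721-style calls.
abstract contract CounterpartGuard {
    address public immutable messenger;   // local L2ScrollMessenger address (assumed)
    address public immutable counterpart; // L1ERC721Gateway address on layer 1 (assumed)

    constructor(address _messenger, address _counterpart) {
        messenger = _messenger;
        counterpart = _counterpart;
    }

    modifier onlyCallByCounterpart() {
        // The direct caller must be the messenger contract...
        require(msg.sender == messenger, "only messenger can call");
        // ...and the messenger must report the L1 counterpart as the originator.
        require(
            IScrollMessengerLike(messenger).xDomainMessageSender() == counterpart,
            "only call by counterpart"
        );
        _;
    }
}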
Some files were not shown because too many files have changed in this diff.