Compare commits


5 Commits

Author   SHA1        Message                 Date
vincent  2b7b7dab86  fix                     2023-07-05 13:24:10 +08:00
vincent  075d25ac87  update select           2023-07-05 13:14:57 +08:00
vincent  f87dca41e6  bump version            2023-07-05 12:25:29 +08:00
vincent  507ee571f6  fix                     2023-07-05 12:10:49 +08:00
vincent  9651e1ca6e  update db insert logic  2023-07-05 11:55:09 +08:00
359 changed files with 7986 additions and 24972 deletions

View File

@@ -25,20 +25,20 @@ defaults:
 working-directory: 'bridge-history-api'
 jobs:
-check:
-if: github.event.pull_request.draft == false
-runs-on: ubuntu-latest
-steps:
-- name: Install Go
-uses: actions/setup-go@v2
-with:
-go-version: 1.19.x
-- name: Checkout code
-uses: actions/checkout@v2
-- name: Lint
-run: |
-rm -rf $HOME/.cache/golangci-lint
-make lint
+# check:
+#   if: github.event.pull_request.draft == false
+#   runs-on: ubuntu-latest
+#   steps:
+#     - name: Install Go
+#       uses: actions/setup-go@v2
+#       with:
+#         go-version: 1.19.x
+#     - name: Checkout code
+#       uses: actions/checkout@v2
+#     - name: Lint
+#       run: |
+#         rm -rf $HOME/.cache/golangci-lint
+#         make lint
 test:
 if: github.event.pull_request.draft == false
 runs-on: ubuntu-latest

View File

@@ -1,80 +0,0 @@
name: ProverStatsAPI
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
defaults:
run:
working-directory: 'prover-stats-api'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: prover-stats-api
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/prover-stats-api/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi

View File

@@ -29,25 +29,6 @@ jobs:
 if: github.event.pull_request.draft == false
 runs-on: ubuntu-latest
 steps:
-- name: Install Go
-uses: actions/setup-go@v2
-with:
-go-version: 1.19.x
-- name: Checkout code
-uses: actions/checkout@v2
-- name: Test
-run: |
-go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
-- name: Upload coverage reports to Codecov
-uses: codecov/codecov-action@v3
-env:
-CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
-with:
-flags: roller
-compile:
-if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
-runs-on: ubuntu-latest
-steps:
 - uses: actions-rs/toolchain@v1
 with:
 toolchain: nightly-2022-12-10
@@ -66,6 +47,13 @@ jobs:
 - name: Test
 run: |
 make roller
+go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
+- name: Upload coverage reports to Codecov
+uses: codecov/codecov-action@v3
+env:
+CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+with:
+flags: roller
 check:
 if: github.event.pull_request.draft == false
 runs-on: ubuntu-latest

View File

@@ -39,12 +39,6 @@ var (
 L2WithdrawERC721Sig common.Hash
 L2WithdrawERC1155Sig common.Hash
-// batch nft sigs
-L1BatchDepositERC721Sig common.Hash
-L1BatchDepositERC1155Sig common.Hash
-L2BatchWithdrawERC721Sig common.Hash
-L2BatchWithdrawERC1155Sig common.Hash
 // scroll mono repo
 // ScrollChainABI holds information about ScrollChain's context and available invokable methods.
@@ -122,12 +116,6 @@ func init() {
 L2ERC1155GatewayABI, _ = L2ERC1155GatewayMetaData.GetAbi()
 L2WithdrawERC1155Sig = L2ERC1155GatewayABI.Events["WithdrawERC1155"].ID
-// batch nft events
-L1BatchDepositERC721Sig = L1ERC721GatewayABI.Events["BatchDepositERC721"].ID
-L1BatchDepositERC1155Sig = L1ERC1155GatewayABI.Events["BatchDepositERC1155"].ID
-L2BatchWithdrawERC721Sig = L2ERC721GatewayABI.Events["BatchWithdrawERC721"].ID
-L2BatchWithdrawERC1155Sig = L2ERC1155GatewayABI.Events["BatchWithdrawERC1155"].ID
 // scroll monorepo
 ScrollChainABI, _ = ScrollChainMetaData.GetAbi()
 ScrollChainV2ABI, _ = ScrollChainV2MetaData.GetAbi()
@@ -289,23 +277,6 @@ type ERC1155MessageEvent struct {
 Amount *big.Int
 }
-type BatchERC721MessageEvent struct {
-L1Token common.Address
-L2Token common.Address
-From common.Address
-To common.Address
-TokenIDs []*big.Int
-}
-type BatchERC1155MessageEvent struct {
-L1Token common.Address
-L2Token common.Address
-From common.Address
-To common.Address
-TokenIDs []*big.Int
-TokenAmounts []*big.Int
-}
 // scroll monorepo
 // L1SentMessageEvent represents a SentMessage event raised by the L1ScrollMessenger contract.

View File

@@ -24,40 +24,26 @@ var (
 var database db.OrmFactory
 func pong(ctx iris.Context) {
-_, err := ctx.WriteString("pong")
-if err != nil {
-log.Error("failed to write pong", "err", err)
-}
+ctx.WriteString("pong")
 }
-func setupQueryByAddressHandler(backendApp *mvc.Application) {
+func setupQueryByAddressHandler(backend_app *mvc.Application) {
 // Register Dependencies.
-backendApp.Register(
+backend_app.Register(
 database,
 service.NewHistoryService,
 )
 // Register Controllers.
-backendApp.Handle(new(controller.QueryAddressController))
+backend_app.Handle(new(controller.QueryAddressController))
 }
-func setupQueryClaimableHandler(backendApp *mvc.Application) {
-// Register Dependencies.
-backendApp.Register(
+func setupQueryByHashHandler(backend_app *mvc.Application) {
+backend_app.Register(
 database,
 service.NewHistoryService,
 )
-// Register Controllers.
-backendApp.Handle(new(controller.QueryClaimableController))
-}
-func setupQueryByHashHandler(backendApp *mvc.Application) {
-backendApp.Register(
-database,
-service.NewHistoryService,
-)
-backendApp.Handle(new(controller.QueryHashController))
+backend_app.Handle(new(controller.QueryHashController))
 }
 func init() {
@@ -90,18 +76,13 @@ func action(ctx *cli.Context) error {
 if err != nil {
 log.Crit("can not connect to database", "err", err)
 }
-defer func() {
-if err = database.Close(); err != nil {
-log.Error("failed to close database", "err", err)
-}
-}()
+defer database.Close()
 bridgeApp := iris.New()
 bridgeApp.UseRouter(corsOptions)
 bridgeApp.Get("/ping", pong).Describe("healthcheck")
 mvc.Configure(bridgeApp.Party("/api/txs"), setupQueryByAddressHandler)
 mvc.Configure(bridgeApp.Party("/api/txsbyhashes"), setupQueryByHashHandler)
-mvc.Configure(bridgeApp.Party("/api/claimable"), setupQueryClaimableHandler)
 // TODO: make debug mode configurable
 err = bridgeApp.Listen(cfg.Server.HostPort, iris.WithLogLevel("debug"))

View File

@@ -12,8 +12,8 @@ import (
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"bridge-history-api/config" "bridge-history-api/config"
"bridge-history-api/crossmsg" "bridge-history-api/cross_msg"
"bridge-history-api/crossmsg/messageproof" "bridge-history-api/cross_msg/message_proof"
"bridge-history-api/db" "bridge-history-api/db"
cutils "bridge-history-api/utils" cutils "bridge-history-api/utils"
) )
@@ -54,20 +54,15 @@ func action(ctx *cli.Context) error {
if err != nil { if err != nil {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err) log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
} }
db, err := db.NewOrmFactory(cfg) db, err := db.NewOrmFactory(cfg)
defer func() { defer db.Close()
if deferErr := db.Close(); deferErr != nil {
log.Error("failed to close db", "err", err)
}
}()
if err != nil { if err != nil {
log.Crit("failed to connect to db", "config file", cfgFile, "error", err) log.Crit("failed to connect to db", "config file", cfgFile, "error", err)
} }
l1worker := &crossmsg.FetchEventWorker{F: crossmsg.L1FetchAndSaveEvents, G: crossmsg.GetLatestL1ProcessedHeight, Name: "L1 events fetch Worker"} l1worker := &cross_msg.FetchEventWorker{F: cross_msg.L1FetchAndSaveEvents, G: cross_msg.GetLatestL1ProcessedHeight, Name: "L1 events fetch Worker"}
l2worker := &crossmsg.FetchEventWorker{F: crossmsg.L2FetchAndSaveEvents, G: crossmsg.GetLatestL2ProcessedHeight, Name: "L2 events fetch Worker"} l2worker := &cross_msg.FetchEventWorker{F: cross_msg.L2FetchAndSaveEvents, G: cross_msg.GetLatestL2ProcessedHeight, Name: "L2 events fetch Worker"}
l1AddressList := []common.Address{ l1AddressList := []common.Address{
common.HexToAddress(cfg.L1.CustomERC20GatewayAddr), common.HexToAddress(cfg.L1.CustomERC20GatewayAddr),
@@ -89,7 +84,7 @@ func action(ctx *cli.Context) error {
common.HexToAddress(cfg.L2.WETHGatewayAddr), common.HexToAddress(cfg.L2.WETHGatewayAddr),
} }
l1crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, crossmsg.L1ReorgHandling) l1crossMsgFetcher, err := cross_msg.NewCrossMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, cross_msg.L1ReorgHandling)
if err != nil { if err != nil {
log.Crit("failed to create l1 cross message fetcher", "error", err) log.Crit("failed to create l1 cross message fetcher", "error", err)
} }
@@ -97,7 +92,7 @@ func action(ctx *cli.Context) error {
go l1crossMsgFetcher.Start() go l1crossMsgFetcher.Start()
defer l1crossMsgFetcher.Stop() defer l1crossMsgFetcher.Stop()
l2crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L2, db, l2client, l2worker, l2AddressList, crossmsg.L2ReorgHandling) l2crossMsgFetcher, err := cross_msg.NewCrossMsgFetcher(subCtx, cfg.L2, db, l2client, l2worker, l2AddressList, cross_msg.L2ReorgHandling)
if err != nil { if err != nil {
log.Crit("failed to create l2 cross message fetcher", "error", err) log.Crit("failed to create l2 cross message fetcher", "error", err)
} }
@@ -106,17 +101,17 @@ func action(ctx *cli.Context) error {
defer l2crossMsgFetcher.Stop() defer l2crossMsgFetcher.Stop()
// BlockTimestamp fetcher for l1 and l2 // BlockTimestamp fetcher for l1 and l2
l1BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db.UpdateL1BlockTimestamp, db.GetL1EarliestNoBlockTimestampHeight) l1BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db.UpdateL1BlockTimestamp, db.GetL1EarliestNoBlockTimestampHeight)
go l1BlockTimeFetcher.Start() go l1BlockTimeFetcher.Start()
defer l1BlockTimeFetcher.Stop() defer l1BlockTimeFetcher.Stop()
l2BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, db.UpdateL2BlockTimestamp, db.GetL2EarliestNoBlockTimestampHeight) l2BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, db.UpdateL2BlockTimestamp, db.GetL2EarliestNoBlockTimestampHeight)
go l2BlockTimeFetcher.Start() go l2BlockTimeFetcher.Start()
defer l2BlockTimeFetcher.Stop() defer l2BlockTimeFetcher.Stop()
// Proof updater and batch fetcher // Proof updater and batch fetcher
l2msgProofUpdater := messageproof.NewMsgProofUpdater(subCtx, cfg.L1.Confirmation, cfg.BatchInfoFetcher.BatchIndexStartBlock, db) l2msgProofUpdater := message_proof.NewMsgProofUpdater(subCtx, cfg.L1.Confirmation, cfg.BatchInfoFetcher.BatchIndexStartBlock, db)
batchFetcher := crossmsg.NewBatchInfoFetcher(subCtx, common.HexToAddress(cfg.BatchInfoFetcher.ScrollChainAddr), cfg.BatchInfoFetcher.BatchIndexStartBlock, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db, l2msgProofUpdater) batchFetcher := cross_msg.NewBatchInfoFetcher(subCtx, common.HexToAddress(cfg.BatchInfoFetcher.ScrollChainAddr), cfg.BatchInfoFetcher.BatchIndexStartBlock, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db, l2msgProofUpdater)
go batchFetcher.Start() go batchFetcher.Start()
defer batchFetcher.Stop() defer batchFetcher.Stop()

View File

@@ -6,7 +6,6 @@ import (
"path/filepath" "path/filepath"
) )
// BatchInfoFetcherConfig is the configuration of BatchInfoFetcher
type BatchInfoFetcherConfig struct { type BatchInfoFetcherConfig struct {
BatchIndexStartBlock uint64 `json:"batchIndexStartBlock"` BatchIndexStartBlock uint64 `json:"batchIndexStartBlock"`
ScrollChainAddr string `json:"ScrollChainAddr"` ScrollChainAddr string `json:"ScrollChainAddr"`
@@ -22,7 +21,6 @@ type DBConfig struct {
MaxIdleNum int `json:"maxIdleNum"` MaxIdleNum int `json:"maxIdleNum"`
} }
// LayerConfig is the configuration of Layer1/Layer2
type LayerConfig struct { type LayerConfig struct {
Confirmation uint64 `json:"confirmation"` Confirmation uint64 `json:"confirmation"`
Endpoint string `json:"endpoint"` Endpoint string `json:"endpoint"`
@@ -37,7 +35,6 @@ type LayerConfig struct {
CustomERC20GatewayAddr string `json:"CustomERC20GatewayAddr"` CustomERC20GatewayAddr string `json:"CustomERC20GatewayAddr"`
} }
// ServerConfig is the configuration of the bridge history backend server port
type ServerConfig struct { type ServerConfig struct {
HostPort string `json:"hostPort"` HostPort string `json:"hostPort"`
} }

View File

@@ -7,36 +7,14 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
// QueryAddressController contains the query by address service
type QueryAddressController struct { type QueryAddressController struct {
Service service.HistoryService Service service.HistoryService
} }
// QueryHashController contains the query by hash service
type QueryHashController struct { type QueryHashController struct {
Service service.HistoryService Service service.HistoryService
} }
// QueryClaimableController contains the query claimable txs service
type QueryClaimableController struct {
Service service.HistoryService
}
// Get defines the http get method behavior for QueryClaimableController
func (c *QueryClaimableController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
txs, total, err := c.Service.GetClaimableTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
if err != nil {
return &model.QueryByAddressResponse{Message: "500", Data: &model.Data{}}, err
}
return &model.QueryByAddressResponse{Message: "ok",
Data: &model.Data{
Result: txs,
Total: total,
}}, nil
}
// Get defines the http get method behavior for QueryAddressController
func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) { func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
message, total, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit)) message, total, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
if err != nil { if err != nil {
@@ -50,7 +28,6 @@ func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.Qu
}}, nil }}, nil
} }
// Post defines the http post method behavior for QueryHashController
func (c *QueryHashController) Post(req model.QueryByHashRequest) (*model.QueryByHashResponse, error) { func (c *QueryHashController) Post(req model.QueryByHashRequest) (*model.QueryByHashResponse, error) {
result, err := c.Service.GetTxsByHashes(req.Txs) result, err := c.Service.GetTxsByHashes(req.Txs)
if err != nil { if err != nil {

View File

@@ -1,4 +1,4 @@
-package crossmsg
+package cross_msg
 import (
 "context"
@@ -8,12 +8,11 @@ import (
 "github.com/ethereum/go-ethereum/ethclient"
 "github.com/ethereum/go-ethereum/log"
-"bridge-history-api/crossmsg/messageproof"
+"bridge-history-api/cross_msg/message_proof"
 "bridge-history-api/db"
 "bridge-history-api/utils"
 )
-// BatchInfoFetcher fetches batch info from l1 chain and update db
 type BatchInfoFetcher struct {
 ctx context.Context
 scrollChainAddr common.Address
@@ -22,11 +21,10 @@ type BatchInfoFetcher struct {
 blockTimeInSec int
 client *ethclient.Client
 db db.OrmFactory
-msgProofUpdater *messageproof.MsgProofUpdater
+msgProofUpdater *message_proof.MsgProofUpdater
 }
-// NewBatchInfoFetcher creates a new BatchInfoFetcher instance
-func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db db.OrmFactory, msgProofUpdater *messageproof.MsgProofUpdater) *BatchInfoFetcher {
+func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db db.OrmFactory, msgProofUpdater *message_proof.MsgProofUpdater) *BatchInfoFetcher {
 return &BatchInfoFetcher{
 ctx: ctx,
 scrollChainAddr: scrollChainAddr,
@@ -39,14 +37,13 @@ func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, ba
 }
 }
-// Start the BatchInfoFetcher
 func (b *BatchInfoFetcher) Start() {
 log.Info("BatchInfoFetcher Start")
 // Fetch batch info at beginning
 // Then start msg proof updater after db have some bridge batch
 err := b.fetchBatchInfo()
 if err != nil {
-log.Error("fetch batch info at beginning failed: ", "err", err)
+log.Error("fetch batch info at begining failed: ", "err", err)
 }
 go b.msgProofUpdater.Start()
@@ -68,7 +65,6 @@ func (b *BatchInfoFetcher) Start() {
 }()
 }
-// Stop the BatchInfoFetcher and call msg proof updater to stop
 func (b *BatchInfoFetcher) Stop() {
 log.Info("BatchInfoFetcher Stop")
 b.msgProofUpdater.Stop()

View File

@@ -1,4 +1,4 @@
-package crossmsg
+package cross_msg
 import (
 "context"
@@ -9,13 +9,9 @@ import (
 "github.com/ethereum/go-ethereum/log"
 )
-// GetEarliestNoBlockTimestampHeightFunc is a function type that gets the earliest record without block timestamp from database
 type GetEarliestNoBlockTimestampHeightFunc func() (uint64, error)
-// UpdateBlockTimestampFunc is a function type that updates block timestamp into database
 type UpdateBlockTimestampFunc func(height uint64, timestamp time.Time) error
-// BlockTimestampFetcher fetches block timestamp from blockchain and saves them to database
 type BlockTimestampFetcher struct {
 ctx context.Context
 confirmation uint64
@@ -25,7 +21,6 @@ type BlockTimestampFetcher struct {
 getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc
 }
-// NewBlockTimestampFetcher creates a new BlockTimestampFetcher instance
 func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTimeInSec int, client *ethclient.Client, updateBlockTimestampFunc UpdateBlockTimestampFunc, getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc) *BlockTimestampFetcher {
 return &BlockTimestampFetcher{
 ctx: ctx,
@@ -37,7 +32,6 @@ func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTim
 }
 }
-// Start the BlockTimestampFetcher
 func (b *BlockTimestampFetcher) Start() {
 go func() {
 tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
@@ -79,7 +73,6 @@ func (b *BlockTimestampFetcher) Start() {
 }()
 }
-// Stop the BlockTimestampFetcher and log the info
 func (b *BlockTimestampFetcher) Stop() {
 log.Info("BlockTimestampFetcher Stop")
 }

View File

@@ -1,4 +1,4 @@
-package crossmsg
+package cross_msg
 import (
 "context"
@@ -18,8 +18,7 @@ import (
 "bridge-history-api/utils"
 )
-// MsgFetcher fetches cross message events from blockchain and saves them to database
-type MsgFetcher struct {
+type CrossMsgFetcher struct {
 ctx context.Context
 config *config.LayerConfig
 db db.OrmFactory
@@ -33,9 +32,8 @@ type MsgFetcher struct {
 reorgEndCh chan struct{}
 }
-// NewMsgFetcher creates a new MsgFetcher instance
-func NewMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFactory, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*MsgFetcher, error) {
-msgFetcher := &MsgFetcher{
+func NewCrossMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFactory, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*CrossMsgFetcher, error) {
+crossMsgFetcher := &CrossMsgFetcher{
 ctx: ctx,
 config: config,
 db: db,
@@ -47,12 +45,11 @@ func NewMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFac
 reorgStartCh: make(chan struct{}),
 reorgEndCh: make(chan struct{}),
 }
-return msgFetcher, nil
+return crossMsgFetcher, nil
 }
-// Start the MsgFetcher
-func (c *MsgFetcher) Start() {
-log.Info("MsgFetcher Start")
+func (c *CrossMsgFetcher) Start() {
+log.Info("CrossMsgFetcher Start")
 // fetch missing events from finalized blocks, we don't handle reorgs here
 c.forwardFetchAndSaveMissingEvents(c.config.Confirmation)
@@ -97,13 +94,12 @@ func (c *MsgFetcher) Start() {
 }()
 }
-// Stop the MsgFetcher and log the info
-func (c *MsgFetcher) Stop() {
-log.Info("MsgFetcher Stop")
+func (c *CrossMsgFetcher) Stop() {
+log.Info("CrossMsgFetcher Stop")
 }
 // forwardFetchAndSaveMissingEvents will fetch all events from the latest processed height to the latest block number.
-func (c *MsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
+func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
 // if we fetch to the latest block, shall not exceed cachedHeaders
 var number uint64
 var err error
@@ -128,7 +124,7 @@ func (c *MsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
 if processedHeight <= 0 || processedHeight < int64(c.config.StartHeight) {
 processedHeight = int64(c.config.StartHeight)
 } else {
-processedHeight++
+processedHeight += 1
 }
 for from := processedHeight; from <= int64(number); from += fetchLimit {
 to := from + fetchLimit - 1
@@ -143,7 +139,7 @@ func (c *MsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
 }
 }
-func (c *MsgFetcher) fetchMissingLatestHeaders() {
+func (c *CrossMsgFetcher) fetchMissingLatestHeaders() {
 var start int64
 number, err := c.client.BlockNumber(c.ctx)
 if err != nil {
@@ -163,7 +159,7 @@ func (c *MsgFetcher) fetchMissingLatestHeaders() {
 close(c.reorgEndCh)
 return
 default:
-header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(i))
+header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(int64(i)))
 if err != nil {
 log.Error("failed to get latest header", "err", err)
 return
@@ -185,9 +181,9 @@ func (c *MsgFetcher) fetchMissingLatestHeaders() {
 c.mu.Lock()
 index, ok, validHeaders := BackwardFindReorgBlock(c.ctx, c.cachedHeaders, c.client, header)
 if !ok {
-log.Error("Reorg happened too earlier than cached headers", "reorg height", header.Number)
-num, getSafeErr := utils.GetSafeBlockNumber(c.ctx, c.client, c.config.Confirmation)
-if getSafeErr != nil {
+log.Error("Reorg happended too earlier than cached headers", "reorg height", header.Number)
+num, err := utils.GetSafeBlockNumber(c.ctx, c.client, c.config.Confirmation)
+if err != nil {
 log.Crit("Can not get safe number during reorg, quit the process", "err", err)
 }
 // clear all our saved data, because no data is safe now

View File

@@ -1,4 +1,4 @@
-package crossmsg
+package cross_msg
 import (
 "context"
@@ -8,9 +8,11 @@ import (
 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/ethclient"
 "github.com/ethereum/go-ethereum/log"
+"github.com/jmoiron/sqlx"
 backendabi "bridge-history-api/abi"
 "bridge-history-api/db"
+"bridge-history-api/db/orm"
 "bridge-history-api/utils"
 )
@@ -25,15 +27,14 @@ type FetchAndSave func(ctx context.Context, client *ethclient.Client, database d
 // GetLatestProcessed is a function type that gets the latest processed block height from database
 type GetLatestProcessed func(db db.OrmFactory) (int64, error)
+type UpdateXHash func(ctx context.Context)
-// FetchEventWorker defines worker with fetch and save function, processed number getter, and name
 type FetchEventWorker struct {
 F FetchAndSave
 G GetLatestProcessed
 Name string
 }
-// GetLatestL1ProcessedHeight get L1 the latest processed height
 func GetLatestL1ProcessedHeight(db db.OrmFactory) (int64, error) {
 crossHeight, err := db.GetLatestL1ProcessedHeight()
 if err != nil {
@@ -47,11 +48,11 @@ func GetLatestL1ProcessedHeight(db db.OrmFactory) (int64, error) {
 }
 if crossHeight > relayedHeight {
 return crossHeight, nil
-}
-return relayedHeight, nil
+} else {
+return relayedHeight, nil
+}
 }
-// GetLatestL2ProcessedHeight get L2 latest processed height
 func GetLatestL2ProcessedHeight(db db.OrmFactory) (int64, error) {
 crossHeight, err := db.GetLatestL2ProcessedHeight()
 if err != nil {
@@ -78,7 +79,6 @@ func GetLatestL2ProcessedHeight(db db.OrmFactory) (int64, error) {
 return maxHeight, nil
 }
-// L1FetchAndSaveEvents fetch and save events on L1
 func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
 query := geth.FilterQuery{
 FromBlock: big.NewInt(from), // inclusive
@@ -100,11 +100,19 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
 log.Warn("Failed to get l1 event logs", "err", err)
 return err
 }
-depositL1CrossMsgs, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
+depositL1CrossMsgs, msgHashes, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
 if err != nil {
 log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
 return err
 }
+for i := range depositL1CrossMsgs {
+for _, msgHash := range msgHashes {
+if depositL1CrossMsgs[i].Layer1Hash == msgHash.TxHash.Hex() {
+depositL1CrossMsgs[i].MsgHash = msgHash.MsgHash.Hex()
+break
+}
+}
+}
 dbTx, err := database.Beginx()
 if err != nil {
 log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
@@ -112,25 +120,19 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
 }
 err = database.BatchInsertL1CrossMsgDBTx(dbTx, depositL1CrossMsgs)
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Crit("l1FetchAndSaveEvents: Failed to insert cross msg event logs", "err", err)
 }
 err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsg)
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Crit("l1FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
 }
 err = dbTx.Commit()
 if err != nil {
 // if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Error("l1FetchAndSaveEvents: Failed to commit db transaction", "err", err)
 return err
 }
@@ -138,7 +140,6 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
 return nil
 }
-// L2FetchAndSaveEvents fetche and save events on L2
 func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
 query := geth.FilterQuery{
 FromBlock: big.NewInt(from), // inclusive
@@ -160,12 +161,22 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
 log.Warn("Failed to get l2 event logs", "err", err)
 return err
 }
-depositL2CrossMsgs, relayedMsg, l2SentMsgs, err := utils.ParseBackendL2EventLogs(logs)
+depositL2CrossMsgs, relayedMsg, L2SentMsgWrappers, err := utils.ParseBackendL2EventLogs(logs)
 if err != nil {
 log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
 return err
 }
+var l2SentMsgs []*orm.L2SentMsg
+for i := range depositL2CrossMsgs {
+for _, l2SentMsgWrapper := range L2SentMsgWrappers {
+if depositL2CrossMsgs[i].Layer2Hash == l2SentMsgWrapper.TxHash.Hex() {
+depositL2CrossMsgs[i].MsgHash = l2SentMsgWrapper.L2SentMsg.MsgHash
+l2SentMsgWrapper.L2SentMsg.TxSender = depositL2CrossMsgs[i].Sender
+l2SentMsgs = append(l2SentMsgs, l2SentMsgWrapper.L2SentMsg)
+break
+}
+}
+}
 dbTx, err := database.Beginx()
 if err != nil {
 log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
@@ -173,34 +184,28 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
 }
 err = database.BatchInsertL2CrossMsgDBTx(dbTx, depositL2CrossMsgs)
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Crit("l2FetchAndSaveEvents: Failed to insert cross msg event logs", "err", err)
 }
 err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsg)
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
 }
-err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs)
-if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
-log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
-}
+if len(l2SentMsgs) > 0 {
+err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs)
+if err != nil {
+dbTx.Rollback()
+log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
+}
+}
 err = dbTx.Commit()
 if err != nil {
 // if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Error("l2FetchAndSaveEvents: Failed to commit db transaction", "err", err)
 return err
 }
@@ -208,7 +213,6 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
 return nil
 }
-// FetchAndSaveBatchIndex fetche and save batch index
 func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, scrollChainAddr common.Address) error {
 query := geth.FilterQuery{
 FromBlock: big.NewInt(from), // inclusive
@@ -235,19 +239,37 @@ func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, datab
 }
 err = database.BatchInsertRollupBatchDBTx(dbTx, rollupBatches)
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Crit("FetchAndSaveBatchIndex: Failed to insert batch commit msg event logs", "err", err)
 }
 err = dbTx.Commit()
 if err != nil {
 // if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Error("FetchAndSaveBatchIndex: Failed to commit db transaction", "err", err)
 return err
 }
 return nil
 }
+func updateL1CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
+for _, msgHash := range msgHashes {
+err := database.UpdateL1CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
+if err != nil {
+log.Error("updateL1CrossMsgMsgHash: can not update layer1 cross msg MsgHash", "layer1 hash", msgHash.TxHash, "err", err)
+continue
+}
+}
+return nil
+}
+func updateL2CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
+for _, msgHash := range msgHashes {
+err := database.UpdateL2CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
+if err != nil {
+log.Error("updateL2CrossMsgMsgHash: can not update layer2 cross msg MsgHash", "layer2 hash", msgHash.TxHash, "err", err)
+continue
+}
+}
+return nil
+}

View File

@@ -1,4 +1,4 @@
-package messageproof
+package message_proof
 import (
 "context"
@@ -14,14 +14,12 @@ import (
 "bridge-history-api/db/orm"
 )
-// MsgProofUpdater is used to update message proof in db
 type MsgProofUpdater struct {
 ctx context.Context
 db db.OrmFactory
 withdrawTrie *WithdrawTrie
 }
-// NewMsgProofUpdater new MsgProofUpdater instance
 func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock uint64, db db.OrmFactory) *MsgProofUpdater {
 return &MsgProofUpdater{
 ctx: ctx,
@@ -30,7 +28,6 @@ func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock ui
 }
 }
-// Start the MsgProofUpdater
 func (m *MsgProofUpdater) Start() {
 log.Info("MsgProofUpdater Start")
 m.initialize(m.ctx)
@@ -86,7 +83,6 @@ func (m *MsgProofUpdater) Start() {
 }
-// Stop the MsgProofUpdater
 func (m *MsgProofUpdater) Stop() {
 log.Info("MsgProofUpdater Stop")
 }
@@ -116,7 +112,7 @@ func (m *MsgProofUpdater) initializeWithdrawTrie() error {
 return fmt.Errorf("failed to get first l2 message: %v", err)
 }
 // no l2 message
-// TO DO: check if we really dont have l2 sent message with nonce 0
+// TO DO: check if we realy dont have l2 sent message with nonce 0
 if firstMsg == nil {
 log.Info("No first l2sentmsg in db")
 return nil
@@ -187,7 +183,7 @@ func (m *MsgProofUpdater) updateMsgProof(msgs []*orm.L2SentMsg, proofs [][]byte,
 if len(msgs) == 0 {
 return nil
 }
-// this should not happen, but double check
+// this should not happend, but double checked
 if len(msgs) != len(proofs) {
 return fmt.Errorf("illegal state: len(msgs) != len(proofs)")
 }

View File

@@ -1,4 +1,4 @@
-package messageproof
+package message_proof
 import (
 "github.com/ethereum/go-ethereum/common"

View File

@@ -1,4 +1,4 @@
-package messageproof
+package message_proof
 import (
 "math/big"

View File

@@ -1,4 +1,4 @@
-package crossmsg_test
+package cross_msg_test
 import (
 "crypto/rand"
@@ -9,7 +9,7 @@ import (
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/stretchr/testify/assert"
-"bridge-history-api/crossmsg"
+"bridge-history-api/cross_msg"
 )
 func TestMergeIntoList(t *testing.T) {
@@ -18,7 +18,7 @@ func TestMergeIntoList(t *testing.T) {
 assert.Equal(t, headers[0].Hash(), headers[1].ParentHash)
 headers2, err := generateHeaders(18)
 assert.NoError(t, err)
-result := crossmsg.MergeAddIntoHeaderList(headers, headers2, 64)
+result := cross_msg.MergeAddIntoHeaderList(headers, headers2, 64)
 assert.Equal(t, 64, len(result))
 assert.Equal(t, headers2[len(headers2)-1], result[len(result)-1])
 assert.NotEqual(t, headers[0], result[0])
@@ -53,7 +53,7 @@ func generateHeaders(amount int) ([]*types.Header, error) {
 Time: uint64(i * 15),
 Extra: []byte{},
 MixDigest: common.Hash{},
-Nonce: types.EncodeNonce(nonce.Uint64()),
+Nonce: types.EncodeNonce(uint64(nonce.Uint64())),
 }
 headers[i] = header
 }

View File

@@ -1,4 +1,4 @@
-package crossmsg
+package cross_msg
 import (
 "context"
@@ -10,7 +10,6 @@ import (
 "bridge-history-api/db"
 )
-// ReorgHandling handles reorg function type
 type ReorgHandling func(ctx context.Context, reorgHeight int64, db db.OrmFactory) error
 func reverseArray(arr []*types.Header) []*types.Header {
@@ -21,12 +20,10 @@ func reverseArray(arr []*types.Header) []*types.Header {
 return arr
 }
-// IsParentAndChild match the child header ParentHash with parent header Hash
 func IsParentAndChild(parentHeader *types.Header, header *types.Header) bool {
 return header.ParentHash == parentHeader.Hash()
 }
-// MergeAddIntoHeaderList merges two header lists, if exceed the max length then drop the oldest entries
 func MergeAddIntoHeaderList(baseArr, extraArr []*types.Header, maxLength int) []*types.Header {
 mergedArr := append(baseArr, extraArr...)
 if len(mergedArr) <= maxLength {
@@ -37,12 +34,11 @@ func MergeAddIntoHeaderList(baseArr, extraArr []*types.Header, maxLength int) []
 return mergedArr[startIndex:]
 }
-// BackwardFindReorgBlock finds the reorg block by backward search
-func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client *ethclient.Client, lastHeader *types.Header) (int, bool, []*types.Header) {
+func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client *ethclient.Client, header *types.Header) (int, bool, []*types.Header) {
 maxStep := len(headers)
-backwardHeaderList := []*types.Header{lastHeader}
+backwardHeaderList := []*types.Header{header}
 for iterRound := 0; iterRound < maxStep; iterRound++ {
-header, err := client.HeaderByHash(ctx, lastHeader.ParentHash)
+header, err := client.HeaderByHash(ctx, header.ParentHash)
 if err != nil {
 log.Error("BackwardFindReorgBlock failed", "error", err)
 return -1, false, nil
@@ -54,12 +50,10 @@ func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client
 return j, true, backwardHeaderList
 }
 }
-lastHeader = header
 }
 return -1, false, nil
 }
-// L1ReorgHandling handles l1 reorg
 func L1ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) error {
 dbTx, err := db.Beginx()
 if err != nil {
@@ -67,64 +61,47 @@ func L1ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) e
 }
 err = db.DeleteL1CrossMsgAfterHeightDBTx(dbTx, reorgHeight)
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Crit("delete l1 cross msg from height", "height", reorgHeight, "err", err)
 }
 err = db.DeleteL1RelayedHashAfterHeightDBTx(dbTx, reorgHeight)
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Crit("delete l1 relayed hash from height", "height", reorgHeight, "err", err)
 }
 err = dbTx.Commit()
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Error("commit tx failed", "err", err)
 return err
 }
 return nil
 }
-// L2ReorgHandling handles l2 reorg
 func L2ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) error {
 dbTx, err := db.Beginx()
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Crit("begin db tx failed", "err", err)
 }
 err = db.DeleteL2CrossMsgFromHeightDBTx(dbTx, reorgHeight)
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Crit("delete l2 cross msg from height", "height", reorgHeight, "err", err)
 }
 err = db.DeleteL2RelayedHashAfterHeightDBTx(dbTx, reorgHeight)
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Crit("delete l2 relayed hash from height", "height", reorgHeight, "err", err)
 }
 err = db.DeleteL2SentMsgAfterHeightDBTx(dbTx, reorgHeight)
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Crit("delete l2 sent msg from height", "height", reorgHeight, "err", err)
 }
 err = dbTx.Commit()
 if err != nil {
-if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
-log.Error("dbTx Rollback failed", "err", rollBackErr)
-}
+dbTx.Rollback()
 log.Error("commit tx failed", "err", err)
 return err
 }

View File

@@ -19,8 +19,6 @@ CREATE INDEX idx_l1_msg_relayed_msg ON relayed_msg (layer1_hash, deleted_at);
 CREATE INDEX idx_l2_msg_relayed_msg ON relayed_msg (layer2_hash, deleted_at);
-CREATE INDEX idx_msg_hash_deleted_at_relayed_msg on relayed_msg (msg_hash, deleted_at);
 CREATE OR REPLACE FUNCTION update_timestamp()
 RETURNS TRIGGER AS $$
 BEGIN

View File

@@ -3,8 +3,7 @@
 create table l2_sent_msg
 (
 id BIGSERIAL PRIMARY KEY,
-original_sender VARCHAR NOT NULL DEFAULT '',
-tx_hash VARCHAR NOT NULL,
+tx_sender VARCHAR NOT NULL,
 sender VARCHAR NOT NULL,
 target VARCHAR NOT NULL,
 value VARCHAR NOT NULL,
@@ -25,8 +24,6 @@ on l2_sent_msg (msg_hash) where deleted_at IS NULL;
 create unique index uk_nonce
 on l2_sent_msg (nonce) where deleted_at IS NULL;
-CREATE INDEX idx_msg_hash_deleted_at_l2_sent_msg on l2_sent_msg (msg_hash, deleted_at);
 CREATE OR REPLACE FUNCTION update_timestamp()
 RETURNS TRIGGER AS $$
 BEGIN

View File

@@ -11,7 +11,6 @@ type rollupBatchOrm struct {
 db *sqlx.DB
 }
-// RollupBatch is the struct for rollup_batch table
 type RollupBatch struct {
 ID uint64 `json:"id" db:"id"`
 BatchIndex uint64 `json:"batch_index" db:"batch_index"`

View File

@@ -8,10 +8,7 @@ import (
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
) )
// AssetType can be ETH/ERC20/ERC1155/ERC721
type AssetType int type AssetType int
// MsgType can be layer1/layer2 msg
type MsgType int type MsgType int
func (a AssetType) String() string { func (a AssetType) String() string {
@@ -29,22 +26,15 @@ func (a AssetType) String() string {
} }
const ( const (
// ETH = 0
ETH AssetType = iota ETH AssetType = iota
// ERC20 = 1
ERC20 ERC20
// ERC721 = 2
ERC721 ERC721
// ERC1155 = 3
ERC1155 ERC1155
) )
const ( const (
// UnknownMsg = 0
UnknownMsg MsgType = iota UnknownMsg MsgType = iota
// Layer1Msg = 1
Layer1Msg Layer1Msg
// Layer2Msg = 2
Layer2Msg Layer2Msg
) )
@@ -96,20 +86,17 @@ type L2CrossMsgOrm interface {
DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error
UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error
GetL2EarliestNoBlockTimestampHeight() (uint64, error) GetL2EarliestNoBlockTimestampHeight() (uint64, error)
GetL2CrossMsgByMsgHashList(msgHashList []string) ([]*CrossMsg, error)
} }
// RelayedMsgOrm provides operations on relayed_msg table
type RelayedMsgOrm interface { type RelayedMsgOrm interface {
BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*RelayedMsg) error BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*RelayedMsg) error
GetRelayedMsgByHash(msgHash string) (*RelayedMsg, error) GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error)
GetLatestRelayedHeightOnL1() (int64, error) GetLatestRelayedHeightOnL1() (int64, error)
GetLatestRelayedHeightOnL2() (int64, error) GetLatestRelayedHeightOnL2() (int64, error)
DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
} }
// L2SentMsgOrm provides operations on l2_sent_msg table
type L2SentMsgOrm interface { type L2SentMsgOrm interface {
BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2SentMsg) error BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2SentMsg) error
GetL2SentMsgByHash(l2Hash string) (*L2SentMsg, error) GetL2SentMsgByHash(l2Hash string) (*L2SentMsg, error)
@@ -117,14 +104,11 @@ type L2SentMsgOrm interface {
GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error) GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error)
GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error) GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error)
GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error)
UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batchIndex uint64) error UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error
GetLatestL2SentMsgBatchIndex() (int64, error) GetLatestL2SentMsgBatchIndex() (int64, error)
GetClaimableL2SentMsgByAddressWithOffset(address string, offset int64, limit int64) ([]*L2SentMsg, error)
GetClaimableL2SentMsgByAddressTotalNum(address string) (uint64, error)
DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
} }
// RollupBatchOrm provides operations on rollup_batch table
type RollupBatchOrm interface { type RollupBatchOrm interface {
GetLatestRollupBatch() (*RollupBatch, error) GetLatestRollupBatch() (*RollupBatch, error)
GetRollupBatchByIndex(index uint64) (*RollupBatch, error) GetRollupBatchByIndex(index uint64) (*RollupBatch, error)

View File

@@ -37,14 +37,7 @@ func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, erro
func (l *l1CrossMsgOrm) GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error) { func (l *l1CrossMsgOrm) GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = 1 AND deleted_at IS NULL;`, sender.String(), Layer1Msg) rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = 1 AND deleted_at IS NULL;`, sender.String(), Layer1Msg)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() { for rows.Next() {
msg := &CrossMsg{} msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil { if err = rows.StructScan(msg); err != nil {
@@ -80,7 +73,7 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
"msg_type": Layer1Msg, "msg_type": Layer1Msg,
} }
} }
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, amount, asset, msg_hash, layer1_hash, layer1_token, layer2_token, token_ids, msg_type) values(:height, :sender, :target, :amount, :asset, :msg_hash, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :msg_type);`, messageMaps) _, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_ids, amount, msg_type, msg_hash) values(:height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type, :msg_hash);`, messageMaps)
if err != nil { if err != nil {
log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "err", err) log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "err", err)
return err return err
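
Both sides of this hunk feed the same messageMaps slice into sqlx's NamedExec; only the order of the columns in the INSERT list differs. With named parameters each :name placeholder binds by map key, so the reorder is a readability change rather than a behavioural one. A small sketch of the mechanism, with the row values invented for illustration:

    // insertExample shows named-parameter binding: each :key is looked up in the
    // map, so the order of columns in the INSERT list does not affect the result.
    func insertExample(dbTx *sqlx.Tx) error {
        rows := []map[string]interface{}{
            {"height": 100, "sender": "0x01", "target": "0x02", "msg_hash": "0xabc"},
        }
        _, err := dbTx.NamedExec(
            `INSERT INTO cross_message(height, sender, target, msg_hash)
             VALUES (:height, :sender, :target, :msg_hash);`, rows)
        return err
    }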

View File

@@ -9,7 +9,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/lib/pq"
) )
type l2CrossMsgOrm struct { type l2CrossMsgOrm struct {
@@ -38,14 +37,7 @@ func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, erro
func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error) { func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND deleted_at IS NULL;`, sender.String(), Layer2Msg) rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND deleted_at IS NULL;`, sender.String(), Layer2Msg)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() { for rows.Next() {
msg := &CrossMsg{} msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil { if err = rows.StructScan(msg); err != nil {
@@ -93,7 +85,7 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
"msg_type": Layer2Msg, "msg_type": Layer2Msg,
} }
} }
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, msg_hash, layer2_hash, layer1_token, layer2_token, token_ids, amount, msg_type) values(:height, :sender, :target, :asset, :msg_hash, :layer2_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type);`, messageMaps) _, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_ids, amount, msg_type, msg_hash) values(:height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type, :msg_hash);`, messageMaps)
if err != nil { if err != nil {
log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "err", err) log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "err", err)
return err return err
@@ -148,30 +140,3 @@ func (l *l2CrossMsgOrm) GetL2EarliestNoBlockTimestampHeight() (uint64, error) {
} }
return result, nil return result, nil
} }
func (l *l2CrossMsgOrm) GetL2CrossMsgByMsgHashList(msgHashList []string) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE msg_hash = ANY($1) AND msg_type = $2 AND deleted_at IS NULL;`, pq.Array(msgHashList), Layer2Msg)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, err
}
if len(results) == 0 {
log.Debug("no L2CrossMsg under given msg hashes", "msg hash list", msgHashList)
}
return results, nil
}
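
GetL2CrossMsgByMsgHashList and the github.com/lib/pq import appear on only one side of this file's diff; that variant passes a Go slice into the query through pq.Array so Postgres receives it as an array for the `= ANY($1)` predicate. A hedged sketch of the idiom, assuming a connected *sqlx.DB and the CrossMsg type from this package:

    // queryByHashList fetches rows whose msg_hash is in the given list.
    // pq.Array adapts a []string to a Postgres array parameter.
    func queryByHashList(db *sqlx.DB, hashes []string) ([]*CrossMsg, error) {
        var results []*CrossMsg
        rows, err := db.Queryx(
            `SELECT * FROM cross_message WHERE msg_hash = ANY($1) AND deleted_at IS NULL;`,
            pq.Array(hashes))
        if err != nil {
            return nil, err
        }
        defer rows.Close()
        for rows.Next() {
            msg := &CrossMsg{}
            if err = rows.StructScan(msg); err != nil {
                return nil, err
            }
            results = append(results, msg)
        }
        return results, rows.Err()
    }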

View File

@@ -9,23 +9,21 @@ import (
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
) )
// L2SentMsg defines the struct for l2_sent_msg table record
type L2SentMsg struct { type L2SentMsg struct {
ID uint64 `json:"id" db:"id"` ID uint64 `json:"id" db:"id"`
OriginalSender string `json:"original_sender" db:"original_sender"` TxSender string `json:"tx_sender" db:"tx_sender"`
TxHash string `json:"tx_hash" db:"tx_hash"` MsgHash string `json:"msg_hash" db:"msg_hash"`
MsgHash string `json:"msg_hash" db:"msg_hash"` Sender string `json:"sender" db:"sender"`
Sender string `json:"sender" db:"sender"` Target string `json:"target" db:"target"`
Target string `json:"target" db:"target"` Value string `json:"value" db:"value"`
Value string `json:"value" db:"value"` Height uint64 `json:"height" db:"height"`
Height uint64 `json:"height" db:"height"` Nonce uint64 `json:"nonce" db:"nonce"`
Nonce uint64 `json:"nonce" db:"nonce"` BatchIndex uint64 `json:"batch_index" db:"batch_index"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"` MsgProof string `json:"msg_proof" db:"msg_proof"`
MsgProof string `json:"msg_proof" db:"msg_proof"` MsgData string `json:"msg_data" db:"msg_data"`
MsgData string `json:"msg_data" db:"msg_data"` CreatedAt *time.Time `json:"created_at" db:"created_at"`
CreatedAt *time.Time `json:"created_at" db:"created_at"` UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"` DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
} }
type l2SentMsgOrm struct { type l2SentMsgOrm struct {
@@ -54,20 +52,19 @@ func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2Sen
messageMaps := make([]map[string]interface{}, len(messages)) messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages { for i, msg := range messages {
messageMaps[i] = map[string]interface{}{ messageMaps[i] = map[string]interface{}{
"original_sender": msg.OriginalSender, "tx_sender": msg.TxSender,
"tx_hash": msg.TxHash, "sender": msg.Sender,
"sender": msg.Sender, "target": msg.Target,
"target": msg.Target, "value": msg.Value,
"value": msg.Value, "msg_hash": msg.MsgHash,
"msg_hash": msg.MsgHash, "height": msg.Height,
"height": msg.Height, "nonce": msg.Nonce,
"nonce": msg.Nonce, "batch_index": msg.BatchIndex,
"batch_index": msg.BatchIndex, "msg_proof": msg.MsgProof,
"msg_proof": msg.MsgProof, "msg_data": msg.MsgData,
"msg_data": msg.MsgData,
} }
} }
_, err = dbTx.NamedExec(`insert into l2_sent_msg(original_sender, tx_hash, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:original_sender, :tx_hash, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps) _, err = dbTx.NamedExec(`insert into l2_sent_msg(tx_sender, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:tx_sender, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
if err != nil { if err != nil {
log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err) log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err)
return err return err
@@ -90,15 +87,15 @@ func (l *l2SentMsgOrm) GetLatestSentMsgHeightOnL2() (int64, error) {
return 0, nil return 0, nil
} }
func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batchIndex uint64) error { func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND deleted_at IS NULL;"), proof, batchIndex, msgHash); err != nil { if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND deleted_at IS NULL;"), proof, batch_index, msgHash); err != nil {
return err return err
} }
return nil return nil
} }
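
UpdateL2MessageProofInDBTx writes its statement with `?` placeholders and relies on sqlx's Rebind to translate them into the driver's bindvar style ($1, $2, $3 for lib/pq). A minimal sketch of that call shape, with the surrounding function and parameters assumed rather than taken from this diff:

    // updateProof shows the Rebind idiom: '?' placeholders are rewritten to the
    // connected driver's bindvar style before execution, keeping the SQL portable.
    func updateProof(ctx context.Context, db *sqlx.DB, dbTx *sqlx.Tx,
        msgHash, proof string, batchIndex uint64) error {
        query := db.Rebind("UPDATE l2_sent_msg SET msg_proof = ?, batch_index = ? WHERE msg_hash = ?;")
        _, err := dbTx.ExecContext(ctx, query, proof, batchIndex, msgHash)
        return err
    }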
func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) { func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {
row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE batch_index != 0 AND deleted_at IS NULL ORDER BY batch_index DESC LIMIT 1;`) row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE batch_index !=0 AND deleted_at IS NULL ORDER BY batch_index DESC LIMIT 1;`)
var result sql.NullInt64 var result sql.NullInt64
if err := row.Scan(&result); err != nil { if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid { if err == sql.ErrNoRows || !result.Valid {
@@ -118,11 +115,6 @@ func (l *l2SentMsgOrm) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight u
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() { for rows.Next() {
msg := &L2SentMsg{} msg := &L2SentMsg{}
if err = rows.StructScan(msg); err != nil { if err = rows.StructScan(msg); err != nil {
@@ -157,33 +149,3 @@ func (l *l2SentMsgOrm) DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int6
_, err := dbTx.Exec(`UPDATE l2_sent_msg SET deleted_at = current_timestamp WHERE height > $1;`, height) _, err := dbTx.Exec(`UPDATE l2_sent_msg SET deleted_at = current_timestamp WHERE height > $1;`, height)
return err return err
} }
func (l *l2SentMsgOrm) GetClaimableL2SentMsgByAddressWithOffset(address string, offset int64, limit int64) ([]*L2SentMsg, error) {
var results []*L2SentMsg
rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) ORDER BY id DESC LIMIT $2 OFFSET $3;`, address, limit, offset)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() {
msg := &L2SentMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
return results, err
}
func (l *l2SentMsgOrm) GetClaimableL2SentMsgByAddressTotalNum(address string) (uint64, error) {
var count uint64
row := l.db.QueryRowx(`SELECT COUNT(*) FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1);`, address)
if err := row.Scan(&count); err != nil {
return 0, err
}
return count, nil
}
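
The claimable-message queries shown just above (present on one side of this diff) select l2_sent_msg rows that have no live relayed_msg row sharing their msg_hash, filtered to the requesting address. A sketch of the counting variant in isolation, assuming a *sqlx.DB and the same schema; error handling is deliberately minimal:

    // countClaimable mirrors the anti-join used above: a message is claimable
    // when no non-deleted relayed_msg row shares its msg_hash.
    func countClaimable(db *sqlx.DB, address string) (uint64, error) {
        var count uint64
        row := db.QueryRowx(`SELECT COUNT(*) FROM l2_sent_msg
            WHERE id NOT IN (
                SELECT l2_sent_msg.id FROM l2_sent_msg
                INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash
                WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL)
            AND (original_sender = $1 OR sender = $1);`, address)
        if err := row.Scan(&count); err != nil {
            return 0, err
        }
        return count, nil
    }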

View File

@@ -8,7 +8,6 @@ import (
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
) )
// RelayedMsg is the struct for relayed_msg table
type RelayedMsg struct { type RelayedMsg struct {
MsgHash string `json:"msg_hash" db:"msg_hash"` MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"` Height uint64 `json:"height" db:"height"`
@@ -47,9 +46,9 @@ func (l *relayedMsgOrm) BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*Rel
return nil return nil
} }
func (l *relayedMsgOrm) GetRelayedMsgByHash(msgHash string) (*RelayedMsg, error) { func (l *relayedMsgOrm) GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error) {
result := &RelayedMsg{} result := &RelayedMsg{}
row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msgHash) row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msg_hash)
if err := row.StructScan(result); err != nil { if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) { if errors.Is(err, sql.ErrNoRows) {
return nil, nil return nil, nil

View File

@@ -4,7 +4,6 @@ import (
"database/sql" "database/sql"
"errors" "errors"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //nolint:golint _ "github.com/lib/pq" //nolint:golint
@@ -83,11 +82,6 @@ func (o *ormFactory) GetCrossMsgsByAddressWithOffset(sender string, offset int64
if err != nil || rows == nil { if err != nil || rows == nil {
return nil, err return nil, err
} }
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() { for rows.Next() {
msg := &orm.CrossMsg{} msg := &orm.CrossMsg{}
if err = rows.StructScan(msg); err != nil { if err = rows.StructScan(msg); err != nil {

View File

@@ -7,17 +7,17 @@ require (
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458 github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458
github.com/jmoiron/sqlx v1.3.5 github.com/jmoiron/sqlx v1.3.5
github.com/kataras/iris/v12 v12.2.0 github.com/kataras/iris/v12 v12.2.0
github.com/lib/pq v1.10.9 github.com/lib/pq v1.10.7
github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.19 github.com/mattn/go-isatty v0.0.18
github.com/modern-go/reflect2 v1.0.2 github.com/modern-go/reflect2 v1.0.2
github.com/pressly/goose/v3 v3.7.0 github.com/pressly/goose/v3 v3.7.0
github.com/stretchr/testify v1.8.3 github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.25.7 github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
) )
require ( require (
github.com/BurntSushi/toml v1.3.2 // indirect github.com/BurntSushi/toml v1.2.1 // indirect
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 // indirect github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 // indirect
github.com/CloudyKit/jet/v6 v6.2.0 // indirect github.com/CloudyKit/jet/v6 v6.2.0 // indirect
github.com/DataDog/zstd v1.5.2 // indirect github.com/DataDog/zstd v1.5.2 // indirect
@@ -44,7 +44,7 @@ require (
github.com/deckarep/golang-set/v2 v2.1.0 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/docker v23.0.6+incompatible // indirect github.com/docker/docker v20.10.21+incompatible // indirect
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 // indirect github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 // indirect
github.com/ethereum/c-kzg-4844 v0.2.0 // indirect github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
github.com/fatih/structs v1.1.0 // indirect github.com/fatih/structs v1.1.0 // indirect
@@ -54,7 +54,7 @@ require (
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/gobwas/httphead v0.1.0 // indirect github.com/gobwas/httphead v0.1.0 // indirect
github.com/gobwas/pool v0.2.1 // indirect github.com/gobwas/pool v0.2.1 // indirect
@@ -91,7 +91,7 @@ require (
github.com/mailgun/raymond/v2 v2.0.48 // indirect github.com/mailgun/raymond/v2 v2.0.48 // indirect
github.com/mailru/easyjson v0.7.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mediocregopher/radix/v3 v3.8.1 // indirect github.com/mediocregopher/radix/v3 v3.8.1 // indirect
github.com/microcosm-cc/bluemonday v1.0.23 // indirect github.com/microcosm-cc/bluemonday v1.0.23 // indirect
@@ -134,16 +134,16 @@ require (
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yosssi/ace v0.0.5 // indirect github.com/yosssi/ace v0.0.5 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.11.0 // indirect golang.org/x/crypto v0.10.0 // indirect
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
golang.org/x/net v0.12.0 // indirect golang.org/x/net v0.10.0 // indirect
golang.org/x/sync v0.3.0 // indirect golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.10.0 // indirect golang.org/x/sys v0.9.0 // indirect
golang.org/x/text v0.11.0 // indirect golang.org/x/text v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.11.0 // indirect golang.org/x/tools v0.8.0 // indirect
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
google.golang.org/protobuf v1.31.0 // indirect google.golang.org/protobuf v1.29.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect

View File

@@ -1,8 +1,8 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 h1:sR+/8Yb4slttB4vD+b9btVEnWgL3Q00OBTzVT8B9C0c= github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 h1:sR+/8Yb4slttB4vD+b9btVEnWgL3Q00OBTzVT8B9C0c=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
@@ -97,8 +97,8 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE= github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE=
github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU= github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
@@ -144,8 +144,8 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@@ -303,8 +303,8 @@ github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awS
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailgun/raymond/v2 v2.0.48 h1:5dmlB680ZkFG2RN/0lvTAghrSxIESeu9/2aeDqACtjw= github.com/mailgun/raymond/v2 v2.0.48 h1:5dmlB680ZkFG2RN/0lvTAghrSxIESeu9/2aeDqACtjw=
github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18= github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
@@ -326,15 +326,15 @@ github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -456,14 +456,18 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b h1:u49mjRnygnB34h8OKbnNJFVUtWSKIKb1KukdV8bILUM= github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b h1:u49mjRnygnB34h8OKbnNJFVUtWSKIKb1KukdV8bILUM=
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -484,8 +488,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@@ -533,8 +537,8 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
@@ -547,7 +551,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -570,8 +574,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -581,8 +585,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -624,8 +628,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -634,8 +638,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
@@ -656,8 +660,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -688,8 +692,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@@ -1,13 +1,11 @@
package model package model
// QueryByAddressRequest the request parameter of address api
type QueryByAddressRequest struct { type QueryByAddressRequest struct {
Address string `url:"address"` Address string `url:"address"`
Offset int `url:"offset"` Offset int `url:"offset"`
Limit int `url:"limit"` Limit int `url:"limit"`
} }
// QueryByHashRequest the request parameter of hash api
type QueryByHashRequest struct { type QueryByHashRequest struct {
Txs []string `url:"txs"` Txs []string `url:"txs"`
} }

View File

@@ -2,19 +2,16 @@ package model
import "bridge-history-api/service" import "bridge-history-api/service"
// Data the return struct of apis
type Data struct { type Data struct {
Result []*service.TxHistoryInfo `json:"result"` Result []*service.TxHistoryInfo `json:"result"`
Total uint64 `json:"total"` Total uint64 `json:"total"`
} }
// QueryByAddressResponse the schema of address api response
type QueryByAddressResponse struct { type QueryByAddressResponse struct {
Message string `json:"message"` Message string `json:"message"`
Data *Data `json:"data"` Data *Data `json:"data"`
} }
// QueryByHashResponse the schema of hash api response
type QueryByHashResponse struct { type QueryByHashResponse struct {
Message string `json:"message"` Message string `json:"message"`
Data *Data `json:"data"` Data *Data `json:"data"`

View File

@@ -11,7 +11,6 @@ import (
"bridge-history-api/db/orm" "bridge-history-api/db/orm"
) )
// Finalized the schema of tx finalized infos
type Finalized struct { type Finalized struct {
Hash string `json:"hash"` Hash string `json:"hash"`
Amount string `json:"amount"` Amount string `json:"amount"`
@@ -21,7 +20,6 @@ type Finalized struct {
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
} }
// UserClaimInfo the schema of tx claim infos
type UserClaimInfo struct { type UserClaimInfo struct {
From string `json:"from"` From string `json:"from"`
To string `json:"to"` To string `json:"to"`
@@ -33,7 +31,6 @@ type UserClaimInfo struct {
BatchIndex string `json:"batch_index"` BatchIndex string `json:"batch_index"`
} }
// TxHistoryInfo the schema of tx history infos
type TxHistoryInfo struct { type TxHistoryInfo struct {
Hash string `json:"hash"` Hash string `json:"hash"`
Amount string `json:"amount"` Amount string `json:"amount"`
@@ -50,7 +47,6 @@ type TxHistoryInfo struct {
type HistoryService interface { type HistoryService interface {
GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error)
GetClaimableTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
} }
// NewHistoryService returns a service backed with a "db" // NewHistoryService returns a service backed with a "db"
@@ -64,7 +60,6 @@ type historyBackend struct {
db db.OrmFactory db db.OrmFactory
} }
// GetCrossTxClaimInfo get UserClaimInfos by address
func GetCrossTxClaimInfo(msgHash string, db db.OrmFactory) *UserClaimInfo { func GetCrossTxClaimInfo(msgHash string, db db.OrmFactory) *UserClaimInfo {
l2sentMsg, err := db.GetL2SentMsgByHash(msgHash) l2sentMsg, err := db.GetL2SentMsgByHash(msgHash)
if err != nil { if err != nil {
@@ -111,50 +106,6 @@ func updateCrossTxHash(msgHash string, txInfo *TxHistoryInfo, db db.OrmFactory)
} }
// GetClaimableTxsByAddress get all claimable txs under given address
func (h *historyBackend) GetClaimableTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
var txHistories []*TxHistoryInfo
total, err := h.db.GetClaimableL2SentMsgByAddressTotalNum(address.Hex())
if err != nil || total == 0 {
return txHistories, 0, err
}
results, err := h.db.GetClaimableL2SentMsgByAddressWithOffset(address.Hex(), offset, limit)
if err != nil || len(results) == 0 {
return txHistories, 0, err
}
var msgHashList []string
for _, result := range results {
msgHashList = append(msgHashList, result.MsgHash)
}
crossMsgs, err := h.db.GetL2CrossMsgByMsgHashList(msgHashList)
// crossMsgs can be empty, because they can be emitted by users calling the contract directly
if err != nil {
return txHistories, 0, err
}
crossMsgMap := make(map[string]*orm.CrossMsg)
for _, crossMsg := range crossMsgs {
crossMsgMap[crossMsg.MsgHash] = crossMsg
}
for _, result := range results {
txInfo := &TxHistoryInfo{
Hash: result.TxHash,
IsL1: false,
BlockNumber: result.Height,
FinalizeTx: &Finalized{},
ClaimInfo: GetCrossTxClaimInfo(result.MsgHash, h.db),
}
if crossMsg, exist := crossMsgMap[result.MsgHash]; exist {
txInfo.Amount = crossMsg.Amount
txInfo.To = crossMsg.Target
txInfo.BlockTimestamp = crossMsg.Timestamp
txInfo.CreatedAt = crossMsg.CreatedAt
}
txHistories = append(txHistories, txInfo)
}
return txHistories, total, err
}
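
For reference, a hedged sketch of how a caller (for example an HTTP handler) might consume the GetClaimableTxsByAddress method shown above, which appears on one side of this diff; the paging values, variable names and logging are illustrative assumptions:

    // listClaimable pages through the claimable history for one address.
    func listClaimable(svc HistoryService, addr common.Address) {
        txs, total, err := svc.GetClaimableTxsByAddress(addr, 0, 10) // offset 0, limit 10
        if err != nil {
            log.Error("failed to fetch claimable txs", "err", err)
            return
        }
        log.Info("claimable txs", "total", total, "returned", len(txs))
    }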
// GetTxsByAddress get all txs under given address
func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) { func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
var txHistories []*TxHistoryInfo var txHistories []*TxHistoryInfo
total, err := h.db.GetTotalCrossMsgCountByAddress(address.String()) total, err := h.db.GetTotalCrossMsgCountByAddress(address.String())
@@ -186,7 +137,6 @@ func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, l
return txHistories, total, nil return txHistories, total, nil
} }
// GetTxsByHashes get tx infos under given tx hashes
func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error) { func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error) {
txHistories := make([]*TxHistoryInfo, 0) txHistories := make([]*TxHistoryInfo, 0)
for _, hash := range hashes { for _, hash := range hashes {

View File

@@ -2,9 +2,8 @@ package utils
import ( import (
"context" "context"
"math/big"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
@@ -14,7 +13,16 @@ import (
"bridge-history-api/db/orm" "bridge-history-api/db/orm"
) )
// CachedParsedTxCalldata store parsed batch infos type MsgHashWrapper struct {
MsgHash common.Hash
TxHash common.Hash
}
type L2SentMsgWrapper struct {
L2SentMsg *orm.L2SentMsg
TxHash common.Hash
}
type CachedParsedTxCalldata struct { type CachedParsedTxCalldata struct {
CallDataIndex uint64 CallDataIndex uint64
BatchIndices []uint64 BatchIndices []uint64
@@ -22,14 +30,13 @@ type CachedParsedTxCalldata struct {
EndBlocks []uint64 EndBlocks []uint64
} }
// ParseBackendL1EventLogs parses L1 watched events func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrapper, []*orm.RelayedMsg, error) {
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, error) {
// Need use contract abi to parse event Log // Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up // Can only be tested after we have our contracts set up
var l1CrossMsg []*orm.CrossMsg var l1CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg var relayedMsgs []*orm.RelayedMsg
var msgHash string var msgHashes []MsgHashWrapper
for _, vlog := range logs { for _, vlog := range logs {
switch vlog.Topics[0] { switch vlog.Topics[0] {
case backendabi.L1DepositETHSig: case backendabi.L1DepositETHSig:
@@ -37,7 +44,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog) err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack DepositETH event", "err", err) log.Warn("Failed to unpack DepositETH event", "err", err)
return l1CrossMsg, relayedMsgs, err return l1CrossMsg, msgHashes, relayedMsgs, err
} }
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{ l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
@@ -46,14 +53,13 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Amount: event.Amount.String(), Amount: event.Amount.String(),
Asset: int(orm.ETH), Asset: int(orm.ETH),
Layer1Hash: vlog.TxHash.Hex(), Layer1Hash: vlog.TxHash.Hex(),
MsgHash: msgHash,
}) })
case backendabi.L1DepositERC20Sig: case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{} event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog) err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack DepositERC20 event", "err", err) log.Warn("Failed to unpack DepositERC20 event", "err", err)
return l1CrossMsg, relayedMsgs, err return l1CrossMsg, msgHashes, relayedMsgs, err
} }
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{ l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
@@ -64,14 +70,13 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer1Hash: vlog.TxHash.Hex(), Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
MsgHash: msgHash,
}) })
case backendabi.L1DepositERC721Sig: case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{} event := backendabi.ERC721MessageEvent{}
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog) err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack DepositERC721 event", "err", err) log.Warn("Failed to unpack DepositERC721 event", "err", err)
return l1CrossMsg, relayedMsgs, err return l1CrossMsg, msgHashes, relayedMsgs, err
} }
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{ l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
@@ -82,14 +87,13 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(), TokenIDs: event.TokenID.String(),
MsgHash: msgHash,
}) })
case backendabi.L1DepositERC1155Sig: case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{} event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog) err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack DepositERC1155 event", "err", err) log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return l1CrossMsg, relayedMsgs, err return l1CrossMsg, msgHashes, relayedMsgs, err
} }
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{ l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
@@ -101,60 +105,24 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(), TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(), Amount: event.Amount.String(),
MsgHash: msgHash,
}) })
case backendabi.L1SentMessageEventSignature: case backendabi.L1SentMessageEventSignature:
event := backendabi.L1SentMessageEvent{} event := backendabi.L1SentMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog) err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err) log.Warn("Failed to unpack SentMessage event", "err", err)
return l1CrossMsg, relayedMsgs, err return l1CrossMsg, msgHashes, relayedMsgs, err
} }
// since every deposit event is emitted after a SentMessage event, we can use this msg_hash as the next deposit event's msg_hash	msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
msgHash = ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).Hex() msgHashes = append(msgHashes, MsgHashWrapper{
case backendabi.L1BatchDepositERC721Sig: MsgHash: msgHash,
event := backendabi.BatchERC721MessageEvent{} TxHash: vlog.TxHash})
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "BatchDepositERC721", vlog)
if err != nil {
log.Warn("Failed to unpack BatchDepositERC721 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
MsgHash: msgHash,
})
case backendabi.L1BatchDepositERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack BatchDepositERC1155 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
MsgHash: msgHash,
})
case backendabi.L1RelayedMessageEventSignature: case backendabi.L1RelayedMessageEventSignature:
event := backendabi.L1RelayedMessageEvent{} event := backendabi.L1RelayedMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog) err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err) log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l1CrossMsg, relayedMsgs, err return l1CrossMsg, msgHashes, relayedMsgs, err
} }
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{ relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(), MsgHash: event.MessageHash.String(),
@@ -165,18 +133,17 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
} }
} }
return l1CrossMsg, relayedMsgs, nil return l1CrossMsg, msgHashes, relayedMsgs, nil
} }
// ParseBackendL2EventLogs parses L2 watched events func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []L2SentMsgWrapper, error) {
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
// Need to use the contract ABI to parse the event log	// Need to use the contract ABI to parse the event log
// Can only be tested after we have our contracts set up // Can only be tested after we have our contracts set up
var l2CrossMsg []*orm.CrossMsg var l2CrossMsg []*orm.CrossMsg
// this is used to confirm finalized L1 messages	// this is used to confirm finalized L1 messages
var relayedMsgs []*orm.RelayedMsg var relayedMsgs []*orm.RelayedMsg
var l2SentMsgs []*orm.L2SentMsg var l2SentMsg []L2SentMsgWrapper
for _, vlog := range logs { for _, vlog := range logs {
switch vlog.Topics[0] { switch vlog.Topics[0] {
case backendabi.L2WithdrawETHSig: case backendabi.L2WithdrawETHSig:
@@ -184,9 +151,8 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog) err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack WithdrawETH event", "err", err) log.Warn("Failed to unpack WithdrawETH event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err return l2CrossMsg, relayedMsgs, l2SentMsg, err
} }
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{ l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
Sender: event.From.String(), Sender: event.From.String(),
@@ -194,16 +160,14 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Amount: event.Amount.String(), Amount: event.Amount.String(),
Asset: int(orm.ETH), Asset: int(orm.ETH),
Layer2Hash: vlog.TxHash.Hex(), Layer2Hash: vlog.TxHash.Hex(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
}) })
case backendabi.L2WithdrawERC20Sig: case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{} event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog) err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack WithdrawERC20 event", "err", err) log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err return l2CrossMsg, relayedMsgs, l2SentMsg, err
} }
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{ l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
Sender: event.From.String(), Sender: event.From.String(),
@@ -213,16 +177,14 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Hash: vlog.TxHash.Hex(), Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
}) })
case backendabi.L2WithdrawERC721Sig: case backendabi.L2WithdrawERC721Sig:
event := backendabi.ERC721MessageEvent{} event := backendabi.ERC721MessageEvent{}
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog) err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack WithdrawERC721 event", "err", err) log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err return l2CrossMsg, relayedMsgs, l2SentMsg, err
} }
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{ l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
Sender: event.From.String(), Sender: event.From.String(),
@@ -232,16 +194,14 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(), TokenIDs: event.TokenID.String(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
}) })
case backendabi.L2WithdrawERC1155Sig: case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{} event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog) err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err) log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err return l2CrossMsg, relayedMsgs, l2SentMsg, err
} }
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{ l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
Sender: event.From.String(), Sender: event.From.String(),
@@ -252,73 +212,34 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(), TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(), Amount: event.Amount.String(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2BatchWithdrawERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack BatchWithdrawERC721 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2BatchWithdrawERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack BatchWithdrawERC1155 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
}) })
case backendabi.L2SentMessageEventSignature: case backendabi.L2SentMessageEventSignature:
event := backendabi.L2SentMessageEvent{} event := backendabi.L2SentMessageEvent{}
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog) err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err) log.Warn("Failed to unpack SentMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err return l2CrossMsg, relayedMsgs, l2SentMsg, err
} }
// since every withdraw event is emitted after a SentMessage event, we can use this msg_hash as the next withdraw event's msg_hash
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message) msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
l2SentMsgs = append(l2SentMsgs, l2SentMsg = append(l2SentMsg,
&orm.L2SentMsg{ L2SentMsgWrapper{
Sender: event.Sender.Hex(), TxHash: vlog.TxHash,
TxHash: vlog.TxHash.Hex(), L2SentMsg: &orm.L2SentMsg{
Target: event.Target.Hex(), Sender: event.Sender.Hex(),
Value: event.Value.String(), Target: event.Target.Hex(),
MsgHash: msgHash.Hex(), Value: event.Value.String(),
Height: vlog.BlockNumber, MsgHash: msgHash.Hex(),
Nonce: event.MessageNonce.Uint64(), Height: vlog.BlockNumber,
MsgData: hexutil.Encode(event.Message), Nonce: event.MessageNonce.Uint64(),
MsgData: hexutil.Encode(event.Message),
},
}) })
case backendabi.L2RelayedMessageEventSignature: case backendabi.L2RelayedMessageEventSignature:
event := backendabi.L2RelayedMessageEvent{} event := backendabi.L2RelayedMessageEvent{}
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog) err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err) log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err return l2CrossMsg, relayedMsgs, l2SentMsg, err
} }
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{ relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(), MsgHash: event.MessageHash.String(),
@@ -328,10 +249,9 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
} }
} }
return l2CrossMsg, relayedMsgs, l2SentMsgs, nil return l2CrossMsg, relayedMsgs, l2SentMsg, nil
} }
// ParseBatchInfoFromScrollChain parses ScrollChain events
func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) { func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
var rollupBatches []*orm.RollupBatch var rollupBatches []*orm.RollupBatch
cache := make(map[string]CachedParsedTxCalldata) cache := make(map[string]CachedParsedTxCalldata)
@@ -388,13 +308,3 @@ func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client
} }
return rollupBatches, nil return rollupBatches, nil
} }
func convertBigIntArrayToString(array []*big.Int) string {
stringArray := make([]string, len(array))
for i, num := range array {
stringArray[i] = num.String()
}
result := strings.Join(stringArray, ", ")
return result
}
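Note on the parsing changes above: ParseBackendL1EventLogs now returns the computed message hashes separately as []MsgHashWrapper (and the L2 parser returns []L2SentMsgWrapper), each paired with the transaction hash that emitted the SentMessage event, instead of stamping MsgHash onto every CrossMsg in event order. A minimal sketch of how a caller might join the two results is shown below; the attachL1MsgHashes helper, the orm import path, and the package name are assumptions for illustration, not code from this change.

package utils // assumed package; the real file's package is not shown in this diff

import (
	"github.com/scroll-tech/go-ethereum/common"

	"bridge-history-api/db/orm" // assumed import path for the orm package used above
)

// MsgHashWrapper mirrors the struct introduced in this diff: it pairs a
// computed message hash with the L1 transaction that emitted SentMessage.
type MsgHashWrapper struct {
	MsgHash common.Hash
	TxHash  common.Hash
}

// attachL1MsgHashes is a hypothetical caller-side helper: it indexes the
// wrappers by transaction hash and copies each hash onto the deposit
// CrossMsg with the matching Layer1Hash, replacing the old ordering-based
// assignment inside the parser.
func attachL1MsgHashes(msgs []*orm.CrossMsg, hashes []MsgHashWrapper) {
	byTx := make(map[string]string, len(hashes))
	for _, h := range hashes {
		byTx[h.TxHash.Hex()] = h.MsgHash.Hex()
	}
	for _, m := range msgs {
		if msgHash, ok := byTx[m.Layer1Hash]; ok {
			m.MsgHash = msgHash
		}
	}
}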

View File

@@ -22,7 +22,6 @@ func Keccak2(a common.Hash, b common.Hash) common.Hash {
return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...))) return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
} }
// GetSafeBlockNumber gets the safe block number, which is the current block number minus the confirmations
func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) { func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
number, err := client.BlockNumber(ctx) number, err := client.BlockNumber(ctx)
if err != nil || number <= confirmations { if err != nil || number <= confirmations {
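The hunk above drops the doc comment on GetSafeBlockNumber and truncates its body. For readers following the utilities, here is a minimal sketch of such a helper, based only on the visible comment ("the current block number minus the confirmations") and the visible guard; the fallback return value of 1 is an assumption, not taken from this diff.

package utils // assumed package name

import (
	"context"

	"github.com/scroll-tech/go-ethereum/ethclient"
)

// GetSafeBlockNumber returns the latest block number minus the required
// confirmations, so callers only process blocks that are unlikely to be
// reorged away.
func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
	number, err := client.BlockNumber(ctx)
	if err != nil || number <= confirmations {
		// RPC failure or not enough blocks yet; the value 1 here is an
		// assumed fallback for illustration.
		return 1, err
	}
	return number - confirmations, nil
}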

View File

@@ -11,13 +11,13 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/utils" cutils "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/utils"
) )
var app *cli.App var app *cli.App
@@ -29,18 +29,18 @@ func init() {
app.Name = "event-watcher" app.Name = "event-watcher"
app.Usage = "The Scroll Event Watcher" app.Usage = "The Scroll Event Watcher"
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...) app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx) return cutils.LogSetup(ctx)
} }
// Register `event-watcher-test` app for integration-test. // Register `event-watcher-test` app for integration-test.
utils.RegisterSimulation(app, utils.EventWatcherApp) cutils.RegisterSimulation(app, cutils.EventWatcherApp)
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
// Load config file. // Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name) cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile) cfg, err := config.NewConfig(cfgFile)
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -48,13 +48,13 @@ func action(ctx *cli.Context) error {
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
db, err := database.InitDB(cfg.DBConfig) db, err := utils.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
if err = database.CloseDB(db); err != nil { if err = utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -75,14 +75,14 @@ func action(ctx *cli.Context) error {
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db) l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db) l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
go utils.Loop(subCtx, 10*time.Second, func() { go cutils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil { if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
log.Error("Failed to fetch bridge contract", "err", loopErr) log.Error("Failed to fetch bridge contract", "err", loopErr)
} }
}) })
// Start l2 watcher process // Start l2 watcher process
go utils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent) go cutils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finished starting all L2 functions	// Finished starting all L2 functions
log.Info("Start event-watcher successfully") log.Info("Start event-watcher successfully")

View File

@@ -11,15 +11,14 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/utils" cutils "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
butils "scroll-tech/bridge/internal/utils" "scroll-tech/bridge/internal/utils"
) )
var app *cli.App var app *cli.App
@@ -32,31 +31,31 @@ func init() {
app.Usage = "The Scroll Gas Oracle" app.Usage = "The Scroll Gas Oracle"
app.Description = "Scroll Gas Oracle." app.Description = "Scroll Gas Oracle."
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...) app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx) return cutils.LogSetup(ctx)
} }
// Register `gas-oracle-test` app for integration-test. // Register `gas-oracle-test` app for integration-test.
utils.RegisterSimulation(app, utils.GasOracleApp) cutils.RegisterSimulation(app, cutils.GasOracleApp)
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
// Load config file. // Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name) cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile) cfg, err := config.NewConfig(cfgFile)
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
} }
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
db, err := database.InitDB(cfg.DBConfig) db, err := utils.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
if err = database.CloseDB(db); err != nil { if err = utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -77,8 +76,7 @@ func action(ctx *cli.Context) error {
return err return err
} }
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig) l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil { if err != nil {
@@ -91,8 +89,8 @@ func action(ctx *cli.Context) error {
return err return err
} }
// Start l1 watcher process // Start l1 watcher process
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) { go cutils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations) number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
if loopErr != nil { if loopErr != nil {
log.Error("failed to get block number", "err", loopErr) log.Error("failed to get block number", "err", loopErr)
return return
@@ -104,8 +102,8 @@ func action(ctx *cli.Context) error {
}) })
// Start l1relayer process // Start l1relayer process
go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle) go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle) go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finished starting all message relayer functions	// Finished starting all message relayer functions
log.Info("Start gas-oracle successfully") log.Info("Start gas-oracle successfully")

View File

@@ -10,13 +10,13 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/utils" cutils "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/utils"
) )
var app *cli.App var app *cli.App
@@ -29,18 +29,18 @@ func init() {
app.Usage = "The Scroll Message Relayer" app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1." app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...) app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx) return cutils.LogSetup(ctx)
} }
// Register `message-relayer-test` app for integration-test. // Register `message-relayer-test` app for integration-test.
utils.RegisterSimulation(app, utils.MessageRelayerApp) cutils.RegisterSimulation(app, cutils.MessageRelayerApp)
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
// Load config file. // Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name) cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile) cfg, err := config.NewConfig(cfgFile)
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -48,13 +48,13 @@ func action(ctx *cli.Context) error {
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
db, err := database.InitDB(cfg.DBConfig) db, err := utils.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
if err = database.CloseDB(db); err != nil { if err = utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -69,7 +69,7 @@ func action(ctx *cli.Context) error {
} }
// Start l1relayer process // Start l1relayer process
go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents) go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)
// Finished starting all message relayer functions	// Finished starting all message relayer functions
log.Info("Start message-relayer successfully") log.Info("Start message-relayer successfully")

View File

@@ -11,15 +11,14 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/utils" cutils "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
butils "scroll-tech/bridge/internal/utils" "scroll-tech/bridge/internal/utils"
) )
var app *cli.App var app *cli.App
@@ -31,19 +30,19 @@ func init() {
app.Name = "rollup-relayer" app.Name = "rollup-relayer"
app.Usage = "The Scroll Rollup Relayer" app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...) app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Flags = append(app.Flags, utils.RollupRelayerFlags...) app.Flags = append(app.Flags, cutils.RollupRelayerFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx) return cutils.LogSetup(ctx)
} }
// Register `rollup-relayer-test` app for integration-test. // Register `rollup-relayer-test` app for integration-test.
utils.RegisterSimulation(app, utils.RollupRelayerApp) cutils.RegisterSimulation(app, cutils.RollupRelayerApp)
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
// Load config file. // Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name) cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile) cfg, err := config.NewConfig(cfgFile)
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -51,13 +50,13 @@ func action(ctx *cli.Context) error {
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
db, err := database.InitDB(cfg.DBConfig) db, err := utils.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
if err = database.CloseDB(db); err != nil { if err = utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -72,7 +71,7 @@ func action(ctx *cli.Context) error {
return err return err
} }
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name) initGenesis := ctx.Bool(cutils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis) l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
if err != nil { if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err) log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
@@ -91,12 +90,11 @@ func action(ctx *cli.Context) error {
return err return err
} }
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
// Watcher loop to fetch missing blocks // Watcher loop to fetch missing blocks
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) { go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations) number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
if loopErr != nil { if loopErr != nil {
log.Error("failed to get block number", "err", loopErr) log.Error("failed to get block number", "err", loopErr)
return return
@@ -104,13 +102,13 @@ func action(ctx *cli.Context) error {
l2watcher.TryFetchRunningMissingBlocks(number) l2watcher.TryFetchRunningMissingBlocks(number)
}) })
go utils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk) go cutils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk)
go utils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch) go cutils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches) go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches) go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
// Finished starting all rollup relayer functions.	// Finished starting all rollup relayer functions.
log.Info("Start rollup-relayer successfully") log.Info("Start rollup-relayer successfully")

View File

@@ -4,14 +4,18 @@ go 1.19
require ( require (
github.com/agiledragon/gomonkey/v2 v2.9.0 github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.7
github.com/orcaman/concurrent-map v1.0.0 github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1 github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/pressly/goose/v3 v3.7.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/smartystreets/goconvey v1.8.0 github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.3 github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.25.7 github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
golang.org/x/sync v0.3.0 golang.org/x/sync v0.1.0
gorm.io/gorm v1.25.2 gorm.io/driver/postgres v1.5.0
gorm.io/gorm v1.25.1
) )
require ( require (
@@ -21,6 +25,7 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect github.com/google/uuid v1.3.0 // indirect
@@ -29,15 +34,18 @@ require (
github.com/holiman/uint256 v1.2.2 // indirect github.com/holiman/uint256 v1.2.2 // indirect
github.com/huin/goupnp v1.0.3 // indirect github.com/huin/goupnp v1.0.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect github.com/jinzhu/now v1.1.5 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/kr/pretty v0.3.1 // indirect github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect github.com/rjeczalik/notify v0.9.1 // indirect
@@ -52,10 +60,11 @@ require (
github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.11.0 // indirect golang.org/x/crypto v0.10.0 // indirect
golang.org/x/sys v0.10.0 // indirect golang.org/x/sys v0.9.0 // indirect
golang.org/x/text v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect golang.org/x/time v0.3.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect golang.org/x/tools v0.8.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
) )

View File

@@ -18,6 +18,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHH
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
@@ -28,6 +29,9 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -51,6 +55,13 @@ github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixH
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -58,11 +69,16 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -70,15 +86,20 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -92,12 +113,16 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf8=
github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
@@ -118,8 +143,16 @@ github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg= github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
@@ -127,48 +160,93 @@ github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYm
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho= gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64=
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
modernc.org/cc/v3 v3.36.1 h1:CICrjwr/1M4+6OQ4HJZ/AHxjcwe67r5vPUF518MkO8A=
modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs=
modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=

View File

@@ -4,15 +4,13 @@ import (
"encoding/json" "encoding/json"
"os" "os"
"path/filepath" "path/filepath"
"scroll-tech/common/database"
) )
// Config load configuration items. // Config load configuration items.
type Config struct { type Config struct {
L1Config *L1Config `json:"l1_config"` L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"` L2Config *L2Config `json:"l2_config"`
DBConfig *database.Config `json:"db_config"` DBConfig *DBConfig `json:"db_config"`
} }
// NewConfig returns a new instance of Config. // NewConfig returns a new instance of Config.

View File

@@ -1,7 +1,7 @@
package database package config
// Config db config // DBConfig db config
type Config struct { type DBConfig struct {
// data source name // data source name
DSN string `json:"dsn"` DSN string `json:"dsn"`
DriverName string `json:"driver_name"` DriverName string `json:"driver_name"`
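
For reference, here is a minimal standalone sketch of how the relocated DBConfig can be declared and decoded from a "db_config" JSON block. The DSN value and the wrapping main function are illustrative only and are not taken from the repository.

package main

import (
    "encoding/json"
    "fmt"
    "log"
)

// DBConfig mirrors the two fields and JSON tags visible in the diff above.
type DBConfig struct {
    DSN        string `json:"dsn"`
    DriverName string `json:"driver_name"`
}

func main() {
    // Example payload in the shape of the config's "db_config" section.
    raw := []byte(`{"dsn": "postgres://localhost:5432/scroll?sslmode=disable", "driver_name": "postgres"}`)

    var cfg DBConfig
    if err := json.Unmarshal(raw, &cfg); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("driver=%s dsn=%s\n", cfg.DriverName, cfg.DSN)
}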

View File

@@ -40,7 +40,7 @@ type ChunkProposerConfig struct {
type BatchProposerConfig struct { type BatchProposerConfig struct {
MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"` MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"` MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"` MaxL1CommitCalldataSizePerBatch uint64 `json:"max_l1_commit_calldata_size_per_batch"`
MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"` MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"` BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
} }
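
For reference, the JSON tags above correspond to a config block like the one decoded in the sketch below. The numeric values are made up, and all fields are fixed to uint64 here, which is only one of the two widths this compare moves MaxL1CommitCalldataSizePerBatch between.

package main

import (
    "encoding/json"
    "fmt"
    "log"
)

// BatchProposerConfig mirrors the field names and JSON tags visible in the diff above.
type BatchProposerConfig struct {
    MaxChunkNumPerBatch             uint64 `json:"max_chunk_num_per_batch"`
    MaxL1CommitGasPerBatch          uint64 `json:"max_l1_commit_gas_per_batch"`
    MaxL1CommitCalldataSizePerBatch uint64 `json:"max_l1_commit_calldata_size_per_batch"`
    MinChunkNumPerBatch             uint64 `json:"min_chunk_num_per_batch"`
    BatchTimeoutSec                 uint64 `json:"batch_timeout_sec"`
}

func main() {
    // Illustrative values only; they are not taken from the repository config.
    raw := []byte(`{
        "max_chunk_num_per_batch": 15,
        "max_l1_commit_gas_per_batch": 5000000,
        "max_l1_commit_calldata_size_per_batch": 120000,
        "min_chunk_num_per_batch": 1,
        "batch_timeout_sec": 300
    }`)

    var cfg BatchProposerConfig
    if err := json.Unmarshal(raw, &cfg); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("a batch closes at %d chunks or after %d seconds\n", cfg.MaxChunkNumPerBatch, cfg.BatchTimeoutSec)
}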

View File

@@ -52,7 +52,7 @@ type Layer1Relayer struct {
gasPriceDiff uint64 gasPriceDiff uint64
l1MessageOrm *orm.L1Message l1MessageOrm *orm.L1Message
l1BlockOrm *orm.L1Block l1Block *orm.L1Block
} }
// NewLayer1Relayer will return a new instance of Layer1RelayerClient // NewLayer1Relayer will return a new instance of Layer1RelayerClient
@@ -90,7 +90,7 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
l1Relayer := &Layer1Relayer{ l1Relayer := &Layer1Relayer{
ctx: ctx, ctx: ctx,
l1MessageOrm: orm.NewL1Message(db), l1MessageOrm: orm.NewL1Message(db),
l1BlockOrm: orm.NewL1Block(db), l1Block: orm.NewL1Block(db),
messageSender: messageSender, messageSender: messageSender,
l2MessengerABI: bridgeAbi.L2ScrollMessengerABI, l2MessengerABI: bridgeAbi.L2ScrollMessengerABI,
@@ -159,13 +159,13 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
// ProcessGasPriceOracle imports gas price to layer2 // ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() { func (r *Layer1Relayer) ProcessGasPriceOracle() {
latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx) latestBlockHeight, err := r.l1Block.GetLatestL1BlockHeight()
if err != nil { if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err) log.Warn("Failed to fetch latest L1 block height from db", "err", err)
return return
} }
blocks, err := r.l1BlockOrm.GetL1Blocks(r.ctx, map[string]interface{}{ blocks, err := r.l1Block.GetL1Blocks(map[string]interface{}{
"number": latestBlockHeight, "number": latestBlockHeight,
}) })
if err != nil { if err != nil {
@@ -197,7 +197,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return return
} }
err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String()) err = r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
if err != nil { if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err) log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return return
@@ -232,14 +232,14 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case cfm := <-r.gasOracleSender.ConfirmChan(): case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful { if !cfm.IsSuccessful {
// @discuss: maybe make it pending again? // @discuss: maybe make it pending again?
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String()) err := r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err) log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
} }
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm) log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else { } else {
// @todo handle db error // @todo handle db error
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String()) err := r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err) log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
} }
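
Both sides of ProcessGasPriceOracle query L1 blocks through a map[string]interface{} filter; the compare only changes whether a context is threaded through and how the ORM field is named. Below is a compilable sketch of that filter pattern with GORM. It is a standalone approximation, not the bridge's actual orm.L1Block implementation; the ordering clause in particular is an assumption.

package orm

import (
    "context"

    "gorm.io/gorm"
)

// L1Block is trimmed down to the columns that appear in the diff.
type L1Block struct {
    Number          uint64
    Hash            string
    BaseFee         uint64
    GasOracleStatus int16
}

// GetL1Blocks shows the map-based filter used by the relayer: every key/value
// pair (e.g. {"hash": "..."} or {"number": 100}) becomes an equality predicate.
func GetL1Blocks(ctx context.Context, db *gorm.DB, fields map[string]interface{}) ([]L1Block, error) {
    var blocks []L1Block
    tx := db.WithContext(ctx).Where(fields).Order("number ASC").Find(&blocks)
    if tx.Error != nil {
        return nil, tx.Error
    }
    return blocks, nil
}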

View File

@@ -12,14 +12,13 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/controller/sender" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/orm/migrate"
bridgeUtils "scroll-tech/bridge/internal/utils"
) )
var ( var (
@@ -50,7 +49,7 @@ var (
) )
func setupL1RelayerDB(t *testing.T) *gorm.DB { func setupL1RelayerDB(t *testing.T) *gorm.DB {
db, err := database.InitDB(cfg.DBConfig) db, err := bridgeUtils.InitDB(cfg.DBConfig)
assert.NoError(t, err) assert.NoError(t, err)
sqlDB, err := db.DB() sqlDB, err := db.DB()
assert.NoError(t, err) assert.NoError(t, err)
@@ -61,7 +60,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
// testCreateNewRelayer test create new relayer instance and stop // testCreateNewRelayer test create new relayer instance and stop
func testCreateNewL1Relayer(t *testing.T) { func testCreateNewL1Relayer(t *testing.T) {
db := setupL1RelayerDB(t) db := setupL1RelayerDB(t)
defer database.CloseDB(db) defer bridgeUtils.CloseDB(db)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig) relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, relayer) assert.NotNil(t, relayer)
@@ -69,7 +68,7 @@ func testCreateNewL1Relayer(t *testing.T) {
func testL1RelayerProcessSaveEvents(t *testing.T) { func testL1RelayerProcessSaveEvents(t *testing.T) {
db := setupL1RelayerDB(t) db := setupL1RelayerDB(t)
defer database.CloseDB(db) defer bridgeUtils.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db) l1MessageOrm := orm.NewL1Message(db)
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig) relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
@@ -87,7 +86,7 @@ func testL1RelayerProcessSaveEvents(t *testing.T) {
func testL1RelayerMsgConfirm(t *testing.T) { func testL1RelayerMsgConfirm(t *testing.T) {
db := setupL1RelayerDB(t) db := setupL1RelayerDB(t)
defer database.CloseDB(db) defer bridgeUtils.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db) l1MessageOrm := orm.NewL1Message(db)
l1Messages := []*orm.L1Message{ l1Messages := []*orm.L1Message{
{MsgHash: "msg-1", QueueIndex: 0}, {MsgHash: "msg-1", QueueIndex: 0},
@@ -124,12 +123,12 @@ func testL1RelayerMsgConfirm(t *testing.T) {
func testL1RelayerGasOracleConfirm(t *testing.T) { func testL1RelayerGasOracleConfirm(t *testing.T) {
db := setupL1RelayerDB(t) db := setupL1RelayerDB(t)
defer database.CloseDB(db) defer bridgeUtils.CloseDB(db)
l1BlockOrm := orm.NewL1Block(db) l1BlockOrm := orm.NewL1Block(db)
l1Block := []orm.L1Block{ l1Block := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0, GasOracleStatus: int16(types.GasOraclePending)}, {Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1, GasOracleStatus: int16(types.GasOraclePending)}, {Hash: "gas-oracle-2", Number: 1},
} }
// Insert test data. // Insert test data.
assert.NoError(t, l1BlockOrm.InsertL1Blocks(context.Background(), l1Block)) assert.NoError(t, l1BlockOrm.InsertL1Blocks(context.Background(), l1Block))
@@ -153,8 +152,8 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
// Check the database for the updated status using TryTimes. // Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool { ok := utils.TryTimes(5, func() bool {
msg1, err1 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-1"}) msg1, err1 := l1BlockOrm.GetL1Blocks(map[string]interface{}{"hash": "gas-oracle-1"})
msg2, err2 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-2"}) msg2, err2 := l1BlockOrm.GetL1Blocks(map[string]interface{}{"hash": "gas-oracle-2"})
return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported && return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported &&
err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleFailed err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleFailed
}) })
@@ -163,7 +162,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
func testL1RelayerProcessGasPriceOracle(t *testing.T) { func testL1RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL1RelayerDB(t) db := setupL1RelayerDB(t)
defer database.CloseDB(db) defer bridgeUtils.CloseDB(db)
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@@ -175,28 +174,28 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
var l1BlockOrm *orm.L1Block var l1BlockOrm *orm.L1Block
convey.Convey("GetLatestL1BlockHeight failure", t, func() { convey.Convey("GetLatestL1BlockHeight failure", t, func() {
targetErr := errors.New("GetLatestL1BlockHeight error") targetErr := errors.New("GetLatestL1BlockHeight error")
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) { patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func() (uint64, error) {
return 0, targetErr return 0, targetErr
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
}) })
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) { patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func() (uint64, error) {
return 100, nil return 100, nil
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
convey.Convey("GetL1Blocks failure", t, func() { convey.Convey("GetL1Blocks failure", t, func() {
targetErr := errors.New("GetL1Blocks error") targetErr := errors.New("GetL1Blocks error")
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) { patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
return nil, targetErr return nil, targetErr
}) })
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
}) })
convey.Convey("Block not exist", t, func() { convey.Convey("Block not exist", t, func() {
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) { patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
tmpInfo := []orm.L1Block{ tmpInfo := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0}, {Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1}, {Hash: "gas-oracle-2", Number: 1},
@@ -206,12 +205,12 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
}) })
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) { patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
tmpInfo := []orm.L1Block{ tmpInfo := []orm.L1Block{
{ {
Hash: "gas-oracle-1", Hash: "gas-oracle-1",
Number: 0, Number: 0,
GasOracleStatus: int16(types.GasOraclePending), GasOracleStatus: int(types.GasOraclePending),
}, },
} }
return tmpInfo, nil return tmpInfo, nil

View File

@@ -22,6 +22,7 @@ import (
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/sender" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
) )
var ( var (
@@ -29,6 +30,7 @@ var (
bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry) bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry) bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry) bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesSkippedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
) )
// Layer2Relayer is responsible for // Layer2Relayer is responsible for
@@ -169,8 +171,8 @@ func (r *Layer2Relayer) initializeGenesis() error {
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String()) log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
chunk := &types.Chunk{ chunk := &bridgeTypes.Chunk{
Blocks: []*types.WrappedBlock{{ Blocks: []*bridgeTypes.WrappedBlock{{
Header: genesis, Header: genesis,
Transactions: nil, Transactions: nil,
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
@@ -189,7 +191,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
} }
var batch *orm.Batch var batch *orm.Batch
batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk}, dbTX) batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*bridgeTypes.Chunk{chunk}, dbTX)
if err != nil { if err != nil {
return fmt.Errorf("failed to insert batch: %v", err) return fmt.Errorf("failed to insert batch: %v", err)
} }
@@ -317,7 +319,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
} }
for _, batch := range pendingBatches { for _, batch := range pendingBatches {
// get current header and parent header. // get current header and parent header.
currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader) currentBatchHeader, err := bridgeTypes.DecodeBatchHeader(batch.BatchHeader)
if err != nil { if err != nil {
log.Error("Failed to decode batch header", "index", batch.Index, "error", err) log.Error("Failed to decode batch header", "index", batch.Index, "error", err)
return return
@@ -344,7 +346,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
encodedChunks := make([][]byte, len(dbChunks)) encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks { for i, c := range dbChunks {
var wrappedBlocks []*types.WrappedBlock var wrappedBlocks []*bridgeTypes.WrappedBlock
wrappedBlocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber) wrappedBlocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil { if err != nil {
log.Error("Failed to fetch wrapped blocks", log.Error("Failed to fetch wrapped blocks",
@@ -352,7 +354,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
"end number", c.EndBlockNumber, "error", err) "end number", c.EndBlockNumber, "error", err)
return return
} }
chunk := &types.Chunk{ chunk := &bridgeTypes.Chunk{
Blocks: wrappedBlocks, Blocks: wrappedBlocks,
} }
var chunkBytes []byte var chunkBytes []byte
@@ -393,6 +395,15 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// ProcessCommittedBatches submit proof to layer 1 rollup contract // ProcessCommittedBatches submit proof to layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() { func (r *Layer2Relayer) ProcessCommittedBatches() {
// set skipped batches in a single db operation
if count, err := r.batchOrm.UpdateSkippedBatches(r.ctx); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
} else if count > 0 {
bridgeL2BatchesSkippedTotalCounter.Inc(int64(count))
log.Info("Skipping batches", "count", count)
}
// retrieves the earliest batch whose rollup status is 'committed' // retrieves the earliest batch whose rollup status is 'committed'
fields := map[string]interface{}{ fields := map[string]interface{}{
"rollup_status": types.RollupCommitted, "rollup_status": types.RollupCommitted,
@@ -420,6 +431,11 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// It's an intermediate state. The roller manager received the proof but has not verified // It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified. // the proof yet. We don't roll up the proof until it's verified.
return return
case types.ProvingTaskFailed, types.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
case types.ProvingTaskVerified: case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash) log.Info("Start to roll up zk proof", "hash", hash)
success := false success := false
@@ -439,8 +455,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
defer func() { defer func() {
// TODO: need to revisit this and have a more fine-grained error handling // TODO: need to revisit this and have a more fine-grained error handling
if !success { if !success {
log.Info("Failed to upload the proof, change rollup status to RollupFinalizeFailed", "hash", hash) log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash)
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizeFailed); err != nil { if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err) log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
} }
} }
@@ -495,7 +511,9 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
r.processingFinalization.Store(txID, hash) r.processingFinalization.Store(txID, hash)
default: default:
log.Error("encounter unreachable case in ProcessCommittedBatches", "proving status", status) log.Error("encounter unreachable case in ProcessCommittedBatches",
"block_status", status,
)
} }
} }
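
Taken together with testL2RelayerSkipBatches in the next file, the new UpdateSkippedBatches call implies a single bulk update: committed batches whose proving task failed or was skipped are moved to RollupFinalizationSkipped, and the number of affected rows feeds the skipped-batches counter. A hedged GORM sketch of such an update follows; the table name, the proving_status column name and the numeric status placeholders are assumptions, not the bridge's actual schema or enum encoding.

package orm

import (
    "context"

    "gorm.io/gorm"
)

// Placeholder status codes; the bridge uses the enums from scroll-tech/common/types.
const (
    rollupCommitted           = 3
    rollupFinalizationSkipped = 7
    provingTaskSkipped        = 5
    provingTaskFailed         = 6
)

// UpdateSkippedBatches marks every committed batch whose proof was skipped or
// failed as finalization-skipped and reports how many rows were updated.
func UpdateSkippedBatches(ctx context.Context, db *gorm.DB) (int64, error) {
    tx := db.WithContext(ctx).
        Table("batch").
        Where("rollup_status = ? AND proving_status IN ?", rollupCommitted, []int{provingTaskSkipped, provingTaskFailed}).
        Update("rollup_status", rollupFinalizationSkipped)
    if tx.Error != nil {
        return 0, tx.Error
    }
    return tx.RowsAffected, nil
}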

View File

@@ -12,19 +12,19 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message" "scroll-tech/common/types/message"
"scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/controller/sender" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
bridgeUtils "scroll-tech/bridge/internal/utils"
) )
func setupL2RelayerDB(t *testing.T) *gorm.DB { func setupL2RelayerDB(t *testing.T) *gorm.DB {
db, err := database.InitDB(cfg.DBConfig) db, err := bridgeUtils.InitDB(cfg.DBConfig)
assert.NoError(t, err) assert.NoError(t, err)
sqlDB, err := db.DB() sqlDB, err := db.DB()
assert.NoError(t, err) assert.NoError(t, err)
@@ -34,7 +34,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewRelayer(t *testing.T) { func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t) db := setupL2RelayerDB(t)
defer database.CloseDB(db) defer bridgeUtils.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, relayer) assert.NotNil(t, relayer)
@@ -42,14 +42,14 @@ func testCreateNewRelayer(t *testing.T) {
func testL2RelayerProcessPendingBatches(t *testing.T) { func testL2RelayerProcessPendingBatches(t *testing.T) {
db := setupL2RelayerDB(t) db := setupL2RelayerDB(t)
defer database.CloseDB(db) defer bridgeUtils.CloseDB(db)
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err) assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db) l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2}) err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err) assert.NoError(t, err)
chunkOrm := orm.NewChunk(db) chunkOrm := orm.NewChunk(db)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1) dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
@@ -57,7 +57,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2) dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err) assert.NoError(t, err)
batchOrm := orm.NewBatch(db) batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*types.Chunk{chunk1, chunk2}) batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*bridgeTypes.Chunk{chunk1, chunk2})
assert.NoError(t, err) assert.NoError(t, err)
relayer.ProcessPendingBatches() relayer.ProcessPendingBatches()
@@ -70,13 +70,13 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
func testL2RelayerProcessCommittedBatches(t *testing.T) { func testL2RelayerProcessCommittedBatches(t *testing.T) {
db := setupL2RelayerDB(t) db := setupL2RelayerDB(t)
defer database.CloseDB(db) defer bridgeUtils.CloseDB(db)
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err) assert.NoError(t, err)
batchOrm := orm.NewBatch(db) batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2}) batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
assert.NoError(t, err) assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted) err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
@@ -90,7 +90,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 1, len(statuses)) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizeFailed, statuses[0]) assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted) err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err) assert.NoError(t, err)
@@ -108,9 +108,70 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
assert.Equal(t, types.RollupFinalizing, statuses[0]) assert.Equal(t, types.RollupFinalizing, statuses[0])
} }
func testL2RelayerSkipBatches(t *testing.T) {
db := setupL2RelayerDB(t)
defer bridgeUtils.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus) string {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, rollupStatus)
assert.NoError(t, err)
proof := &message.AggProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, provingStatus)
assert.NoError(t, err)
return batch.Hash
}
skipped := []string{
createBatch(types.RollupCommitted, types.ProvingTaskSkipped),
createBatch(types.RollupCommitted, types.ProvingTaskFailed),
}
notSkipped := []string{
createBatch(types.RollupPending, types.ProvingTaskSkipped),
createBatch(types.RollupCommitting, types.ProvingTaskSkipped),
createBatch(types.RollupFinalizing, types.ProvingTaskSkipped),
createBatch(types.RollupFinalized, types.ProvingTaskSkipped),
createBatch(types.RollupPending, types.ProvingTaskFailed),
createBatch(types.RollupCommitting, types.ProvingTaskFailed),
createBatch(types.RollupFinalizing, types.ProvingTaskFailed),
createBatch(types.RollupFinalized, types.ProvingTaskFailed),
createBatch(types.RollupCommitted, types.ProvingTaskVerified),
}
relayer.ProcessCommittedBatches()
for _, id := range skipped {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{id})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
}
for _, id := range notSkipped {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{id})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.NotEqual(t, types.RollupFinalizationSkipped, statuses[0])
}
}
func testL2RelayerRollupConfirm(t *testing.T) { func testL2RelayerRollupConfirm(t *testing.T) {
db := setupL2RelayerDB(t) db := setupL2RelayerDB(t)
defer database.CloseDB(db) defer bridgeUtils.CloseDB(db)
// Create and set up the Layer2 Relayer. // Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
@@ -126,7 +187,7 @@ func testL2RelayerRollupConfirm(t *testing.T) {
batchOrm := orm.NewBatch(db) batchOrm := orm.NewBatch(db)
batchHashes := make([]string, len(processingKeys)) batchHashes := make([]string, len(processingKeys))
for i := range batchHashes { for i := range batchHashes {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2}) batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
assert.NoError(t, err) assert.NoError(t, err)
batchHashes[i] = batch.Hash batchHashes[i] = batch.Hash
} }
@@ -171,13 +232,13 @@ func testL2RelayerRollupConfirm(t *testing.T) {
func testL2RelayerGasOracleConfirm(t *testing.T) { func testL2RelayerGasOracleConfirm(t *testing.T) {
db := setupL2RelayerDB(t) db := setupL2RelayerDB(t)
defer database.CloseDB(db) defer bridgeUtils.CloseDB(db)
batchOrm := orm.NewBatch(db) batchOrm := orm.NewBatch(db)
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1}) batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
assert.NoError(t, err) assert.NoError(t, err)
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2}) batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
assert.NoError(t, err) assert.NoError(t, err)
// Create and set up the Layer2 Relayer. // Create and set up the Layer2 Relayer.
@@ -220,7 +281,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
func testLayer2RelayerProcessGasPriceOracle(t *testing.T) { func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t) db := setupL2RelayerDB(t)
defer database.CloseDB(db) defer bridgeUtils.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -9,11 +9,10 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
bridgeTypes "scroll-tech/bridge/internal/types"
) )
var ( var (
@@ -26,12 +25,12 @@ var (
l2Cli *ethclient.Client l2Cli *ethclient.Client
// l2 block // l2 block
wrappedBlock1 *types.WrappedBlock wrappedBlock1 *bridgeTypes.WrappedBlock
wrappedBlock2 *types.WrappedBlock wrappedBlock2 *bridgeTypes.WrappedBlock
// chunk // chunk
chunk1 *types.Chunk chunk1 *bridgeTypes.Chunk
chunk2 *types.Chunk chunk2 *bridgeTypes.Chunk
chunkHash1 common.Hash chunkHash1 common.Hash
chunkHash2 common.Hash chunkHash2 common.Hash
) )
@@ -46,7 +45,7 @@ func setupEnv(t *testing.T) {
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint() cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint() cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = &database.Config{ cfg.DBConfig = &config.DBConfig{
DSN: base.DBConfig.DSN, DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName, DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum, MaxOpenNum: base.DBConfig.MaxOpenNum,
@@ -59,19 +58,19 @@ func setupEnv(t *testing.T) {
templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json") templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock1 = &types.WrappedBlock{} wrappedBlock1 = &bridgeTypes.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace1, wrappedBlock1) err = json.Unmarshal(templateBlockTrace1, wrappedBlock1)
assert.NoError(t, err) assert.NoError(t, err)
chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}} chunk1 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1}}
chunkHash1, err = chunk1.Hash(0) chunkHash1, err = chunk1.Hash(0)
assert.NoError(t, err) assert.NoError(t, err)
templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json") templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock2 = &types.WrappedBlock{} wrappedBlock2 = &bridgeTypes.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace2, wrappedBlock2) err = json.Unmarshal(templateBlockTrace2, wrappedBlock2)
assert.NoError(t, err) assert.NoError(t, err)
chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}} chunk2 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock2}}
chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0)) chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
assert.NoError(t, err) assert.NoError(t, err)
} }
@@ -97,6 +96,7 @@ func TestFunctions(t *testing.T) {
t.Run("TestCreateNewRelayer", testCreateNewRelayer) t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches) t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches) t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm) t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm) t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle) t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)

View File

@@ -343,20 +343,6 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
if gasTipCap.Cmp(feeData.gasTipCap) < 0 { if gasTipCap.Cmp(feeData.gasTipCap) < 0 {
gasTipCap = feeData.gasTipCap gasTipCap = feeData.gasTipCap
} }
// adjust for rising basefee
adjBaseFee := big.NewInt(0)
if feeGas := atomic.LoadUint64(&s.baseFeePerGas); feeGas != 0 {
adjBaseFee.SetUint64(feeGas)
}
adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
currentGasFeeCap := new(big.Int).Add(gasTipCap, adjBaseFee)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}
// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 { if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice gasFeeCap = maxGasPrice
} }
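
With the base-fee adjustment removed on one side of this compare, resubmission reduces to escalating the previous fee caps by the EscalateMultipleNum/EscalateMultipleDen ratio and clamping the result at MaxGasPrice, which is also what the deleted test below computes. Here is a small self-contained sketch of that escalation arithmetic; the function name and the sample ratios are illustrative and not the sender's actual API.

package main

import (
    "fmt"
    "math/big"
)

// escalate scales a previous fee value by num/den and clamps it at maxPrice.
func escalate(prev *big.Int, num, den, maxPrice uint64) *big.Int {
    v := new(big.Int).Mul(prev, new(big.Int).SetUint64(num))
    v.Div(v, new(big.Int).SetUint64(den))
    upper := new(big.Int).SetUint64(maxPrice)
    if v.Cmp(upper) > 0 {
        v.Set(upper)
    }
    return v
}

func main() {
    prevGasFeeCap := big.NewInt(30_000_000_000) // 30 gwei
    // Escalate by 11/10 (a 10% bump) and cap at 100 gwei; the real ratios come
    // from EscalateMultipleNum, EscalateMultipleDen and MaxGasPrice in the config.
    fmt.Println("resubmit gas fee cap:", escalate(prevGasFeeCap, 11, 10, 100_000_000_000))
}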

View File

@@ -65,7 +65,6 @@ func TestSender(t *testing.T) {
t.Run("test min gas limit", testMinGasLimit) t.Run("test min gas limit", testMinGasLimit)
t.Run("test resubmit transaction", testResubmitTransaction) t.Run("test resubmit transaction", testResubmitTransaction)
t.Run("test resubmit transaction with rising base fee", testResubmitTransactionWithRisingBaseFee)
t.Run("test check pending transaction", testCheckPendingTransaction) t.Run("test check pending transaction", testCheckPendingTransaction)
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) }) t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
@@ -155,43 +154,6 @@ func testResubmitTransaction(t *testing.T) {
} }
} }
func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
txType := "DynamicFeeTx"
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
auth := s.auths.getAccount()
tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
s.baseFeePerGas = 1000
feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
assert.NoError(t, err)
// bump the basefee by 10x
s.baseFeePerGas *= 10
// resubmit and check that the gas fee has been adjusted accordingly
newTx, err := s.resubmitTransaction(feeData, auth, tx)
assert.NoError(t, err)
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
adjBaseFee := new(big.Int)
adjBaseFee.SetUint64(s.baseFeePerGas)
adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
expectedGasFeeCap := new(big.Int).Add(feeData.gasTipCap, adjBaseFee)
if expectedGasFeeCap.Cmp(maxGasPrice) > 0 {
expectedGasFeeCap = maxGasPrice
}
assert.Equal(t, expectedGasFeeCap.Int64(), newTx.GasFeeCap().Int64())
s.Stop()
}
func testCheckPendingTransaction(t *testing.T) { func testCheckPendingTransaction(t *testing.T) {
for _, txType := range txTypes { for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig

View File

@@ -8,10 +8,9 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
) )
// BatchProposer proposes batches based on available unbatched chunks. // BatchProposer proposes batches based on available unbatched chunks.
@@ -19,13 +18,13 @@ type BatchProposer struct {
ctx context.Context ctx context.Context
db *gorm.DB db *gorm.DB
batchOrm *orm.Batch batchOrm *orm.Batch
chunkOrm *orm.Chunk chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block l2Block *orm.L2Block
maxChunkNumPerBatch uint64 maxChunkNumPerBatch uint64
maxL1CommitGasPerBatch uint64 maxL1CommitGasPerBatch uint64
maxL1CommitCalldataSizePerBatch uint32 maxL1CommitCalldataSizePerBatch uint64
minChunkNumPerBatch uint64 minChunkNumPerBatch uint64
batchTimeoutSec uint64 batchTimeoutSec uint64
} }
@@ -37,7 +36,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
db: db, db: db,
batchOrm: orm.NewBatch(db), batchOrm: orm.NewBatch(db),
chunkOrm: orm.NewChunk(db), chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db), l2Block: orm.NewL2Block(db),
maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch, maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch,
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch, maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch, maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
@@ -93,6 +92,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
} }
if len(dbChunks) == 0 { if len(dbChunks) == 0 {
log.Warn("No Unbatched Chunks")
return nil, nil return nil, nil
} }
@@ -146,7 +146,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
} }
if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch { if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
log.Warn("The chunk number of the batch is less than the minimum limit", log.Warn("The payload size of the batch is less than the minimum limit",
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch, "chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
) )
return nil, nil return nil, nil
@@ -154,16 +154,16 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
return dbChunks, nil return dbChunks, nil
} }
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) { func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*bridgeTypes.Chunk, error) {
chunks := make([]*types.Chunk, len(dbChunks)) chunks := make([]*bridgeTypes.Chunk, len(dbChunks))
for i, c := range dbChunks { for i, c := range dbChunks {
wrappedBlocks, err := p.l2BlockOrm.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber) wrappedBlocks, err := p.l2Block.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil { if err != nil {
log.Error("Failed to fetch wrapped blocks", log.Error("Failed to fetch wrapped blocks",
"start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err) "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return nil, err return nil, err
} }
chunks[i] = &types.Chunk{ chunks[i] = &bridgeTypes.Chunk{
Blocks: wrappedBlocks, Blocks: wrappedBlocks,
} }
} }

View File

@@ -6,20 +6,21 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
// TODO: Add unit tests that the limits are enforced correctly. // TODO: Add unit tests that the limits are enforced correctly.
func testBatchProposer(t *testing.T) { func testBatchProposer(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db) l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2}) err := l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err) assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{

View File

@@ -8,10 +8,9 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
) )
// ChunkProposer proposes chunks based on available unchunked blocks. // ChunkProposer proposes chunks based on available unchunked blocks.
@@ -59,8 +58,9 @@ func (p *ChunkProposer) TryProposeChunk() {
} }
} }
func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error { func (p *ChunkProposer) updateChunkInfoInDB(chunk *bridgeTypes.Chunk) error {
if chunk == nil { if chunk == nil {
log.Warn("proposed chunk is nil, cannot update in DB")
return nil return nil
} }
@@ -78,13 +78,14 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
return err return err
} }
func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) { func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx) blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(blocks) == 0 { if len(blocks) == 0 {
log.Warn("no un-chunked blocks")
return nil, nil return nil, nil
} }
@@ -165,5 +166,5 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
) )
return nil, nil return nil, nil
} }
return &types.Chunk{Blocks: blocks}, nil return &bridgeTypes.Chunk{Blocks: blocks}, nil
} }

View File

@@ -6,20 +6,19 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
// TODO: Add unit tests that the limits are enforced correctly. // TODO: Add unit tests that the limits are enforced correctly.
func testChunkProposer(t *testing.T) { func testChunkProposer(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db) l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2}) err := l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err) assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -32,8 +31,8 @@ func testChunkProposer(t *testing.T) {
}, db) }, db)
cp.TryProposeChunk() cp.TryProposeChunk()
expectedChunk := &types.Chunk{ expectedChunk := &bridgeTypes.Chunk{
Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2}, Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2},
} }
expectedHash, err := expectedChunk.Hash(0) expectedHash, err := expectedChunk.Hash(0)
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -74,7 +74,7 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
} }
l1BlockOrm := orm.NewL1Block(db) l1BlockOrm := orm.NewL1Block(db)
savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(ctx) savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight()
if err != nil { if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err) log.Warn("Failed to fetch latest L1 block height from db", "err", err)
savedL1BlockHeight = 0 savedL1BlockHeight = 0
@@ -149,10 +149,9 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
baseFee = block.BaseFee.Uint64() baseFee = block.BaseFee.Uint64()
} }
blocks = append(blocks, orm.L1Block{ blocks = append(blocks, orm.L1Block{
Number: uint64(height), Number: uint64(height),
Hash: block.Hash().String(), Hash: block.Hash().String(),
BaseFee: baseFee, BaseFee: baseFee,
GasOracleStatus: int16(types.GasOraclePending),
}) })
} }

View File

@@ -17,7 +17,6 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
commonTypes "scroll-tech/common/types" commonTypes "scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi" bridgeAbi "scroll-tech/bridge/abi"
@@ -37,13 +36,13 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
func testFetchContractEvent(t *testing.T) { func testFetchContractEvent(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
assert.NoError(t, watcher.FetchContractEvent()) assert.NoError(t, watcher.FetchContractEvent())
} }
func testL1WatcherClientFetchBlockHeader(t *testing.T) { func testL1WatcherClientFetchBlockHeader(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
convey.Convey("test toBlock < fromBlock", t, func() { convey.Convey("test toBlock < fromBlock", t, func() {
var blockHeight uint64 var blockHeight uint64
if watcher.ProcessedBlockHeight() <= 0 { if watcher.ProcessedBlockHeight() <= 0 {
@@ -115,7 +114,7 @@ func testL1WatcherClientFetchBlockHeader(t *testing.T) {
func testL1WatcherClientFetchContractEvent(t *testing.T) { func testL1WatcherClientFetchContractEvent(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
watcher.SetConfirmations(rpc.SafeBlockNumber) watcher.SetConfirmations(rpc.SafeBlockNumber)
convey.Convey("get latest confirmed block number failure", t, func() { convey.Convey("get latest confirmed block number failure", t, func() {
@@ -260,7 +259,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) { func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
logs := []types.Log{ logs := []types.Log{
{ {
@@ -306,7 +305,7 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) { func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
logs := []types.Log{ logs := []types.Log{
{ {
Topics: []common.Hash{bridgeAbi.L1CommitBatchEventSignature}, Topics: []common.Hash{bridgeAbi.L1CommitBatchEventSignature},
@@ -348,7 +347,7 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) { func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
logs := []types.Log{ logs := []types.Log{
{ {
Topics: []common.Hash{bridgeAbi.L1FinalizeBatchEventSignature}, Topics: []common.Hash{bridgeAbi.L1FinalizeBatchEventSignature},

View File

@@ -22,6 +22,7 @@ import (
bridgeAbi "scroll-tech/bridge/abi" bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils" "scroll-tech/bridge/internal/utils"
) )
@@ -109,7 +110,7 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
} }
// Fetch and store block traces for missing blocks // Fetch and store block traces for missing blocks
for from := heightInDB + 1; from <= blockHeight; from += blockTracesFetchLimit { for from := uint64(heightInDB) + 1; from <= blockHeight; from += blockTracesFetchLimit {
to := from + blockTracesFetchLimit - 1 to := from + blockTracesFetchLimit - 1
if to > blockHeight { if to > blockHeight {
@@ -159,7 +160,7 @@ func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
} }
func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error { func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var blocks []*types.WrappedBlock var blocks []*bridgeTypes.WrappedBlock
for number := from; number <= to; number++ { for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number) log.Debug("retrieving block", "height", number)
block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number))) block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number)))
@@ -174,7 +175,7 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number) return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number)
} }
blocks = append(blocks, &types.WrappedBlock{ blocks = append(blocks, &bridgeTypes.WrappedBlock{
Header: block.Header(), Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()), Transactions: txsToTxsData(block.Transactions()),
WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot), WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot),
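The loop above walks the missing range in fixed-size windows; a standalone sketch of just that windowing, assuming a limit such as blockTracesFetchLimit (fetchRange stands in for getAndStoreBlockTraces and is hypothetical):

// fetchMissing visits [heightInDB+1, blockHeight] in windows of at most `limit` blocks.
func fetchMissing(heightInDB, blockHeight, limit uint64, fetchRange func(from, to uint64) error) error {
	for from := heightInDB + 1; from <= blockHeight; from += limit {
		to := from + limit - 1
		if to > blockHeight {
			to = blockHeight // clamp the last window to the target height
		}
		if err := fetchRange(from, to); err != nil {
			return err
		}
	}
	return nil
}

For example, with limit 100, heightInDB 250 and blockHeight 420 this produces the windows 251-350 and 351-420.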

View File

@@ -21,7 +21,6 @@ import (
"github.com/smartystreets/goconvey/convey" "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
cutils "scroll-tech/common/utils" cutils "scroll-tech/common/utils"
bridgeAbi "scroll-tech/bridge/abi" bridgeAbi "scroll-tech/bridge/abi"
@@ -43,7 +42,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
subCtx, cancel := context.WithCancel(context.Background()) subCtx, cancel := context.WithCancel(context.Background())
defer func() { defer func() {
cancel() cancel()
defer database.CloseDB(db) defer utils.CloseDB(db)
}() }()
loopToFetchEvent(subCtx, wc) loopToFetchEvent(subCtx, wc)
@@ -69,7 +68,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
func testFetchRunningMissingBlocks(t *testing.T) { func testFetchRunningMissingBlocks(t *testing.T) {
_, db := setupL2Watcher(t) _, db := setupL2Watcher(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0]) auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
@@ -88,7 +87,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
wc := prepareWatcherClient(l2Cli, db, address) wc := prepareWatcherClient(l2Cli, db, address)
wc.TryFetchRunningMissingBlocks(latestHeight) wc.TryFetchRunningMissingBlocks(latestHeight)
fetchedHeight, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background()) fetchedHeight, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
return err == nil && fetchedHeight == latestHeight return err == nil && uint64(fetchedHeight) == latestHeight
}) })
assert.True(t, ok) assert.True(t, ok)
} }
@@ -115,7 +114,7 @@ func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
logs := []gethTypes.Log{ logs := []gethTypes.Log{
{ {
@@ -155,7 +154,7 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
logs := []gethTypes.Log{ logs := []gethTypes.Log{
{ {

View File

@@ -9,13 +9,12 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
var ( var (
@@ -28,8 +27,8 @@ var (
l2Cli *ethclient.Client l2Cli *ethclient.Client
// block trace // block trace
wrappedBlock1 *types.WrappedBlock wrappedBlock1 *bridgeTypes.WrappedBlock
wrappedBlock2 *types.WrappedBlock wrappedBlock2 *bridgeTypes.WrappedBlock
) )
func setupEnv(t *testing.T) (err error) { func setupEnv(t *testing.T) (err error) {
@@ -41,7 +40,7 @@ func setupEnv(t *testing.T) (err error) {
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint() cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint() cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = &database.Config{ cfg.DBConfig = &config.DBConfig{
DSN: base.DBConfig.DSN, DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName, DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum, MaxOpenNum: base.DBConfig.MaxOpenNum,
@@ -57,7 +56,7 @@ func setupEnv(t *testing.T) (err error) {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock1 = &types.WrappedBlock{} wrappedBlock1 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil { if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err return err
} }
@@ -67,7 +66,7 @@ func setupEnv(t *testing.T) (err error) {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock2 = &types.WrappedBlock{} wrappedBlock2 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil { if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err return err
} }
@@ -75,7 +74,7 @@ func setupEnv(t *testing.T) (err error) {
} }
func setupDB(t *testing.T) *gorm.DB { func setupDB(t *testing.T) *gorm.DB {
db, err := database.InitDB(cfg.DBConfig) db, err := utils.InitDB(cfg.DBConfig)
assert.NoError(t, err) assert.NoError(t, err)
sqlDB, err := db.DB() sqlDB, err := db.DB()
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -10,6 +10,8 @@ import (
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message" "scroll-tech/common/types/message"
bridgeTypes "scroll-tech/bridge/internal/types"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
@@ -33,12 +35,11 @@ type Batch struct {
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"` BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
// proof // proof
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"` ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"` Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"` ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"` ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"` ProofTimeSec int `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// rollup // rollup
RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"` RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"`
@@ -71,7 +72,6 @@ func (*Batch) TableName() string {
// The returned batches are sorted in ascending order by their index. // The returned batches are sorted in ascending order by their index.
func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Batch, error) { func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Batch, error) {
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
for key, value := range fields { for key, value := range fields {
db = db.Where(key, value) db = db.Where(key, value)
@@ -89,51 +89,44 @@ func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, o
var batches []*Batch var batches []*Batch
if err := db.Find(&batches).Error; err != nil { if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatches error: %w, fields: %v, orderByList: %v", err, fields, orderByList) return nil, err
} }
return batches, nil return batches, nil
} }
// GetBatchCount retrieves the total number of batches in the database. // GetBatchCount retrieves the total number of batches in the database.
func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) { func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
var count int64 var count int64
if err := db.Count(&count).Error; err != nil { err := o.db.WithContext(ctx).Model(&Batch{}).Count(&count).Error
return 0, fmt.Errorf("Batch.GetBatchCount error: %w", err) if err != nil {
return 0, err
} }
return uint64(count), nil return uint64(count), nil
} }
// GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash. // GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash.
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.AggProof, error) { func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.AggProof, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("proof")
db = db.Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified)
var batch Batch var batch Batch
if err := db.Find(&batch).Error; err != nil { err := o.db.WithContext(ctx).Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified).First(&batch).Error
return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash) if err != nil {
return nil, err
} }
var proof message.AggProof var proof message.AggProof
if err := json.Unmarshal(batch.Proof, &proof); err != nil { err = json.Unmarshal(batch.Proof, &proof)
return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash) if err != nil {
return nil, err
} }
return &proof, nil return &proof, nil
} }
// GetLatestBatch retrieves the latest batch from the database. // GetLatestBatch retrieves the latest batch from the database.
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) { func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Order("index desc")
var latestBatch Batch var latestBatch Batch
if err := db.First(&latestBatch).Error; err != nil { err := o.db.WithContext(ctx).Order("index desc").First(&latestBatch).Error
return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err) if err != nil {
return nil, err
} }
return &latestBatch, nil return &latestBatch, nil
} }
@@ -141,17 +134,13 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
// GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes. // GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes.
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) { func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) {
if len(hashes) == 0 { if len(hashes) == 0 {
return nil, nil return []types.RollupStatus{}, nil
} }
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("hash, rollup_status")
db = db.Where("hash IN ?", hashes)
var batches []Batch var batches []Batch
if err := db.Find(&batches).Error; err != nil { err := o.db.WithContext(ctx).Where("hash IN ?", hashes).Find(&batches).Error
return nil, fmt.Errorf("Batch.GetRollupStatusByHashList error: %w, hashes: %v", err, hashes) if err != nil {
return nil, err
} }
hashToStatusMap := make(map[string]types.RollupStatus) hashToStatusMap := make(map[string]types.RollupStatus)
@@ -163,7 +152,7 @@ func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string)
for _, hash := range hashes { for _, hash := range hashes {
status, ok := hashToStatusMap[hash] status, ok := hashToStatusMap[hash]
if !ok { if !ok {
return nil, fmt.Errorf("Batch.GetRollupStatusByHashList: hash not found in database: %s", hash) return nil, fmt.Errorf("hash not found in database: %s", hash)
} }
statuses = append(statuses, status) statuses = append(statuses, status)
} }
@@ -178,40 +167,40 @@ func (o *Batch) GetPendingBatches(ctx context.Context, limit int) ([]*Batch, err
return nil, errors.New("limit must be greater than zero") return nil, errors.New("limit must be greater than zero")
} }
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("rollup_status = ?", types.RollupPending)
db = db.Order("index ASC")
db = db.Limit(limit)
var batches []*Batch var batches []*Batch
db := o.db.WithContext(ctx)
db = db.Where("rollup_status = ?", types.RollupPending).Order("index ASC").Limit(limit)
if err := db.Find(&batches).Error; err != nil { if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetPendingBatches error: %w", err) return nil, err
} }
return batches, nil return batches, nil
} }
// GetBatchByIndex retrieves the batch by the given index. // GetBatchByIndex retrieves the batch by the given index.
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) { func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("index = ?", index)
var batch Batch var batch Batch
if err := db.First(&batch).Error; err != nil { err := o.db.WithContext(ctx).Where("index = ?", index).First(&batch).Error
return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index) if err != nil {
return nil, err
} }
return &batch, nil return &batch, nil
} }
// InsertBatch inserts a new batch into the database. // InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) { func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
if len(chunks) == 0 { if len(chunks) == 0 {
return nil, errors.New("invalid args") return nil, errors.New("invalid args")
} }
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
parentBatch, err := o.GetLatestBatch(ctx) parentBatch, err := o.GetLatestBatch(ctx)
if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) { if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Error("failed to get the latest batch", "err", err) log.Error("failed to get the latest batch", "err", err)
return nil, err return nil, err
} }
@@ -228,8 +217,8 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
batchIndex = parentBatch.Index + 1 batchIndex = parentBatch.Index + 1
parentBatchHash = common.HexToHash(parentBatch.Hash) parentBatchHash = common.HexToHash(parentBatch.Hash)
var parentBatchHeader *types.BatchHeader var parentBatchHeader *bridgeTypes.BatchHeader
parentBatchHeader, err = types.DecodeBatchHeader(parentBatch.BatchHeader) parentBatchHeader, err = bridgeTypes.DecodeBatchHeader(parentBatch.BatchHeader)
if err != nil { if err != nil {
log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err) log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
return nil, err return nil, err
@@ -239,7 +228,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
version = parentBatchHeader.Version() version = parentBatchHeader.Version()
} }
batchHeader, err := types.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks) batchHeader, err := bridgeTypes.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks)
if err != nil { if err != nil {
log.Error("failed to create batch header", log.Error("failed to create batch header",
"index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore, "index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
@@ -251,53 +240,59 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
lastChunkBlockNum := len(chunks[numChunks-1].Blocks) lastChunkBlockNum := len(chunks[numChunks-1].Blocks)
newBatch := Batch{ newBatch := Batch{
Index: batchIndex, Index: batchIndex,
Hash: batchHeader.Hash().Hex(), Hash: batchHeader.Hash().Hex(),
StartChunkHash: startChunkHash, StartChunkHash: startChunkHash,
StartChunkIndex: startChunkIndex, StartChunkIndex: startChunkIndex,
EndChunkHash: endChunkHash, EndChunkHash: endChunkHash,
EndChunkIndex: endChunkIndex, EndChunkIndex: endChunkIndex,
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(), StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(), WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
BatchHeader: batchHeader.Encode(), BatchHeader: batchHeader.Encode(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending), ProvingStatus: int16(types.ProvingTaskUnassigned),
ProvingStatus: int16(types.ProvingTaskUnassigned), RollupStatus: int16(types.RollupPending),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
} }
db := o.db if err := db.WithContext(ctx).Create(&newBatch).Error; err != nil {
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db.WithContext(ctx)
db = db.Model(&Batch{})
if err := db.Create(&newBatch).Error; err != nil {
log.Error("failed to insert batch", "batch", newBatch, "err", err) log.Error("failed to insert batch", "batch", newBatch, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err) return nil, err
} }
return &newBatch, nil return &newBatch, nil
} }
// UpdateSkippedBatches updates the skipped batches in the database.
func (o *Batch) UpdateSkippedBatches(ctx context.Context) (uint64, error) {
provingStatusList := []interface{}{
int(types.ProvingTaskSkipped),
int(types.ProvingTaskFailed),
}
result := o.db.Model(&Batch{}).Where("rollup_status", int(types.RollupCommitted)).
Where("proving_status IN (?)", provingStatusList).Update("rollup_status", int(types.RollupFinalizationSkipped))
if result.Error != nil {
return 0, result.Error
}
return uint64(result.RowsAffected), nil
}
// UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch. // UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch.
func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error { func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["oracle_status"] = int(status) updateFields["oracle_status"] = int(status)
updateFields["oracle_tx_hash"] = txHash updateFields["oracle_tx_hash"] = txHash
if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
db := o.db.WithContext(ctx) return err
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateL2GasOracleStatusAndOracleTxHash error: %w, batch hash: %v, status: %v, txHash: %v", err, hash, status.String(), txHash)
} }
return nil return nil
} }
// UpdateProvingStatus updates the proving status of a batch. // UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status) updateFields["proving_status"] = int(status)
@@ -308,24 +303,22 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = nil updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified: case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now() updateFields["proved_at"] = time.Now()
default:
} }
db := o.db if err := db.Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
if len(dbTX) > 0 && dbTX[0] != nil { return err
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
} }
return nil return nil
} }
// UpdateRollupStatus updates the rollup status of a batch. // UpdateRollupStatus updates the rollup status of a batch.
func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error { func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["rollup_status"] = int(status) updateFields["rollup_status"] = int(status)
@@ -335,17 +328,8 @@ func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status type
case types.RollupFinalized: case types.RollupFinalized:
updateFields["finalized_at"] = time.Now() updateFields["finalized_at"] = time.Now()
} }
if err := db.Model(&Batch{}).WithContext(ctx).Where("hash", hash).Updates(updateFields).Error; err != nil {
db := o.db return err
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateRollupStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
} }
return nil return nil
} }
@@ -358,13 +342,8 @@ func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash stri
if status == types.RollupCommitted { if status == types.RollupCommitted {
updateFields["committed_at"] = time.Now() updateFields["committed_at"] = time.Now()
} }
if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
db := o.db.WithContext(ctx) return err
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateCommitTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), commitTxHash)
} }
return nil return nil
} }
@@ -377,35 +356,23 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
if status == types.RollupFinalized { if status == types.RollupFinalized {
updateFields["finalized_at"] = time.Now() updateFields["finalized_at"] = time.Now()
} }
if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
db := o.db.WithContext(ctx) return err
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), finalizeTxHash)
} }
return nil return nil
} }
// UpdateProofByHash updates the batch proof by hash. // UpdateProofByHash updates the block batch proof by hash.
// for unit test. // for unit test.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error { func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
proofBytes, err := json.Marshal(proof) proofBytes, err := json.Marshal(proof)
if err != nil { if err != nil {
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash) return err
} }
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["proof"] = proofBytes updateFields["proof"] = proofBytes
updateFields["proof_time_sec"] = proofTimeSec updateFields["proof_time_sec"] = proofTimeSec
err = o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error
db := o.db.WithContext(ctx) return err
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err = db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
}
return nil
} }
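Several of the methods above accept an optional dbTX ...*gorm.DB so callers can group updates into one transaction; a minimal sketch of that usage, assuming the Batch methods shown in this file (db, batchOrm, ctx and batchHash are hypothetical wiring):

err := db.Transaction(func(tx *gorm.DB) error {
	// Both updates run on tx; if either returns an error, gorm rolls the whole transaction back.
	if err := batchOrm.UpdateProvingStatus(ctx, batchHash, types.ProvingTaskVerified, tx); err != nil {
		return err
	}
	return batchOrm.UpdateRollupStatus(ctx, batchHash, types.RollupFinalized, tx)
})

Omitting the trailing tx argument falls back to the ORM's own handle, which is how the non-transactional callers elsewhere in this diff use the same methods.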

View File

@@ -3,11 +3,12 @@ package orm
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"time" "time"
"scroll-tech/common/types" "scroll-tech/common/types"
bridgeTypes "scroll-tech/bridge/internal/types"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
) )
@@ -25,22 +26,22 @@ type Chunk struct {
EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"` EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"`
StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"` StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"`
TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"` TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
TotalL1MessagesPoppedInChunk uint32 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"` TotalL1MessagesPoppedInChunk uint64 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
// proof // proof
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"` ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"` Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"` ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"` ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"` ProofTimeSec int16 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// batch // batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"` BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// metadata // metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"` TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint32 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"` TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
TotalL1CommitCalldataSize uint32 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"` TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"`
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"` TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"` CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"` UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -62,22 +63,19 @@ func (*Chunk) TableName() string {
// The returned chunks are sorted in ascending order by their index. // The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endIndex uint64) ([]*Chunk, error) { func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endIndex uint64) ([]*Chunk, error) {
if startIndex > endIndex { if startIndex > endIndex {
return nil, fmt.Errorf("Chunk.GetChunksInRange: start index should be less than or equal to end index, start index: %v, end index: %v", startIndex, endIndex) return nil, errors.New("start index should be less than or equal to end index")
} }
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
db = db.Order("index ASC")
var chunks []*Chunk var chunks []*Chunk
db := o.db.WithContext(ctx).Where("index >= ? AND index <= ?", startIndex, endIndex)
db = db.Order("index ASC")
if err := db.Find(&chunks).Error; err != nil { if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetChunksInRange error: %w, start index: %v, end index: %v", err, startIndex, endIndex) return nil, err
} }
// sanity check if startIndex+uint64(len(chunks)) != endIndex+1 {
if uint64(len(chunks)) != endIndex-startIndex+1 { return nil, errors.New("number of chunks not expected in the specified range")
return nil, fmt.Errorf("Chunk.GetChunksInRange: incorrect number of chunks, expected: %v, got: %v, start index: %v, end index: %v", endIndex-startIndex+1, len(chunks), startIndex, endIndex)
} }
return chunks, nil return chunks, nil
@@ -85,43 +83,46 @@ func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endInde
// GetUnbatchedChunks retrieves unbatched chunks from the database. // GetUnbatchedChunks retrieves unbatched chunks from the database.
func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) { func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash IS NULL")
db = db.Order("index asc")
var chunks []*Chunk var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil { err := o.db.WithContext(ctx).
return nil, fmt.Errorf("Chunk.GetUnbatchedChunks error: %w", err) Where("batch_hash IS NULL").
Order("index asc").
Find(&chunks).Error
if err != nil {
return nil, err
} }
return chunks, nil return chunks, nil
} }
// GetLatestChunk retrieves the latest chunk from the database. // GetLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) { func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Order("index desc")
var latestChunk Chunk var latestChunk Chunk
if err := db.First(&latestChunk).Error; err != nil { err := o.db.WithContext(ctx).
return nil, fmt.Errorf("Chunk.GetLatestChunk error: %w", err) Order("index desc").
First(&latestChunk).Error
if err != nil {
return nil, err
} }
return &latestChunk, nil return &latestChunk, nil
} }
// InsertChunk inserts a new chunk into the database. // InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) { func (o *Chunk) InsertChunk(ctx context.Context, chunk *bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 { if chunk == nil || len(chunk.Blocks) == 0 {
return nil, errors.New("invalid args") return nil, errors.New("invalid args")
} }
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
var chunkIndex uint64 var chunkIndex uint64
var totalL1MessagePoppedBefore uint64 var totalL1MessagePoppedBefore uint64
parentChunk, err := o.GetLatestChunk(ctx) parentChunk, err := o.GetLatestChunk(ctx)
if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) { if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Error("failed to get latest chunk", "err", err) log.Error("failed to get latest chunk", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err) return nil, err
} }
// if parentChunk==nil then err==gorm.ErrRecordNotFound, which means there's // if parentChunk==nil then err==gorm.ErrRecordNotFound, which means there's
@@ -129,13 +130,13 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
// if parentChunk!=nil then err=nil, then we fill the parentChunk-related data into the creating chunk // if parentChunk!=nil then err=nil, then we fill the parentChunk-related data into the creating chunk
if parentChunk != nil { if parentChunk != nil {
chunkIndex = parentChunk.Index + 1 chunkIndex = parentChunk.Index + 1
totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + uint64(parentChunk.TotalL1MessagesPoppedInChunk) totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + parentChunk.TotalL1MessagesPoppedInChunk
} }
hash, err := chunk.Hash(totalL1MessagePoppedBefore) hash, err := chunk.Hash(totalL1MessagePoppedBefore)
if err != nil { if err != nil {
log.Error("failed to get chunk hash", "err", err) log.Error("failed to get chunk hash", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err) return nil, err
} }
var totalL2TxGas uint64 var totalL2TxGas uint64
@@ -158,24 +159,18 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(), EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(), EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: totalL2TxGas, TotalL2TxGas: totalL2TxGas,
TotalL2TxNum: uint32(totalL2TxNum), TotalL2TxNum: totalL2TxNum,
TotalL1CommitCalldataSize: uint32(totalL1CommitCalldataSize), TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
TotalL1CommitGas: totalL1CommitGas, TotalL1CommitGas: totalL1CommitGas,
StartBlockTime: chunk.Blocks[0].Header.Time, StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore, TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: uint32(chunk.NumL1Messages(totalL1MessagePoppedBefore)), TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),
ProvingStatus: int16(types.ProvingTaskUnassigned), ProvingStatus: int16(types.ProvingTaskUnassigned),
} }
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
if err := db.Create(&newChunk).Error; err != nil { if err := db.Create(&newChunk).Error; err != nil {
return nil, fmt.Errorf("Chunk.InsertChunk error: %w, chunk hash: %v", err, newChunk.Hash) log.Error("failed to insert chunk", "hash", hash, "err", err)
return nil, err
} }
return &newChunk, nil return &newChunk, nil
@@ -183,6 +178,11 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
// UpdateProvingStatus updates the proving status of a chunk. // UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status) updateFields["proving_status"] = int(status)
@@ -193,18 +193,11 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = nil updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified: case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now() updateFields["proved_at"] = time.Now()
default:
} }
db := o.db if err := db.Model(&Chunk{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
if len(dbTX) > 0 && dbTX[0] != nil { return err
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatus error: %w, chunk hash: %v, status: %v", err, hash, status.String())
} }
return nil return nil
} }
@@ -216,12 +209,10 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
if len(dbTX) > 0 && dbTX[0] != nil { if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0] db = dbTX[0]
} }
db = db.WithContext(ctx) db = db.Model(&Chunk{}).Where("index >= ? AND index <= ?", startIndex, endIndex)
db = db.Model(&Chunk{})
db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
if err := db.Update("batch_hash", batchHash).Error; err != nil { if err := db.Update("batch_hash", batchHash).Error; err != nil {
return fmt.Errorf("Chunk.UpdateBatchHashInRange error: %w, start index: %v, end index: %v, batch hash: %v", err, startIndex, endIndex, batchHash) return err
} }
return nil return nil
} }
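Both versions of GetChunksInRange enforce the same invariant over the inclusive index range; a worked restatement of the check, with variable names mirroring the code above and illustrative error text:

expected := endIndex - startIndex + 1 // size of the inclusive index range
// The hunk above carries two algebraically equivalent forms of this test:
//   uint64(len(chunks)) != endIndex-startIndex+1
//   startIndex+uint64(len(chunks)) != endIndex+1
// Both reject the result unless exactly `expected` rows came back, e.g. when a
// chunk index in the middle of the range is missing from the table.
if uint64(len(chunks)) != expected {
	return nil, fmt.Errorf("expected %d chunks in [%d, %d], got %d", expected, startIndex, endIndex, len(chunks))
}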

View File

@@ -2,9 +2,8 @@ package orm
import ( import (
"context" "context"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/common/types"
@@ -14,19 +13,14 @@ import (
type L1Block struct { type L1Block struct {
db *gorm.DB `gorm:"column:-"` db *gorm.DB `gorm:"column:-"`
// block Number uint64 `json:"number" gorm:"column:number"`
Number uint64 `json:"number" gorm:"column:number"` Hash string `json:"hash" gorm:"column:hash"`
Hash string `json:"hash" gorm:"column:hash"` HeaderRLP string `json:"header_rlp" gorm:"column:header_rlp"`
BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"` BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"`
BlockStatus int `json:"block_status" gorm:"column:block_status;default:1"`
// oracle ImportTxHash string `json:"import_tx_hash" gorm:"column:import_tx_hash;default:NULL"`
GasOracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"` GasOracleStatus int `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"` OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
} }
// NewL1Block create an l1Block instance // NewL1Block create an l1Block instance
@@ -40,64 +34,54 @@ func (*L1Block) TableName() string {
} }
// GetLatestL1BlockHeight get the latest l1 block height // GetLatestL1BlockHeight get the latest l1 block height
func (o *L1Block) GetLatestL1BlockHeight(ctx context.Context) (uint64, error) { func (l *L1Block) GetLatestL1BlockHeight() (uint64, error) {
db := o.db.WithContext(ctx) result := l.db.Model(&L1Block{}).Select("COALESCE(MAX(number), 0)").Row()
db = db.Model(&L1Block{}) if result.Err() != nil {
db = db.Select("COALESCE(MAX(number), 0)") return 0, result.Err()
}
var maxNumber uint64 var maxNumber uint64
if err := db.Row().Scan(&maxNumber); err != nil { if err := result.Scan(&maxNumber); err != nil {
return 0, fmt.Errorf("L1Block.GetLatestL1BlockHeight error: %w", err) return 0, err
} }
return maxNumber, nil return maxNumber, nil
} }
// GetL1Blocks get the l1 blocks // GetL1Blocks get the l1 blocks
func (o *L1Block) GetL1Blocks(ctx context.Context, fields map[string]interface{}) ([]L1Block, error) { func (l *L1Block) GetL1Blocks(fields map[string]interface{}) ([]L1Block, error) {
db := o.db.WithContext(ctx) var l1Blocks []L1Block
db = db.Model(&L1Block{}) db := l.db
for key, value := range fields { for key, value := range fields {
db = db.Where(key, value) db = db.Where(key, value)
} }
db = db.Order("number ASC") db = db.Order("number ASC")
var l1Blocks []L1Block
if err := db.Find(&l1Blocks).Error; err != nil { if err := db.Find(&l1Blocks).Error; err != nil {
return nil, fmt.Errorf("L1Block.GetL1Blocks error: %w, fields: %v", err, fields) return nil, err
} }
return l1Blocks, nil return l1Blocks, nil
} }
// InsertL1Blocks batch insert l1 blocks // InsertL1Blocks batch insert l1 blocks
func (o *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error { func (l *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {
if len(blocks) == 0 { if len(blocks) == 0 {
return nil return nil
} }
db := o.db.WithContext(ctx) err := l.db.WithContext(ctx).Create(&blocks).Error
db = db.Model(&L1Block{}) if err != nil {
log.Error("failed to insert L1 Blocks", "err", err)
if err := db.Create(&blocks).Error; err != nil {
return fmt.Errorf("L1Block.InsertL1Blocks error: %w", err)
} }
return nil return err
} }
// UpdateL1GasOracleStatusAndOracleTxHash update l1 gas oracle status and oracle tx hash // UpdateL1GasOracleStatusAndOracleTxHash update l1 gas oracle status and oracle tx hash
func (o *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error { func (l *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error {
updateFields := map[string]interface{}{ updateFields := map[string]interface{}{
"oracle_status": int(status), "oracle_status": int(status),
"oracle_tx_hash": txHash, "oracle_tx_hash": txHash,
} }
if err := l.db.WithContext(ctx).Model(&L1Block{}).Where("hash", blockHash).Updates(updateFields).Error; err != nil {
db := o.db.WithContext(ctx) return err
db = db.Model(&L1Block{})
db = db.Where("hash", blockHash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("L1Block.UpdateL1GasOracleStatusAndOracleTxHash error: %w, block hash: %v, status: %v, tx hash: %v", err, blockHash, status.String(), txHash)
} }
return nil return nil
} }
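Both versions of GetLatestL1BlockHeight select COALESCE(MAX(number), 0) so that an empty l1_block table scans into 0 instead of failing on a NULL MAX; a minimal sketch of the same query against a gorm handle (the db variable is hypothetical wiring):

var maxNumber uint64
// COALESCE turns the NULL produced by MAX over zero rows into 0 before the scan.
row := db.Model(&L1Block{}).Select("COALESCE(MAX(number), 0)").Row()
if err := row.Scan(&maxNumber); err != nil {
	return 0, err
}
return maxNumber, nil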

View File

@@ -3,7 +3,6 @@ package orm
import ( import (
"context" "context"
"database/sql" "database/sql"
"time"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
@@ -26,11 +25,6 @@ type L1Message struct {
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash"` Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash"`
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:NULL"` Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:NULL"`
Status int `json:"status" gorm:"column:status;default:1"` Status int `json:"status" gorm:"column:status;default:1"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
} }
// NewL1Message create an L1MessageOrm instance // NewL1Message create an L1MessageOrm instance

View File

@@ -3,39 +3,31 @@ package orm
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"time"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/bridge/internal/types"
) )
// L2Block represents a l2 block in the database. // L2Block represents a l2 block in the database.
type L2Block struct { type L2Block struct {
db *gorm.DB `gorm:"column:-"` db *gorm.DB `gorm:"column:-"`
// block
Number uint64 `json:"number" gorm:"number"` Number uint64 `json:"number" gorm:"number"`
Hash string `json:"hash" gorm:"hash"` Hash string `json:"hash" gorm:"hash"`
ParentHash string `json:"parent_hash" gorm:"parent_hash"` ParentHash string `json:"parent_hash" gorm:"parent_hash"`
Header string `json:"header" gorm:"header"` Header string `json:"header" gorm:"header"`
Transactions string `json:"transactions" gorm:"transactions"` Transactions string `json:"transactions" gorm:"transactions"`
WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"` WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"`
TxNum uint32 `json:"tx_num" gorm:"tx_num"` TxNum uint64 `json:"tx_num" gorm:"tx_num"`
GasUsed uint64 `json:"gas_used" gorm:"gas_used"` GasUsed uint64 `json:"gas_used" gorm:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"` BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
// chunk
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
} }
// NewL2Block creates a new L2Block instance // NewL2Block creates a new L2Block instance
@@ -50,30 +42,25 @@ func (*L2Block) TableName() string {
// GetL2BlocksLatestHeight retrieves the height of the latest L2 block. // GetL2BlocksLatestHeight retrieves the height of the latest L2 block.
// If the l2_block table is empty, it returns 0 to represent the genesis block height. // If the l2_block table is empty, it returns 0 to represent the genesis block height.
func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) { // In case of an error, it returns -1 along with the error.
db := o.db.WithContext(ctx) func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (int64, error) {
db = db.Model(&L2Block{}) var maxNumber int64
db = db.Select("COALESCE(MAX(number), 0)") if err := o.db.WithContext(ctx).Model(&L2Block{}).Select("COALESCE(MAX(number), 0)").Row().Scan(&maxNumber); err != nil {
return -1, err
var maxNumber uint64
if err := db.Row().Scan(&maxNumber); err != nil {
return 0, fmt.Errorf("L2Block.GetL2BlocksLatestHeight error: %w", err)
} }
return maxNumber, nil return maxNumber, nil
} }
// GetUnchunkedBlocks get the l2 blocks that have not been put into a chunk. // GetUnchunkedBlocks get the l2 blocks that have not been put into a chunk.
// The returned blocks are sorted in ascending order by their block number. // The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) { func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Where("chunk_hash IS NULL")
db = db.Order("number ASC")
var l2Blocks []L2Block var l2Blocks []L2Block
if err := db.Find(&l2Blocks).Error; err != nil { if err := o.db.WithContext(ctx).Select("header, transactions, withdraw_trie_root").
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err) Where("chunk_hash IS NULL").
Order("number asc").
Find(&l2Blocks).Error; err != nil {
return nil, err
} }
var wrappedBlocks []*types.WrappedBlock var wrappedBlocks []*types.WrappedBlock
@@ -81,12 +68,12 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
var wrappedBlock types.WrappedBlock var wrappedBlock types.WrappedBlock
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil { if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err) return nil, err
} }
wrappedBlock.Header = &gethTypes.Header{} wrappedBlock.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil { if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err) return nil, err
} }
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot) wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
@@ -100,7 +87,6 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
// The returned L2Blocks are sorted in ascending order by their block number. // The returned L2Blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*L2Block, error) { func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*L2Block, error) {
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
for key, value := range fields { for key, value := range fields {
db = db.Where(key, value) db = db.Where(key, value)
@@ -118,7 +104,7 @@ func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}
var l2Blocks []*L2Block var l2Blocks []*L2Block
if err := db.Find(&l2Blocks).Error; err != nil { if err := db.Find(&l2Blocks).Error; err != nil {
return nil, fmt.Errorf("L2Block.GetL2Blocks error: %w, fields: %v, orderByList: %v", err, fields, orderByList) return nil, err
} }
return l2Blocks, nil return l2Blocks, nil
} }
@@ -128,23 +114,20 @@ func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}
// The returned blocks are sorted in ascending order by their block number. // The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint64, endBlockNumber uint64) ([]*types.WrappedBlock, error) { func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint64, endBlockNumber uint64) ([]*types.WrappedBlock, error) {
if startBlockNumber > endBlockNumber { if startBlockNumber > endBlockNumber {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: start block number should be less than or equal to end block number, start block: %v, end block: %v", startBlockNumber, endBlockNumber) return nil, errors.New("start block number should be less than or equal to end block number")
} }
var l2Blocks []L2Block
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber) db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
db = db.Order("number ASC") db = db.Order("number ASC")
var l2Blocks []L2Block
if err := db.Find(&l2Blocks).Error; err != nil { if err := db.Find(&l2Blocks).Error; err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber) return nil, err
} }
// sanity check
if uint64(len(l2Blocks)) != endBlockNumber-startBlockNumber+1 { if uint64(len(l2Blocks)) != endBlockNumber-startBlockNumber+1 {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: unexpected number of results, expected: %v, got: %v", endBlockNumber-startBlockNumber+1, len(l2Blocks)) return nil, errors.New("number of blocks not expected in the specified range")
} }
var wrappedBlocks []*types.WrappedBlock var wrappedBlocks []*types.WrappedBlock
@@ -152,12 +135,12 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6
var wrappedBlock types.WrappedBlock var wrappedBlock types.WrappedBlock
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil { if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber) return nil, err
} }
wrappedBlock.Header = &gethTypes.Header{} wrappedBlock.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil { if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber) return nil, err
} }
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot) wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
@@ -174,13 +157,13 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
header, err := json.Marshal(block.Header) header, err := json.Marshal(block.Header)
if err != nil { if err != nil {
log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err) log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err)
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err) return err
} }
txs, err := json.Marshal(block.Transactions) txs, err := json.Marshal(block.Transactions)
if err != nil { if err != nil {
log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err) log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err)
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err) return err
} }
l2Block := L2Block{ l2Block := L2Block{
@@ -189,7 +172,7 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
ParentHash: block.Header.ParentHash.String(), ParentHash: block.Header.ParentHash.String(),
Transactions: string(txs), Transactions: string(txs),
WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(), WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(),
TxNum: uint32(len(block.Transactions)), TxNum: uint64(len(block.Transactions)),
GasUsed: block.Header.GasUsed, GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time, BlockTimestamp: block.Header.Time,
Header: string(header), Header: string(header),
@@ -197,11 +180,9 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
l2Blocks = append(l2Blocks, l2Block) l2Blocks = append(l2Blocks, l2Block)
} }
db := o.db.WithContext(ctx) if err := o.db.WithContext(ctx).Create(&l2Blocks).Error; err != nil {
db = db.Model(&L2Block{}) log.Error("failed to insert l2Blocks", "err", err)
return err
if err := db.Create(&l2Blocks).Error; err != nil {
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
} }
return nil return nil
} }
@@ -215,19 +196,13 @@ func (o *L2Block) UpdateChunkHashInRange(ctx context.Context, startIndex uint64,
if len(dbTX) > 0 && dbTX[0] != nil { if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0] db = dbTX[0]
} }
db = db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Where("number >= ? AND number <= ?", startIndex, endIndex)
db = db.WithContext(ctx).Model(&L2Block{}).Where("number >= ? AND number <= ?", startIndex, endIndex)
tx := db.Update("chunk_hash", chunkHash) tx := db.Update("chunk_hash", chunkHash)
if tx.Error != nil {
return fmt.Errorf("L2Block.UpdateChunkHashInRange error: %w, start index: %v, end index: %v, chunk hash: %v", tx.Error, startIndex, endIndex, chunkHash) if tx.RowsAffected != int64(endIndex-startIndex+1) {
return fmt.Errorf("expected %d rows to be updated, got %d", endIndex-startIndex+1, tx.RowsAffected)
} }
// sanity check return tx.Error
if uint64(tx.RowsAffected) != endIndex-startIndex+1 {
return fmt.Errorf("L2Block.UpdateChunkHashInRange: incorrect number of rows affected, expected: %v, got: %v", endIndex-startIndex+1, tx.RowsAffected)
}
return nil
} }
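
For reference, the rows-affected sanity check in this hunk follows a standard GORM pattern; below is a minimal sketch of the idea, with the helper name and table string chosen for illustration rather than taken from the PR.

package orm

import (
	"context"
	"fmt"

	"gorm.io/gorm"
)

// updateChunkHashInRangeSketch mirrors the update shown above: one UPDATE over a
// block range, followed by a check that every row in the range was touched.
func updateChunkHashInRangeSketch(ctx context.Context, db *gorm.DB, startIndex, endIndex uint64, chunkHash string) error {
	tx := db.WithContext(ctx).
		Table("l2_block").
		Where("number >= ? AND number <= ?", startIndex, endIndex).
		Update("chunk_hash", chunkHash)
	if tx.Error != nil {
		return fmt.Errorf("update chunk_hash: %w", tx.Error)
	}
	// every block number in [startIndex, endIndex] is expected to exist exactly once
	if uint64(tx.RowsAffected) != endIndex-startIndex+1 {
		return fmt.Errorf("expected %d rows updated, got %d", endIndex-startIndex+1, tx.RowsAffected)
	}
	return nil
}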

View File

@@ -0,0 +1,61 @@
package migrate
import (
"database/sql"
"embed"
"os"
"strconv"
"github.com/pressly/goose/v3"
)
//go:embed migrations/*.sql
var embedMigrations embed.FS
// MigrationsDir is the directory containing the embedded migration files
const MigrationsDir string = "migrations"
func init() {
goose.SetBaseFS(embedMigrations)
goose.SetSequential(true)
goose.SetTableName("scroll_migrations")
verbose, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))
goose.SetVerbose(verbose)
}
// Migrate migrates the db to the latest version
func Migrate(db *sql.DB) error {
return goose.Up(db, MigrationsDir, goose.WithAllowMissing())
}
// Rollback rolls back the db to the given version (or one step if version is nil)
func Rollback(db *sql.DB, version *int64) error {
if version != nil {
return goose.DownTo(db, MigrationsDir, *version)
}
return goose.Down(db, MigrationsDir)
}
// ResetDB cleans and migrates the db.
func ResetDB(db *sql.DB) error {
if err := Rollback(db, new(int64)); err != nil {
return err
}
return Migrate(db)
}
// Current gets the current migration version
func Current(db *sql.DB) (int64, error) {
return goose.GetDBVersion(db)
}
// Status checks whether the migration status is normal
func Status(db *sql.DB) error {
return goose.Version(db, MigrationsDir)
}
// Create creates a new migration file in the migrations directory
func Create(db *sql.DB, name, migrationType string) error {
return goose.Create(db, MigrationsDir, name, migrationType)
}
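
A minimal sketch of how this migrate package might be wired up at startup; only the import path and function signatures come from this diff, while the main package and the DSN are placeholders.

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"

	"scroll-tech/bridge/internal/orm/migrate"
)

func main() {
	// placeholder DSN; in the bridge it comes from the DB config
	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// apply all embedded migrations that have not run yet
	if err := migrate.Migrate(db); err != nil {
		log.Fatal(err)
	}

	version, err := migrate.Current(db)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("migrated to version %d", version)
}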

View File

@@ -0,0 +1,86 @@
package migrate
import (
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/database"
)
var (
base *docker.App
pgDB *sqlx.DB
)
func initEnv(t *testing.T) error {
// Start db container.
base.RunDBImage(t)
// Create db orm handler.
factory, err := database.NewOrmFactory(base.DBConfig)
if err != nil {
return err
}
pgDB = factory.GetDB()
return nil
}
func TestMigrate(t *testing.T) {
base = docker.NewDockerApp()
if err := initEnv(t); err != nil {
t.Fatal(err)
}
t.Run("testCurrent", testCurrent)
t.Run("testStatus", testStatus)
t.Run("testResetDB", testResetDB)
t.Run("testMigrate", testMigrate)
t.Run("testRollback", testRollback)
t.Cleanup(func() {
base.Free()
})
}
func testCurrent(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
assert.Equal(t, 0, int(cur))
}
func testStatus(t *testing.T) {
status := Status(pgDB.DB)
assert.NoError(t, status)
}
func testResetDB(t *testing.T) {
assert.NoError(t, ResetDB(pgDB.DB))
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, 5, int(cur))
}
func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB.DB))
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
assert.Equal(t, true, cur > 0)
}
func testRollback(t *testing.T) {
version, err := Current(pgDB.DB)
assert.NoError(t, err)
assert.Equal(t, true, version > 0)
assert.NoError(t, Rollback(pgDB.DB, nil))
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
assert.Equal(t, true, cur+1 == version)
}

View File

@@ -0,0 +1,37 @@
-- +goose Up
-- +goose StatementBegin
create table l1_message
(
queue_index BIGINT NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
gas_limit BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
calldata TEXT NOT NULL,
layer1_hash VARCHAR NOT NULL,
layer2_hash VARCHAR DEFAULT NULL,
status INTEGER DEFAULT 1,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);
comment
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';
create unique index l1_message_hash_uindex
on l1_message (msg_hash);
create unique index l1_message_nonce_uindex
on l1_message (queue_index);
create index l1_message_height_index
on l1_message (height);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_message;
-- +goose StatementEnd
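
For orientation, a GORM model matching the l1_message table above could look like the following; this struct is illustrative only and is not part of the diff.

package orm

import "time"

// L1Message is an illustrative mapping of the l1_message table; the field set
// follows the migration, but the actual ORM struct in the repository may differ.
type L1Message struct {
	QueueIndex uint64    `gorm:"column:queue_index"`
	MsgHash    string    `gorm:"column:msg_hash"`
	Height     uint64    `gorm:"column:height"`
	GasLimit   uint64    `gorm:"column:gas_limit"`
	Sender     string    `gorm:"column:sender"`
	Target     string    `gorm:"column:target"`
	Value      string    `gorm:"column:value"`
	Calldata   string    `gorm:"column:calldata"`
	Layer1Hash string    `gorm:"column:layer1_hash"`
	Layer2Hash string    `gorm:"column:layer2_hash"`
	Status     int       `gorm:"column:status;default:1"`
	CreatedAt  time.Time `gorm:"column:created_at"`
	UpdatedAt  time.Time `gorm:"column:updated_at"`
}

// TableName keeps the singular table name used by the migration.
func (L1Message) TableName() string {
	return "l1_message"
}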

View File

@@ -3,21 +3,19 @@
create table l1_block create table l1_block
( (
-- block
number BIGINT NOT NULL, number BIGINT NOT NULL,
hash VARCHAR NOT NULL, hash VARCHAR NOT NULL,
header_rlp TEXT NOT NULL,
base_fee BIGINT NOT NULL, base_fee BIGINT NOT NULL,
block_status INTEGER DEFAULT 1,
-- oracle import_tx_hash VARCHAR DEFAULT NULL,
oracle_status SMALLINT NOT NULL DEFAULT 1, oracle_status INTEGER DEFAULT 1,
oracle_tx_hash VARCHAR DEFAULT NULL, oracle_tx_hash VARCHAR DEFAULT NULL
-- metadata
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
); );
comment
on column l1_block.block_status is 'undefined, pending, importing, imported, failed';
comment comment
on column l1_block.oracle_status is 'undefined, pending, importing, imported, failed'; on column l1_block.oracle_status is 'undefined, pending, importing, imported, failed';

View File

@@ -3,7 +3,6 @@
create table l2_block create table l2_block
( (
-- block
number BIGINT NOT NULL, number BIGINT NOT NULL,
hash VARCHAR NOT NULL, hash VARCHAR NOT NULL,
parent_hash VARCHAR NOT NULL, parent_hash VARCHAR NOT NULL,
@@ -13,14 +12,7 @@ create table l2_block
tx_num INTEGER NOT NULL, tx_num INTEGER NOT NULL,
gas_used BIGINT NOT NULL, gas_used BIGINT NOT NULL,
block_timestamp NUMERIC NOT NULL, block_timestamp NUMERIC NOT NULL,
chunk_hash VARCHAR DEFAULT NULL
-- chunk
chunk_hash VARCHAR DEFAULT NULL,
-- metadata
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
); );
create unique index l2_block_hash_uindex create unique index l2_block_hash_uindex

View File

@@ -11,7 +11,7 @@ create table chunk
end_block_number BIGINT NOT NULL, end_block_number BIGINT NOT NULL,
end_block_hash VARCHAR NOT NULL, end_block_hash VARCHAR NOT NULL,
total_l1_messages_popped_before BIGINT NOT NULL, total_l1_messages_popped_before BIGINT NOT NULL,
total_l1_messages_popped_in_chunk INTEGER NOT NULL, total_l1_messages_popped_in_chunk BIGINT NOT NULL,
start_block_time BIGINT NOT NULL, start_block_time BIGINT NOT NULL,
-- proof -- proof
@@ -19,15 +19,15 @@ create table chunk
proof BYTEA DEFAULT NULL, proof BYTEA DEFAULT NULL,
prover_assigned_at TIMESTAMP(0) DEFAULT NULL, prover_assigned_at TIMESTAMP(0) DEFAULT NULL,
proved_at TIMESTAMP(0) DEFAULT NULL, proved_at TIMESTAMP(0) DEFAULT NULL,
proof_time_sec INTEGER DEFAULT NULL, proof_time_sec SMALLINT DEFAULT NULL,
-- batch -- batch
batch_hash VARCHAR DEFAULT NULL, batch_hash VARCHAR DEFAULT NULL,
-- metadata -- metadata
total_l2_tx_gas BIGINT NOT NULL, total_l2_tx_gas BIGINT NOT NULL,
total_l2_tx_num INTEGER NOT NULL, total_l2_tx_num BIGINT NOT NULL,
total_l1_commit_calldata_size INTEGER NOT NULL, total_l1_commit_calldata_size BIGINT NOT NULL,
total_l1_commit_gas BIGINT NOT NULL, total_l1_commit_gas BIGINT NOT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
@@ -35,7 +35,7 @@ create table chunk
); );
comment comment
on column chunk.proving_status is 'undefined, unassigned, assigned, proved, verified, failed'; on column chunk.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';
create unique index chunk_index_uindex create unique index chunk_index_uindex
on chunk (index); on chunk (index);

View File

@@ -15,7 +15,6 @@ create table batch
batch_header BYTEA NOT NULL, batch_header BYTEA NOT NULL,
-- proof -- proof
chunk_proofs_status SMALLINT NOT NULL DEFAULT 1,
proving_status SMALLINT NOT NULL DEFAULT 1, proving_status SMALLINT NOT NULL DEFAULT 1,
proof BYTEA DEFAULT NULL, proof BYTEA DEFAULT NULL,
prover_assigned_at TIMESTAMP(0) DEFAULT NULL, prover_assigned_at TIMESTAMP(0) DEFAULT NULL,
@@ -46,13 +45,10 @@ create unique index batch_hash_uindex
on batch (hash); on batch (hash);
comment comment
on column batch.chunk_proofs_status is 'undefined, pending, ready'; on column batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';
comment comment
on column batch.proving_status is 'undefined, unassigned, assigned, proved, verified, failed'; on column batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped, commit_failed, finalize_failed';
comment
on column batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, commit_failed, finalize_failed';
-- +goose StatementEnd -- +goose StatementEnd

View File

@@ -10,11 +10,13 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/database/migrate" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
var ( var (
@@ -25,10 +27,10 @@ var (
chunkOrm *Chunk chunkOrm *Chunk
batchOrm *Batch batchOrm *Batch
wrappedBlock1 *types.WrappedBlock wrappedBlock1 *bridgeTypes.WrappedBlock
wrappedBlock2 *types.WrappedBlock wrappedBlock2 *bridgeTypes.WrappedBlock
chunk1 *types.Chunk chunk1 *bridgeTypes.Chunk
chunk2 *types.Chunk chunk2 *bridgeTypes.Chunk
chunkHash1 common.Hash chunkHash1 common.Hash
chunkHash2 common.Hash chunkHash2 common.Hash
) )
@@ -44,8 +46,8 @@ func setupEnv(t *testing.T) {
base = docker.NewDockerApp() base = docker.NewDockerApp()
base.RunDBImage(t) base.RunDBImage(t)
var err error var err error
db, err = database.InitDB( db, err = utils.InitDB(
&database.Config{ &config.DBConfig{
DSN: base.DBConfig.DSN, DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName, DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum, MaxOpenNum: base.DBConfig.MaxOpenNum,
@@ -62,22 +64,28 @@ func setupEnv(t *testing.T) {
l2BlockOrm = NewL2Block(db) l2BlockOrm = NewL2Block(db)
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
assert.NoError(t, err) if err != nil {
wrappedBlock1 = &types.WrappedBlock{} t.Fatalf("failed to read file: %v", err)
err = json.Unmarshal(templateBlockTrace, wrappedBlock1) }
assert.NoError(t, err) wrappedBlock1 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace, wrappedBlock1); err != nil {
t.Fatalf("failed to unmarshal block trace: %v", err)
}
templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json") templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json")
assert.NoError(t, err) if err != nil {
wrappedBlock2 = &types.WrappedBlock{} t.Fatalf("failed to read file: %v", err)
err = json.Unmarshal(templateBlockTrace, wrappedBlock2) }
assert.NoError(t, err) wrappedBlock2 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace, wrappedBlock2); err != nil {
t.Fatalf("failed to unmarshal block trace: %v", err)
}
chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}} chunk1 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1}}
chunkHash1, err = chunk1.Hash(0) chunkHash1, err = chunk1.Hash(0)
assert.NoError(t, err) assert.NoError(t, err)
chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}} chunk2 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock2}}
chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0)) chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
assert.NoError(t, err) assert.NoError(t, err)
} }
@@ -94,12 +102,12 @@ func TestL2BlockOrm(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB)) assert.NoError(t, migrate.ResetDB(sqlDB))
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2}) err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err) assert.NoError(t, err)
height, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background()) height, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, uint64(3), height) assert.Equal(t, int64(3), height)
blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background()) blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background())
assert.NoError(t, err) assert.NoError(t, err)
@@ -127,6 +135,9 @@ func TestChunkOrm(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB)) assert.NoError(t, migrate.ResetDB(sqlDB))
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1) dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex()) assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
@@ -166,24 +177,35 @@ func TestBatchOrm(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB)) assert.NoError(t, migrate.ResetDB(sqlDB))
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1}) err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
assert.NoError(t, err) assert.NoError(t, err)
hash1 := batch1.Hash hash1 := batch1.Hash
batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0) batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
assert.NoError(t, err) assert.NoError(t, err)
batchHeader1, err := types.DecodeBatchHeader(batch1.BatchHeader) batchHeader1, err := bridgeTypes.DecodeBatchHeader(batch1.BatchHeader)
assert.NoError(t, err) assert.NoError(t, err)
batchHash1 := batchHeader1.Hash().Hex() batchHash1 := batchHeader1.Hash().Hex()
assert.Equal(t, hash1, batchHash1) assert.Equal(t, hash1, batchHash1)
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2}) batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
assert.NoError(t, err) assert.NoError(t, err)
hash2 := batch2.Hash hash2 := batch2.Hash
batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1) batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err) assert.NoError(t, err)
batchHeader2, err := types.DecodeBatchHeader(batch2.BatchHeader) batchHeader2, err := bridgeTypes.DecodeBatchHeader(batch2.BatchHeader)
assert.NoError(t, err) assert.NoError(t, err)
batchHash2 := batchHeader2.Hash().Hex() batchHash2 := batchHeader2.Hash().Hex()
assert.Equal(t, hash2, batchHash2) assert.Equal(t, hash2, batchHash2)
@@ -202,11 +224,32 @@ func TestBatchOrm(t *testing.T) {
assert.Equal(t, types.RollupPending, rollupStatus[0]) assert.Equal(t, types.RollupPending, rollupStatus[0])
assert.Equal(t, types.RollupPending, rollupStatus[1]) assert.Equal(t, types.RollupPending, rollupStatus[1])
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash1, types.ProvingTaskSkipped)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitted)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskFailed)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupCommitted)
assert.NoError(t, err)
count, err = batchOrm.UpdateSkippedBatches(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(2), count)
count, err = batchOrm.UpdateSkippedBatches(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(0), count)
batch, err := batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizationSkipped, types.RollupStatus(batch.RollupStatus))
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified) err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err) assert.NoError(t, err)
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1) dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
assert.Error(t, err) assert.Error(t, err, gorm.ErrRecordNotFound)
assert.Nil(t, dbProof) assert.Nil(t, dbProof)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified) err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)

View File

@@ -11,7 +11,7 @@ import (
func TestNewBatchHeader(t *testing.T) { func TestNewBatchHeader(t *testing.T) {
// Without L1 Msg // Without L1 Msg
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock := &WrappedBlock{} wrappedBlock := &WrappedBlock{}
@@ -36,7 +36,7 @@ func TestNewBatchHeader(t *testing.T) {
assert.Equal(t, 0, len(batchHeader.skippedL1MessageBitmap)) assert.Equal(t, 0, len(batchHeader.skippedL1MessageBitmap))
// 1 L1 Msg in 1 bitmap // 1 L1 Msg in 1 bitmap
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json") templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{} wrappedBlock2 := &WrappedBlock{}
@@ -54,7 +54,7 @@ func TestNewBatchHeader(t *testing.T) {
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap)) assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs // many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs
templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_05.json") templateBlockTrace3, err := os.ReadFile("../../../common/testdata/blockTrace_05.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock3 := &WrappedBlock{} wrappedBlock3 := &WrappedBlock{}
@@ -87,7 +87,7 @@ func TestNewBatchHeader(t *testing.T) {
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap)) assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many sparse L1 Msgs in 1 bitmap // many sparse L1 Msgs in 1 bitmap
templateBlockTrace4, err := os.ReadFile("../testdata/blockTrace_06.json") templateBlockTrace4, err := os.ReadFile("../../../common/testdata/blockTrace_06.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock4 := &WrappedBlock{} wrappedBlock4 := &WrappedBlock{}
@@ -106,7 +106,7 @@ func TestNewBatchHeader(t *testing.T) {
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap)) assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many L1 Msgs in each of 2 bitmaps // many L1 Msgs in each of 2 bitmaps
templateBlockTrace5, err := os.ReadFile("../testdata/blockTrace_07.json") templateBlockTrace5, err := os.ReadFile("../../../common/testdata/blockTrace_07.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock5 := &WrappedBlock{} wrappedBlock5 := &WrappedBlock{}
@@ -127,7 +127,7 @@ func TestNewBatchHeader(t *testing.T) {
func TestBatchHeaderEncode(t *testing.T) { func TestBatchHeaderEncode(t *testing.T) {
// Without L1 Msg // Without L1 Msg
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock := &WrappedBlock{} wrappedBlock := &WrappedBlock{}
@@ -154,7 +154,7 @@ func TestBatchHeaderEncode(t *testing.T) {
assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes)) assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes))
// With L1 Msg // With L1 Msg
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json") templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{} wrappedBlock2 := &WrappedBlock{}
@@ -174,7 +174,7 @@ func TestBatchHeaderEncode(t *testing.T) {
func TestBatchHeaderHash(t *testing.T) { func TestBatchHeaderHash(t *testing.T) {
// Without L1 Msg // Without L1 Msg
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock := &WrappedBlock{} wrappedBlock := &WrappedBlock{}
@@ -199,7 +199,7 @@ func TestBatchHeaderHash(t *testing.T) {
hash := batchHeader.Hash() hash := batchHeader.Hash()
assert.Equal(t, "d69da4357da0073f4093c76e49f077e21bb52f48f57ee3e1fbd9c38a2881af81", common.Bytes2Hex(hash.Bytes())) assert.Equal(t, "d69da4357da0073f4093c76e49f077e21bb52f48f57ee3e1fbd9c38a2881af81", common.Bytes2Hex(hash.Bytes()))
templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json") templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{} wrappedBlock2 := &WrappedBlock{}
@@ -216,7 +216,7 @@ func TestBatchHeaderHash(t *testing.T) {
assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes())) assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes()))
// With L1 Msg // With L1 Msg
templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_04.json") templateBlockTrace3, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock3 := &WrappedBlock{} wrappedBlock3 := &WrappedBlock{}

View File

@@ -0,0 +1,136 @@
package types
import (
"encoding/binary"
"errors"
"math"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
)
const nonZeroByteGas uint64 = 16
const zeroByteGas uint64 = 4
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used to recover types.Transactions; the from field of types.TransactionData is missing there.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}
// NumL1Messages returns the number of L1 messages in this block.
// This number is the sum of included and skipped L1 messages.
func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
var lastQueueIndex *uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
lastQueueIndex = &txData.Nonce
}
}
if lastQueueIndex == nil {
return 0
}
// note: last queue index included before this block is totalL1MessagePoppedBefore - 1
// TODO: cache results
return *lastQueueIndex - totalL1MessagePoppedBefore + 1
}
// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
bytes := make([]byte, 60)
if !w.Header.Number.IsUint64() {
return nil, errors.New("block number is not uint64")
}
if len(w.Transactions) > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
numL1Messages := w.NumL1Messages(totalL1MessagePoppedBefore)
if numL1Messages > math.MaxUint16 {
return nil, errors.New("number of L1 messages exceeds max uint16")
}
binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
binary.BigEndian.PutUint64(bytes[8:], w.Header.Time)
// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions)))
binary.BigEndian.PutUint16(bytes[58:], uint16(numL1Messages))
return bytes, nil
}
// EstimateL1CommitCalldataSize calculates the calldata size in l1 commit approximately.
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
// This needs to be adjusted in the future.
func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
var size uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
size += uint64(len(txData.Data))
}
return size
}
// EstimateL1CommitGas calculates the calldata gas in l1 commit approximately.
// TODO: This will need to be adjusted.
// The part added here is only the calldata cost,
// but we have execution cost for verifying blocks / chunks / batches and storing the batch hash.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
var total uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
data, _ := hexutil.Decode(txData.Data)
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
for _, b := range rlpTxData {
if b == 0 {
total += zeroByteGas
} else {
total += nonZeroByteGas
}
}
var txLen [4]byte
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
for _, b := range txLen {
if b == 0 {
total += zeroByteGas
} else {
total += nonZeroByteGas
}
}
}
return total
}
// L2TxsNum calculates the number of l2 txs.
func (w *WrappedBlock) L2TxsNum() uint64 {
var count uint64
for _, txData := range w.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
}
return count
}
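
A short usage sketch for the encoding helpers above; the header values are arbitrary and the import alias follows the one used elsewhere in this diff.

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"

	gethTypes "github.com/scroll-tech/go-ethereum/core/types"

	bridgeTypes "scroll-tech/bridge/internal/types"
)

func main() {
	// a minimal block with no transactions; real blocks come from the L2 watcher
	block := &bridgeTypes.WrappedBlock{
		Header: &gethTypes.Header{
			Number:   big.NewInt(1),
			Time:     1672531200,
			GasLimit: 10_000_000,
		},
	}

	// 60-byte BlockContext: number, timestamp, base fee (currently zero),
	// gas limit, total tx count, and L1 messages popped in this block
	ctxBytes, err := block.Encode(0)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(ctxBytes), hex.EncodeToString(ctxBytes))

	// rough L1 commit cost estimates used when proposing chunks
	fmt.Println(block.EstimateL1CommitCalldataSize(), block.EstimateL1CommitGas())
}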

View File

@@ -32,7 +32,7 @@ func TestChunkEncode(t *testing.T) {
assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte") assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte")
// Test case 3: when the chunk contains one block. // Test case 3: when the chunk contains one block.
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock := &WrappedBlock{} wrappedBlock := &WrappedBlock{}
@@ -50,7 +50,7 @@ func TestChunkEncode(t *testing.T) {
assert.Equal(t, "0100000000000000020000000063807b2a0000000000000000000000000000000000000000000000000000000000000000000355418d1e81840002000000000073f87180843b9aec2e8307a12094c0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca28a152d02c7e14af60000008083019ecea0ab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514a034cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b00000073f87101843b9aec2e8307a1209401bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed8a152d02c7e14af60000008083019ecea0f039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316a05a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1", hexString) assert.Equal(t, "0100000000000000020000000063807b2a0000000000000000000000000000000000000000000000000000000000000000000355418d1e81840002000000000073f87180843b9aec2e8307a12094c0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca28a152d02c7e14af60000008083019ecea0ab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514a034cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b00000073f87101843b9aec2e8307a1209401bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed8a152d02c7e14af60000008083019ecea0f039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316a05a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1", hexString)
// Test case 4: when the chunk contains one block with 1 L1MsgTx // Test case 4: when the chunk contains one block with 1 L1MsgTx
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json") templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{} wrappedBlock2 := &WrappedBlock{}
@@ -92,7 +92,7 @@ func TestChunkHash(t *testing.T) {
assert.Contains(t, err.Error(), "number of blocks is 0") assert.Contains(t, err.Error(), "number of blocks is 0")
// Test case 2: successfully hashing a chunk on one block // Test case 2: successfully hashing a chunk on one block
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock := &WrappedBlock{} wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock)) assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
@@ -106,7 +106,7 @@ func TestChunkHash(t *testing.T) {
assert.Equal(t, "0x78c839dfc494396c16b40946f32b3f4c3e8c2d4bfd04aefcf235edec474482f8", hash.Hex()) assert.Equal(t, "0x78c839dfc494396c16b40946f32b3f4c3e8c2d4bfd04aefcf235edec474482f8", hash.Hex())
// Test case 3: successfully hashing a chunk on two blocks // Test case 3: successfully hashing a chunk on two blocks
templateBlockTrace1, err := os.ReadFile("../testdata/blockTrace_03.json") templateBlockTrace1, err := os.ReadFile("../../../common/testdata/blockTrace_03.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock1 := &WrappedBlock{} wrappedBlock1 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace1, wrappedBlock1)) assert.NoError(t, json.Unmarshal(templateBlockTrace1, wrappedBlock1))
@@ -121,7 +121,7 @@ func TestChunkHash(t *testing.T) {
assert.Equal(t, "0xaa9e494f72bc6965857856f0fae6916f27b2a6591c714a573b2fab46df03b8ae", hash.Hex()) assert.Equal(t, "0xaa9e494f72bc6965857856f0fae6916f27b2a6591c714a573b2fab46df03b8ae", hash.Hex())
// Test case 4: successfully hashing a chunk on two blocks each with L1 and L2 txs // Test case 4: successfully hashing a chunk on two blocks each with L1 and L2 txs
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json") templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json")
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{} wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2)) assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))

View File

@@ -0,0 +1,43 @@
package utils
import (
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"scroll-tech/bridge/internal/config"
)
// InitDB initializes the db handler
func InitDB(config *config.DBConfig) (*gorm.DB, error) {
db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
Logger: logger.Default.LogMode(logger.Info),
})
if err != nil {
return nil, err
}
sqlDB, err := db.DB()
if err != nil {
return nil, err
}
sqlDB.SetMaxOpenConns(config.MaxOpenNum)
sqlDB.SetMaxIdleConns(config.MaxIdleNum)
if err = sqlDB.Ping(); err != nil {
return nil, err
}
return db, nil
}
// CloseDB closes the db handler. Note: the db handler should only be closed when the program exits.
func CloseDB(db *gorm.DB) error {
sqlDB, err := db.DB()
if err != nil {
return err
}
if err := sqlDB.Close(); err != nil {
return err
}
return nil
}
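
A sketch of how InitDB and CloseDB might be used together; the DSN and pool sizes are placeholders, while the DBConfig fields match the ones referenced elsewhere in this diff.

package main

import (
	"log"

	"scroll-tech/bridge/internal/config"
	"scroll-tech/bridge/internal/utils"
)

func main() {
	// placeholder values; the bridge reads these from its config file
	cfg := &config.DBConfig{
		DSN:        "postgres://user:pass@localhost:5432/scroll?sslmode=disable",
		DriverName: "postgres",
		MaxOpenNum: 50,
		MaxIdleNum: 5,
	}

	db, err := utils.InitDB(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// the db handler is only meant to be closed when the program exits
	defer func() {
		if err := utils.CloseDB(db); err != nil {
			log.Println("failed to close db:", err)
		}
	}()
}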

View File

@@ -11,12 +11,12 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/database/migrate"
bcmd "scroll-tech/bridge/cmd" bcmd "scroll-tech/bridge/cmd"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
"scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/mock_bridge" "scroll-tech/bridge/mock_bridge"
) )
@@ -46,13 +46,13 @@ var (
) )
func setupDB(t *testing.T) *gorm.DB { func setupDB(t *testing.T) *gorm.DB {
cfg := &database.Config{ cfg := &config.DBConfig{
DSN: base.DBConfig.DSN, DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName, DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum, MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum, MaxIdleNum: base.DBConfig.MaxIdleNum,
} }
db, err := database.InitDB(cfg) db, err := utils.InitDB(cfg)
assert.NoError(t, err) assert.NoError(t, err)
sqlDB, err := db.DB() sqlDB, err := db.DB()
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -9,17 +9,18 @@ import (
gethTypes "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
func testImportL1GasPrice(t *testing.T) { func testImportL1GasPrice(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
prepareContracts(t) prepareContracts(t)
@@ -43,10 +44,10 @@ func testImportL1GasPrice(t *testing.T) {
l1BlockOrm := orm.NewL1Block(db) l1BlockOrm := orm.NewL1Block(db)
// check db status // check db status
latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background()) latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, number, latestBlockHeight) assert.Equal(t, number, latestBlockHeight)
blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight}) blocks, err := l1BlockOrm.GetL1Blocks(map[string]interface{}{"number": latestBlockHeight})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(blocks), 1) assert.Equal(t, len(blocks), 1)
assert.Empty(t, blocks[0].OracleTxHash) assert.Empty(t, blocks[0].OracleTxHash)
@@ -54,7 +55,7 @@ func testImportL1GasPrice(t *testing.T) {
// relay gas price // relay gas price
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight}) blocks, err = l1BlockOrm.GetL1Blocks(map[string]interface{}{"number": latestBlockHeight})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(blocks), 1) assert.Equal(t, len(blocks), 1)
assert.NotEmpty(t, blocks[0].OracleTxHash) assert.NotEmpty(t, blocks[0].OracleTxHash)
@@ -63,7 +64,7 @@ func testImportL1GasPrice(t *testing.T) {
func testImportL2GasPrice(t *testing.T) { func testImportL2GasPrice(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
prepareContracts(t) prepareContracts(t)
l2Cfg := bridgeApp.Config.L2Config l2Cfg := bridgeApp.Config.L2Config
@@ -71,8 +72,8 @@ func testImportL2GasPrice(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
// add fake chunk // add fake chunk
chunk := &types.Chunk{ chunk := &bridgeTypes.Chunk{
Blocks: []*types.WrappedBlock{ Blocks: []*bridgeTypes.WrappedBlock{
{ {
Header: &gethTypes.Header{ Header: &gethTypes.Header{
Number: big.NewInt(1), Number: big.NewInt(1),
@@ -89,7 +90,7 @@ func testImportL2GasPrice(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
batchOrm := orm.NewBatch(db) batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*types.Chunk{chunk}) _, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*bridgeTypes.Chunk{chunk})
assert.NoError(t, err) assert.NoError(t, err)
// check db status // check db status

View File

@@ -11,17 +11,17 @@ import (
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
) )
func testRelayL1MessageSucceed(t *testing.T) { func testRelayL1MessageSucceed(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
prepareContracts(t) prepareContracts(t)

View File

@@ -11,15 +11,16 @@ import (
_ "scroll-tech/bridge/cmd/msg_relayer/app" _ "scroll-tech/bridge/cmd/msg_relayer/app"
_ "scroll-tech/bridge/cmd/rollup_relayer/app" _ "scroll-tech/bridge/cmd/rollup_relayer/app"
"scroll-tech/common/database"
cutils "scroll-tech/common/utils" cutils "scroll-tech/common/utils"
"scroll-tech/bridge/internal/utils"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func testProcessStart(t *testing.T) { func testProcessStart(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
bridgeApp.RunApp(t, cutils.EventWatcherApp) bridgeApp.RunApp(t, cutils.EventWatcherApp)
bridgeApp.RunApp(t, cutils.GasOracleApp) bridgeApp.RunApp(t, cutils.GasOracleApp)
@@ -31,7 +32,7 @@ func testProcessStart(t *testing.T) {
func testProcessStartEnableMetrics(t *testing.T) { func testProcessStartEnableMetrics(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
port, err := rand.Int(rand.Reader, big.NewInt(2000)) port, err := rand.Int(rand.Reader, big.NewInt(2000))
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -10,7 +10,6 @@ import (
gethTypes "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message" "scroll-tech/common/types/message"
@@ -18,11 +17,13 @@ import (
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
func testCommitBatchAndFinalizeBatch(t *testing.T) { func testCommitBatchAndFinalizeBatch(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer database.CloseDB(db) defer utils.CloseDB(db)
prepareContracts(t) prepareContracts(t)
@@ -36,7 +37,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db) l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// add some blocks to db // add some blocks to db
var wrappedBlocks []*types.WrappedBlock var wrappedBlocks []*bridgeTypes.WrappedBlock
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
header := gethTypes.Header{ header := gethTypes.Header{
Number: big.NewInt(int64(i)), Number: big.NewInt(int64(i)),
@@ -44,7 +45,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
Difficulty: big.NewInt(0), Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0), BaseFee: big.NewInt(0),
} }
wrappedBlocks = append(wrappedBlocks, &types.WrappedBlock{ wrappedBlocks = append(wrappedBlocks, &bridgeTypes.WrappedBlock{
Header: &header, Header: &header,
Transactions: nil, Transactions: nil,
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},

View File

@@ -0,0 +1,56 @@
package utils
import (
"context"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
)
type ethClient interface {
BlockNumber(ctx context.Context) (uint64, error)
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
}
// GetLatestConfirmedBlockNumber gets the latest confirmed block number for the given rpc.BlockNumber confirmation type.
func GetLatestConfirmedBlockNumber(ctx context.Context, client ethClient, confirm rpc.BlockNumber) (uint64, error) {
switch true {
case confirm == rpc.SafeBlockNumber || confirm == rpc.FinalizedBlockNumber:
var tag *big.Int
if confirm == rpc.FinalizedBlockNumber {
tag = big.NewInt(int64(rpc.FinalizedBlockNumber))
} else {
tag = big.NewInt(int64(rpc.SafeBlockNumber))
}
header, err := client.HeaderByNumber(ctx, tag)
if err != nil {
return 0, err
}
if !header.Number.IsInt64() {
return 0, fmt.Errorf("received invalid block confirm: %v", header.Number)
}
return header.Number.Uint64(), nil
case confirm == rpc.LatestBlockNumber:
number, err := client.BlockNumber(ctx)
if err != nil {
return 0, err
}
return number, nil
case confirm.Int64() >= 0: // If it's a non-negative integer, treat it as the required number of confirmations.
number, err := client.BlockNumber(ctx)
if err != nil {
return 0, err
}
cfmNum := uint64(confirm.Int64())
if number >= cfmNum {
return number - cfmNum, nil
}
return 0, nil
default:
return 0, fmt.Errorf("unknown confirmation type: %v", confirm)
}
}
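
A usage sketch for GetLatestConfirmedBlockNumber; the endpoint is a placeholder and the import path is assumed to be the internal utils package introduced in this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rpc"

	"scroll-tech/bridge/internal/utils"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}

	// a numeric confirmation of 6 yields head-6; rpc.SafeBlockNumber and
	// rpc.FinalizedBlockNumber are resolved through HeaderByNumber instead
	confirmed, err := utils.GetLatestConfirmedBlockNumber(context.Background(), client, rpc.BlockNumber(6))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest confirmed block:", confirmed)
}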

View File

@@ -0,0 +1,134 @@
package utils
import (
"context"
"encoding/json"
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/go-ethereum/common/math"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
)
var (
tests = []struct {
input string
mustFail bool
expected rpc.BlockNumber
}{
{`"0x"`, true, rpc.BlockNumber(0)},
{`"0x0"`, false, rpc.BlockNumber(0)},
{`"0X1"`, false, rpc.BlockNumber(1)},
{`"0x00"`, true, rpc.BlockNumber(0)},
{`"0x01"`, true, rpc.BlockNumber(0)},
{`"0x1"`, false, rpc.BlockNumber(1)},
{`"0x12"`, false, rpc.BlockNumber(18)},
{`"0x7fffffffffffffff"`, false, rpc.BlockNumber(math.MaxInt64)},
{`"0x8000000000000000"`, true, rpc.BlockNumber(0)},
{"0", true, rpc.BlockNumber(0)},
{`"ff"`, true, rpc.BlockNumber(0)},
{`"safe"`, false, rpc.SafeBlockNumber},
{`"finalized"`, false, rpc.FinalizedBlockNumber},
{`"pending"`, false, rpc.PendingBlockNumber},
{`"latest"`, false, rpc.LatestBlockNumber},
{`"earliest"`, false, rpc.EarliestBlockNumber},
{`someString`, true, rpc.BlockNumber(0)},
{`""`, true, rpc.BlockNumber(0)},
{``, true, rpc.BlockNumber(0)},
}
)
func TestUnmarshalJSON(t *testing.T) {
for i, test := range tests {
var num rpc.BlockNumber
err := json.Unmarshal([]byte(test.input), &num)
if test.mustFail && err == nil {
t.Errorf("Test %d should fail", i)
continue
}
if !test.mustFail && err != nil {
t.Errorf("Test %d should pass but got err: %v", i, err)
continue
}
if num != test.expected {
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
}
}
}
func TestMarshalJSON(t *testing.T) {
for i, test := range tests {
var num rpc.BlockNumber
want, err := json.Marshal(test.expected)
assert.NoError(t, err)
if !test.mustFail {
err = json.Unmarshal([]byte(test.input), &num)
assert.NoError(t, err)
got, err := json.Marshal(&num)
assert.NoError(t, err)
if string(want) != string(got) {
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
}
}
}
}
type MockEthClient struct {
val uint64
}
func (e MockEthClient) BlockNumber(ctx context.Context) (uint64, error) {
return e.val, nil
}
func (e MockEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
var blockNumber int64
switch number.Int64() {
case int64(rpc.LatestBlockNumber):
blockNumber = int64(e.val)
case int64(rpc.SafeBlockNumber):
blockNumber = int64(e.val) - 6
case int64(rpc.FinalizedBlockNumber):
blockNumber = int64(e.val) - 12
default:
blockNumber = number.Int64()
}
if blockNumber < 0 {
blockNumber = 0
}
return &types.Header{Number: new(big.Int).SetInt64(blockNumber)}, nil
}
func TestGetLatestConfirmedBlockNumber(t *testing.T) {
ctx := context.Background()
client := MockEthClient{}
testCases := []struct {
blockNumber uint64
confirmation rpc.BlockNumber
expectedResult uint64
}{
{5, 6, 0},
{7, 6, 1},
{10, 2, 8},
{0, 1, 0},
{3, 0, 3},
{15, 15, 0},
{16, rpc.SafeBlockNumber, 10},
{22, rpc.FinalizedBlockNumber, 10},
{10, rpc.LatestBlockNumber, 10},
{5, rpc.SafeBlockNumber, 0},
{11, rpc.FinalizedBlockNumber, 0},
}
for _, testCase := range testCases {
client.val = testCase.blockNumber
confirmed, err := GetLatestConfirmedBlockNumber(ctx, &client, testCase.confirmation)
assert.NoError(t, err)
assert.Equal(t, testCase.expectedResult, confirmed)
}
}

bridge/utils/utils.go Normal file (+65)
View File

@@ -0,0 +1,65 @@
package utils
import (
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
bridgeabi "scroll-tech/bridge/abi"
)
// Keccak2 computes the keccak256 hash of the concatenation of two bytes32 values
func Keccak2(a common.Hash, b common.Hash) common.Hash {
return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
}
// ComputeMessageHash computes the message hash
func ComputeMessageHash(
sender common.Address,
target common.Address,
value *big.Int,
messageNonce *big.Int,
message []byte,
) common.Hash {
data, _ := bridgeabi.L2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
return common.BytesToHash(crypto.Keccak256(data))
}
// BufferToUint256Le converts a byte array to a uint256 array, assuming little-endian encoding
func BufferToUint256Le(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
v := big.NewInt(0)
shft := big.NewInt(1)
for j := 0; j < 32; j++ {
v = new(big.Int).Add(v, new(big.Int).Mul(shft, big.NewInt(int64(buffer[i*32+j]))))
shft = new(big.Int).Mul(shft, big.NewInt(256))
}
buffer256[i] = v
}
return buffer256
}
// UnpackLog unpacks a retrieved log into the provided output structure.
// @todo: add unit test.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
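
UnpackLog is the only helper in this file not exercised by the test below; here is a hedged sketch of how it could be called, where the ABI JSON, event name, and output struct are invented for the example.

package main

import (
	"fmt"
	"log"
	"math/big"
	"strings"

	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
	gethTypes "github.com/scroll-tech/go-ethereum/core/types"

	"scroll-tech/bridge/utils"
)

// exampleABI and exampleEvent are made up for illustration.
const exampleABI = `[{"anonymous":false,"inputs":[{"indexed":true,"name":"sender","type":"address"},{"indexed":false,"name":"value","type":"uint256"}],"name":"ExampleEvent","type":"event"}]`

type exampleEvent struct {
	Sender common.Address
	Value  *big.Int
}

func main() {
	parsed, err := abi.JSON(strings.NewReader(exampleABI))
	if err != nil {
		log.Fatal(err)
	}

	// vLog would normally come from FilterLogs / a log subscription
	var vLog gethTypes.Log
	vLog.Topics = []common.Hash{parsed.Events["ExampleEvent"].ID, common.HexToHash("0x01")}
	vLog.Data = common.LeftPadBytes(big.NewInt(42).Bytes(), 32)

	var out exampleEvent
	if err := utils.UnpackLog(&parsed, &out, "ExampleEvent", vLog); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Sender.Hex(), out.Value)
}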

View File

@@ -0,0 +1,49 @@
package utils
import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func TestKeccak2(t *testing.T) {
hash := Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"))
if hash != common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") {
t.Fatalf("Invalid keccak, want %s, got %s", "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", hash.Hex())
}
hash = Keccak2(common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"), common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"))
if hash != common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30") {
t.Fatalf("Invalid keccak, want %s, got %s", "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", hash.Hex())
}
hash = Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
if hash != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
t.Fatalf("Invalid keccak, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", hash.Hex())
}
}
func TestComputeMessageHash(t *testing.T) {
hash := ComputeMessageHash(
common.HexToAddress("0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63"),
common.HexToAddress("0x4592D8f8D7B001e72Cb26A73e4Fa1806a51aC79d"),
big.NewInt(0),
big.NewInt(1),
[]byte("testbridgecontract"),
)
assert.Equal(t, "0xda253c04595a49017bb54b1b46088c69752b5ad2f0c47971ac76b8b25abec202", hash.String())
}
func TestBufferToUint256Le(t *testing.T) {
input := []byte{
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}
expectedOutput := []*big.Int{big.NewInt(1)}
result := BufferToUint256Le(input)
assert.Equal(t, expectedOutput, result)
}

View File

@@ -75,10 +75,6 @@ linters-settings:
# report about shadowed variables # report about shadowed variables
check-shadowing: true check-shadowing: true
gosec:
disable:
- G108
golint: golint:
# minimal confidence for issues, default is 0.8 # minimal confidence for issues, default is 0.8
min-confidence: 0.8 min-confidence: 0.8
@@ -213,13 +209,6 @@ issues:
linters: linters:
- errcheck - errcheck
- gosec - gosec
# Exclude abi files in bridge-history-api
- path: backend_abi\.go
linters:
- errcheck
- gosec
- golint
# Exclude some staticcheck messages # Exclude some staticcheck messages
- linters: - linters:
@@ -231,12 +220,7 @@ issues:
- lll - lll
source: "^//go:generate " source: "^//go:generate "
text: "long-lines" text: "long-lines"
# Exclude gosec issues for G108: Profiling endpoint is automatically exposed
- linters:
- gosec
text: "G108"
- linters: - linters:
- wsl - wsl
text: "return statements should not be cuddled if block has more than two lines" text: "return statements should not be cuddled if block has more than two lines"

View File

@@ -1,84 +0,0 @@
package database
import (
"context"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"gorm.io/gorm/utils"
)
type gormLogger struct {
gethLogger log.Logger
}
func (g *gormLogger) LogMode(level logger.LogLevel) logger.Interface {
return g
}
func (g *gormLogger) Info(_ context.Context, msg string, data ...interface{}) {
infoMsg := fmt.Sprintf(msg, data...)
g.gethLogger.Info("gorm", "info message", infoMsg)
}
func (g *gormLogger) Warn(_ context.Context, msg string, data ...interface{}) {
warnMsg := fmt.Sprintf(msg, data...)
g.gethLogger.Warn("gorm", "warn message", warnMsg)
}
func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
errMsg := fmt.Sprintf(msg, data...)
g.gethLogger.Error("gorm", "err message", errMsg)
}
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
elapsed := time.Since(begin)
sql, rowsAffected := fc()
g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}
// InitDB init the db handler
func InitDB(config *Config) (*gorm.DB, error) {
tmpGormLogger := gormLogger{
gethLogger: log.Root(),
}
db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
Logger: &tmpGormLogger,
NowFunc: func() time.Time {
utc, _ := time.LoadLocation("")
return time.Now().In(utc)
},
})
if err != nil {
return nil, err
}
sqlDB, err := db.DB()
if err != nil {
return nil, err
}
sqlDB.SetMaxOpenConns(config.MaxOpenNum)
sqlDB.SetMaxIdleConns(config.MaxIdleNum)
if err = sqlDB.Ping(); err != nil {
return nil, err
}
return db, nil
}
// CloseDB close the db handler. notice the db handler only can close when then program exit.
func CloseDB(db *gorm.DB) error {
sqlDB, err := db.DB()
if err != nil {
return err
}
if err := sqlDB.Close(); err != nil {
return err
}
return nil
}

View File

@@ -1,35 +0,0 @@
package database
import (
"context"
"errors"
"io"
"os"
"testing"
"time"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/scroll-tech/go-ethereum/log"
)
func TestGormLogger(t *testing.T) {
output := io.Writer(os.Stderr)
usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
if usecolor {
output = colorable.NewColorableStderr()
}
ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
glogger := log.NewGlogHandler(ostream)
// Set log level
glogger.Verbosity(log.LvlTrace)
log.Root().SetHandler(glogger)
var gl gormLogger
gl.gethLogger = log.Root()
gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
}

View File

@@ -7,7 +7,6 @@ import (
"time" "time"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"scroll-tech/common/cmd" "scroll-tech/common/cmd"
"scroll-tech/common/utils" "scroll-tech/common/utils"
@@ -66,12 +65,8 @@ func (i *ImgDB) Stop() error {
if i.id == "" { if i.id == "" {
i.id = GetContainerID(i.name) i.id = GetContainerID(i.name)
} }
timeout := time.Second * 3
timeoutSec := 3 if err := cli.ContainerStop(ctx, i.id, &timeout); err != nil {
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, i.id, timeout); err != nil {
return err return err
} }
// remove the stopped container. // remove the stopped container.

View File

@@ -9,7 +9,6 @@ import (
"time" "time"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"scroll-tech/common/cmd" "scroll-tech/common/cmd"
@@ -136,11 +135,8 @@ func (i *ImgGeth) Stop() error {
// check if container is running, stop the running container. // check if container is running, stop the running container.
id := GetContainerID(i.name) id := GetContainerID(i.name)
if id != "" { if id != "" {
timeoutSec := 3 timeout := time.Second * 3
timeout := container.StopOptions{ if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, id, timeout); err != nil {
return err return err
} }
i.id = id i.id = id

View File

@@ -3,23 +3,20 @@ module scroll-tech/common
go 1.19 go 1.19
require ( require (
github.com/docker/docker v23.0.6+incompatible github.com/docker/docker v20.10.21+incompatible
github.com/jmoiron/sqlx v1.3.5 github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.9 github.com/lib/pq v1.10.7
github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.19 github.com/mattn/go-isatty v0.0.18
github.com/modern-go/reflect2 v1.0.2 github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0 github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/stretchr/testify v1.8.3 github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.25.7 github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
gorm.io/driver/postgres v1.5.0
gorm.io/gorm v1.25.2
) )
require ( require (
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -37,7 +34,7 @@ require (
github.com/go-kit/kit v0.9.0 // indirect github.com/go-kit/kit v0.9.0 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
@@ -54,25 +51,20 @@ require (
github.com/influxdata/influxdb v1.8.3 // indirect github.com/influxdata/influxdb v1.8.3 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kr/pretty v0.3.1 // indirect github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/moby/term v0.5.0 // indirect github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/morikuni/aec v1.0.0 // indirect github.com/morikuni/aec v1.0.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.27.1 // indirect github.com/onsi/gomega v1.27.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
@@ -85,6 +77,7 @@ require (
github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.5.3 // indirect github.com/scroll-tech/zktrie v0.5.3 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect github.com/status-im/keycard-go v0.2.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect
@@ -92,14 +85,15 @@ require (
github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.11.0 // indirect golang.org/x/crypto v0.10.0 // indirect
golang.org/x/mod v0.12.0 // indirect golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.12.0 // indirect golang.org/x/net v0.10.0 // indirect
golang.org/x/sync v0.3.0 // indirect golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.10.0 // indirect golang.org/x/sys v0.9.0 // indirect
golang.org/x/text v0.11.0 // indirect golang.org/x/text v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.11.0 // indirect golang.org/x/tools v0.8.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect

View File

@@ -18,13 +18,13 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
@@ -63,6 +63,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -79,8 +80,8 @@ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMa
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU= github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -120,8 +121,8 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
@@ -211,21 +212,9 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/pgx/v5 v5.3.1 h1:Fcr8QJ1ZeLi5zsPZqQeUZhNhxfkkKBOgJuYkJHoBOtU=
github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
@@ -251,7 +240,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -263,8 +251,8 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
@@ -279,16 +267,16 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -296,8 +284,8 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
@@ -325,8 +313,8 @@ github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw= github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
@@ -366,7 +354,6 @@ github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
@@ -384,6 +371,8 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -404,9 +393,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -416,8 +404,8 @@ github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYm
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
@@ -427,7 +415,6 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRT
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -445,10 +432,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -477,9 +462,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -501,11 +485,8 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -519,10 +500,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -558,30 +537,23 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -603,6 +575,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -618,9 +591,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -690,11 +662,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@@ -1898,7 +1898,7 @@ source = "git+https://github.com/scroll-tech/halo2-snark-aggregator?branch=scrol
dependencies = [ dependencies = [
"group", "group",
"halo2_proofs", "halo2_proofs",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)",
"num-bigint", "num-bigint",
"num-integer", "num-integer",
] ]
@@ -1929,7 +1929,7 @@ dependencies = [
"digest 0.10.3", "digest 0.10.3",
"group", "group",
"halo2_proofs", "halo2_proofs",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)",
"log", "log",
"num-bigint", "num-bigint",
"poseidon", "poseidon",
@@ -1947,7 +1947,7 @@ dependencies = [
"halo2-ecc-circuit-lib", "halo2-ecc-circuit-lib",
"halo2-snark-aggregator-api", "halo2-snark-aggregator-api",
"halo2_proofs", "halo2_proofs",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)",
"log", "log",
"rand 0.8.5", "rand 0.8.5",
"rand_core 0.6.3", "rand_core 0.6.3",
@@ -1966,7 +1966,7 @@ dependencies = [
"halo2-snark-aggregator-api", "halo2-snark-aggregator-api",
"halo2-snark-aggregator-circuit", "halo2-snark-aggregator-circuit",
"halo2_proofs", "halo2_proofs",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)",
"log", "log",
"num-bigint", "num-bigint",
"sha3 0.10.1", "sha3 0.10.1",
@@ -2015,7 +2015,7 @@ dependencies = [
"cfg-if 0.1.10", "cfg-if 0.1.10",
"ff", "ff",
"group", "group",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)",
"log", "log",
"num-bigint", "num-bigint",
"num-integer", "num-integer",
@@ -2030,7 +2030,7 @@ dependencies = [
[[package]] [[package]]
name = "halo2curves" name = "halo2curves"
version = "0.3.1" version = "0.3.1"
source = "git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19" source = "git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1#9b67e19bca30a35208b0c1b41c1723771e2c9f49"
dependencies = [ dependencies = [
"ff", "ff",
"group", "group",
@@ -3245,7 +3245,7 @@ version = "0.2.0"
source = "git+https://github.com/scroll-tech/poseidon.git?branch=scroll-dev-0220#2fb4a2385bada39b50dce12fe50cb80d2fd33476" source = "git+https://github.com/scroll-tech/poseidon.git?branch=scroll-dev-0220#2fb4a2385bada39b50dce12fe50cb80d2fd33476"
dependencies = [ dependencies = [
"group", "group",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)",
"subtle", "subtle",
] ]
@@ -4121,7 +4121,7 @@ source = "git+https://github.com/privacy-scaling-explorations/snark-verifier?tag
dependencies = [ dependencies = [
"ecc", "ecc",
"halo2_proofs", "halo2_proofs",
"halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations//halo2curves.git?rev=9b67e19)", "halo2curves 0.3.1 (git+https://github.com/privacy-scaling-explorations/halo2curves.git?tag=0.3.1)",
"hex", "hex",
"itertools", "itertools",
"lazy_static", "lazy_static",
@@ -5051,7 +5051,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]] [[package]]
name = "types" name = "types"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/scroll-tech/scroll-prover?rev=78ab7a7#78ab7a770f7753fa88c8aab4969296f06e1c811a" source = "git+https://github.com/scroll-tech/scroll-zkevm?rev=78ab7a7#78ab7a770f7753fa88c8aab4969296f06e1c811a"
dependencies = [ dependencies = [
"base64 0.13.0", "base64 0.13.0",
"blake2", "blake2",
@@ -5727,7 +5727,7 @@ dependencies = [
[[package]] [[package]]
name = "zkevm" name = "zkevm"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/scroll-tech/scroll-prover?rev=78ab7a7#78ab7a770f7753fa88c8aab4969296f06e1c811a" source = "git+https://github.com/scroll-tech/scroll-zkevm?rev=78ab7a7#78ab7a770f7753fa88c8aab4969296f06e1c811a"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"blake2", "blake2",

View File

@@ -7,17 +7,14 @@ edition = "2021"
[lib] [lib]
crate-type = ["cdylib"] crate-type = ["cdylib"]
# `//` is used to skip https://github.com/rust-lang/cargo/issues/5478#issuecomment-522719793.
[patch."https://github.com/privacy-scaling-explorations/halo2curves.git"]
halo2curves = { git = 'https://github.com/privacy-scaling-explorations//halo2curves.git', rev = "9b67e19" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"] [patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "scroll-dev-0220" } halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "scroll-dev-0220" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"] [patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "scroll-dev-0220" } poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "scroll-dev-0220" }
[dependencies] [dependencies]
zkevm = { git = "https://github.com/scroll-tech/scroll-prover", rev="78ab7a7" } zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", rev="78ab7a7" }
types = { git = "https://github.com/scroll-tech/scroll-prover", rev="78ab7a7" } types = { git = "https://github.com/scroll-tech/scroll-zkevm", rev="78ab7a7" }
halo2_proofs = { git = "https://github.com/privacy-scaling-explorations/halo2.git", tag = "v2022_09_10" } halo2_proofs = { git = "https://github.com/privacy-scaling-explorations/halo2.git", tag = "v2022_09_10" }
log = "0.4" log = "0.4"
@@ -35,4 +32,4 @@ debug-assertions = true
[profile.release] [profile.release]
opt-level = 3 opt-level = 3
debug-assertions = true debug-assertions = true

View File

@@ -1,5 +1,5 @@
void init_prover(char *params_path, char *seed_path); init_prover(char *params_path, char *seed_path);
char* create_agg_proof(char *trace); char* create_agg_proof(char *trace);
char* create_agg_proof_multi(char *trace); char* create_agg_proof_multi(char *trace);
void init_verifier(char *params_path, char *agg_vk_path); init_verifier(char *params_path, char *agg_vk_path);
char verify_agg_proof(char *proof); char verify_agg_proof(char *proof);

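The header above declares the C ABI that the Rust cdylib exposes to the roller. For reference, a minimal sketch of how a Go caller might drive these functions through cgo follows; the library name (`zkp`), header name (`libzkp.h`), file paths, and the ownership of the returned proof string are assumptions for illustration, not part of this change.

package main

/*
#cgo LDFLAGS: -L./lib -lzkp
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"

import (
	"fmt"
	"unsafe"
)

// createAggProof wraps create_agg_proof, converting the Go trace string to a
// C string and freeing it after the call. Ownership of the returned buffer
// depends on the library's allocation contract; it is not freed here (assumption).
func createAggProof(trace string) string {
	cTrace := C.CString(trace)
	defer C.free(unsafe.Pointer(cTrace))
	cProof := C.create_agg_proof(cTrace)
	return C.GoString(cProof)
}

func main() {
	params := C.CString("./test_params") // hypothetical paths
	seed := C.CString("./test_seed")
	defer C.free(unsafe.Pointer(params))
	defer C.free(unsafe.Pointer(seed))

	// One-time prover setup before any proof requests.
	C.init_prover(params, seed)

	proof := createAggProof(`{"blockTrace": {}}`) // hypothetical trace payload
	fmt.Println(len(proof))
}
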
236
common/types/batch.go Normal file
View File

@@ -0,0 +1,236 @@
package types
import (
"bufio"
"bytes"
"encoding/binary"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
abi "scroll-tech/bridge/abi"
)
// PublicInputHashConfig is the configuration of how to compute the public input hash.
type PublicInputHashConfig struct {
MaxTxNum int `json:"max_tx_num"`
PaddingTxHash common.Hash `json:"padding_tx_hash"`
}
const defaultMaxTxNum = 44
var defaultPaddingTxHash = [32]byte{}
// BatchData contains info of batch to be committed.
type BatchData struct {
Batch abi.IScrollChainBatch
TxHashes []common.Hash
TotalTxNum uint64
TotalL1TxNum uint64
TotalL2Gas uint64
// cache for the BatchHash
hash *common.Hash
// The config to compute the public input hash, or the block hash.
// If it is nil, the hash calculation will use `defaultMaxTxNum` and `defaultPaddingTxHash`.
piCfg *PublicInputHashConfig
}
// Timestamp returns the timestamp of the first block in the BlockData.
func (b *BatchData) Timestamp() uint64 {
if len(b.Batch.Blocks) == 0 {
return 0
}
return b.Batch.Blocks[0].Timestamp
}
// Hash calculates the hash of this batch.
func (b *BatchData) Hash() *common.Hash {
if b.hash != nil {
return b.hash
}
buf := make([]byte, 8)
hasher := crypto.NewKeccakState()
// 1. hash PrevStateRoot, NewStateRoot, WithdrawTrieRoot
// @todo: panic on error here.
_, _ = hasher.Write(b.Batch.PrevStateRoot[:])
_, _ = hasher.Write(b.Batch.NewStateRoot[:])
_, _ = hasher.Write(b.Batch.WithdrawTrieRoot[:])
// 2. hash all block contexts
for _, block := range b.Batch.Blocks {
// write BlockHash & ParentHash
_, _ = hasher.Write(block.BlockHash[:])
_, _ = hasher.Write(block.ParentHash[:])
// write BlockNumber
binary.BigEndian.PutUint64(buf, block.BlockNumber)
_, _ = hasher.Write(buf)
// write Timestamp
binary.BigEndian.PutUint64(buf, block.Timestamp)
_, _ = hasher.Write(buf)
// write BaseFee
var baseFee [32]byte
if block.BaseFee != nil {
baseFee = newByte32FromBytes(block.BaseFee.Bytes())
}
_, _ = hasher.Write(baseFee[:])
// write GasLimit
binary.BigEndian.PutUint64(buf, block.GasLimit)
_, _ = hasher.Write(buf)
// write NumTransactions
binary.BigEndian.PutUint16(buf[:2], block.NumTransactions)
_, _ = hasher.Write(buf[:2])
// write NumL1Messages
binary.BigEndian.PutUint16(buf[:2], block.NumL1Messages)
_, _ = hasher.Write(buf[:2])
}
// 3. add all tx hashes
for _, txHash := range b.TxHashes {
_, _ = hasher.Write(txHash[:])
}
// 4. append empty tx hash up to MaxTxNum
maxTxNum := defaultMaxTxNum
paddingTxHash := common.Hash(defaultPaddingTxHash)
if b.piCfg != nil {
maxTxNum = b.piCfg.MaxTxNum
paddingTxHash = b.piCfg.PaddingTxHash
}
for i := len(b.TxHashes); i < maxTxNum; i++ {
_, _ = hasher.Write(paddingTxHash[:])
}
b.hash = new(common.Hash)
_, _ = hasher.Read(b.hash[:])
return b.hash
}
// NewBatchData creates a BatchData given the parent batch information and the traces of the blocks
// included in this batch
func NewBatchData(parentBatch *BlockBatch, blocks []*WrappedBlock, piCfg *PublicInputHashConfig) *BatchData {
batchData := new(BatchData)
batch := &batchData.Batch
// set BatchIndex, ParentBatchHash
batch.BatchIndex = parentBatch.Index + 1
batch.ParentBatchHash = common.HexToHash(parentBatch.Hash)
batch.Blocks = make([]abi.IScrollChainBlockContext, len(blocks))
var batchTxDataBuf bytes.Buffer
batchTxDataWriter := bufio.NewWriter(&batchTxDataBuf)
for i, block := range blocks {
batchData.TotalTxNum += uint64(len(block.Transactions))
batchData.TotalL2Gas += block.Header.GasUsed
// set baseFee to 0 when it's nil in the block header
baseFee := block.Header.BaseFee
if baseFee == nil {
baseFee = big.NewInt(0)
}
batch.Blocks[i] = abi.IScrollChainBlockContext{
BlockHash: block.Header.Hash(),
ParentHash: block.Header.ParentHash,
BlockNumber: block.Header.Number.Uint64(),
Timestamp: block.Header.Time,
BaseFee: baseFee,
GasLimit: block.Header.GasLimit,
NumTransactions: uint16(len(block.Transactions)),
NumL1Messages: 0, // TODO: currently use 0, will re-enable after we use l2geth to include L1 messages
}
// fill in RLP-encoded transactions
for _, txData := range block.Transactions {
data, _ := hexutil.Decode(txData.Data)
// right now we only support legacy tx
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
var txLen [4]byte
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
_, _ = batchTxDataWriter.Write(txLen[:])
_, _ = batchTxDataWriter.Write(rlpTxData)
batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
}
if i == 0 {
batch.PrevStateRoot = common.HexToHash(parentBatch.StateRoot)
}
// set NewStateRoot & WithdrawTrieRoot from the last block
if i == len(blocks)-1 {
batch.NewStateRoot = block.Header.Root
batch.WithdrawTrieRoot = block.WithdrawTrieRoot
}
}
if err := batchTxDataWriter.Flush(); err != nil {
panic("Buffered I/O flush failed")
}
batch.L2Transactions = batchTxDataBuf.Bytes()
batchData.piCfg = piCfg
return batchData
}
// NewGenesisBatchData generates the batch that contains the genesis block.
func NewGenesisBatchData(genesisBlockTrace *WrappedBlock) *BatchData {
header := genesisBlockTrace.Header
if header.Number.Uint64() != 0 {
panic("invalid genesis block trace: block number is not 0")
}
batchData := new(BatchData)
batch := &batchData.Batch
// fill in batch information
batch.BatchIndex = 0
batch.Blocks = make([]abi.IScrollChainBlockContext, 1)
batch.NewStateRoot = header.Root
// PrevStateRoot, WithdrawTrieRoot, ParentBatchHash should all be 0
// L2Transactions should be empty
// fill in block context
batch.Blocks[0] = abi.IScrollChainBlockContext{
BlockHash: header.Hash(),
ParentHash: header.ParentHash,
BlockNumber: header.Number.Uint64(),
Timestamp: header.Time,
BaseFee: header.BaseFee,
GasLimit: header.GasLimit,
NumTransactions: 0,
NumL1Messages: 0,
}
return batchData
}
// newByte32FromBytes converts the bytes in big-endian encoding to 32 bytes in big-endian encoding
func newByte32FromBytes(b []byte) [32]byte {
var byte32 [32]byte
if len(b) > 32 {
b = b[len(b)-32:]
}
copy(byte32[32-len(b):], b)
return byte32
}

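For reference, the per-block portion of the public-input hash preimage written by `Hash` above is a fixed 124-byte encoding: BlockHash(32) || ParentHash(32) || BlockNumber(8) || Timestamp(8) || BaseFee(32, big-endian, left-padded) || GasLimit(8) || NumTransactions(2) || NumL1Messages(2). A minimal standalone sketch of that layout, assuming the same field ordering as the code above, is:

package main

import (
	"encoding/binary"
	"fmt"
)

// blockContextBytes mirrors the per-block bytes fed to the keccak hasher in
// BatchData.Hash. The total is 32+32+8+8+32+8+2+2 = 124 bytes per block.
func blockContextBytes(blockHash, parentHash, baseFee [32]byte,
	number, timestamp, gasLimit uint64, numTxs, numL1Msgs uint16) []byte {

	buf := make([]byte, 0, 124)
	var u64 [8]byte
	var u16 [2]byte

	buf = append(buf, blockHash[:]...)
	buf = append(buf, parentHash[:]...)
	binary.BigEndian.PutUint64(u64[:], number)
	buf = append(buf, u64[:]...)
	binary.BigEndian.PutUint64(u64[:], timestamp)
	buf = append(buf, u64[:]...)
	buf = append(buf, baseFee[:]...)
	binary.BigEndian.PutUint64(u64[:], gasLimit)
	buf = append(buf, u64[:]...)
	binary.BigEndian.PutUint16(u16[:], numTxs)
	buf = append(buf, u16[:]...)
	binary.BigEndian.PutUint16(u16[:], numL1Msgs)
	buf = append(buf, u16[:]...)
	return buf
}

func main() {
	b := blockContextBytes([32]byte{}, [32]byte{}, [32]byte{}, 51966, 123456789, 10000000, 1, 0)
	fmt.Println(len(b)) // 124
}
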
143
common/types/batch_test.go Normal file
View File

@@ -0,0 +1,143 @@
package types
import (
"encoding/json"
"math/big"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
abi "scroll-tech/bridge/abi"
)
func TestBatchHash(t *testing.T) {
txBytes := common.Hex2Bytes("02f8710582fd14808506e38dccc9825208944d496ccc28058b1d74b7a19541663e21154f9c848801561db11e24a43380c080a0d890606d7a35b2ab0f9b866d62c092d5b163f3e6a55537ae1485aac08c3f8ff7a023997be2d32f53e146b160fff0ba81e81dbb4491c865ab174d15c5b3d28c41ae")
tx := new(geth_types.Transaction)
if err := tx.UnmarshalBinary(txBytes); err != nil {
t.Fatalf("invalid tx hex string: %s", err)
}
batchData := new(BatchData)
batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
batchData.piCfg = &PublicInputHashConfig{
MaxTxNum: 4,
PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
}
batch := &batchData.Batch
batch.PrevStateRoot = common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000cafe")
block := abi.IScrollChainBlockContext{
BlockNumber: 51966,
Timestamp: 123456789,
BaseFee: new(big.Int).SetUint64(0),
GasLimit: 10000000000000000,
NumTransactions: 1,
NumL1Messages: 0,
}
batch.Blocks = append(batch.Blocks, block)
hash := batchData.Hash()
assert.Equal(t, *hash, common.HexToHash("0xa9f2ca3175794f91226a410ba1e60fff07a405c957562675c4149b77e659d805"))
// use a different tx hash
txBytes = common.Hex2Bytes("f8628001830f424094000000000000000000000000000000000000bbbb8080820a97a064e07cd8f939e2117724bdcbadc80dda421381cbc2a1f4e0d093d9cc5c5cf68ea03e264227f80852d88743cd9e43998f2746b619180366a87e4531debf9c3fa5dc")
tx = new(geth_types.Transaction)
if err := tx.UnmarshalBinary(txBytes); err != nil {
t.Fatalf("invalid tx hex string: %s", err)
}
batchData.TxHashes[0] = tx.Hash()
batchData.hash = nil // clear the cache
assert.Equal(t, *batchData.Hash(), common.HexToHash("0x398cb22bbfa1665c1b342b813267538a4c933d7f92d8bd9184aba0dd1122987b"))
}
func TestNewGenesisBatch(t *testing.T) {
genesisBlock := &geth_types.Header{
UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
Root: common.HexToHash("0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5"),
TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
Difficulty: big.NewInt(1),
Number: big.NewInt(0),
GasLimit: 940000000,
GasUsed: 0,
Time: 1639724192,
Extra: common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000214f8d488aa9ebf83e30bad45fb8f9c8ee2509f5511caff794753d07e9dfb218cfc233bb62d2c57022783094e1a7edb6f069f8424bb68496a0926b130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
BaseFee: big.NewInt(1000000000),
}
assert.Equal(
t,
genesisBlock.Hash().Hex(),
"0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
"wrong genesis block header",
)
blockTrace := &WrappedBlock{genesisBlock, nil, common.Hash{}}
batchData := NewGenesisBatchData(blockTrace)
t.Log(batchData.Batch.Blocks[0])
batchData.piCfg = &PublicInputHashConfig{
MaxTxNum: 25,
PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
}
assert.Equal(
t,
batchData.Hash().Hex(),
"0x65cf210e30f75cf8fd198df124255f73bc08d6324759e828a784fa938e7ac43d",
"wrong genesis batch hash",
)
}
func TestNewBatchData(t *testing.T) {
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
parentBatch := &BlockBatch{
Index: 1,
Hash: "0x0000000000000000000000000000000000000000",
StateRoot: "0x0000000000000000000000000000000000000000",
}
batchData1 := NewBatchData(parentBatch, []*WrappedBlock{wrappedBlock}, nil)
assert.NotNil(t, batchData1)
assert.NotNil(t, batchData1.Batch)
assert.Equal(t, "0xac4487c0d8f429dafda3c68cbb8983ac08af83c03c83c365d7df02864f80af37", batchData1.Hash().Hex())
templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock2))
parentBatch2 := &BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
StateRoot: batchData1.Batch.NewStateRoot.Hex(),
}
batchData2 := NewBatchData(parentBatch2, []*WrappedBlock{wrappedBlock2}, nil)
assert.NotNil(t, batchData2)
assert.NotNil(t, batchData2.Batch)
assert.Equal(t, "0x8f1447573740b3e75b979879866b8ad02eecf88e1946275eb8cf14ab95876efc", batchData2.Hash().Hex())
}
func TestBatchDataTimestamp(t *testing.T) {
// Test case 1: when the batch data contains no blocks.
assert.Equal(t, uint64(0), (&BatchData{}).Timestamp())
// Test case 2: when the batch data contains blocks.
batchData := &BatchData{
Batch: abi.IScrollChainBatch{
Blocks: []abi.IScrollChainBlockContext{
{Timestamp: 123456789},
{Timestamp: 234567891},
},
},
}
assert.Equal(t, uint64(123456789), batchData.Timestamp())
}

View File

@@ -6,13 +6,9 @@ import (
"math" "math"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/core/types"
) )
const nonZeroByteGas uint64 = 16
const zeroByteGas uint64 = 4
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash. // WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct { type WrappedBlock struct {
Header *types.Header `json:"header"` Header *types.Header `json:"header"`
@@ -26,7 +22,7 @@ type WrappedBlock struct {
func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 { func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
var lastQueueIndex *uint64 var lastQueueIndex *uint64
for _, txData := range w.Transactions { for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType { if txData.Type == 0x7E {
lastQueueIndex = &txData.Nonce lastQueueIndex = &txData.Nonce
} }
} }
@@ -63,74 +59,3 @@ func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error)
return bytes, nil return bytes, nil
} }
// EstimateL1CommitCalldataSize calculates the calldata size in l1 commit approximately.
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
// This needs to be adjusted in the future.
func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
var size uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
size += uint64(len(txData.Data))
}
return size
}
// EstimateL1CommitGas calculates the calldata gas in l1 commit approximately.
// TODO: This will need to be adjusted.
// The part added here is only the calldata cost,
// but we have execution cost for verifying blocks / chunks / batches and storing the batch hash.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
var total uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
data, _ := hexutil.Decode(txData.Data)
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
for _, b := range rlpTxData {
if b == 0 {
total += zeroByteGas
} else {
total += nonZeroByteGas
}
}
var txLen [4]byte
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
for _, b := range txLen {
if b == 0 {
total += zeroByteGas
} else {
total += nonZeroByteGas
}
}
}
return total
}
// L2TxsNum calculates the number of l2 txs.
func (w *WrappedBlock) L2TxsNum() uint64 {
var count uint64
for _, txData := range w.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
}
return count
}

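The estimator shown above (removed in this diff) prices every byte of the RLP-encoded transaction, plus its 4-byte big-endian length prefix, at the standard calldata rates of 16 gas per non-zero byte and 4 gas per zero byte. A minimal sketch of the same arithmetic on a raw byte slice, independent of the geth transaction types, is:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	nonZeroByteGas uint64 = 16 // calldata cost per non-zero byte
	zeroByteGas    uint64 = 4  // calldata cost per zero byte
)

// calldataGas reproduces the per-byte pricing used by EstimateL1CommitGas:
// the RLP payload and its 4-byte length prefix are both charged byte by byte.
func calldataGas(rlpTxData []byte) uint64 {
	var prefix [4]byte
	binary.BigEndian.PutUint32(prefix[:], uint32(len(rlpTxData)))

	var total uint64
	for _, b := range append(rlpTxData, prefix[:]...) {
		if b == 0 {
			total += zeroByteGas
		} else {
			total += nonZeroByteGas
		}
	}
	return total
}

func main() {
	// 3 non-zero payload bytes + 1 zero payload byte; the prefix 0x00000004
	// adds 3 zero bytes and 1 non-zero byte: 4*16 + 4*4 = 80 gas.
	fmt.Println(calldataGas([]byte{0xde, 0xad, 0xbe, 0x00}))
}
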
View File

@@ -2,12 +2,34 @@
package types package types
import ( import (
"database/sql"
"fmt" "fmt"
"time"
) )
// L1BlockStatus represents current l1 block processing status
type L1BlockStatus int
// GasOracleStatus represents current gas oracle processing status // GasOracleStatus represents current gas oracle processing status
type GasOracleStatus int type GasOracleStatus int
const (
// L1BlockUndefined : undefined l1 block status
L1BlockUndefined L1BlockStatus = iota
// L1BlockPending represents the l1 block status is pending
L1BlockPending
// L1BlockImporting represents the l1 block status is importing
L1BlockImporting
// L1BlockImported represents the l1 block status is imported
L1BlockImported
// L1BlockFailed represents the l1 block status is failed
L1BlockFailed
)
const ( const (
// GasOracleUndefined : undefined gas oracle status // GasOracleUndefined : undefined gas oracle status
GasOracleUndefined GasOracleStatus = iota GasOracleUndefined GasOracleStatus = iota
@@ -25,21 +47,18 @@ const (
GasOracleFailed GasOracleFailed
) )
func (s GasOracleStatus) String() string { // L1BlockInfo is structure of stored l1 block
switch s { type L1BlockInfo struct {
case GasOracleUndefined: Number uint64 `json:"number" db:"number"`
return "GasOracleUndefined" Hash string `json:"hash" db:"hash"`
case GasOraclePending: HeaderRLP string `json:"header_rlp" db:"header_rlp"`
return "GasOraclePending" BaseFee uint64 `json:"base_fee" db:"base_fee"`
case GasOracleImporting:
return "GasOracleImporting" BlockStatus L1BlockStatus `json:"block_status" db:"block_status"`
case GasOracleImported: GasOracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"`
return "GasOracleImported"
case GasOracleFailed: ImportTxHash sql.NullString `json:"import_tx_hash" db:"import_tx_hash"`
return "GasOracleFailed" OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"`
default:
return fmt.Sprintf("Undefined (%d)", int32(s))
}
} }
// MsgStatus represents current layer1 transaction processing status // MsgStatus represents current layer1 transaction processing status
@@ -68,14 +87,50 @@ const (
MsgRelayFailed MsgRelayFailed
) )
// L1Message is structure of stored layer1 bridge message
type L1Message struct {
QueueIndex uint64 `json:"queue_index" db:"queue_index"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Value string `json:"value" db:"value"`
Target string `json:"target" db:"target"`
Calldata string `json:"calldata" db:"calldata"`
GasLimit uint64 `json:"gas_limit" db:"gas_limit"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Status MsgStatus `json:"status" db:"status"`
}
// L2Message is structure of stored layer2 bridge message
type L2Message struct {
Nonce uint64 `json:"nonce" db:"nonce"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Value string `json:"value" db:"value"`
Target string `json:"target" db:"target"`
Calldata string `json:"calldata" db:"calldata"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Status MsgStatus `json:"status" db:"status"`
}
// BlockInfo is structure of stored `block_trace` without `trace`
type BlockInfo struct {
Number uint64 `json:"number" db:"number"`
Hash string `json:"hash" db:"hash"`
ParentHash string `json:"parent_hash" db:"parent_hash"`
BatchHash sql.NullString `json:"batch_hash" db:"batch_hash"`
TxNum uint64 `json:"tx_num" db:"tx_num"`
GasUsed uint64 `json:"gas_used" db:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" db:"block_timestamp"`
}
// RollerProveStatus is the roller prove status of a block batch (session) // RollerProveStatus is the roller prove status of a block batch (session)
type RollerProveStatus int32 type RollerProveStatus int32
const ( const (
// RollerProveStatusUndefined indicates an unknown roller proving status
RollerProveStatusUndefined RollerProveStatus = iota
// RollerAssigned indicates roller assigned but has not submitted proof // RollerAssigned indicates roller assigned but has not submitted proof
RollerAssigned RollerAssigned RollerProveStatus = iota
// RollerProofValid indicates roller has submitted valid proof // RollerProofValid indicates roller has submitted valid proof
RollerProofValid RollerProofValid
// RollerProofInvalid indicates roller has submitted invalid proof // RollerProofInvalid indicates roller has submitted invalid proof
@@ -95,25 +150,27 @@ func (s RollerProveStatus) String() string {
} }
} }
// ProverTaskFailureType the type of prover task failure // RollerStatus is the roller name and roller prove status
type ProverTaskFailureType int type RollerStatus struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
Status RollerProveStatus `json:"status"`
}
const ( // SessionInfo is assigned rollers info of a block batch (session)
// ProverTaskFailureTypeUndefined indicates an unknown roller failure type type SessionInfo struct {
ProverTaskFailureTypeUndefined ProverTaskFailureType = iota ID int `json:"id" db:"id"`
// ProverTaskFailureTypeTimeout prover task failure of timeout TaskID string `json:"task_id" db:"task_id"`
ProverTaskFailureTypeTimeout RollerPublicKey string `json:"roller_public_key" db:"roller_public_key"`
) ProveType int16 `json:"prove_type" db:"prove_type"`
RollerName string `json:"roller_name" db:"roller_name"`
func (r ProverTaskFailureType) String() string { ProvingStatus int16 `json:"proving_status" db:"proving_status"`
switch r { FailureType int16 `json:"failure_type" db:"failure_type"`
case ProverTaskFailureTypeUndefined: Reward uint64 `json:"reward" db:"reward"`
return "prover task failure undefined" Proof []byte `json:"proof" db:"proof"`
case ProverTaskFailureTypeTimeout: CreatedAt *time.Time `json:"created_at" db:"created_at"`
return "prover task failure timeout" UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
default: DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
return "illegal prover task failure type"
}
} }
// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted) // ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
@@ -124,6 +181,8 @@ const (
ProvingStatusUndefined ProvingStatus = iota ProvingStatusUndefined ProvingStatus = iota
// ProvingTaskUnassigned : proving_task is not assigned to be proved // ProvingTaskUnassigned : proving_task is not assigned to be proved
ProvingTaskUnassigned ProvingTaskUnassigned
// ProvingTaskSkipped : proving_task is skipped for proof generation
ProvingTaskSkipped
// ProvingTaskAssigned : proving_task is assigned to be proved // ProvingTaskAssigned : proving_task is assigned to be proved
ProvingTaskAssigned ProvingTaskAssigned
// ProvingTaskProved : proof has been returned by prover // ProvingTaskProved : proof has been returned by prover
@@ -138,6 +197,8 @@ func (ps ProvingStatus) String() string {
switch ps { switch ps {
case ProvingTaskUnassigned: case ProvingTaskUnassigned:
return "unassigned" return "unassigned"
case ProvingTaskSkipped:
return "skipped"
case ProvingTaskAssigned: case ProvingTaskAssigned:
return "assigned" return "assigned"
case ProvingTaskProved: case ProvingTaskProved:
@@ -147,32 +208,7 @@ func (ps ProvingStatus) String() string {
case ProvingTaskFailed: case ProvingTaskFailed:
return "failed" return "failed"
default: default:
return fmt.Sprintf("Undefined (%d)", int32(ps)) return "undefined"
}
}
// ChunkProofsStatus describes the proving status of chunks that belong to a batch.
type ChunkProofsStatus int
const (
// ChunkProofsStatusUndefined represents an undefined chunk proofs status
ChunkProofsStatusUndefined ChunkProofsStatus = iota
// ChunkProofsStatusPending means that some chunks that belong to this batch have not been proven
ChunkProofsStatusPending
// ChunkProofsStatusReady means that all chunks that belong to this batch have been proven
ChunkProofsStatusReady
)
func (s ChunkProofsStatus) String() string {
switch s {
case ChunkProofsStatusPending:
return "ChunkProofsStatusPending"
case ChunkProofsStatusReady:
return "ChunkProofsStatusReady"
default:
return fmt.Sprintf("Undefined (%d)", int32(s))
} }
} }
@@ -192,29 +228,51 @@ const (
RollupFinalizing RollupFinalizing
// RollupFinalized : finalize transaction is confirmed to layer1 // RollupFinalized : finalize transaction is confirmed to layer1
RollupFinalized RollupFinalized
// RollupFinalizationSkipped : batch finalization is skipped
RollupFinalizationSkipped
// RollupCommitFailed : rollup commit transaction confirmed but failed // RollupCommitFailed : rollup commit transaction confirmed but failed
RollupCommitFailed RollupCommitFailed
// RollupFinalizeFailed : rollup finalize transaction is confirmed but failed // RollupFinalizeFailed : rollup finalize transaction is confirmed but failed
RollupFinalizeFailed RollupFinalizeFailed
) )
func (s RollupStatus) String() string { // BlockBatch is structure of stored block_batch
switch s { type BlockBatch struct {
case RollupPending: Hash string `json:"hash" db:"hash"`
return "RollupPending" Index uint64 `json:"index" db:"index"`
case RollupCommitting: ParentHash string `json:"parent_hash" db:"parent_hash"`
return "RollupCommitting" StartBlockNumber uint64 `json:"start_block_number" db:"start_block_number"`
case RollupCommitted: StartBlockHash string `json:"start_block_hash" db:"start_block_hash"`
return "RollupCommitted" EndBlockNumber uint64 `json:"end_block_number" db:"end_block_number"`
case RollupFinalizing: EndBlockHash string `json:"end_block_hash" db:"end_block_hash"`
return "RollupFinalizing" StateRoot string `json:"state_root" db:"state_root"`
case RollupFinalized: TotalTxNum uint64 `json:"total_tx_num" db:"total_tx_num"`
return "RollupFinalized" TotalL1TxNum uint64 `json:"total_l1_tx_num" db:"total_l1_tx_num"`
case RollupCommitFailed: TotalL2Gas uint64 `json:"total_l2_gas" db:"total_l2_gas"`
return "RollupCommitFailed" ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
case RollupFinalizeFailed: Proof []byte `json:"proof" db:"proof"`
return "RollupFinalizeFailed" ProofTimeSec uint64 `json:"proof_time_sec" db:"proof_time_sec"`
default: RollupStatus RollupStatus `json:"rollup_status" db:"rollup_status"`
return fmt.Sprintf("Undefined (%d)", int32(s)) OracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"`
} CommitTxHash sql.NullString `json:"commit_tx_hash" db:"commit_tx_hash"`
FinalizeTxHash sql.NullString `json:"finalize_tx_hash" db:"finalize_tx_hash"`
OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" db:"prover_assigned_at"`
ProvedAt *time.Time `json:"proved_at" db:"proved_at"`
CommittedAt *time.Time `json:"committed_at" db:"committed_at"`
FinalizedAt *time.Time `json:"finalized_at" db:"finalized_at"`
}
// AggTask is a wrapper type around db AggProveTask type.
type AggTask struct {
ID string `json:"id" db:"id"`
StartBatchIndex uint64 `json:"start_batch_index" db:"start_batch_index"`
StartBatchHash string `json:"start_batch_hash" db:"start_batch_hash"`
EndBatchIndex uint64 `json:"end_batch_index" db:"end_batch_index"`
EndBatchHash string `json:"end_batch_hash" db:"end_batch_hash"`
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
Proof []byte `json:"proof" db:"proof"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
} }

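One side effect visible in the `ProvingStatus` hunk above: inserting `ProvingTaskSkipped` into the middle of an iota block shifts the numeric value of every later constant, which can matter because these statuses are stored as integers in the database (`proving_status` columns). A trimmed, hypothetical copy of the const block illustrating the renumbering:

package main

import "fmt"

// After the insertion the iota block assigns:
//   ProvingStatusUndefined = 0
//   ProvingTaskUnassigned  = 1
//   ProvingTaskSkipped     = 2
//   ProvingTaskAssigned    = 3 (was 2 before the insertion)
// Later statuses are omitted here for brevity.
type ProvingStatus int

const (
	ProvingStatusUndefined ProvingStatus = iota
	ProvingTaskUnassigned
	ProvingTaskSkipped
	ProvingTaskAssigned
	ProvingTaskProved
)

func main() {
	fmt.Println(ProvingTaskAssigned) // 3
}
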
Some files were not shown because too many files have changed in this diff.