Compare commits

...

9 Commits

Author SHA1 Message Date
colinlyguo
20232336ca tweak 2023-05-31 01:10:06 +08:00
colinlyguo
332b2557b6 tweak 2023-05-31 00:50:36 +08:00
Xi Lin
d743f2ce96 feat(contracts): add fallback contract to deployed in L2 (#522)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-05-30 08:31:12 +08:00
maskpp
798179ee6d fix(mock): fix mocked proof to pass SanityCheck (#524) 2023-05-30 08:23:57 +08:00
maskpp
b706cb69d3 fix(db): return same amount of status as hashes for GetRollupStatusByHashList (#523) 2023-05-29 15:46:01 +08:00
Xi Lin
aa24cdd1db feat(contracts): add chain id in public input hash (#517)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-05-28 09:41:38 +08:00
HAOYUatHZ
4398a36ee2 feat(db): update block_batch's proof (#520) 2023-05-27 15:53:38 +08:00
HAOYUatHZ
9a27499c03 feat(db): rename created_time to created_at & updated_time to `… (#502)
Co-authored-by: maskpp <maskpp266@gmail.com>
2023-05-27 06:25:19 +08:00
georgehao
9d2a41a231 refactor(bridge): update bridge orm and layout (#496)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-05-26 12:23:29 +08:00
94 changed files with 16954 additions and 1337 deletions

View File

@@ -1,4 +1,4 @@
package bridgeabi_test package bridgeabi
import ( import (
"math/big" "math/big"
@@ -6,37 +6,35 @@ import (
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
bridge_abi "scroll-tech/bridge/abi"
) )
func TestEventSignature(t *testing.T) { func TestEventSignature(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
assert.Equal(bridge_abi.L1SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e")) assert.Equal(L1SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(bridge_abi.L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c")) assert.Equal(L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(bridge_abi.L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f")) assert.Equal(L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(bridge_abi.L1CommitBatchEventSignature, common.HexToHash("2cdc615c74452778c0fb6184735e014c13aad2b62774fe0b09bd1dcc2cc14a62")) assert.Equal(L1CommitBatchEventSignature, common.HexToHash("2cdc615c74452778c0fb6184735e014c13aad2b62774fe0b09bd1dcc2cc14a62"))
assert.Equal(bridge_abi.L1FinalizeBatchEventSignature, common.HexToHash("6be443154c959a7a1645b4392b6fa97d8e8ab6e8fd853d7085e8867083737d79")) assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("6be443154c959a7a1645b4392b6fa97d8e8ab6e8fd853d7085e8867083737d79"))
assert.Equal(bridge_abi.L1QueueTransactionEventSignature, common.HexToHash("bdcc7517f8fe3db6506dfd910942d0bbecaf3d6a506dadea65b0d988e75b9439")) assert.Equal(L1QueueTransactionEventSignature, common.HexToHash("bdcc7517f8fe3db6506dfd910942d0bbecaf3d6a506dadea65b0d988e75b9439"))
assert.Equal(bridge_abi.L2SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e")) assert.Equal(L2SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(bridge_abi.L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c")) assert.Equal(L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(bridge_abi.L2FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f")) assert.Equal(L2FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(bridge_abi.L2ImportBlockEventSignature, common.HexToHash("a7823f45e1ee21f9530b77959b57507ad515a14fa9fa24d262ee80e79b2b5745")) assert.Equal(L2ImportBlockEventSignature, common.HexToHash("a7823f45e1ee21f9530b77959b57507ad515a14fa9fa24d262ee80e79b2b5745"))
assert.Equal(bridge_abi.L2AppendMessageEventSignature, common.HexToHash("faa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693")) assert.Equal(L2AppendMessageEventSignature, common.HexToHash("faa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693"))
} }
func TestPackRelayL2MessageWithProof(t *testing.T) { func TestPackRelayL2MessageWithProof(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
l1MessengerABI, err := bridge_abi.L1ScrollMessengerMetaData.GetAbi() l1MessengerABI, err := L1ScrollMessengerMetaData.GetAbi()
assert.NoError(err) assert.NoError(err)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{ proof := IL1ScrollMessengerL2MessageProof{
BatchHash: common.Hash{}, BatchHash: common.Hash{},
MerkleProof: make([]byte, 0), MerkleProof: make([]byte, 0),
} }
@@ -47,10 +45,10 @@ func TestPackRelayL2MessageWithProof(t *testing.T) {
func TestPackCommitBatch(t *testing.T) { func TestPackCommitBatch(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
scrollChainABI, err := bridge_abi.ScrollChainMetaData.GetAbi() scrollChainABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(err) assert.NoError(err)
header := bridge_abi.IScrollChainBlockContext{ header := IScrollChainBlockContext{
BlockHash: common.Hash{}, BlockHash: common.Hash{},
ParentHash: common.Hash{}, ParentHash: common.Hash{},
BlockNumber: 0, BlockNumber: 0,
@@ -61,8 +59,8 @@ func TestPackCommitBatch(t *testing.T) {
NumL1Messages: 0, NumL1Messages: 0,
} }
batch := bridge_abi.IScrollChainBatch{ batch := IScrollChainBatch{
Blocks: []bridge_abi.IScrollChainBlockContext{header}, Blocks: []IScrollChainBlockContext{header},
PrevStateRoot: common.Hash{}, PrevStateRoot: common.Hash{},
NewStateRoot: common.Hash{}, NewStateRoot: common.Hash{},
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
@@ -77,7 +75,7 @@ func TestPackCommitBatch(t *testing.T) {
func TestPackFinalizeBatchWithProof(t *testing.T) { func TestPackFinalizeBatchWithProof(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
l1RollupABI, err := bridge_abi.ScrollChainMetaData.GetAbi() l1RollupABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(err) assert.NoError(err)
proof := make([]*big.Int, 10) proof := make([]*big.Int, 10)
@@ -94,7 +92,7 @@ func TestPackFinalizeBatchWithProof(t *testing.T) {
func TestPackRelayL1Message(t *testing.T) { func TestPackRelayL1Message(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
l2MessengerABI, err := bridge_abi.L2ScrollMessengerMetaData.GetAbi() l2MessengerABI, err := L2ScrollMessengerMetaData.GetAbi()
assert.NoError(err) assert.NoError(err)
_, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0)) _, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0))
@@ -104,7 +102,7 @@ func TestPackRelayL1Message(t *testing.T) {
func TestPackSetL1BaseFee(t *testing.T) { func TestPackSetL1BaseFee(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
l1GasOracleABI, err := bridge_abi.L1GasPriceOracleMetaData.GetAbi() l1GasOracleABI, err := L1GasPriceOracleMetaData.GetAbi()
assert.NoError(err) assert.NoError(err)
baseFee := big.NewInt(2333) baseFee := big.NewInt(2333)
@@ -115,7 +113,7 @@ func TestPackSetL1BaseFee(t *testing.T) {
func TestPackSetL2BaseFee(t *testing.T) { func TestPackSetL2BaseFee(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
l2GasOracleABI, err := bridge_abi.L2GasPriceOracleMetaData.GetAbi() l2GasOracleABI, err := L2GasPriceOracleMetaData.GetAbi()
assert.NoError(err) assert.NoError(err)
baseFee := big.NewInt(2333) baseFee := big.NewInt(2333)
@@ -126,7 +124,7 @@ func TestPackSetL2BaseFee(t *testing.T) {
func TestPackImportBlock(t *testing.T) { func TestPackImportBlock(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
l1BlockContainerABI := bridge_abi.L1BlockContainerABI l1BlockContainerABI := L1BlockContainerABI
_, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, make([]byte, 0), false) _, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, make([]byte, 0), false)
assert.NoError(err) assert.NoError(err)

View File

@@ -11,36 +11,29 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/utils"
cutils "scroll-tech/common/utils"
) )
var ( var app *cli.App
app *cli.App
)
func init() { func init() {
// Set up event-watcher app info. // Set up event-watcher app info.
app = cli.NewApp() app = cli.NewApp()
app.Action = action app.Action = action
app.Name = "event-watcher" app.Name = "event-watcher"
app.Usage = "The Scroll Event Watcher" app.Usage = "The Scroll Event Watcher"
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...) app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx) return cutils.LogSetup(ctx)
} }
// Register `event-watcher-test` app for integration-test. // Register `event-watcher-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.EventWatcherApp) cutils.RegisterSimulation(app, cutils.EventWatcherApp)
} }
@@ -55,15 +48,13 @@ func action(ctx *cli.Context) error {
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
var ormFactory database.OrmFactory db, err := utils.InitDB(cfg.DBConfig)
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
err = ormFactory.Close() if err = utils.CloseDB(db); err != nil {
if err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -81,8 +72,8 @@ func action(ctx *cli.Context) error {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err) log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err return err
} }
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory) l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory) l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
go cutils.Loop(subCtx, 10*time.Second, func() { go cutils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil { if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {

View File

@@ -11,27 +11,21 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/utils" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/watcher" "scroll-tech/bridge/internal/utils"
cutils "scroll-tech/common/utils"
) )
var ( var app *cli.App
app *cli.App
)
func init() { func init() {
// Set up gas-oracle app info. // Set up gas-oracle app info.
app = cli.NewApp() app = cli.NewApp()
app.Action = action app.Action = action
app.Name = "gas-oracle" app.Name = "gas-oracle"
app.Usage = "The Scroll Gas Oracle" app.Usage = "The Scroll Gas Oracle"
@@ -39,11 +33,9 @@ func init() {
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...) app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx) return cutils.LogSetup(ctx)
} }
// Register `gas-oracle-test` app for integration-test. // Register `gas-oracle-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.GasOracleApp) cutils.RegisterSimulation(app, cutils.GasOracleApp)
} }
@@ -57,18 +49,17 @@ func action(ctx *cli.Context) error {
} }
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
var ormFactory database.OrmFactory db, err := utils.InitDB(cfg.DBConfig)
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
err = ormFactory.Close() if err = utils.CloseDB(db); err != nil {
if err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
// Start metrics server. // Start metrics server.
metrics.Serve(subCtx, ctx) metrics.Serve(subCtx, ctx)
@@ -85,14 +76,14 @@ func action(ctx *cli.Context) error {
return err return err
} }
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory) l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig) l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil { if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err) log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err return err
} }
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig) l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
if err != nil { if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err) log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err return err

View File

@@ -11,7 +11,7 @@ import (
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
) )
// MockApp mockApp-test client manager. // MockApp mockApp-test client manager.

View File

@@ -11,25 +11,20 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/utils"
cutils "scroll-tech/common/utils"
) )
var ( var app *cli.App
app *cli.App
)
func init() { func init() {
// Set up message-relayer app info. // Set up message-relayer app info.
app = cli.NewApp() app = cli.NewApp()
app.Action = action app.Action = action
app.Name = "message-relayer" app.Name = "message-relayer"
app.Usage = "The Scroll Message Relayer" app.Usage = "The Scroll Message Relayer"
@@ -37,11 +32,9 @@ func init() {
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...) app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx) return cutils.LogSetup(ctx)
} }
// Register `message-relayer-test` app for integration-test. // Register `message-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.MessageRelayerApp) cutils.RegisterSimulation(app, cutils.MessageRelayerApp)
} }
@@ -53,18 +46,16 @@ func action(ctx *cli.Context) error {
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
} }
subCtx, cancel := context.WithCancel(ctx.Context)
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
var ormFactory database.OrmFactory db, err := utils.InitDB(cfg.DBConfig)
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
err = ormFactory.Close() if err = utils.CloseDB(db); err != nil {
if err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -79,12 +70,12 @@ func action(ctx *cli.Context) error {
return err return err
} }
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig) l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil { if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err) log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err return err
} }
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig) l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
if err != nil { if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err) log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err return err

View File

@@ -11,34 +11,27 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/utils" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/watcher" "scroll-tech/bridge/internal/utils"
cutils "scroll-tech/common/utils"
) )
var ( var app *cli.App
app *cli.App
)
func init() { func init() {
// Set up rollup-relayer app info. // Set up rollup-relayer app info.
app = cli.NewApp() app = cli.NewApp()
app.Action = action app.Action = action
app.Name = "rollup-relayer" app.Name = "rollup-relayer"
app.Usage = "The Scroll Rollup Relayer" app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...) app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx) return cutils.LogSetup(ctx)
} }
@@ -55,16 +48,14 @@ func action(ctx *cli.Context) error {
} }
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
// init db connection db, err := utils.InitDB(cfg.DBConfig)
var ormFactory database.OrmFactory if err != nil {
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
err = ormFactory.Close() if err = utils.CloseDB(db); err != nil {
if err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -79,19 +70,19 @@ func action(ctx *cli.Context) error {
return err return err
} }
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig) l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
if err != nil { if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err) log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err return err
} }
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, l2relayer, ormFactory) batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, l2relayer, db)
if err != nil { if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err) log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err return err
} }
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory) l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
// Watcher loop to fetch missing blocks // Watcher loop to fetch missing blocks
go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) { go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {

View File

@@ -4,13 +4,18 @@ go 1.18
require ( require (
github.com/agiledragon/gomonkey/v2 v2.9.0 github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.7
github.com/orcaman/concurrent-map v1.0.0 github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1 github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/pressly/goose/v3 v3.7.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230508165858-27a3830afa61 github.com/scroll-tech/go-ethereum v1.10.14-0.20230508165858-27a3830afa61
github.com/smartystreets/goconvey v1.8.0 github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.2 github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
golang.org/x/sync v0.1.0 golang.org/x/sync v0.1.0
gorm.io/driver/postgres v1.5.0
gorm.io/gorm v1.25.1
modernc.org/mathutil v1.4.1 modernc.org/mathutil v1.4.1
) )
@@ -21,19 +26,27 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect github.com/google/uuid v1.3.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/gorilla/websocket v1.5.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.2 // indirect
github.com/huin/goupnp v1.0.3 // indirect github.com/huin/goupnp v1.0.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/kr/pretty v0.3.1 // indirect github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect github.com/rivo/uniseg v0.4.4 // indirect
@@ -51,8 +64,9 @@ require (
github.com/yusufpapurcu/wmi v1.2.2 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.9.0 // indirect golang.org/x/crypto v0.9.0 // indirect
golang.org/x/sys v0.8.0 // indirect golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect golang.org/x/time v0.3.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect golang.org/x/tools v0.8.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
) )

View File

@@ -29,10 +29,14 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -43,21 +47,38 @@ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -65,6 +86,9 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -72,8 +96,10 @@ github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp9
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -87,6 +113,8 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf8=
github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
@@ -95,6 +123,7 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
@@ -118,8 +147,11 @@ github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -133,37 +165,68 @@ github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bC
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
@@ -173,5 +236,19 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64=
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
modernc.org/cc/v3 v3.36.1 h1:CICrjwr/1M4+6OQ4HJZ/AHxjcwe67r5vPUF518MkO8A=
modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs=
modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8= modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=

View File

@@ -4,15 +4,13 @@ import (
"encoding/json" "encoding/json"
"os" "os"
"path/filepath" "path/filepath"
"scroll-tech/database"
) )
// Config load configuration items. // Config load configuration items.
type Config struct { type Config struct {
L1Config *L1Config `json:"l1_config"` L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"` L2Config *L2Config `json:"l2_config"`
DBConfig *database.DBConfig `json:"db_config"` DBConfig *DBConfig `json:"db_config"`
} }
// NewConfig returns a new instance of Config. // NewConfig returns a new instance of Config.

View File

@@ -12,7 +12,7 @@ import (
func TestConfig(t *testing.T) { func TestConfig(t *testing.T) {
t.Run("Success Case", func(t *testing.T) { t.Run("Success Case", func(t *testing.T) {
cfg, err := NewConfig("../config.json") cfg, err := NewConfig("../../conf/config.json")
assert.NoError(t, err) assert.NoError(t, err)
assert.Len(t, cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys, 1) assert.Len(t, cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys, 1)

View File

@@ -0,0 +1,33 @@
package config
import (
"encoding/json"
"os"
"path/filepath"
)
// DBConfig db config
type DBConfig struct {
// data source name
DSN string `json:"dsn"`
DriverName string `json:"driver_name"`
MaxOpenNum int `json:"maxOpenNum"`
MaxIdleNum int `json:"maxIdleNum"`
}
// NewDBConfig returns a new instance of Config.
func NewDBConfig(file string) (*DBConfig, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &DBConfig{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
return cfg, nil
}

View File

@@ -5,7 +5,7 @@ import (
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"scroll-tech/common/types" "scroll-tech/bridge/internal/types"
) )
// L2Config loads l2geth configuration items. // L2Config loads l2geth configuration items.

View File

@@ -0,0 +1,20 @@
package relayer
import "errors"
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
defaultL2MessageRelayMinGasLimit = 200000
)
var (
// ErrExecutionRevertedMessageExpired error of Message expired
ErrExecutionRevertedMessageExpired = errors.New("execution reverted: Message expired")
// ErrExecutionRevertedAlreadySuccessExecuted error of Message was already successfully executed
ErrExecutionRevertedAlreadySuccessExecuted = errors.New("execution reverted: Message was already successfully executed")
)

View File

@@ -6,27 +6,25 @@ import (
"math/big" "math/big"
// not sure if this will make problems when relay with l1geth // not sure if this will make problems when relay with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics" gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/types"
bridge_abi "scroll-tech/bridge/abi" bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/sender" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
) )
var ( var (
bridgeL1MsgsRelayedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/relayed/total", metrics.ScrollRegistry) bridgeL1MsgsRelayedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/total", metrics.ScrollRegistry)
bridgeL1MsgsRelayedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry) bridgeL1MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
) )
// Layer1Relayer is responsible for // Layer1Relayer is responsible for
@@ -38,7 +36,6 @@ var (
type Layer1Relayer struct { type Layer1Relayer struct {
ctx context.Context ctx context.Context
db database.OrmFactory
cfg *config.RelayerConfig cfg *config.RelayerConfig
// channel used to communicate with transaction sender // channel used to communicate with transaction sender
@@ -53,10 +50,13 @@ type Layer1Relayer struct {
lastGasPrice uint64 lastGasPrice uint64
minGasPrice uint64 minGasPrice uint64
gasPriceDiff uint64 gasPriceDiff uint64
l1MessageOrm *orm.L1Message
l1Block *orm.L1Block
} }
// NewLayer1Relayer will return a new instance of Layer1RelayerClient // NewLayer1Relayer will return a new instance of Layer1RelayerClient
func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer1Relayer, error) { func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys) messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil { if err != nil {
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey) addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
@@ -88,14 +88,15 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
} }
l1Relayer := &Layer1Relayer{ l1Relayer := &Layer1Relayer{
ctx: ctx, ctx: ctx,
db: db, l1MessageOrm: orm.NewL1Message(db),
l1Block: orm.NewL1Block(db),
messageSender: messageSender, messageSender: messageSender,
l2MessengerABI: bridge_abi.L2ScrollMessengerABI, l2MessengerABI: bridgeAbi.L2ScrollMessengerABI,
gasOracleSender: gasOracleSender, gasOracleSender: gasOracleSender,
l1GasOracleABI: bridge_abi.L1GasPriceOracleABI, l1GasOracleABI: bridgeAbi.L1GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay, minGasLimitForMessageRelay: minGasLimitForMessageRelay,
@@ -112,7 +113,7 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain // ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
func (r *Layer1Relayer) ProcessSavedEvents() { func (r *Layer1Relayer) ProcessSavedEvents() {
// msgs are sorted by nonce in increasing order // msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL1MessagesByStatus(types.MsgPending, 100) msgs, err := r.l1MessageOrm.GetL1MessagesByStatus(types.MsgPending, 100)
if err != nil { if err != nil {
log.Error("Failed to fetch unprocessed L1 messages", "err", err) log.Error("Failed to fetch unprocessed L1 messages", "err", err)
return return
@@ -123,7 +124,8 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
} }
for _, msg := range msgs { for _, msg := range msgs {
if err = r.processSavedEvent(msg); err != nil { tmpMsg := msg
if err = r.processSavedEvent(&tmpMsg); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) { if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err) log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
} }
@@ -132,15 +134,15 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
} }
} }
func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error { func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
calldata := common.Hex2Bytes(msg.Calldata) calldata := common.Hex2Bytes(msg.Calldata)
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), calldata, r.minGasLimitForMessageRelay) hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), calldata, r.minGasLimitForMessageRelay)
if err != nil && err.Error() == "execution reverted: Message expired" { if err != nil && errors.Is(err, ErrExecutionRevertedMessageExpired) {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired) return r.l1MessageOrm.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
} }
if err != nil && err.Error() == "execution reverted: Message was already successfully executed" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed) if err != nil && errors.Is(err, ErrExecutionRevertedAlreadySuccessExecuted) {
return r.l1MessageOrm.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
} }
if err != nil { if err != nil {
return err return err
@@ -148,7 +150,7 @@ func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error {
bridgeL1MsgsRelayedTotalCounter.Inc(1) bridgeL1MsgsRelayedTotalCounter.Inc(1)
log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash) log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
err = r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String()) err = r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
if err != nil { if err != nil {
log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.msgHash", msg.MsgHash, "msg.height", msg.Height, "err", err) log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.msgHash", msg.MsgHash, "msg.height", msg.Height, "err", err)
} }
@@ -157,17 +159,17 @@ func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error {
// ProcessGasPriceOracle imports gas price to layer2 // ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() { func (r *Layer1Relayer) ProcessGasPriceOracle() {
latestBlockHeight, err := r.db.GetLatestL1BlockHeight() latestBlockHeight, err := r.l1Block.GetLatestL1BlockHeight()
if err != nil { if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err) log.Warn("Failed to fetch latest L1 block height from db", "err", err)
return return
} }
blocks, err := r.db.GetL1BlockInfos(map[string]interface{}{ blocks, err := r.l1Block.GetL1Blocks(map[string]interface{}{
"number": latestBlockHeight, "number": latestBlockHeight,
}) })
if err != nil { if err != nil {
log.Error("Failed to GetL1BlockInfos from db", "height", latestBlockHeight, "err", err) log.Error("Failed to GetL1Blocks from db", "height", latestBlockHeight, "err", err)
return return
} }
if len(blocks) != 1 { if len(blocks) != 1 {
@@ -176,7 +178,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
} }
block := blocks[0] block := blocks[0]
if block.GasOracleStatus == types.GasOraclePending { if types.GasOracleStatus(block.GasOracleStatus) == types.GasOraclePending {
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
// last is undefine or (block.BaseFee >= minGasPrice && exceed diff) // last is undefine or (block.BaseFee >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (block.BaseFee >= r.minGasPrice && (block.BaseFee >= r.lastGasPrice+expectedDelta || block.BaseFee <= r.lastGasPrice-expectedDelta)) { if r.lastGasPrice == 0 || (block.BaseFee >= r.minGasPrice && (block.BaseFee >= r.lastGasPrice+expectedDelta || block.BaseFee <= r.lastGasPrice-expectedDelta)) {
@@ -195,7 +197,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return return
} }
err = r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String()) err = r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
if err != nil { if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err) log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return return
@@ -214,14 +216,14 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case cfm := <-r.messageSender.ConfirmChan(): case cfm := <-r.messageSender.ConfirmChan():
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1) bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
if !cfm.IsSuccessful { if !cfm.IsSuccessful {
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String()) err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err) log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
} }
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm) log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else { } else {
// @todo handle db error // @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String()) err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err) log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
} }
@@ -230,14 +232,14 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case cfm := <-r.gasOracleSender.ConfirmChan(): case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful { if !cfm.IsSuccessful {
// @discuss: maybe make it pending again? // @discuss: maybe make it pending again?
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String()) err := r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err) log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
} }
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm) log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else { } else {
// @todo handle db error // @todo handle db error
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String()) err := r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err) log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
} }

View File

@@ -9,20 +9,20 @@ import (
"github.com/agiledragon/gomonkey/v2" "github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/smartystreets/goconvey/convey" "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/bridge/sender" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/orm/migrate"
bridgeUtils "scroll-tech/bridge/internal/utils"
) )
var ( var (
templateL1Message = []*types.L1Message{ templateL1Message = []*orm.L1Message{
{ {
QueueIndex: 1, QueueIndex: 1,
MsgHash: "msg_hash1", MsgHash: "msg_hash1",
@@ -48,59 +48,58 @@ var (
} }
) )
func setupL1RelayerDB(t *testing.T) *gorm.DB {
db, err := bridgeUtils.InitDB(cfg.DBConfig)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
return db
}
// testCreateNewRelayer test create new relayer instance and stop // testCreateNewRelayer test create new relayer instance and stop
func testCreateNewL1Relayer(t *testing.T) { func testCreateNewL1Relayer(t *testing.T) {
// Create db handler and reset db. db := setupL1RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig) relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, relayer) assert.NotNil(t, relayer)
} }
func testL1RelayerProcessSaveEvents(t *testing.T) { func testL1RelayerProcessSaveEvents(t *testing.T) {
// Create db handler and reset db. db := setupL1RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err) l1MessageOrm := orm.NewL1Message(db)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig) relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, db.SaveL1Messages(context.Background(), templateL1Message)) assert.NotNil(t, relayer)
assert.NoError(t, l1MessageOrm.SaveL1Messages(context.Background(), templateL1Message))
relayer.ProcessSavedEvents() relayer.ProcessSavedEvents()
msg1, err := db.GetL1MessageByQueueIndex(1) msg1, err := l1MessageOrm.GetL1MessageByQueueIndex(1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg1.Status, types.MsgSubmitted) assert.Equal(t, types.MsgStatus(msg1.Status), types.MsgSubmitted)
msg2, err := db.GetL1MessageByQueueIndex(2) msg2, err := l1MessageOrm.GetL1MessageByQueueIndex(2)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg2.Status, types.MsgSubmitted) assert.Equal(t, types.MsgStatus(msg2.Status), types.MsgSubmitted)
} }
func testL1RelayerMsgConfirm(t *testing.T) { func testL1RelayerMsgConfirm(t *testing.T) {
// Set up the database and defer closing it. db := setupL1RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db)
l1Messages := []*orm.L1Message{
{MsgHash: "msg-1", QueueIndex: 0},
{MsgHash: "msg-2", QueueIndex: 1},
}
err := l1MessageOrm.SaveL1Messages(context.Background(), l1Messages)
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
assert.NoError(t, db.SaveL1Messages(context.Background(),
[]*types.L1Message{
{MsgHash: "msg-1", QueueIndex: 0},
{MsgHash: "msg-2", QueueIndex: 1},
}))
// Create and set up the Layer1 Relayer. // Create and set up the Layer1 Relayer.
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig) l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, l1Relayer)
// Simulate message confirmations. // Simulate message confirmations.
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{ l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
@@ -114,27 +113,25 @@ func testL1RelayerMsgConfirm(t *testing.T) {
// Check the database for the updated status using TryTimes. // Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool { ok := utils.TryTimes(5, func() bool {
msg1, err1 := db.GetL1MessageByMsgHash("msg-1") msg1, err1 := l1MessageOrm.GetL1MessageByMsgHash("msg-1")
msg2, err2 := db.GetL1MessageByMsgHash("msg-2") msg2, err2 := l1MessageOrm.GetL1MessageByMsgHash("msg-2")
return err1 == nil && msg1.Status == types.MsgConfirmed && return err1 == nil && types.MsgStatus(msg1.Status) == types.MsgConfirmed &&
err2 == nil && msg2.Status == types.MsgRelayFailed err2 == nil && types.MsgStatus(msg2.Status) == types.MsgRelayFailed
}) })
assert.True(t, ok) assert.True(t, ok)
} }
func testL1RelayerGasOracleConfirm(t *testing.T) { func testL1RelayerGasOracleConfirm(t *testing.T) {
// Set up the database and defer closing it. db := setupL1RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err) l1BlockOrm := orm.NewL1Block(db)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l1Block := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1},
}
// Insert test data. // Insert test data.
assert.NoError(t, db.InsertL1Blocks(context.Background(), assert.NoError(t, l1BlockOrm.InsertL1Blocks(context.Background(), l1Block))
[]*types.L1BlockInfo{
{Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1},
}))
// Create and set up the Layer2 Relayer. // Create and set up the Layer2 Relayer.
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
@@ -142,7 +139,6 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
defer cancel() defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig) l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, l1Relayer)
// Simulate message confirmations. // Simulate message confirmations.
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{ l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
@@ -156,19 +152,17 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
// Check the database for the updated status using TryTimes. // Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool { ok := utils.TryTimes(5, func() bool {
msg1, err1 := db.GetL1BlockInfos(map[string]interface{}{"hash": "gas-oracle-1"}) msg1, err1 := l1BlockOrm.GetL1Blocks(map[string]interface{}{"hash": "gas-oracle-1"})
msg2, err2 := db.GetL1BlockInfos(map[string]interface{}{"hash": "gas-oracle-2"}) msg2, err2 := l1BlockOrm.GetL1Blocks(map[string]interface{}{"hash": "gas-oracle-2"})
return err1 == nil && len(msg1) == 1 && msg1[0].GasOracleStatus == types.GasOracleImported && return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported &&
err2 == nil && len(msg2) == 1 && msg2[0].GasOracleStatus == types.GasOracleFailed err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleFailed
}) })
assert.True(t, ok) assert.True(t, ok)
} }
func testL1RelayerProcessGasPriceOracle(t *testing.T) { func testL1RelayerProcessGasPriceOracle(t *testing.T) {
db, err := database.NewOrmFactory(cfg.DBConfig) db := setupL1RelayerDB(t)
assert.NoError(t, err) defer bridgeUtils.CloseDB(db)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@@ -177,31 +171,32 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, l1Relayer) assert.NotNil(t, l1Relayer)
var l1BlockOrm *orm.L1Block
convey.Convey("GetLatestL1BlockHeight failure", t, func() { convey.Convey("GetLatestL1BlockHeight failure", t, func() {
targetErr := errors.New("GetLatestL1BlockHeight error") targetErr := errors.New("GetLatestL1BlockHeight error")
patchGuard := gomonkey.ApplyMethodFunc(db, "GetLatestL1BlockHeight", func() (uint64, error) { patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func() (uint64, error) {
return 0, targetErr return 0, targetErr
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
}) })
patchGuard := gomonkey.ApplyMethodFunc(db, "GetLatestL1BlockHeight", func() (uint64, error) { patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func() (uint64, error) {
return 100, nil return 100, nil
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
convey.Convey("GetL1BlockInfos failure", t, func() { convey.Convey("GetL1Blocks failure", t, func() {
targetErr := errors.New("GetL1BlockInfos error") targetErr := errors.New("GetL1Blocks error")
patchGuard.ApplyMethodFunc(db, "GetL1BlockInfos", func(fields map[string]interface{}, args ...string) ([]*types.L1BlockInfo, error) { patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
return nil, targetErr return nil, targetErr
}) })
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
}) })
convey.Convey("Block not exist", t, func() { convey.Convey("Block not exist", t, func() {
patchGuard.ApplyMethodFunc(db, "GetL1BlockInfos", func(fields map[string]interface{}, args ...string) ([]*types.L1BlockInfo, error) { patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
tmpInfo := []*types.L1BlockInfo{ tmpInfo := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0}, {Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1}, {Hash: "gas-oracle-2", Number: 1},
} }
@@ -210,12 +205,12 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
}) })
patchGuard.ApplyMethodFunc(db, "GetL1BlockInfos", func(fields map[string]interface{}, args ...string) ([]*types.L1BlockInfo, error) { patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
tmpInfo := []*types.L1BlockInfo{ tmpInfo := []orm.L1Block{
{ {
Hash: "gas-oracle-1", Hash: "gas-oracle-1",
Number: 0, Number: 0,
GasOracleStatus: types.GasOraclePending, GasOracleStatus: int(types.GasOraclePending),
}, },
} }
return tmpInfo, nil return tmpInfo, nil
@@ -247,13 +242,13 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
convey.Convey("UpdateL1GasOracleStatusAndOracleTxHash failure", t, func() { convey.Convey("UpdateL1GasOracleStatusAndOracleTxHash failure", t, func() {
targetErr := errors.New("UpdateL1GasOracleStatusAndOracleTxHash failure") targetErr := errors.New("UpdateL1GasOracleStatusAndOracleTxHash failure")
patchGuard.ApplyMethodFunc(db, "UpdateL1GasOracleStatusAndOracleTxHash", func(context.Context, string, types.GasOracleStatus, string) error { patchGuard.ApplyMethodFunc(l1BlockOrm, "UpdateL1GasOracleStatusAndOracleTxHash", func(context.Context, string, types.GasOracleStatus, string) error {
return targetErr return targetErr
}) })
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
}) })
patchGuard.ApplyMethodFunc(db, "UpdateL1GasOracleStatusAndOracleTxHash", func(context.Context, string, types.GasOracleStatus, string) error { patchGuard.ApplyMethodFunc(l1BlockOrm, "UpdateL1GasOracleStatusAndOracleTxHash", func(context.Context, string, types.GasOracleStatus, string) error {
return nil return nil
}) })

View File

@@ -3,7 +3,6 @@ package relayer
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"math/big" "math/big"
"runtime" "runtime"
"sync" "sync"
@@ -13,29 +12,30 @@ import (
"github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics" gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"gorm.io/gorm"
"modernc.org/mathutil" "modernc.org/mathutil"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/database" bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/config"
bridge_abi "scroll-tech/bridge/abi" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/sender" bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/utils" "scroll-tech/bridge/internal/utils"
) )
var ( var (
bridgeL2MsgsRelayedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/relayed/total", metrics.ScrollRegistry) bridgeL2MsgsRelayedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry) bridgeL2BatchesFinalizedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry) bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
bridgeL2MsgsRelayedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/relayed/confirmed/total", metrics.ScrollRegistry) bridgeL2MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry) bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry) bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesSkippedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry) bridgeL2BatchesSkippedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
) )
// Layer2Relayer is responsible for // Layer2Relayer is responsible for
@@ -49,7 +49,10 @@ type Layer2Relayer struct {
l2Client *ethclient.Client l2Client *ethclient.Client
db database.OrmFactory blockBatchOrm *orm.BlockBatch
blockTraceOrm *orm.BlockTrace
l2MessageOrm *orm.L2Message
cfg *config.RelayerConfig cfg *config.RelayerConfig
messageSender *sender.Sender messageSender *sender.Sender
@@ -81,7 +84,7 @@ type Layer2Relayer struct {
} }
// NewLayer2Relayer will return a new instance of Layer2RelayerClient // NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) { func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
// @todo use different sender for relayer, block commit and proof finalize // @todo use different sender for relayer, block commit and proof finalize
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys) messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil { if err != nil {
@@ -118,18 +121,21 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db databa
layer2Relayer := &Layer2Relayer{ layer2Relayer := &Layer2Relayer{
ctx: ctx, ctx: ctx,
db: db,
blockBatchOrm: orm.NewBlockBatch(db),
l2MessageOrm: orm.NewL2Message(db),
blockTraceOrm: orm.NewBlockTrace(db),
l2Client: l2Client, l2Client: l2Client,
messageSender: messageSender, messageSender: messageSender,
l1MessengerABI: bridge_abi.L1ScrollMessengerABI, l1MessengerABI: bridgeAbi.L1ScrollMessengerABI,
rollupSender: rollupSender, rollupSender: rollupSender,
l1RollupABI: bridge_abi.ScrollChainABI, l1RollupABI: bridgeAbi.ScrollChainABI,
gasOracleSender: gasOracleSender, gasOracleSender: gasOracleSender,
l2GasOracleABI: bridge_abi.L2GasPriceOracleABI, l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay, minGasLimitForMessageRelay: minGasLimitForMessageRelay,
@@ -149,19 +155,23 @@ const processMsgLimit = 100
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain // ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
func (r *Layer2Relayer) ProcessSavedEvents() { func (r *Layer2Relayer) ProcessSavedEvents() {
batch, err := r.db.GetLatestFinalizedBatch() batch, err := r.blockBatchOrm.GetLatestBatchByRollupStatus([]types.RollupStatus{types.RollupFinalized})
if err != nil { if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err) log.Error("GetLatestFinalizedBatch failed", "err", err)
return return
} }
// msgs are sorted by nonce in increasing order // msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2Messages( fields := map[string]interface{}{
map[string]interface{}{"status": types.MsgPending}, "status": int(types.MsgPending),
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber), "height <= (?)": batch.EndBlockNumber,
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit), }
) orderByList := []string{
"nonce ASC",
}
limit := processMsgLimit
msgs, err := r.l2MessageOrm.GetL2Messages(fields, orderByList, limit)
if err != nil { if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err) log.Error("Failed to fetch unprocessed L2 messages", "err", err)
return return
@@ -177,7 +187,7 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
for _, msg := range msgs[:size] { for _, msg := range msgs[:size] {
msg := msg msg := msg
g.Go(func() error { g.Go(func() error {
return r.processSavedEvent(msg) return r.processSavedEvent(&msg)
}) })
} }
if err := g.Wait(); err != nil { if err := g.Wait(); err != nil {
@@ -189,24 +199,28 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
} }
} }
func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error { func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
// @todo fetch merkle proof from l2geth // @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height) log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
// Get the block info that contains the message // Get the block info that contains the message
blockInfos, err := r.db.GetL2BlockInfos(map[string]interface{}{"number": msg.Height}) blockInfos, err := r.blockTraceOrm.GetL2BlockInfos(map[string]interface{}{"number": msg.Height}, nil, 0)
if err != nil { if err != nil {
log.Error("Failed to GetL2BlockInfos from DB", "number", msg.Height) log.Error("Failed to GetL2BlockInfos from DB", "number", msg.Height)
} }
if len(blockInfos) == 0 {
return errors.New("get block trace len is 0, exit")
}
blockInfo := blockInfos[0] blockInfo := blockInfos[0]
if !blockInfo.BatchHash.Valid { if blockInfo.BatchHash == "" {
log.Error("Block has not been batched yet", "number", blockInfo.Number, "msg.nonce", msg.Nonce) log.Error("Block has not been batched yet", "number", blockInfo.Number, "msg.nonce", msg.Nonce)
return nil return nil
} }
// TODO: rebuild the withdraw trie to generate the merkle proof // TODO: rebuild the withdraw trie to generate the merkle proof
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{ proof := bridgeAbi.IL1ScrollMessengerL2MessageProof{
BatchHash: common.HexToHash(blockInfo.BatchHash.String), BatchHash: common.HexToHash(blockInfo.BatchHash),
MerkleProof: make([]byte, 0), MerkleProof: make([]byte, 0),
} }
from := common.HexToAddress(msg.Sender) from := common.HexToAddress(msg.Sender)
@@ -227,11 +241,11 @@ func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
} }
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, r.minGasLimitForMessageRelay) hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, r.minGasLimitForMessageRelay)
if err != nil && err.Error() == "execution reverted: Message expired" { if err != nil && errors.Is(err, ErrExecutionRevertedMessageExpired) {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired) return r.l2MessageOrm.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired)
} }
if err != nil && err.Error() == "execution reverted: Message was already successfully executed" { if err != nil && errors.Is(err, ErrExecutionRevertedAlreadySuccessExecuted) {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed) return r.l2MessageOrm.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
} }
if err != nil { if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) { if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
@@ -244,7 +258,7 @@ func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
// save status in db // save status in db
// @todo handle db error // @todo handle db error
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String()) err = r.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
if err != nil { if err != nil {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err) log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err return err
@@ -255,13 +269,13 @@ func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
// ProcessGasPriceOracle imports gas price to layer1 // ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() { func (r *Layer2Relayer) ProcessGasPriceOracle() {
batch, err := r.db.GetLatestBatch() batch, err := r.blockBatchOrm.GetLatestBatch()
if err != nil { if err != nil {
log.Error("Failed to GetLatestBatch", "err", err) log.Error("Failed to GetLatestBatch", "err", err)
return return
} }
if batch.OracleStatus == types.GasOraclePending { if types.GasOracleStatus(batch.OracleStatus) == types.GasOraclePending {
suggestGasPrice, err := r.l2Client.SuggestGasPrice(r.ctx) suggestGasPrice, err := r.l2Client.SuggestGasPrice(r.ctx)
if err != nil { if err != nil {
log.Error("Failed to fetch SuggestGasPrice from l2geth", "err", err) log.Error("Failed to fetch SuggestGasPrice from l2geth", "err", err)
@@ -286,7 +300,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
return return
} }
err = r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String()) err = r.blockBatchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
if err != nil { if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err) log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
return return
@@ -298,14 +312,14 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
} }
// SendCommitTx sends commitBatches tx to L1. // SendCommitTx sends commitBatches tx to L1.
func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error { func (r *Layer2Relayer) SendCommitTx(batchData []*bridgeTypes.BatchData) error {
if len(batchData) == 0 { if len(batchData) == 0 {
log.Error("SendCommitTx receives empty batch") log.Error("SendCommitTx receives empty batch")
return nil return nil
} }
// pack calldata // pack calldata
commitBatches := make([]bridge_abi.IScrollChainBatch, len(batchData)) commitBatches := make([]bridgeAbi.IScrollChainBatch, len(batchData))
for i, batch := range batchData { for i, batch := range batchData {
commitBatches[i] = batch.Batch commitBatches[i] = batch.Batch
} }
@@ -341,7 +355,7 @@ func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error {
batchHashes := make([]string, len(batchData)) batchHashes := make([]string, len(batchData))
for i, batch := range batchData { for i, batch := range batchData {
batchHashes[i] = batch.Hash().Hex() batchHashes[i] = batch.Hash().Hex()
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHashes[i], txHash.String(), types.RollupCommitting) err = r.blockBatchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHashes[i], txHash.String(), types.RollupCommitting)
if err != nil { if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batchHashes[i], "index", batch.Batch.BatchIndex, "err", err) log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batchHashes[i], "index", batch.Batch.BatchIndex, "err", err)
} }
@@ -353,7 +367,7 @@ func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error {
// ProcessCommittedBatches submit proof to layer 1 rollup contract // ProcessCommittedBatches submit proof to layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() { func (r *Layer2Relayer) ProcessCommittedBatches() {
// set skipped batches in a single db operation // set skipped batches in a single db operation
if count, err := r.db.UpdateSkippedBatches(); err != nil { if count, err := r.blockBatchOrm.UpdateSkippedBatches(); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err) log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway // continue anyway
} else if count > 0 { } else if count > 0 {
@@ -362,7 +376,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
} }
// batches are sorted by batch index in increasing order // batches are sorted by batch index in increasing order
batchHashes, err := r.db.GetCommittedBatches(1) batchHashes, err := r.blockBatchOrm.GetBlockBatchesHashByRollupStatus(types.RollupCommitted, 1)
if err != nil { if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err) log.Error("Failed to fetch committed L2 batches", "err", err)
return return
@@ -373,7 +387,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
hash := batchHashes[0] hash := batchHashes[0]
// @todo add support to relay multiple batches // @todo add support to relay multiple batches
batches, err := r.db.GetBlockBatches(map[string]interface{}{"hash": hash}, "LIMIT 1") batches, err := r.blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": hash}, nil, 1)
if err != nil { if err != nil {
log.Error("Failed to fetch committed L2 batch", "hash", hash, "err", err) log.Error("Failed to fetch committed L2 batch", "hash", hash, "err", err)
return return
@@ -384,33 +398,31 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
} }
batch := batches[0] batch := batches[0]
status := batch.ProvingStatus status := types.ProvingStatus(batch.ProvingStatus)
switch status { switch status {
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned: case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
// The proof for this block is not ready yet. // The proof for this block is not ready yet.
return return
case types.ProvingTaskProved: case types.ProvingTaskProved:
// It's an intermediate state. The roller manager received the proof but has not verified // It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified. // the proof yet. We don't roll up the proof until it's verified.
return return
case types.ProvingTaskFailed, types.ProvingTaskSkipped: case types.ProvingTaskFailed, types.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake // note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
if err = r.blockBatchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err) log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
} }
case types.ProvingTaskVerified: case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash) log.Info("Start to roll up zk proof", "hash", hash)
success := false success := false
previousBatch, err := r.db.GetLatestFinalizingOrFinalizedBatch() rollupStatues := []types.RollupStatus{
types.RollupFinalizing,
types.RollupFinalized,
}
previousBatch, err := r.blockBatchOrm.GetLatestBatchByRollupStatus(rollupStatues)
// skip submitting proof // skip submitting proof
if err == nil && uint64(batch.CreatedAt.Sub(*previousBatch.CreatedAt).Seconds()) < r.cfg.FinalizeBatchIntervalSec { if err == nil && uint64(batch.CreatedAt.Sub(previousBatch.CreatedAt).Seconds()) < r.cfg.FinalizeBatchIntervalSec {
log.Info( log.Info(
"Not enough time passed, skipping", "Not enough time passed, skipping",
"hash", hash, "hash", hash,
@@ -420,7 +432,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
"lastFinalizingCreatedAt", previousBatch.CreatedAt, "lastFinalizingCreatedAt", previousBatch.CreatedAt,
) )
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil { if err = r.blockBatchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err) log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
} else { } else {
success = true success = true
@@ -430,7 +442,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
} }
// handle unexpected db error // handle unexpected db error
if err != nil && err.Error() != "sql: no rows in result set" { if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Error("Failed to get latest finalized batch", "err", err) log.Error("Failed to get latest finalized batch", "err", err)
return return
} }
@@ -439,33 +451,26 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// TODO: need to revisit this and have a more fine-grained error handling // TODO: need to revisit this and have a more fine-grained error handling
if !success { if !success {
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash) log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash)
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil { if err = r.blockBatchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err) log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
} }
} }
}() }()
proofBuffer, icBuffer, err := r.db.GetVerifiedProofAndInstanceCommitmentsByHash(hash) aggProof, err := r.blockBatchOrm.GetVerifiedProofByHash(hash)
if err != nil { if err != nil {
log.Warn("fetch get proof by hash failed", "hash", hash, "err", err) log.Warn("get verified proof by hash failed", "hash", hash, "err", err)
return
}
if proofBuffer == nil || icBuffer == nil {
log.Warn("proof or instance not ready", "hash", hash)
return
}
if len(proofBuffer)%32 != 0 {
log.Error("proof buffer has wrong length", "hash", hash, "length", len(proofBuffer))
return
}
if len(icBuffer)%32 != 0 {
log.Warn("instance buffer has wrong length", "hash", hash, "length", len(icBuffer))
return return
} }
proof := utils.BufferToUint256Le(proofBuffer) if err = aggProof.SanityCheck(); err != nil {
instance := utils.BufferToUint256Le(icBuffer) log.Warn("agg_proof sanity check fails", "hash", hash, "error", err)
data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, instance) return
}
proof := utils.BufferToUint256Le(aggProof.Proof)
finalPair := utils.BufferToUint256Le(aggProof.FinalPair)
data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, finalPair)
if err != nil { if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err) log.Error("Pack finalizeBatchWithProof failed", "err", err)
return return
@@ -485,7 +490,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
log.Info("finalizeBatchWithProof in layer1", "batch_hash", hash, "tx_hash", hash) log.Info("finalizeBatchWithProof in layer1", "batch_hash", hash, "tx_hash", hash)
// record and sync with db, @todo handle db error // record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing) err = r.blockBatchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
if err != nil { if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", hash, "err", err) log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", hash, "err", err)
} }
@@ -512,7 +517,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation) log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
} }
// @todo handle db error // @todo handle db error
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), status, confirmation.TxHash.String()) err := r.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), status, confirmation.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err) log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
} }
@@ -533,7 +538,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
} }
for _, batchHash := range batchHashes { for _, batchHash := range batchHashes {
// @todo handle db error // @todo handle db error
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), status) err := r.blockBatchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), status)
if err != nil { if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_hash", batchHash, "err", err) log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_hash", batchHash, "err", err)
} }
@@ -553,7 +558,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation) log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
} }
// @todo handle db error // @todo handle db error
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status) err := r.blockBatchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
if err != nil { if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", batchHash.(string), "err", err) log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", batchHash.(string), "err", err)
} }
@@ -575,14 +580,14 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
case cfm := <-r.gasOracleSender.ConfirmChan(): case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful { if !cfm.IsSuccessful {
// @discuss: maybe make it pending again? // @discuss: maybe make it pending again?
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String()) err := r.blockBatchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err) log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
} }
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm) log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
} else { } else {
// @todo handle db error // @todo handle db error
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String()) err := r.blockBatchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err) log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
} }

View File

@@ -12,21 +12,24 @@ import (
"github.com/agiledragon/gomonkey/v2" "github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/smartystreets/goconvey/convey" "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/bridge/sender" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/database" "scroll-tech/bridge/internal/orm/migrate"
"scroll-tech/database/migrate" bridgeTypes "scroll-tech/bridge/internal/types"
bridgeUtils "scroll-tech/bridge/internal/utils"
) )
var ( var (
templateL2Message = []*types.L2Message{ templateL2Message = []orm.L2Message{
{ {
Nonce: 1, Nonce: 1,
Height: 1, Height: 1,
@@ -39,144 +42,174 @@ var (
} }
) )
func testCreateNewRelayer(t *testing.T) { func setupL2RelayerDB(t *testing.T) *gorm.DB {
// Create db handler and reset db. db, err := bridgeUtils.InitDB(cfg.DBConfig)
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB)) sqlDB, err := db.DB()
defer db.Close() assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
return db
}
func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t)
defer bridgeUtils.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, relayer) assert.NotNil(t, relayer)
} }
func testL2RelayerProcessSaveEvents(t *testing.T) { func testL2RelayerProcessSaveEvents(t *testing.T) {
// Create db handler and reset db. db := setupL2RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
err = db.SaveL2Messages(context.Background(), templateL2Message) l2MessageOrm := orm.NewL2Message(db)
err = l2MessageOrm.SaveL2Messages(context.Background(), templateL2Message)
assert.NoError(t, err) assert.NoError(t, err)
traces := []*types.WrappedBlock{ traces := []*bridgeTypes.WrappedBlock{
{ {
Header: &geth_types.Header{ Header: &gethTypes.Header{
Number: big.NewInt(int64(templateL2Message[0].Height)), Number: big.NewInt(int64(templateL2Message[0].Height)),
}, },
Transactions: nil, Transactions: nil,
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
}, },
{ {
Header: &geth_types.Header{ Header: &gethTypes.Header{
Number: big.NewInt(int64(templateL2Message[0].Height + 1)), Number: big.NewInt(int64(templateL2Message[0].Height + 1)),
}, },
Transactions: nil, Transactions: nil,
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
}, },
} }
assert.NoError(t, db.InsertWrappedBlocks(traces))
parentBatch1 := &types.BlockBatch{ blockTraceOrm := orm.NewBlockTrace(db)
assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(traces))
blockBatchOrm := orm.NewBlockBatch(db)
parentBatch1 := &bridgeTypes.BatchInfo{
Index: 0, Index: 0,
Hash: common.Hash{}.Hex(), Hash: common.Hash{}.Hex(),
StateRoot: common.Hash{}.Hex(), StateRoot: common.Hash{}.Hex(),
} }
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil) batchData1 := bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
batchHash := batchData1.Hash().Hex() batchHash := batchData1.Hash().Hex()
assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{1}, batchHash)) err = db.Transaction(func(tx *gorm.DB) error {
assert.NoError(t, dbTx.Commit()) rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData1)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, []uint64{1}, batchHash)
if dbTxErr != nil {
return dbTxErr
}
return nil
})
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchHash, types.RollupFinalized) err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupFinalized)
assert.NoError(t, err) assert.NoError(t, err)
relayer.ProcessSavedEvents() relayer.ProcessSavedEvents()
msg, err := db.GetL2MessageByNonce(templateL2Message[0].Nonce) msg, err := l2MessageOrm.GetL2MessageByNonce(templateL2Message[0].Nonce)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.MsgSubmitted, msg.Status) assert.Equal(t, types.MsgSubmitted, types.MsgStatus(msg.Status))
} }
func testL2RelayerProcessCommittedBatches(t *testing.T) { func testL2RelayerProcessCommittedBatches(t *testing.T) {
// Create db handler and reset db. db := setupL2RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
parentBatch1 := &types.BlockBatch{ parentBatch1 := &bridgeTypes.BatchInfo{
Index: 0, Index: 0,
Hash: common.Hash{}.Hex(), Hash: common.Hash{}.Hex(),
StateRoot: common.Hash{}.Hex(), StateRoot: common.Hash{}.Hex(),
} }
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
dbTx, err := db.Beginx() blockBatchOrm := orm.NewBlockBatch(db)
assert.NoError(t, err) batchData1 := bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
batchHash := batchData1.Hash().Hex() batchHash := batchData1.Hash().Hex()
err = dbTx.Commit() err = db.Transaction(func(tx *gorm.DB) error {
rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData1)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
return nil
})
assert.NoError(t, err) assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted) err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
assert.NoError(t, err) assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} proof := &message.AggProof{
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100) FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
assert.NoError(t, err) assert.NoError(t, err)
err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified) err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err) assert.NoError(t, err)
relayer.ProcessCommittedBatches() relayer.ProcessCommittedBatches()
status, err := db.GetRollupStatus(batchHash) statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizing, status) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
} }
func testL2RelayerSkipBatches(t *testing.T) { func testL2RelayerSkipBatches(t *testing.T) {
// Create db handler and reset db. db := setupL2RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
blockBatchOrm := orm.NewBlockBatch(db)
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string { createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string {
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchData := genBatchData(t, index) batchData := genBatchData(t, index)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData)) err = db.Transaction(func(tx *gorm.DB) error {
rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
return nil
})
assert.NoError(t, err)
batchHash := batchData.Hash().Hex() batchHash := batchData.Hash().Hex()
err = dbTx.Commit() err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, rollupStatus)
assert.NoError(t, err) assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchHash, rollupStatus) proof := &message.AggProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
assert.NoError(t, err) assert.NoError(t, err)
err = blockBatchOrm.UpdateProvingStatus(batchHash, provingStatus)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err) assert.NoError(t, err)
err = db.UpdateProvingStatus(batchHash, provingStatus)
assert.NoError(t, err)
return batchHash return batchHash
} }
@@ -200,29 +233,30 @@ func testL2RelayerSkipBatches(t *testing.T) {
relayer.ProcessCommittedBatches() relayer.ProcessCommittedBatches()
for _, id := range skipped { for _, id := range skipped {
status, err := db.GetRollupStatus(id) statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{id})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizationSkipped, status) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
} }
for _, id := range notSkipped { for _, id := range notSkipped {
status, err := db.GetRollupStatus(id) statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{id})
assert.NoError(t, err) assert.NoError(t, err)
assert.NotEqual(t, types.RollupFinalizationSkipped, status) assert.Equal(t, 1, len(statuses))
assert.NotEqual(t, types.RollupFinalizationSkipped, statuses[0])
} }
} }
func testL2RelayerMsgConfirm(t *testing.T) { func testL2RelayerMsgConfirm(t *testing.T) {
// Set up the database and defer closing it. db := setupL2RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
l2MessageOrm := orm.NewL2Message(db)
insertL2Messages := []orm.L2Message{
{MsgHash: "msg-1", Nonce: 0},
{MsgHash: "msg-2", Nonce: 1},
}
err := l2MessageOrm.SaveL2Messages(context.Background(), insertL2Messages)
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
assert.NoError(t, db.SaveL2Messages(context.Background(), []*types.L2Message{
{MsgHash: "msg-1", Nonce: 0}, {MsgHash: "msg-2", Nonce: 1},
}))
// Create and set up the Layer2 Relayer. // Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
@@ -245,32 +279,46 @@ func testL2RelayerMsgConfirm(t *testing.T) {
// Check the database for the updated status using TryTimes. // Check the database for the updated status using TryTimes.
assert.True(t, utils.TryTimes(5, func() bool { assert.True(t, utils.TryTimes(5, func() bool {
msg1, err1 := db.GetL2MessageByMsgHash("msg-1") fields1 := map[string]interface{}{"msg_hash": "msg-1"}
msg2, err2 := db.GetL2MessageByMsgHash("msg-2") msg1, err1 := l2MessageOrm.GetL2Messages(fields1, nil, 0)
return err1 == nil && msg1.Status == types.MsgConfirmed && if len(msg1) != 1 {
err2 == nil && msg2.Status == types.MsgRelayFailed return false
}
fields2 := map[string]interface{}{"msg_hash": "msg-2"}
msg2, err2 := l2MessageOrm.GetL2Messages(fields2, nil, 0)
if len(msg2) != 1 {
return false
}
return err1 == nil && types.MsgStatus(msg1[0].Status) == types.MsgConfirmed &&
err2 == nil && types.MsgStatus(msg2[0].Status) == types.MsgRelayFailed
})) }))
} }
func testL2RelayerRollupConfirm(t *testing.T) { func testL2RelayerRollupConfirm(t *testing.T) {
// Set up the database and defer closing it. db := setupL2RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data. // Insert test data.
batches := make([]*types.BatchData, 6) batches := make([]*bridgeTypes.BatchData, 6)
for i := 0; i < 6; i++ { for i := 0; i < 6; i++ {
batches[i] = genBatchData(t, uint64(i)) batches[i] = genBatchData(t, uint64(i))
} }
dbTx, err := db.Beginx() blockBatchOrm := orm.NewBlockBatch(db)
err := db.Transaction(func(tx *gorm.DB) error {
for _, batch := range batches {
rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batch)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
}
return nil
})
assert.NoError(t, err) assert.NoError(t, err)
for _, batch := range batches {
assert.NoError(t, db.NewBatchInDBTx(dbTx, batch))
}
assert.NoError(t, dbTx.Commit())
// Create and set up the Layer2 Relayer. // Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
@@ -314,8 +362,8 @@ func testL2RelayerRollupConfirm(t *testing.T) {
} }
for i, batch := range batches[:6] { for i, batch := range batches[:6] {
batchInDB, err := db.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()}) batchInDB, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()}, nil, 0)
if err != nil || len(batchInDB) != 1 || batchInDB[0].RollupStatus != expectedStatuses[i] { if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] {
return false return false
} }
} }
@@ -325,24 +373,30 @@ func testL2RelayerRollupConfirm(t *testing.T) {
} }
func testL2RelayerGasOracleConfirm(t *testing.T) { func testL2RelayerGasOracleConfirm(t *testing.T) {
// Set up the database and defer closing it. db := setupL2RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data. // Insert test data.
batches := make([]*types.BatchData, 2) batches := make([]*bridgeTypes.BatchData, 2)
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
batches[i] = genBatchData(t, uint64(i)) batches[i] = genBatchData(t, uint64(i))
} }
dbTx, err := db.Beginx() blockBatchOrm := orm.NewBlockBatch(db)
err := db.Transaction(func(tx *gorm.DB) error {
for _, batch := range batches {
rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batch)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
}
return nil
})
assert.NoError(t, err) assert.NoError(t, err)
for _, batch := range batches {
assert.NoError(t, db.NewBatchInDBTx(dbTx, batch))
}
assert.NoError(t, dbTx.Commit())
// Create and set up the Layer2 Relayer. // Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
@@ -364,8 +418,8 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
ok := utils.TryTimes(5, func() bool { ok := utils.TryTimes(5, func() bool {
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleFailed} expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleFailed}
for i, batch := range batches { for i, batch := range batches {
gasOracle, err := db.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()}) gasOracle, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()}, nil, 0)
if err != nil || len(gasOracle) != 1 || gasOracle[0].OracleStatus != expectedStatuses[i] { if err != nil || len(gasOracle) != 1 || types.GasOracleStatus(gasOracle[0].OracleStatus) != expectedStatuses[i] {
return false return false
} }
} }
@@ -374,43 +428,42 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
assert.True(t, ok) assert.True(t, ok)
} }
func genBatchData(t *testing.T, index uint64) *types.BatchData { func genBatchData(t *testing.T, index uint64) *bridgeTypes.BatchData {
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../../../testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock := &types.WrappedBlock{} wrappedBlock := &bridgeTypes.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock) err = json.Unmarshal(templateBlockTrace, wrappedBlock)
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16)) wrappedBlock.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16))
parentBatch := &types.BlockBatch{ parentBatch := &bridgeTypes.BatchInfo{
Index: index, Index: index,
Hash: "0x0000000000000000000000000000000000000000", Hash: "0x0000000000000000000000000000000000000000",
} }
return types.NewBatchData(parentBatch, []*types.WrappedBlock{wrappedBlock}, nil) return bridgeTypes.NewBatchData(parentBatch, []*bridgeTypes.WrappedBlock{wrappedBlock}, nil)
} }
func testLayer2RelayerProcessGasPriceOracle(t *testing.T) { func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db, err := database.NewOrmFactory(cfg.DBConfig) db := setupL2RelayerDB(t)
assert.NoError(t, err) defer bridgeUtils.CloseDB(db)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, relayer) assert.NotNil(t, relayer)
var blockBatchOrm *orm.BlockBatch
convey.Convey("Failed to GetLatestBatch", t, func() { convey.Convey("Failed to GetLatestBatch", t, func() {
targetErr := errors.New("GetLatestBatch error") targetErr := errors.New("GetLatestBatch error")
patchGuard := gomonkey.ApplyMethodFunc(db, "GetLatestBatch", func() (*types.BlockBatch, error) { patchGuard := gomonkey.ApplyMethodFunc(blockBatchOrm, "GetLatestBatch", func() (*orm.BlockBatch, error) {
return nil, targetErr return nil, targetErr
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
relayer.ProcessGasPriceOracle() relayer.ProcessGasPriceOracle()
}) })
patchGuard := gomonkey.ApplyMethodFunc(db, "GetLatestBatch", func() (*types.BlockBatch, error) { patchGuard := gomonkey.ApplyMethodFunc(blockBatchOrm, "GetLatestBatch", func() (*orm.BlockBatch, error) {
batch := types.BlockBatch{ batch := orm.BlockBatch{
OracleStatus: types.GasOraclePending, OracleStatus: int(types.GasOraclePending),
Hash: "0x0000000000000000000000000000000000000000", Hash: "0x0000000000000000000000000000000000000000",
} }
return &batch, nil return &batch, nil
@@ -455,42 +508,40 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
convey.Convey("UpdateGasOracleStatusAndOracleTxHash failed", t, func() { convey.Convey("UpdateGasOracleStatusAndOracleTxHash failed", t, func() {
targetErr := errors.New("UpdateL2GasOracleStatusAndOracleTxHash error") targetErr := errors.New("UpdateL2GasOracleStatusAndOracleTxHash error")
patchGuard.ApplyMethodFunc(db, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error { patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
return targetErr return targetErr
}) })
relayer.ProcessGasPriceOracle() relayer.ProcessGasPriceOracle()
}) })
patchGuard.ApplyMethodFunc(db, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error { patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
return nil return nil
}) })
relayer.ProcessGasPriceOracle() relayer.ProcessGasPriceOracle()
} }
func testLayer2RelayerSendCommitTx(t *testing.T) { func testLayer2RelayerSendCommitTx(t *testing.T) {
db, err := database.NewOrmFactory(cfg.DBConfig) db := setupL2RelayerDB(t)
assert.NoError(t, err) defer bridgeUtils.CloseDB(db)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, relayer) assert.NotNil(t, relayer)
var batchDataList []*types.BatchData var batchDataList []*bridgeTypes.BatchData
convey.Convey("SendCommitTx receives empty batch", t, func() { convey.Convey("SendCommitTx receives empty batch", t, func() {
err = relayer.SendCommitTx(batchDataList) err = relayer.SendCommitTx(batchDataList)
assert.NoError(t, err) assert.NoError(t, err)
}) })
parentBatch := &types.BlockBatch{ parentBatch := &bridgeTypes.BatchInfo{
Index: 0, Index: 0,
Hash: "0x0000000000000000000000000000000000000000", Hash: "0x0000000000000000000000000000000000000000",
} }
traces := []*types.WrappedBlock{ traces := []*bridgeTypes.WrappedBlock{
{ {
Header: &geth_types.Header{ Header: &gethTypes.Header{
Number: big.NewInt(1000), Number: big.NewInt(1000),
ParentHash: common.Hash{}, ParentHash: common.Hash{},
Difficulty: big.NewInt(0), Difficulty: big.NewInt(0),
@@ -501,8 +552,8 @@ func testLayer2RelayerSendCommitTx(t *testing.T) {
}, },
} }
blocks := []*types.WrappedBlock{traces[0]} blocks := []*bridgeTypes.WrappedBlock{traces[0]}
tmpBatchData := types.NewBatchData(parentBatch, blocks, cfg.L2Config.BatchProposerConfig.PublicInputConfig) tmpBatchData := bridgeTypes.NewBatchData(parentBatch, blocks, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
batchDataList = append(batchDataList, tmpBatchData) batchDataList = append(batchDataList, tmpBatchData)
var s abi.ABI var s abi.ABI
@@ -535,16 +586,17 @@ func testLayer2RelayerSendCommitTx(t *testing.T) {
return common.HexToHash("0x56789abcdef1234"), nil return common.HexToHash("0x56789abcdef1234"), nil
}) })
var blockBatchOrm *orm.BlockBatch
convey.Convey("UpdateCommitTxHashAndRollupStatus failed", t, func() { convey.Convey("UpdateCommitTxHashAndRollupStatus failed", t, func() {
targetErr := errors.New("UpdateCommitTxHashAndRollupStatus failure") targetErr := errors.New("UpdateCommitTxHashAndRollupStatus failure")
patchGuard.ApplyMethodFunc(db, "UpdateCommitTxHashAndRollupStatus", func(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error { patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateCommitTxHashAndRollupStatus", func(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error {
return targetErr return targetErr
}) })
err = relayer.SendCommitTx(batchDataList) err = relayer.SendCommitTx(batchDataList)
assert.NoError(t, err) assert.NoError(t, err)
}) })
patchGuard.ApplyMethodFunc(db, "UpdateCommitTxHashAndRollupStatus", func(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error { patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateCommitTxHashAndRollupStatus", func(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error {
return nil return nil
}) })
err = relayer.SendCommitTx(batchDataList) err = relayer.SendCommitTx(batchDataList)

View File

@@ -10,9 +10,9 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
bridgeTypes "scroll-tech/bridge/internal/types"
) )
var ( var (
@@ -25,60 +25,65 @@ var (
l2Cli *ethclient.Client l2Cli *ethclient.Client
// block trace // block trace
wrappedBlock1 *types.WrappedBlock wrappedBlock1 *bridgeTypes.WrappedBlock
wrappedBlock2 *types.WrappedBlock wrappedBlock2 *bridgeTypes.WrappedBlock
// batch data // batch data
batchData1 *types.BatchData batchData1 *bridgeTypes.BatchData
batchData2 *types.BatchData batchData2 *bridgeTypes.BatchData
) )
func setupEnv(t *testing.T) (err error) { func setupEnv(t *testing.T) (err error) {
// Load config. // Load config.
cfg, err = config.NewConfig("../config.json") cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err) assert.NoError(t, err)
base.RunImages(t) base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint() cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint() cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = base.DBConfig cfg.DBConfig = &config.DBConfig{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
// Create l2geth client. // Create l2geth client.
l2Cli, err = base.L2Client() l2Cli, err = base.L2Client()
assert.NoError(t, err) assert.NoError(t, err)
templateBlockTrace1, err := os.ReadFile("../../common/testdata/blockTrace_02.json") templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
if err != nil { if err != nil {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock1 = &types.WrappedBlock{} wrappedBlock1 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil { if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err return err
} }
parentBatch1 := &types.BlockBatch{ parentBatch1 := &bridgeTypes.BatchInfo{
Index: 0, Index: 0,
Hash: "0x0cc6b102c2924402c14b2e3a19baccc316252bfdc44d9ec62e942d34e39ec729", Hash: "0x0cc6b102c2924402c14b2e3a19baccc316252bfdc44d9ec62e942d34e39ec729",
StateRoot: "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d", StateRoot: "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
} }
batchData1 = types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil) batchData1 = bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)
templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json") templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
if err != nil { if err != nil {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock2 = &types.WrappedBlock{} wrappedBlock2 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil { if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err return err
} }
parentBatch2 := &types.BlockBatch{ parentBatch2 := &bridgeTypes.BatchInfo{
Index: batchData1.Batch.BatchIndex, Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(), Hash: batchData1.Hash().Hex(),
StateRoot: batchData1.Batch.NewStateRoot.String(), StateRoot: batchData1.Batch.NewStateRoot.String(),
} }
batchData2 = types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil) batchData2 = bridgeTypes.NewBatchData(parentBatch2, []*bridgeTypes.WrappedBlock{wrappedBlock2}, nil)
log.Info("batchHash", "batchhash1", batchData1.Hash().Hex(), "batchhash2", batchData2.Hash().Hex()) log.Info("batchHash", "batchhash1", batchData1.Hash().Hex(), "batchhash2", batchData2.Hash().Hex())

View File

@@ -17,8 +17,8 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/utils" "scroll-tech/bridge/internal/utils"
) )
const ( const (

View File

@@ -9,8 +9,6 @@ import (
"testing" "testing"
"time" "time"
"golang.org/x/sync/errgroup"
"github.com/agiledragon/gomonkey/v2" "github.com/agiledragon/gomonkey/v2"
cmap "github.com/orcaman/concurrent-map" cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind" "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
@@ -20,10 +18,11 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
) )
const TXBatch = 50 const TXBatch = 50
@@ -45,7 +44,7 @@ func TestMain(m *testing.M) {
func setupEnv(t *testing.T) { func setupEnv(t *testing.T) {
var err error var err error
cfg, err = config.NewConfig("../config.json") cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err) assert.NoError(t, err)
base.RunImages(t) base.RunImages(t)
priv, err := crypto.HexToECDSA("1212121212121212121212121212121212121212121212121212121212121212") priv, err := crypto.HexToECDSA("1212121212121212121212121212121212121212121212121212121212121212")

View File

@@ -8,70 +8,35 @@ import (
"time" "time"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics" gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/database" bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/config"
bridgeabi "scroll-tech/bridge/abi" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/relayer" bridgeTypes "scroll-tech/bridge/internal/types"
) )
var ( var (
bridgeL2BatchesGasOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/gas/over/threshold/total", metrics.ScrollRegistry) bridgeL2BatchesGasOverThresholdTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/gas/over/threshold/total", metrics.ScrollRegistry)
bridgeL2BatchesTxsOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/txs/over/threshold/total", metrics.ScrollRegistry) bridgeL2BatchesTxsOverThresholdTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/txs/over/threshold/total", metrics.ScrollRegistry)
bridgeL2BatchesBlocksCreatedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/blocks/created/total", metrics.ScrollRegistry) bridgeL2BatchesBlocksCreatedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/blocks/created/total", metrics.ScrollRegistry)
bridgeL2BatchesCommitsSentTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/commits/sent/total", metrics.ScrollRegistry) bridgeL2BatchesCommitsSentTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/commits/sent/total", metrics.ScrollRegistry)
bridgeL2BatchesOversizedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/oversized/total", metrics.ScrollRegistry) bridgeL2BatchesOversizedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/oversized/total", metrics.ScrollRegistry)
bridgeL2BatchesTxsCreatedPerBatchGauge = gethMetrics.NewRegisteredGauge("bridge/l2/batches/txs/created/per/batch", metrics.ScrollRegistry)
bridgeL2BatchesTxsCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/txs/created/per/batch", metrics.ScrollRegistry) bridgeL2BatchesGasCreatedPerBatchGauge = gethMetrics.NewRegisteredGauge("bridge/l2/batches/gas/created/per/batch", metrics.ScrollRegistry)
bridgeL2BatchesGasCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/gas/created/per/batch", metrics.ScrollRegistry) bridgeL2BatchesPayloadSizePerBatchGauge = gethMetrics.NewRegisteredGauge("bridge/l2/batches/payload/size/per/batch", metrics.ScrollRegistry)
bridgeL2BatchesPayloadSizePerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/payload/size/per/batch", metrics.ScrollRegistry)
) )
// AddBatchInfoToDB inserts the batch information to the BlockBatch table and updates the batch_hash
// in all blocks included in the batch.
func AddBatchInfoToDB(db database.OrmFactory, batchData *types.BatchData) error {
dbTx, err := db.Beginx()
if err != nil {
return err
}
var dbTxErr error
defer func() {
if dbTxErr != nil {
if err := dbTx.Rollback(); err != nil {
log.Error("dbTx.Rollback()", "err", err)
}
}
}()
if dbTxErr = db.NewBatchInDBTx(dbTx, batchData); dbTxErr != nil {
return dbTxErr
}
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
if dbTxErr = db.SetBatchHashForL2BlocksInDBTx(dbTx, blockIDs, batchData.Hash().Hex()); dbTxErr != nil {
return dbTxErr
}
dbTxErr = dbTx.Commit()
return dbTxErr
}
// BatchProposer sends batches commit transactions to relayer. // BatchProposer sends batches commit transactions to relayer.
type BatchProposer struct { type BatchProposer struct {
mutex sync.Mutex mutex sync.Mutex
ctx context.Context
ctx context.Context db *gorm.DB
orm database.OrmFactory
batchTimeSec uint64 batchTimeSec uint64
batchGasThreshold uint64 batchGasThreshold uint64
@@ -83,18 +48,23 @@ type BatchProposer struct {
commitCalldataMinSize uint64 commitCalldataMinSize uint64
proofGenerationFreq uint64 proofGenerationFreq uint64
batchDataBuffer []*types.BatchData batchDataBuffer []*bridgeTypes.BatchData
relayer *relayer.Layer2Relayer relayer *relayer.Layer2Relayer
piCfg *types.PublicInputHashConfig blockBatchOrm *orm.BlockBatch
blockTraceOrm *orm.BlockTrace
piCfg *bridgeTypes.PublicInputHashConfig
} }
// NewBatchProposer will return a new instance of BatchProposer. // NewBatchProposer will return a new instance of BatchProposer.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *relayer.Layer2Relayer, orm database.OrmFactory) *BatchProposer { func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *relayer.Layer2Relayer, db *gorm.DB) *BatchProposer {
p := &BatchProposer{ p := &BatchProposer{
mutex: sync.Mutex{}, mutex: sync.Mutex{},
ctx: ctx, ctx: ctx,
orm: orm, db: db,
blockBatchOrm: orm.NewBlockBatch(db),
blockTraceOrm: orm.NewBlockTrace(db),
batchTimeSec: cfg.BatchTimeSec, batchTimeSec: cfg.BatchTimeSec,
batchGasThreshold: cfg.BatchGasThreshold, batchGasThreshold: cfg.BatchGasThreshold,
batchTxNumThreshold: cfg.BatchTxNumThreshold, batchTxNumThreshold: cfg.BatchTxNumThreshold,
@@ -119,7 +89,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, rela
func (p *BatchProposer) recoverBatchDataBuffer() { func (p *BatchProposer) recoverBatchDataBuffer() {
// batches are sorted by batch index in increasing order // batches are sorted by batch index in increasing order
batchHashes, err := p.orm.GetPendingBatches(math.MaxInt32) batchHashes, err := p.blockBatchOrm.GetBlockBatchesHashByRollupStatus(types.RollupPending, math.MaxInt32)
if err != nil { if err != nil {
log.Crit("Failed to fetch pending L2 batches", "err", err) log.Crit("Failed to fetch pending L2 batches", "err", err)
} }
@@ -129,17 +99,17 @@ func (p *BatchProposer) recoverBatchDataBuffer() {
log.Info("Load pending batches into batchDataBuffer") log.Info("Load pending batches into batchDataBuffer")
// helper function to cache and get BlockBatch from DB // helper function to cache and get BlockBatch from DB
blockBatchCache := make(map[string]*types.BlockBatch) blockBatchCache := make(map[string]orm.BlockBatch)
getBlockBatch := func(batchHash string) (*types.BlockBatch, error) { getBlockBatch := func(batchHash string) (*orm.BlockBatch, error) {
if blockBatch, ok := blockBatchCache[batchHash]; ok { if blockBatch, ok := blockBatchCache[batchHash]; ok {
return blockBatch, nil return &blockBatch, nil
} }
blockBatches, err := p.orm.GetBlockBatches(map[string]interface{}{"hash": batchHash}) blockBatches, err := p.blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 0)
if err != nil || len(blockBatches) == 0 { if err != nil || len(blockBatches) == 0 {
return nil, err return nil, err
} }
blockBatchCache[batchHash] = blockBatches[0] blockBatchCache[batchHash] = blockBatches[0]
return blockBatches[0], nil return &blockBatches[0], nil
} }
// recover the in-memory batchData from DB // recover the in-memory batchData from DB
@@ -157,23 +127,26 @@ func (p *BatchProposer) recoverBatchDataBuffer() {
continue continue
} }
blockInfos, err := p.orm.GetL2BlockInfos( whereFileds := map[string]interface{}{
map[string]interface{}{"batch_hash": batchHash}, "batch_hash": batchHash,
"order by number ASC", }
) orderByList := []string{
"number ASC",
}
blockTraces, err := p.blockTraceOrm.GetL2BlockInfos(whereFileds, orderByList, 0)
if err != nil { if err != nil {
log.Error("could not GetL2BlockInfos", "batch_hash", batchHash, "error", err) log.Error("could not GetL2BlockInfos", "batch_hash", batchHash, "error", err)
continue continue
} }
if len(blockInfos) != int(blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1) { if len(blockTraces) != int(blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1) {
log.Error("the number of block info retrieved from DB mistmatches the batch info in the DB", log.Error("the number of block info retrieved from DB mistmatches the batch info in the DB",
"len(blockInfos)", len(blockInfos), "len(blockInfos)", len(blockTraces),
"expected", blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1) "expected", blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1)
continue continue
} }
batchData, err := p.generateBatchData(parentBatch, blockInfos) batchData, err := p.generateBatchData(parentBatch, blockTraces)
if err != nil { if err != nil {
continue continue
} }
@@ -194,16 +167,14 @@ func (p *BatchProposer) TryProposeBatch() {
defer p.mutex.Unlock() defer p.mutex.Unlock()
for p.getBatchDataBufferSize() < p.batchDataBufferSizeLimit { for p.getBatchDataBufferSize() < p.batchDataBufferSizeLimit {
blocks, err := p.orm.GetUnbatchedL2Blocks( orderBy := []string{"number ASC"}
map[string]interface{}{}, blockTraces, err := p.blockTraceOrm.GetUnbatchedL2Blocks(map[string]interface{}{}, orderBy, int(p.batchBlocksLimit))
fmt.Sprintf("order by number ASC LIMIT %d", p.batchBlocksLimit),
)
if err != nil { if err != nil {
log.Error("failed to get unbatched blocks", "err", err) log.Error("failed to get unbatched blocks", "err", err)
return return
} }
batchCreated := p.proposeBatch(blocks) batchCreated := p.proposeBatch(blockTraces)
// while size of batchDataBuffer < commitCalldataMinSize, // while size of batchDataBuffer < commitCalldataMinSize,
// proposer keeps fetching and porposing batches. // proposer keeps fetching and porposing batches.
@@ -232,7 +203,7 @@ func (p *BatchProposer) TryCommitBatches() {
commit := false commit := false
calldataByteLen := uint64(0) calldataByteLen := uint64(0)
for ; index < len(p.batchDataBuffer); index++ { for ; index < len(p.batchDataBuffer); index++ {
calldataByteLen += bridgeabi.GetBatchCalldataLength(&p.batchDataBuffer[index].Batch) calldataByteLen += bridgeAbi.GetBatchCalldataLength(&p.batchDataBuffer[index].Batch)
if calldataByteLen > p.commitCalldataSizeLimit { if calldataByteLen > p.commitCalldataSizeLimit {
commit = true commit = true
if index == 0 { if index == 0 {
@@ -264,13 +235,13 @@ func (p *BatchProposer) TryCommitBatches() {
} }
} }
func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool { func (p *BatchProposer) proposeBatch(blockTraces []orm.BlockTrace) bool {
if len(blocks) == 0 { if len(blockTraces) == 0 {
return false return false
} }
approximatePayloadSize := func(hash string) (uint64, error) { approximatePayloadSize := func(hash string) (uint64, error) {
traces, err := p.orm.GetL2WrappedBlocks(map[string]interface{}{"hash": hash}) traces, err := p.blockTraceOrm.GetL2WrappedBlocks(map[string]interface{}{"hash": hash})
if err != nil { if err != nil {
return 0, err return 0, err
} }
@@ -284,49 +255,49 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
return uint64(size), nil return uint64(size), nil
} }
firstSize, err := approximatePayloadSize(blocks[0].Hash) firstSize, err := approximatePayloadSize(blockTraces[0].Hash)
if err != nil { if err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err) log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err)
return false return false
} }
if firstSize > p.commitCalldataSizeLimit { if firstSize > p.commitCalldataSizeLimit {
log.Warn("oversized payload even for only 1 block", "height", blocks[0].Number, "size", firstSize) log.Warn("oversized payload even for only 1 block", "height", blockTraces[0].Number, "size", firstSize)
// note: we should probably fail here once we can ensure this will not happen // note: we should probably fail here once we can ensure this will not happen
if err := p.createBatchForBlocks(blocks[:1]); err != nil { if err := p.createBatchForBlocks(blockTraces[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err) log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err)
return false return false
} }
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum)) bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blockTraces[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed)) bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blockTraces[0].GasUsed))
bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(firstSize)) bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(firstSize))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1) bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
bridgeL2BatchesOversizedTotalCounter.Inc(1) bridgeL2BatchesOversizedTotalCounter.Inc(1)
return true return true
} }
if blocks[0].GasUsed > p.batchGasThreshold { if blockTraces[0].GasUsed > p.batchGasThreshold {
bridgeL2BatchesGasOverThresholdTotalCounter.Inc(1) bridgeL2BatchesGasOverThresholdTotalCounter.Inc(1)
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed) log.Warn("gas overflow even for only 1 block", "height", blockTraces[0].Number, "gas", blockTraces[0].GasUsed)
if err := p.createBatchForBlocks(blocks[:1]); err != nil { if err := p.createBatchForBlocks(blockTraces[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err) log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err)
} else { } else {
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum)) bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blockTraces[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed)) bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blockTraces[0].GasUsed))
bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(firstSize)) bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(firstSize))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1) bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
} }
return true return true
} }
if blocks[0].TxNum > p.batchTxNumThreshold { if blockTraces[0].TxNum > p.batchTxNumThreshold {
bridgeL2BatchesTxsOverThresholdTotalCounter.Inc(1) bridgeL2BatchesTxsOverThresholdTotalCounter.Inc(1)
log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum) log.Warn("too many txs even for only 1 block", "height", blockTraces[0].Number, "tx_num", blockTraces[0].TxNum)
if err := p.createBatchForBlocks(blocks[:1]); err != nil { if err := p.createBatchForBlocks(blockTraces[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err) log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err)
} else { } else {
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum)) bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blockTraces[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed)) bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blockTraces[0].GasUsed))
bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(firstSize)) bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(firstSize))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1) bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
} }
@@ -336,7 +307,7 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
var gasUsed, txNum, payloadSize uint64 var gasUsed, txNum, payloadSize uint64
reachThreshold := false reachThreshold := false
// add blocks into batch until reach batchGasThreshold // add blocks into batch until reach batchGasThreshold
for i, block := range blocks { for i, block := range blockTraces {
size, err := approximatePayloadSize(block.Hash) size, err := approximatePayloadSize(block.Hash)
if err != nil { if err != nil {
log.Error("failed to create batch", "number", block.Number, "err", err) log.Error("failed to create batch", "number", block.Number, "err", err)
@@ -344,7 +315,7 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
} }
if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) || (payloadSize+size > p.commitCalldataSizeLimit) { if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) || (payloadSize+size > p.commitCalldataSizeLimit) {
blocks = blocks[:i] blockTraces = blockTraces[:i]
reachThreshold = true reachThreshold = true
break break
} }
@@ -356,24 +327,24 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
// if too few gas gathered, but we don't want to halt, we then check the first block in the batch: // if too few gas gathered, but we don't want to halt, we then check the first block in the batch:
// if it's not old enough we will skip proposing the batch, // if it's not old enough we will skip proposing the batch,
// otherwise we will still propose a batch // otherwise we will still propose a batch
if !reachThreshold && blocks[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) { if !reachThreshold && blockTraces[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) {
return false return false
} }
if err := p.createBatchForBlocks(blocks); err != nil { if err := p.createBatchForBlocks(blockTraces); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err) log.Error("failed to create batch", "from", blockTraces[0].Number, "to", blockTraces[len(blockTraces)-1].Number, "err", err)
} else { } else {
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(txNum)) bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(txNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(gasUsed)) bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(gasUsed))
bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(payloadSize)) bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(payloadSize))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(int64(len(blocks))) bridgeL2BatchesBlocksCreatedTotalCounter.Inc(int64(len(blockTraces)))
} }
return true return true
} }
func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error { func (p *BatchProposer) createBatchForBlocks(blocks []orm.BlockTrace) error {
lastBatch, err := p.orm.GetLatestBatch() lastBatch, err := p.blockBatchOrm.GetLatestBatch()
if err != nil { if err != nil {
// We should not receive sql.ErrNoRows error. The DB should have the batch entry that contains the genesis block. // We should not receive sql.ErrNoRows error. The DB should have the batch entry that contains the genesis block.
return err return err
@@ -385,7 +356,7 @@ func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error {
return err return err
} }
if err := AddBatchInfoToDB(p.orm, batchData); err != nil { if err := orm.AddBatchInfoToDB(p.db, batchData); err != nil {
log.Error("addBatchInfoToDB failed", "BatchHash", batchData.Hash(), "error", err) log.Error("addBatchInfoToDB failed", "BatchHash", batchData.Hash(), "error", err)
return err return err
} }
@@ -394,22 +365,29 @@ func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error {
return nil return nil
} }
func (p *BatchProposer) generateBatchData(parentBatch *types.BlockBatch, blocks []*types.BlockInfo) (*types.BatchData, error) { func (p *BatchProposer) generateBatchData(parentBatch *orm.BlockBatch, blocks []orm.BlockTrace) (*bridgeTypes.BatchData, error) {
var wrappedBlocks []*types.WrappedBlock var wrappedBlocks []*bridgeTypes.WrappedBlock
for _, block := range blocks { for _, block := range blocks {
trs, err := p.orm.GetL2WrappedBlocks(map[string]interface{}{"hash": block.Hash}) trs, err := p.blockTraceOrm.GetL2WrappedBlocks(map[string]interface{}{"hash": block.Hash})
if err != nil || len(trs) != 1 { if err != nil || len(trs) != 1 {
log.Error("Failed to GetBlockTraces", "hash", block.Hash, "err", err) log.Error("Failed to GetBlockTraces", "hash", block.Hash, "err", err)
return nil, err return nil, err
} }
wrappedBlocks = append(wrappedBlocks, trs[0]) wrappedBlocks = append(wrappedBlocks, trs[0])
} }
return types.NewBatchData(parentBatch, wrappedBlocks, p.piCfg), nil
parentBatchInfo := bridgeTypes.BatchInfo{
Index: parentBatch.Index,
Hash: parentBatch.Hash,
StateRoot: parentBatch.StateRoot,
}
return bridgeTypes.NewBatchData(&parentBatchInfo, wrappedBlocks, p.piCfg), nil
} }
func (p *BatchProposer) getBatchDataBufferSize() (size uint64) { func (p *BatchProposer) getBatchDataBufferSize() (size uint64) {
for _, batchData := range p.batchDataBuffer { for _, batchData := range p.batchDataBuffer {
size += bridgeabi.GetBatchCalldataLength(&batchData.Batch) size += bridgeAbi.GetBatchCalldataLength(&batchData.Batch)
} }
return return
} }

View File

@@ -0,0 +1,206 @@
package watcher
import (
"context"
"math"
"strings"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/common"
gethTtypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
bridgeUtils "scroll-tech/bridge/internal/utils"
)
// testBatchProposerProposeBatch exercises BatchProposer.proposeBatch against
// combinations of the gas threshold, tx-count threshold, batch age, and
// commit-calldata size limit, with the ORM and batch creation mocked out.
func testBatchProposerProposeBatch(t *testing.T) {
	db := setupDB(t)
	defer bridgeUtils.CloseDB(db)

	p := &BatchProposer{
		batchGasThreshold:       1000,
		batchTxNumThreshold:     10,
		batchTimeSec:            300,
		commitCalldataSizeLimit: 500,
	}

	// Stub GetL2WrappedBlocks so proposeBatch can fetch per-block payloads
	// without touching the database. The "blockWithLongData" hash returns a
	// payload large enough to trip commitCalldataSizeLimit on its own.
	var blockTrace *orm.BlockTrace
	patchGuard := gomonkey.ApplyMethodFunc(blockTrace, "GetL2WrappedBlocks", func(fields map[string]interface{}) ([]*bridgeTypes.WrappedBlock, error) {
		hash, _ := fields["hash"].(string)
		if hash == "blockWithLongData" {
			longData := strings.Repeat("0", 1000)
			return []*bridgeTypes.WrappedBlock{{
				Transactions: []*gethTtypes.TransactionData{{
					Data: longData,
				}},
			}}, nil
		}
		return []*bridgeTypes.WrappedBlock{{
			Transactions: []*gethTtypes.TransactionData{{
				Data: "short",
			}},
		}}, nil
	})
	defer patchGuard.Reset()

	// Stub out batch creation. NOTE: the replacement func's signature must
	// match the real createBatchForBlocks(blocks []orm.BlockTrace) error, or
	// gomonkey will fail to patch the method; the previous mock still used
	// the outdated []*types.BlockInfo parameter type.
	patchGuard.ApplyPrivateMethod(p, "createBatchForBlocks", func(*BatchProposer, []orm.BlockTrace) error {
		return nil
	})

	block1 := orm.BlockTrace{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix()) - 200}
	block2 := orm.BlockTrace{Number: 2, GasUsed: 200, TxNum: 2, BlockTimestamp: uint64(time.Now().Unix())}
	block3 := orm.BlockTrace{Number: 3, GasUsed: 300, TxNum: 11, BlockTimestamp: uint64(time.Now().Unix())}
	block4 := orm.BlockTrace{Number: 4, GasUsed: 1001, TxNum: 3, BlockTimestamp: uint64(time.Now().Unix())}
	// Older than batchTimeSec (300s), so it should force a batch by age alone.
	blockOutdated := orm.BlockTrace{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Add(-400 * time.Second).Unix())}
	blockWithLongData := orm.BlockTrace{Hash: "blockWithLongData", Number: 5, GasUsed: 500, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix())}

	testCases := []struct {
		description string
		blocks      []orm.BlockTrace
		expectedRes bool // whether proposeBatch should report a batch was proposed
	}{
		{"Empty block list", []orm.BlockTrace{}, false},
		{"Single block exceeding gas threshold", []orm.BlockTrace{block4}, true},
		{"Single block exceeding transaction number threshold", []orm.BlockTrace{block3}, true},
		{"Multiple blocks meeting thresholds", []orm.BlockTrace{block1, block2, block3}, true},
		{"Multiple blocks not meeting thresholds", []orm.BlockTrace{block1, block2}, false},
		{"Outdated and valid block", []orm.BlockTrace{blockOutdated, block2}, true},
		{"Single block with long data", []orm.BlockTrace{blockWithLongData}, true},
	}
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			assert.Equal(t, tc.expectedRes, p.proposeBatch(tc.blocks), "Failed on: %s", tc.description)
		})
	}
}
// testBatchProposerBatchGeneration is an integration test: it inserts a
// wrapped block, lets the L2 watcher fetch events, then runs TryProposeBatch
// and checks that the block was consumed into a new batch in the database.
func testBatchProposerBatchGeneration(t *testing.T) {
	db := setupDB(t)
	subCtx, cancel := context.WithCancel(context.Background())
	defer func() {
		bridgeUtils.CloseDB(db)
		cancel()
	}()
	blockTraceOrm := orm.NewBlockTrace(db)
	// Insert traces into db.
	assert.NoError(t, blockTraceOrm.InsertWrappedBlocks([]*bridgeTypes.WrappedBlock{wrappedBlock1}))
	l2cfg := cfg.L2Config
	wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
	// Start the background event-fetch loop; cancelled via subCtx on exit.
	loopToFetchEvent(subCtx, wc)
	blockBatchOrm := orm.NewBlockBatch(db)
	// The latest batch here acts as the parent for the batch we build below
	// (presumably the genesis batch seeded by setup — verify against fixtures).
	batch, err := blockBatchOrm.GetLatestBatch()
	assert.NoError(t, err)
	// Create a new batch.
	batchData := bridgeTypes.NewBatchData(&bridgeTypes.BatchInfo{
		Index:     0,
		Hash:      batch.Hash,
		StateRoot: batch.StateRoot,
	}, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)
	relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
	assert.NoError(t, err)
	// Thresholds are set high enough that the time threshold (1s) is what
	// triggers batching of the single inserted block.
	proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		ProofGenerationFreq: 1,
		BatchGasThreshold:   3000000,
		BatchTxNumThreshold: 135,
		BatchTimeSec:        1,
		BatchBlocksLimit:    100,
	}, relayer, db)
	proposer.TryProposeBatch()
	// After proposing, no unbatched L2 blocks should remain...
	infos, err := blockTraceOrm.GetUnbatchedL2Blocks(map[string]interface{}{}, []string{"number ASC"}, 100)
	assert.NoError(t, err)
	assert.Equal(t, 0, len(infos))
	// ...and exactly one batch with the expected hash should exist.
	batches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchData.Hash().Hex()}, nil, 1)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(batches))
}
// testBatchProposerGracefulRestart verifies that constructing a new
// BatchProposer recovers pending (not yet committed) batches from the
// database, draining them from the pending set while keeping their rows.
func testBatchProposerGracefulRestart(t *testing.T) {
	db := setupDB(t)
	defer bridgeUtils.CloseDB(db)

	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
	assert.NoError(t, err)

	traceOrm := orm.NewBlockTrace(db)
	// Seed the trace table with the block that will land in the second batch.
	assert.NoError(t, traceOrm.InsertWrappedBlocks([]*bridgeTypes.WrappedBlock{wrappedBlock2}))

	// Build two chained batches: the first parented on the zero hash, the
	// second parented on the first.
	genesisParent := &bridgeTypes.BatchInfo{
		Index:     0,
		Hash:      common.Hash{}.String(),
		StateRoot: common.Hash{}.String(),
	}
	firstBatch := bridgeTypes.NewBatchData(genesisParent, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)
	secondParent := &bridgeTypes.BatchInfo{
		Index:     firstBatch.Batch.BatchIndex,
		Hash:      firstBatch.Hash().Hex(),
		StateRoot: firstBatch.Batch.NewStateRoot.String(),
	}
	secondBatch := bridgeTypes.NewBatchData(secondParent, []*bridgeTypes.WrappedBlock{wrappedBlock2}, nil)

	batchOrm := orm.NewBlockBatch(db)
	// Persist both batches and link their blocks inside one transaction.
	err = db.Transaction(func(tx *gorm.DB) error {
		if _, txErr := batchOrm.InsertBlockBatchByBatchData(tx, firstBatch); txErr != nil {
			return txErr
		}
		if _, txErr := batchOrm.InsertBlockBatchByBatchData(tx, secondBatch); txErr != nil {
			return txErr
		}
		if txErr := traceOrm.UpdateBatchHashForL2Blocks(tx, []uint64{firstBatch.Batch.Blocks[0].BlockNumber}, firstBatch.Hash().Hex()); txErr != nil {
			return txErr
		}
		return traceOrm.UpdateBatchHashForL2Blocks(tx, []uint64{secondBatch.Batch.Blocks[0].BlockNumber}, secondBatch.Hash().Hex())
	})
	assert.NoError(t, err)

	// Finalize the first batch so only the second remains pending.
	err = batchOrm.UpdateRollupStatus(context.Background(), firstBatch.Hash().Hex(), types.RollupFinalized)
	assert.NoError(t, err)

	pending, err := batchOrm.GetBlockBatchesHashByRollupStatus(types.RollupPending, math.MaxInt32)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(pending))
	assert.Equal(t, secondBatch.Hash().Hex(), pending[0])

	// Constructing the proposer runs p.recoverBatchDataBuffer(), which should
	// pick up and drain the pending batch.
	_ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		ProofGenerationFreq: 1,
		BatchGasThreshold:   3000000,
		BatchTxNumThreshold: 135,
		BatchTimeSec:        1,
		BatchBlocksLimit:    100,
	}, l2Relayer, db)

	pending, err = batchOrm.GetBlockBatchesHashByRollupStatus(types.RollupPending, math.MaxInt32)
	assert.NoError(t, err)
	assert.Equal(t, 0, len(pending))

	// The recovered batch row itself must still exist.
	stored, err := batchOrm.GetBlockBatches(map[string]interface{}{"hash": secondBatch.Hash().Hex()}, nil, 1)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(stored))
}

View File

@@ -7,28 +7,27 @@ import (
geth "github.com/scroll-tech/go-ethereum" geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics" gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/database" bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
bridge_abi "scroll-tech/bridge/abi" "scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/utils"
) )
var ( var (
bridgeL1MsgsSyncHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry) bridgeL1MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
bridgeL1MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL1MsgsSentEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry) bridgeL1MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRelayedEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/relayed/events/total", metrics.ScrollRegistry) bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRollupEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
) )
type rollupEvent struct { type rollupEvent struct {
@@ -39,9 +38,12 @@ type rollupEvent struct {
// L1WatcherClient will listen for smart contract events from Eth L1. // L1WatcherClient will listen for smart contract events from Eth L1.
type L1WatcherClient struct { type L1WatcherClient struct {
ctx context.Context ctx context.Context
client *ethclient.Client client *ethclient.Client
db database.OrmFactory l1MessageOrm *orm.L1Message
l2MessageOrm *orm.L2Message
l1BlockOrm *orm.L1Block
l1BatchOrm *orm.BlockBatch
// The number of new blocks to wait for a block to be confirmed // The number of new blocks to wait for a block to be confirmed
confirmations rpc.BlockNumber confirmations rpc.BlockNumber
@@ -62,8 +64,9 @@ type L1WatcherClient struct {
} }
// NewL1WatcherClient returns a new instance of L1WatcherClient. // NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *L1WatcherClient { func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB) *L1WatcherClient {
savedHeight, err := db.GetLayer1LatestWatchedHeight() l1MessageOrm := orm.NewL1Message(db)
savedHeight, err := l1MessageOrm.GetLayer1LatestWatchedHeight()
if err != nil { if err != nil {
log.Warn("Failed to fetch height from db", "err", err) log.Warn("Failed to fetch height from db", "err", err)
savedHeight = 0 savedHeight = 0
@@ -72,7 +75,8 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
savedHeight = int64(startHeight) savedHeight = int64(startHeight)
} }
savedL1BlockHeight, err := db.GetLatestL1BlockHeight() l1BlockOrm := orm.NewL1Block(db)
savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight()
if err != nil { if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err) log.Warn("Failed to fetch latest L1 block height from db", "err", err)
savedL1BlockHeight = 0 savedL1BlockHeight = 0
@@ -84,17 +88,20 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
return &L1WatcherClient{ return &L1WatcherClient{
ctx: ctx, ctx: ctx,
client: client, client: client,
db: db, l1MessageOrm: l1MessageOrm,
l1BlockOrm: l1BlockOrm,
l1BatchOrm: orm.NewBlockBatch(db),
l2MessageOrm: orm.NewL2Message(db),
confirmations: confirmations, confirmations: confirmations,
messengerAddress: messengerAddress, messengerAddress: messengerAddress,
messengerABI: bridge_abi.L1ScrollMessengerABI, messengerABI: bridgeAbi.L1ScrollMessengerABI,
messageQueueAddress: messageQueueAddress, messageQueueAddress: messageQueueAddress,
messageQueueABI: bridge_abi.L1MessageQueueABI, messageQueueABI: bridgeAbi.L1MessageQueueABI,
scrollChainAddress: scrollChainAddress, scrollChainAddress: scrollChainAddress,
scrollChainABI: bridge_abi.ScrollChainABI, scrollChainABI: bridgeAbi.ScrollChainABI,
processedMsgHeight: uint64(savedHeight), processedMsgHeight: uint64(savedHeight),
processedBlockHeight: savedL1BlockHeight, processedBlockHeight: savedL1BlockHeight,
@@ -130,11 +137,11 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
toBlock = fromBlock + contractEventsBlocksFetchLimit - 1 toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
} }
var blocks []*types.L1BlockInfo var blocks []orm.L1Block
var err error var err error
height := fromBlock height := fromBlock
for ; height <= toBlock; height++ { for ; height <= toBlock; height++ {
var block *geth_types.Header var block *gethTypes.Header
block, err = w.client.HeaderByNumber(w.ctx, big.NewInt(height)) block, err = w.client.HeaderByNumber(w.ctx, big.NewInt(height))
if err != nil { if err != nil {
log.Warn("Failed to get block", "height", height, "err", err) log.Warn("Failed to get block", "height", height, "err", err)
@@ -144,7 +151,7 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
if block.BaseFee != nil { if block.BaseFee != nil {
baseFee = block.BaseFee.Uint64() baseFee = block.BaseFee.Uint64()
} }
blocks = append(blocks, &types.L1BlockInfo{ blocks = append(blocks, orm.L1Block{
Number: uint64(height), Number: uint64(height),
Hash: block.Hash().String(), Hash: block.Hash().String(),
BaseFee: baseFee, BaseFee: baseFee,
@@ -158,7 +165,7 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
toBlock = height - 1 toBlock = height - 1
// insert succeed blocks // insert succeed blocks
err = w.db.InsertL1Blocks(w.ctx, blocks) err = w.l1BlockOrm.InsertL1Blocks(w.ctx, blocks)
if err != nil { if err != nil {
log.Warn("Failed to insert L1 block to db", "fromBlock", fromBlock, "toBlock", toBlock, "err", err) log.Warn("Failed to insert L1 block to db", "fromBlock", fromBlock, "toBlock", toBlock, "err", err)
return err return err
@@ -202,11 +209,11 @@ func (w *L1WatcherClient) FetchContractEvent() error {
Topics: make([][]common.Hash, 1), Topics: make([][]common.Hash, 1),
} }
query.Topics[0] = make([]common.Hash, 5) query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = bridge_abi.L1QueueTransactionEventSignature query.Topics[0][0] = bridgeAbi.L1QueueTransactionEventSignature
query.Topics[0][1] = bridge_abi.L1RelayedMessageEventSignature query.Topics[0][1] = bridgeAbi.L1RelayedMessageEventSignature
query.Topics[0][2] = bridge_abi.L1FailedRelayedMessageEventSignature query.Topics[0][2] = bridgeAbi.L1FailedRelayedMessageEventSignature
query.Topics[0][3] = bridge_abi.L1CommitBatchEventSignature query.Topics[0][3] = bridgeAbi.L1CommitBatchEventSignature
query.Topics[0][4] = bridge_abi.L1FinalizeBatchEventSignature query.Topics[0][4] = bridgeAbi.L1FinalizeBatchEventSignature
logs, err := w.client.FilterLogs(w.ctx, query) logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil { if err != nil {
@@ -238,7 +245,7 @@ func (w *L1WatcherClient) FetchContractEvent() error {
for _, event := range rollupEvents { for _, event := range rollupEvents {
batchHashes = append(batchHashes, event.batchHash.String()) batchHashes = append(batchHashes, event.batchHash.String())
} }
statuses, err := w.db.GetRollupStatusByHashList(batchHashes) statuses, err := w.l1BatchOrm.GetRollupStatusByHashList(batchHashes)
if err != nil { if err != nil {
log.Error("Failed to GetRollupStatusByHashList", "err", err) log.Error("Failed to GetRollupStatusByHashList", "err", err)
return err return err
@@ -254,9 +261,9 @@ func (w *L1WatcherClient) FetchContractEvent() error {
// only update when db status is before event status // only update when db status is before event status
if event.status > status { if event.status > status {
if event.status == types.RollupFinalized { if event.status == types.RollupFinalized {
err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status) err = w.l1BatchOrm.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
} else if event.status == types.RollupCommitted { } else if event.status == types.RollupCommitted {
err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status) err = w.l1BatchOrm.UpdateCommitTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
} }
if err != nil { if err != nil {
log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err) log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
@@ -274,13 +281,13 @@ func (w *L1WatcherClient) FetchContractEvent() error {
} else { } else {
msgStatus = types.MsgFailed msgStatus = types.MsgFailed
} }
if err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil { if err = w.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err) log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err return err
} }
} }
if err = w.db.SaveL1Messages(w.ctx, sentMessageEvents); err != nil { if err = w.l1MessageOrm.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
return err return err
} }
@@ -291,17 +298,16 @@ func (w *L1WatcherClient) FetchContractEvent() error {
return nil return nil
} }
func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) { func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
// Need use contract abi to parse event Log // Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up // Can only be tested after we have our contracts set up
var l1Messages []*orm.L1Message
var l1Messages []*types.L1Message
var relayedMessages []relayedMessage var relayedMessages []relayedMessage
var rollupEvents []rollupEvent var rollupEvents []rollupEvent
for _, vLog := range logs { for _, vLog := range logs {
switch vLog.Topics[0] { switch vLog.Topics[0] {
case bridge_abi.L1QueueTransactionEventSignature: case bridgeAbi.L1QueueTransactionEventSignature:
event := bridge_abi.L1QueueTransactionEvent{} event := bridgeAbi.L1QueueTransactionEvent{}
err := utils.UnpackLog(w.messageQueueABI, &event, "QueueTransaction", vLog) err := utils.UnpackLog(w.messageQueueABI, &event, "QueueTransaction", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer1 QueueTransaction event", "err", err) log.Warn("Failed to unpack layer1 QueueTransaction event", "err", err)
@@ -310,7 +316,7 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
msgHash := common.BytesToHash(crypto.Keccak256(event.Data)) msgHash := common.BytesToHash(crypto.Keccak256(event.Data))
l1Messages = append(l1Messages, &types.L1Message{ l1Messages = append(l1Messages, &orm.L1Message{
QueueIndex: event.QueueIndex.Uint64(), QueueIndex: event.QueueIndex.Uint64(),
MsgHash: msgHash.String(), MsgHash: msgHash.String(),
Height: vLog.BlockNumber, Height: vLog.BlockNumber,
@@ -321,8 +327,8 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
GasLimit: event.GasLimit.Uint64(), GasLimit: event.GasLimit.Uint64(),
Layer1Hash: vLog.TxHash.Hex(), Layer1Hash: vLog.TxHash.Hex(),
}) })
case bridge_abi.L1RelayedMessageEventSignature: case bridgeAbi.L1RelayedMessageEventSignature:
event := bridge_abi.L1RelayedMessageEvent{} event := bridgeAbi.L1RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog) err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer1 RelayedMessage event", "err", err) log.Warn("Failed to unpack layer1 RelayedMessage event", "err", err)
@@ -334,8 +340,8 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
txHash: vLog.TxHash, txHash: vLog.TxHash,
isSuccessful: true, isSuccessful: true,
}) })
case bridge_abi.L1FailedRelayedMessageEventSignature: case bridgeAbi.L1FailedRelayedMessageEventSignature:
event := bridge_abi.L1FailedRelayedMessageEvent{} event := bridgeAbi.L1FailedRelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog) err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer1 FailedRelayedMessage event", "err", err) log.Warn("Failed to unpack layer1 FailedRelayedMessage event", "err", err)
@@ -347,8 +353,8 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
txHash: vLog.TxHash, txHash: vLog.TxHash,
isSuccessful: false, isSuccessful: false,
}) })
case bridge_abi.L1CommitBatchEventSignature: case bridgeAbi.L1CommitBatchEventSignature:
event := bridge_abi.L1CommitBatchEvent{} event := bridgeAbi.L1CommitBatchEvent{}
err := utils.UnpackLog(w.scrollChainABI, &event, "CommitBatch", vLog) err := utils.UnpackLog(w.scrollChainABI, &event, "CommitBatch", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer1 CommitBatch event", "err", err) log.Warn("Failed to unpack layer1 CommitBatch event", "err", err)
@@ -360,8 +366,8 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
txHash: vLog.TxHash, txHash: vLog.TxHash,
status: types.RollupCommitted, status: types.RollupCommitted,
}) })
case bridge_abi.L1FinalizeBatchEventSignature: case bridgeAbi.L1FinalizeBatchEventSignature:
event := bridge_abi.L1FinalizeBatchEvent{} event := bridgeAbi.L1FinalizeBatchEvent{}
err := utils.UnpackLog(w.scrollChainABI, &event, "FinalizeBatch", vLog) err := utils.UnpackLog(w.scrollChainABI, &event, "FinalizeBatch", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err) log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err)

View File

@@ -11,31 +11,24 @@ import (
"github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/core/types"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey" "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
commonTypes "scroll-tech/common/types" commonTypes "scroll-tech/common/types"
"scroll-tech/database" bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/database/migrate" "scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
) )
func setupL1Watcher(t *testing.T) (*L1WatcherClient, database.OrmFactory) { func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
db, err := database.NewOrmFactory(cfg.DBConfig) db := setupDB(t)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
client, err := ethclient.Dial(base.L1gethImg.Endpoint()) client, err := ethclient.Dial(base.L1gethImg.Endpoint())
assert.NoError(t, err) assert.NoError(t, err)
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db) watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
assert.NoError(t, watcher.FetchContractEvent()) assert.NoError(t, watcher.FetchContractEvent())
return watcher, db return watcher, db
@@ -43,13 +36,13 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, database.OrmFactory) {
func testFetchContractEvent(t *testing.T) { func testFetchContractEvent(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
assert.NoError(t, watcher.FetchContractEvent()) assert.NoError(t, watcher.FetchContractEvent())
} }
func testL1WatcherClientFetchBlockHeader(t *testing.T) { func testL1WatcherClientFetchBlockHeader(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
convey.Convey("test toBlock < fromBlock", t, func() { convey.Convey("test toBlock < fromBlock", t, func() {
var blockHeight uint64 var blockHeight uint64
if watcher.ProcessedBlockHeight() <= 0 { if watcher.ProcessedBlockHeight() <= 0 {
@@ -73,6 +66,7 @@ func testL1WatcherClientFetchBlockHeader(t *testing.T) {
assert.Error(t, err) assert.Error(t, err)
}) })
var l1BlockOrm *orm.L1Block
convey.Convey("insert l1 block error", t, func() { convey.Convey("insert l1 block error", t, func() {
var c *ethclient.Client var c *ethclient.Client
patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) { patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
@@ -86,7 +80,7 @@ func testL1WatcherClientFetchBlockHeader(t *testing.T) {
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
patchGuard.ApplyMethodFunc(db, "InsertL1Blocks", func(ctx context.Context, blocks []*commonTypes.L1BlockInfo) error { patchGuard.ApplyMethodFunc(l1BlockOrm, "InsertL1Blocks", func(ctx context.Context, blocks []orm.L1Block) error {
return errors.New("insert failed") return errors.New("insert failed")
}) })
@@ -108,7 +102,7 @@ func testL1WatcherClientFetchBlockHeader(t *testing.T) {
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
patchGuard.ApplyMethodFunc(db, "InsertL1Blocks", func(ctx context.Context, blocks []*commonTypes.L1BlockInfo) error { patchGuard.ApplyMethodFunc(l1BlockOrm, "InsertL1Blocks", func(ctx context.Context, blocks []orm.L1Block) error {
return nil return nil
}) })
@@ -120,7 +114,7 @@ func testL1WatcherClientFetchBlockHeader(t *testing.T) {
func testL1WatcherClientFetchContractEvent(t *testing.T) { func testL1WatcherClientFetchContractEvent(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
watcher.SetConfirmations(rpc.SafeBlockNumber) watcher.SetConfirmations(rpc.SafeBlockNumber)
convey.Convey("get latest confirmed block number failure", t, func() { convey.Convey("get latest confirmed block number failure", t, func() {
@@ -165,14 +159,14 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
convey.Convey("parse bridge event logs failure", t, func() { convey.Convey("parse bridge event logs failure", t, func() {
targetErr := errors.New("parse log failure") targetErr := errors.New("parse log failure")
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []geth_types.Log) ([]*commonTypes.L1Message, []relayedMessage, []rollupEvent, error) { patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
return nil, nil, nil, targetErr return nil, nil, nil, targetErr
}) })
err := watcher.FetchContractEvent() err := watcher.FetchContractEvent()
assert.Equal(t, err.Error(), targetErr.Error()) assert.Equal(t, err.Error(), targetErr.Error())
}) })
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []geth_types.Log) ([]*commonTypes.L1Message, []relayedMessage, []rollupEvent, error) { patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
rollupEvents := []rollupEvent{ rollupEvents := []rollupEvent{
{ {
batchHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), batchHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
@@ -201,9 +195,10 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
return nil, relayedMessageEvents, rollupEvents, nil return nil, relayedMessageEvents, rollupEvents, nil
}) })
var blockBatchOrm *orm.BlockBatch
convey.Convey("db get rollup status by hash list failure", t, func() { convey.Convey("db get rollup status by hash list failure", t, func() {
targetErr := errors.New("get db failure") targetErr := errors.New("get db failure")
patchGuard.ApplyMethodFunc(db, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) { patchGuard.ApplyMethodFunc(blockBatchOrm, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
return nil, targetErr return nil, targetErr
}) })
err := watcher.FetchContractEvent() err := watcher.FetchContractEvent()
@@ -211,7 +206,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
}) })
convey.Convey("rollup status mismatch batch hashes length", t, func() { convey.Convey("rollup status mismatch batch hashes length", t, func() {
patchGuard.ApplyMethodFunc(db, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) { patchGuard.ApplyMethodFunc(blockBatchOrm, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
s := []commonTypes.RollupStatus{ s := []commonTypes.RollupStatus{
commonTypes.RollupFinalized, commonTypes.RollupFinalized,
} }
@@ -221,7 +216,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
}) })
patchGuard.ApplyMethodFunc(db, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) { patchGuard.ApplyMethodFunc(blockBatchOrm, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
s := []commonTypes.RollupStatus{ s := []commonTypes.RollupStatus{
commonTypes.RollupPending, commonTypes.RollupPending,
commonTypes.RollupCommitting, commonTypes.RollupCommitting,
@@ -231,53 +226,55 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
convey.Convey("db update RollupFinalized status failure", t, func() { convey.Convey("db update RollupFinalized status failure", t, func() {
targetErr := errors.New("UpdateFinalizeTxHashAndRollupStatus RollupFinalized failure") targetErr := errors.New("UpdateFinalizeTxHashAndRollupStatus RollupFinalized failure")
patchGuard.ApplyMethodFunc(db, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error { patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
return targetErr return targetErr
}) })
err := watcher.FetchContractEvent() err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error()) assert.Equal(t, targetErr.Error(), err.Error())
}) })
patchGuard.ApplyMethodFunc(db, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error { patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
return nil return nil
}) })
convey.Convey("db update RollupCommitted status failure", t, func() { convey.Convey("db update RollupCommitted status failure", t, func() {
targetErr := errors.New("UpdateCommitTxHashAndRollupStatus RollupCommitted failure") targetErr := errors.New("UpdateCommitTxHashAndRollupStatus RollupCommitted failure")
patchGuard.ApplyMethodFunc(db, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error { patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
return targetErr return targetErr
}) })
err := watcher.FetchContractEvent() err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error()) assert.Equal(t, targetErr.Error(), err.Error())
}) })
patchGuard.ApplyMethodFunc(db, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error { patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
return nil return nil
}) })
var l2MessageOrm *orm.L2Message
convey.Convey("db update layer2 status and layer1 hash failure", t, func() { convey.Convey("db update layer2 status and layer1 hash failure", t, func() {
targetErr := errors.New("UpdateLayer2StatusAndLayer1Hash failure") targetErr := errors.New("UpdateLayer2StatusAndLayer1Hash failure")
patchGuard.ApplyMethodFunc(db, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error { patchGuard.ApplyMethodFunc(l2MessageOrm, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
return targetErr return targetErr
}) })
err := watcher.FetchContractEvent() err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error()) assert.Equal(t, targetErr.Error(), err.Error())
}) })
patchGuard.ApplyMethodFunc(db, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error { patchGuard.ApplyMethodFunc(l2MessageOrm, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
return nil return nil
}) })
var l1MessageOrm *orm.L1Message
convey.Convey("db save l1 message failure", t, func() { convey.Convey("db save l1 message failure", t, func() {
targetErr := errors.New("SaveL1Messages failure") targetErr := errors.New("SaveL1Messages failure")
patchGuard.ApplyMethodFunc(db, "SaveL1Messages", func(context.Context, []*commonTypes.L1Message) error { patchGuard.ApplyMethodFunc(l1MessageOrm, "SaveL1Messages", func(context.Context, []*orm.L1Message) error {
return targetErr return targetErr
}) })
err := watcher.FetchContractEvent() err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error()) assert.Equal(t, targetErr.Error(), err.Error())
}) })
patchGuard.ApplyMethodFunc(db, "SaveL1Messages", func(context.Context, []*commonTypes.L1Message) error { patchGuard.ApplyMethodFunc(l1MessageOrm, "SaveL1Messages", func(context.Context, []*orm.L1Message) error {
return nil return nil
}) })
@@ -289,11 +286,11 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) { func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
logs := []geth_types.Log{ logs := []types.Log{
{ {
Topics: []common.Hash{bridge_abi.L1QueueTransactionEventSignature}, Topics: []common.Hash{bridgeAbi.L1QueueTransactionEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -315,7 +312,7 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
convey.Convey("L1QueueTransactionEventSignature success", t, func() { convey.Convey("L1QueueTransactionEventSignature success", t, func() {
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1QueueTransactionEvent) tmpOut := out.(*bridgeAbi.L1QueueTransactionEvent)
tmpOut.QueueIndex = big.NewInt(100) tmpOut.QueueIndex = big.NewInt(100)
tmpOut.Data = []byte("test data") tmpOut.Data = []byte("test data")
tmpOut.Sender = common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30") tmpOut.Sender = common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
@@ -337,11 +334,11 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
logs := []geth_types.Log{ logs := []types.Log{
{ {
Topics: []common.Hash{bridge_abi.L1RelayedMessageEventSignature}, Topics: []common.Hash{bridgeAbi.L1RelayedMessageEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -364,7 +361,7 @@ func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
convey.Convey("L1RelayedMessageEventSignature success", t, func() { convey.Convey("L1RelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1RelayedMessageEvent) tmpOut := out.(*bridgeAbi.L1RelayedMessageEvent)
tmpOut.MessageHash = msgHash tmpOut.MessageHash = msgHash
return nil return nil
}) })
@@ -381,11 +378,10 @@ func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
logs := []types.Log{
logs := []geth_types.Log{
{ {
Topics: []common.Hash{bridge_abi.L1FailedRelayedMessageEventSignature}, Topics: []common.Hash{bridgeAbi.L1FailedRelayedMessageEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -408,7 +404,7 @@ func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T)
convey.Convey("L1FailedRelayedMessageEventSignature success", t, func() { convey.Convey("L1FailedRelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1FailedRelayedMessageEvent) tmpOut := out.(*bridgeAbi.L1FailedRelayedMessageEvent)
tmpOut.MessageHash = msgHash tmpOut.MessageHash = msgHash
return nil return nil
}) })
@@ -425,11 +421,10 @@ func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T)
func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) { func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
logs := []types.Log{
logs := []geth_types.Log{
{ {
Topics: []common.Hash{bridge_abi.L1CommitBatchEventSignature}, Topics: []common.Hash{bridgeAbi.L1CommitBatchEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -452,7 +447,7 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
convey.Convey("L1CommitBatchEventSignature success", t, func() { convey.Convey("L1CommitBatchEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1CommitBatchEvent) tmpOut := out.(*bridgeAbi.L1CommitBatchEvent)
tmpOut.BatchHash = msgHash tmpOut.BatchHash = msgHash
return nil return nil
}) })
@@ -470,11 +465,10 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) { func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
logs := []types.Log{
logs := []geth_types.Log{
{ {
Topics: []common.Hash{bridge_abi.L1FinalizeBatchEventSignature}, Topics: []common.Hash{bridgeAbi.L1FinalizeBatchEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -497,7 +491,7 @@ func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
convey.Convey("L1FinalizeBatchEventSignature success", t, func() { convey.Convey("L1FinalizeBatchEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1FinalizeBatchEvent) tmpOut := out.(*bridgeAbi.L1FinalizeBatchEvent)
tmpOut.BatchHash = msgHash tmpOut.BatchHash = msgHash
return nil return nil
}) })

View File

@@ -10,31 +10,31 @@ import (
"github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil" "github.com/scroll-tech/go-ethereum/common/hexutil"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event" "github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics" gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/database" bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
bridge_abi "scroll-tech/bridge/abi" bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/utils" "scroll-tech/bridge/internal/utils"
) )
// Metrics // Metrics
var ( var (
bridgeL2MsgsSyncHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry) bridgeL2MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry) bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry) bridgeL2BlocksFetchedGapGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL2MsgsSentEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry) bridgeL2MsgsAppendEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry)
bridgeL2MsgsAppendEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry) bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
bridgeL2MsgsRelayedEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
) )
// L2WatcherClient provide APIs which support others to subscribe to various event from l2geth // L2WatcherClient provide APIs which support others to subscribe to various event from l2geth
@@ -44,7 +44,11 @@ type L2WatcherClient struct {
*ethclient.Client *ethclient.Client
orm database.OrmFactory db *gorm.DB
blockBatchOrm *orm.BlockBatch
blockTraceOrm *orm.BlockTrace
l1MessageOrm *orm.L1Message
l2MessageOrm *orm.L2Message
confirmations rpc.BlockNumber confirmations rpc.BlockNumber
@@ -62,25 +66,31 @@ type L2WatcherClient struct {
} }
// NewL2WatcherClient take a l2geth instance to generate a l2watcherclient instance // NewL2WatcherClient take a l2geth instance to generate a l2watcherclient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, orm database.OrmFactory) *L2WatcherClient { func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
savedHeight, err := orm.GetLayer2LatestWatchedHeight() l2MessageOrm := orm.NewL2Message(db)
savedHeight, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
if err != nil { if err != nil {
log.Warn("fetch height from db failed", "err", err) log.Warn("fetch height from db failed", "err", err)
savedHeight = 0 savedHeight = 0
} }
w := L2WatcherClient{ w := L2WatcherClient{
ctx: ctx, ctx: ctx,
Client: client, db: db,
orm: orm, Client: client,
blockBatchOrm: orm.NewBlockBatch(db),
blockTraceOrm: orm.NewBlockTrace(db),
l1MessageOrm: orm.NewL1Message(db),
l2MessageOrm: l2MessageOrm,
processedMsgHeight: uint64(savedHeight), processedMsgHeight: uint64(savedHeight),
confirmations: confirmations, confirmations: confirmations,
messengerAddress: messengerAddress, messengerAddress: messengerAddress,
messengerABI: bridge_abi.L2ScrollMessengerABI, messengerABI: bridgeAbi.L2ScrollMessengerABI,
messageQueueAddress: messageQueueAddress, messageQueueAddress: messageQueueAddress,
messageQueueABI: bridge_abi.L2MessageQueueABI, messageQueueABI: bridgeAbi.L2MessageQueueABI,
withdrawTrieRootSlot: withdrawTrieRootSlot, withdrawTrieRootSlot: withdrawTrieRootSlot,
stopped: 0, stopped: 0,
@@ -95,7 +105,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
} }
func (w *L2WatcherClient) initializeGenesis() error { func (w *L2WatcherClient) initializeGenesis() error {
if count, err := w.orm.GetBatchCount(); err != nil { if count, err := w.blockBatchOrm.GetBatchCount(); err != nil {
return fmt.Errorf("failed to get batch count: %v", err) return fmt.Errorf("failed to get batch count: %v", err)
} else if count > 0 { } else if count > 0 {
log.Info("genesis already imported") log.Info("genesis already imported")
@@ -109,21 +119,25 @@ func (w *L2WatcherClient) initializeGenesis() error {
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String()) log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
blockTrace := &types.WrappedBlock{Header: genesis, Transactions: nil, WithdrawTrieRoot: common.Hash{}} blockTrace := &bridgeTypes.WrappedBlock{
batchData := types.NewGenesisBatchData(blockTrace) Header: genesis,
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
}
batchData := bridgeTypes.NewGenesisBatchData(blockTrace)
if err = AddBatchInfoToDB(w.orm, batchData); err != nil { if err = orm.AddBatchInfoToDB(w.db, batchData); err != nil {
log.Error("failed to add batch info to DB", "BatchHash", batchData.Hash(), "error", err) log.Error("failed to add batch info to DB", "BatchHash", batchData.Hash(), "error", err)
return err return err
} }
batchHash := batchData.Hash().Hex() batchHash := batchData.Hash().Hex()
if err = w.orm.UpdateProvingStatus(batchHash, types.ProvingTaskProved); err != nil { if err = w.blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskProved); err != nil {
return fmt.Errorf("failed to update genesis batch proving status: %v", err) return fmt.Errorf("failed to update genesis batch proving status: %v", err)
} }
if err = w.orm.UpdateRollupStatus(w.ctx, batchHash, types.RollupFinalized); err != nil { if err = w.blockBatchOrm.UpdateRollupStatus(w.ctx, batchHash, types.RollupFinalized); err != nil {
return fmt.Errorf("failed to update genesis batch rollup status: %v", err) return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
} }
@@ -139,7 +153,7 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(ctx context.Context, bloc
// Get newest block in DB. must have blocks at that time. // Get newest block in DB. must have blocks at that time.
// Don't use "block_trace" table "trace" column's BlockTrace.Number, // Don't use "block_trace" table "trace" column's BlockTrace.Number,
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped // because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
heightInDB, err := w.orm.GetL2BlocksLatestHeight() heightInDB, err := w.blockTraceOrm.GetL2BlocksLatestHeight()
if err != nil { if err != nil {
log.Error("failed to GetL2BlocksLatestHeight", "err", err) log.Error("failed to GetL2BlocksLatestHeight", "err", err)
return return
@@ -168,11 +182,11 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(ctx context.Context, bloc
} }
} }
func txsToTxsData(txs geth_types.Transactions) []*geth_types.TransactionData { func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
txsData := make([]*geth_types.TransactionData, len(txs)) txsData := make([]*gethTypes.TransactionData, len(txs))
for i, tx := range txs { for i, tx := range txs {
v, r, s := tx.RawSignatureValues() v, r, s := tx.RawSignatureValues()
txsData[i] = &geth_types.TransactionData{ txsData[i] = &gethTypes.TransactionData{
Type: tx.Type(), Type: tx.Type(),
TxHash: tx.Hash().String(), TxHash: tx.Hash().String(),
Nonce: tx.Nonce(), Nonce: tx.Nonce(),
@@ -192,8 +206,7 @@ func txsToTxsData(txs geth_types.Transactions) []*geth_types.TransactionData {
} }
func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error { func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var blocks []*types.WrappedBlock var blocks []*bridgeTypes.WrappedBlock
for number := from; number <= to; number++ { for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number) log.Debug("retrieving block", "height", number)
block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number))) block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number)))
@@ -208,7 +221,7 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number) return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number)
} }
blocks = append(blocks, &types.WrappedBlock{ blocks = append(blocks, &bridgeTypes.WrappedBlock{
Header: block.Header(), Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()), Transactions: txsToTxsData(block.Transactions()),
WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot), WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot),
@@ -216,7 +229,7 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
} }
if len(blocks) > 0 { if len(blocks) > 0 {
if err := w.orm.InsertWrappedBlocks(blocks); err != nil { if err := w.blockTraceOrm.InsertWrappedBlocks(blocks); err != nil {
return fmt.Errorf("failed to batch insert BlockTraces: %v", err) return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
} }
} }
@@ -257,10 +270,10 @@ func (w *L2WatcherClient) FetchContractEvent() {
Topics: make([][]common.Hash, 1), Topics: make([][]common.Hash, 1),
} }
query.Topics[0] = make([]common.Hash, 4) query.Topics[0] = make([]common.Hash, 4)
query.Topics[0][0] = bridge_abi.L2SentMessageEventSignature query.Topics[0][0] = bridgeAbi.L2SentMessageEventSignature
query.Topics[0][1] = bridge_abi.L2RelayedMessageEventSignature query.Topics[0][1] = bridgeAbi.L2RelayedMessageEventSignature
query.Topics[0][2] = bridge_abi.L2FailedRelayedMessageEventSignature query.Topics[0][2] = bridgeAbi.L2FailedRelayedMessageEventSignature
query.Topics[0][3] = bridge_abi.L2AppendMessageEventSignature query.Topics[0][3] = bridgeAbi.L2AppendMessageEventSignature
logs, err := w.FilterLogs(w.ctx, query) logs, err := w.FilterLogs(w.ctx, query)
if err != nil { if err != nil {
@@ -295,13 +308,13 @@ func (w *L2WatcherClient) FetchContractEvent() {
} else { } else {
msgStatus = types.MsgFailed msgStatus = types.MsgFailed
} }
if err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil { if err = w.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err) log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return return
} }
} }
if err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil { if err = w.l2MessageOrm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
log.Error("failed to save l2 messages", "err", err) log.Error("failed to save l2 messages", "err", err)
return return
} }
@@ -311,18 +324,18 @@ func (w *L2WatcherClient) FetchContractEvent() {
} }
} }
func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L2Message, []relayedMessage, error) { func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]orm.L2Message, []relayedMessage, error) {
// Need use contract abi to parse event Log // Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up // Can only be tested after we have our contracts set up
var l2Messages []*types.L2Message var l2Messages []orm.L2Message
var relayedMessages []relayedMessage var relayedMessages []relayedMessage
var lastAppendMsgHash common.Hash var lastAppendMsgHash common.Hash
var lastAppendMsgNonce uint64 var lastAppendMsgNonce uint64
for _, vLog := range logs { for _, vLog := range logs {
switch vLog.Topics[0] { switch vLog.Topics[0] {
case bridge_abi.L2SentMessageEventSignature: case bridgeAbi.L2SentMessageEventSignature:
event := bridge_abi.L2SentMessageEvent{} event := bridgeAbi.L2SentMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "SentMessage", vLog) err := utils.UnpackLog(w.messengerABI, &event, "SentMessage", vLog)
if err != nil { if err != nil {
log.Error("failed to unpack layer2 SentMessage event", "err", err) log.Error("failed to unpack layer2 SentMessage event", "err", err)
@@ -350,7 +363,7 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
return l2Messages, relayedMessages, errors.New(errMsg) return l2Messages, relayedMessages, errors.New(errMsg)
} }
l2Messages = append(l2Messages, &types.L2Message{ l2Messages = append(l2Messages, orm.L2Message{
Nonce: event.MessageNonce.Uint64(), Nonce: event.MessageNonce.Uint64(),
MsgHash: computedMsgHash.String(), MsgHash: computedMsgHash.String(),
Height: vLog.BlockNumber, Height: vLog.BlockNumber,
@@ -360,8 +373,8 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
Calldata: common.Bytes2Hex(event.Message), Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(), Layer2Hash: vLog.TxHash.Hex(),
}) })
case bridge_abi.L2RelayedMessageEventSignature: case bridgeAbi.L2RelayedMessageEventSignature:
event := bridge_abi.L2RelayedMessageEvent{} event := bridgeAbi.L2RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog) err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err) log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err)
@@ -373,8 +386,8 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
txHash: vLog.TxHash, txHash: vLog.TxHash,
isSuccessful: true, isSuccessful: true,
}) })
case bridge_abi.L2FailedRelayedMessageEventSignature: case bridgeAbi.L2FailedRelayedMessageEventSignature:
event := bridge_abi.L2FailedRelayedMessageEvent{} event := bridgeAbi.L2FailedRelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog) err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err) log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err)
@@ -386,8 +399,8 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
txHash: vLog.TxHash, txHash: vLog.TxHash,
isSuccessful: false, isSuccessful: false,
}) })
case bridge_abi.L2AppendMessageEventSignature: case bridgeAbi.L2AppendMessageEventSignature:
event := bridge_abi.L2AppendMessageEvent{} event := bridgeAbi.L2AppendMessageEvent{}
err := utils.UnpackLog(w.messageQueueABI, &event, "AppendMessage", vLog) err := utils.UnpackLog(w.messageQueueABI, &event, "AppendMessage", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer2 AppendMessage event", "err", err) log.Warn("Failed to unpack layer2 AppendMessage event", "err", err)

View File

@@ -9,54 +9,43 @@ import (
"testing" "testing"
"time" "time"
"gorm.io/gorm"
"github.com/agiledragon/gomonkey/v2" "github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind" "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey" "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/types" "scroll-tech/common/types"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"
"scroll-tech/bridge/utils"
cutils "scroll-tech/common/utils" cutils "scroll-tech/common/utils"
"scroll-tech/database" bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/database/migrate" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/mock_bridge"
) )
func setupL2Watcher(t *testing.T) *L2WatcherClient { func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
db, err := database.NewOrmFactory(cfg.DBConfig) db := setupDB(t)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2cfg := cfg.L2Config l2cfg := cfg.L2Config
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db) watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
return watcher return watcher, db
} }
func testCreateNewWatcherAndStop(t *testing.T) { func testCreateNewWatcherAndStop(t *testing.T) {
// Create db handler and reset db. wc, db := setupL2Watcher(t)
l2db, err := database.NewOrmFactory(cfg.DBConfig) subCtx, cancel := context.WithCancel(context.Background())
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() { defer func() {
cancel() cancel()
l2db.Close() defer utils.CloseDB(db)
}() }()
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, l2db)
loopToFetchEvent(subCtx, wc) loopToFetchEvent(subCtx, wc)
l1cfg := cfg.L1Config l1cfg := cfg.L1Config
@@ -79,20 +68,13 @@ func testCreateNewWatcherAndStop(t *testing.T) {
} }
func testMonitorBridgeContract(t *testing.T) { func testMonitorBridgeContract(t *testing.T) {
// Create db handler and reset db. wc, db := setupL2Watcher(t)
db, err := database.NewOrmFactory(cfg.DBConfig) subCtx, cancel := context.WithCancel(context.Background())
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() { defer func() {
cancel() cancel()
db.Close() defer utils.CloseDB(db)
}() }()
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
loopToFetchEvent(subCtx, wc) loopToFetchEvent(subCtx, wc)
previousHeight, err := l2Cli.BlockNumber(context.Background()) previousHeight, err := l2Cli.BlockNumber(context.Background())
@@ -117,7 +99,7 @@ func testMonitorBridgeContract(t *testing.T) {
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit) tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err) assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx) receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil { if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed") t.Fatalf("Call failed")
} }
@@ -127,34 +109,30 @@ func testMonitorBridgeContract(t *testing.T) {
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit) tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err) assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx) receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil { if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed") t.Fatalf("Call failed")
} }
l2MessageOrm := orm.NewL2Message(db)
// check if we successfully stored events // check if we successfully stored events
assert.True(t, cutils.TryTimes(10, func() bool { assert.True(t, cutils.TryTimes(10, func() bool {
height, err := db.GetLayer2LatestWatchedHeight() height, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
return err == nil && height > int64(previousHeight) return err == nil && height > int64(previousHeight)
})) }))
// check l1 messages. // check l1 messages.
assert.True(t, cutils.TryTimes(10, func() bool { assert.True(t, cutils.TryTimes(10, func() bool {
msgs, err := db.GetL2Messages(map[string]interface{}{"status": types.MsgPending}) msgs, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"status": types.MsgPending}, nil, 0)
return err == nil && len(msgs) == 2 return err == nil && len(msgs) == 2
})) }))
} }
func testFetchMultipleSentMessageInOneBlock(t *testing.T) { func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
// Create db handler and reset db. _, db := setupL2Watcher(t)
db, err := database.NewOrmFactory(cfg.DBConfig) subCtx, cancel := context.WithCancel(context.Background())
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() { defer func() {
cancel() cancel()
db.Close() defer utils.CloseDB(db)
}() }()
previousHeight, err := l2Cli.BlockNumber(context.Background()) // shallow the global previousHeight previousHeight, err := l2Cli.BlockNumber(context.Background()) // shallow the global previousHeight
@@ -172,8 +150,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
// Call mock_bridge instance sendMessage to trigger emit events multiple times // Call mock_bridge instance sendMessage to trigger emit events multiple times
numTransactions := 4 numTransactions := 4
var tx *geth_types.Transaction var tx *gethTypes.Transaction
for i := 0; i < numTransactions; i++ { for i := 0; i < numTransactions; i++ {
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63") addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr) nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr)
@@ -188,7 +165,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
} }
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx) receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil { if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed") t.Fatalf("Call failed")
} }
@@ -204,28 +181,26 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit) tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err) assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx) receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil { if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed") t.Fatalf("Call failed")
} }
l2MessageOrm := orm.NewL2Message(db)
// check if we successfully stored events // check if we successfully stored events
assert.True(t, cutils.TryTimes(10, func() bool { assert.True(t, cutils.TryTimes(10, func() bool {
height, err := db.GetLayer2LatestWatchedHeight() height, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
return err == nil && height > int64(previousHeight) return err == nil && height > int64(previousHeight)
})) }))
assert.True(t, cutils.TryTimes(10, func() bool { assert.True(t, cutils.TryTimes(10, func() bool {
msgs, err := db.GetL2Messages(map[string]interface{}{"status": types.MsgPending}) msgs, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"status": types.MsgPending}, nil, 0)
return err == nil && len(msgs) == 5 return err == nil && len(msgs) == 5
})) }))
} }
func testFetchRunningMissingBlocks(t *testing.T) { func testFetchRunningMissingBlocks(t *testing.T) {
// Create db handler and reset db. _, db := setupL2Watcher(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer utils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0]) auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
@@ -235,6 +210,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx) address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err) assert.NoError(t, err)
blockTraceOrm := orm.NewBlockTrace(db)
ok := cutils.TryTimes(10, func() bool { ok := cutils.TryTimes(10, func() bool {
latestHeight, err := l2Cli.BlockNumber(context.Background()) latestHeight, err := l2Cli.BlockNumber(context.Background())
if err != nil { if err != nil {
@@ -242,13 +218,13 @@ func testFetchRunningMissingBlocks(t *testing.T) {
} }
wc := prepareWatcherClient(l2Cli, db, address) wc := prepareWatcherClient(l2Cli, db, address)
wc.TryFetchRunningMissingBlocks(context.Background(), latestHeight) wc.TryFetchRunningMissingBlocks(context.Background(), latestHeight)
fetchedHeight, err := db.GetL2BlocksLatestHeight() fetchedHeight, err := blockTraceOrm.GetL2BlocksLatestHeight()
return err == nil && uint64(fetchedHeight) == latestHeight return err == nil && uint64(fetchedHeight) == latestHeight
}) })
assert.True(t, ok) assert.True(t, ok)
} }
func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *L2WatcherClient { func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB, contractAddr common.Address) *L2WatcherClient {
confirmations := rpc.LatestBlockNumber confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db) return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
} }
@@ -268,11 +244,13 @@ func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
} }
func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
watcher := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
logs := []geth_types.Log{ defer utils.CloseDB(db)
logs := []gethTypes.Log{
{ {
Topics: []common.Hash{ Topics: []common.Hash{
bridge_abi.L2SentMessageEventSignature, bridgeAbi.L2SentMessageEventSignature,
}, },
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
@@ -281,7 +259,7 @@ func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
convey.Convey("unpack SentMessage log failure", t, func() { convey.Convey("unpack SentMessage log failure", t, func() {
targetErr := errors.New("UnpackLog SentMessage failure") targetErr := errors.New("UnpackLog SentMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr return targetErr
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
@@ -298,8 +276,8 @@ func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
tmpValue := big.NewInt(1000) tmpValue := big.NewInt(1000)
tmpMessageNonce := big.NewInt(100) tmpMessageNonce := big.NewInt(100)
tmpMessage := []byte("test for L2SentMessageEventSignature") tmpMessage := []byte("test for L2SentMessageEventSignature")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridge_abi.L2SentMessageEvent) tmpOut := out.(*bridgeAbi.L2SentMessageEvent)
tmpOut.Sender = tmpSendAddr tmpOut.Sender = tmpSendAddr
tmpOut.Value = tmpValue tmpOut.Value = tmpValue
tmpOut.Target = tmpTargetAddr tmpOut.Target = tmpTargetAddr
@@ -317,10 +295,12 @@ func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
} }
func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
watcher := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
logs := []geth_types.Log{ defer utils.CloseDB(db)
logs := []gethTypes.Log{
{ {
Topics: []common.Hash{bridge_abi.L2RelayedMessageEventSignature}, Topics: []common.Hash{bridgeAbi.L2RelayedMessageEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -328,7 +308,7 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
convey.Convey("unpack RelayedMessage log failure", t, func() { convey.Convey("unpack RelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog RelayedMessage failure") targetErr := errors.New("UnpackLog RelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr return targetErr
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
@@ -341,8 +321,8 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
convey.Convey("L2RelayedMessageEventSignature success", t, func() { convey.Convey("L2RelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridge_abi.L2RelayedMessageEvent) tmpOut := out.(*bridgeAbi.L2RelayedMessageEvent)
tmpOut.MessageHash = msgHash tmpOut.MessageHash = msgHash
return nil return nil
}) })
@@ -357,10 +337,12 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
} }
func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) {
watcher := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
logs := []geth_types.Log{ defer utils.CloseDB(db)
logs := []gethTypes.Log{
{ {
Topics: []common.Hash{bridge_abi.L2FailedRelayedMessageEventSignature}, Topics: []common.Hash{bridgeAbi.L2FailedRelayedMessageEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -368,7 +350,7 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
convey.Convey("unpack FailedRelayedMessage log failure", t, func() { convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog FailedRelayedMessage failure") targetErr := errors.New("UnpackLog FailedRelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr return targetErr
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
@@ -381,8 +363,8 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
convey.Convey("L2FailedRelayedMessageEventSignature success", t, func() { convey.Convey("L2FailedRelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridge_abi.L2FailedRelayedMessageEvent) tmpOut := out.(*bridgeAbi.L2FailedRelayedMessageEvent)
tmpOut.MessageHash = msgHash tmpOut.MessageHash = msgHash
return nil return nil
}) })
@@ -397,10 +379,11 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
} }
func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
watcher := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
logs := []geth_types.Log{ defer utils.CloseDB(db)
logs := []gethTypes.Log{
{ {
Topics: []common.Hash{bridge_abi.L2AppendMessageEventSignature}, Topics: []common.Hash{bridgeAbi.L2AppendMessageEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -408,7 +391,7 @@ func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
convey.Convey("unpack AppendMessage log failure", t, func() { convey.Convey("unpack AppendMessage log failure", t, func() {
targetErr := errors.New("UnpackLog AppendMessage failure") targetErr := errors.New("UnpackLog AppendMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr return targetErr
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
@@ -421,8 +404,8 @@ func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
convey.Convey("L2AppendMessageEventSignature success", t, func() { convey.Convey("L2AppendMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridge_abi.L2AppendMessageEvent) tmpOut := out.(*bridgeAbi.L2AppendMessageEvent)
tmpOut.MessageHash = msgHash tmpOut.MessageHash = msgHash
tmpOut.Index = big.NewInt(100) tmpOut.Index = big.NewInt(100)
return nil return nil

View File

@@ -7,11 +7,14 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
var ( var (
@@ -24,47 +27,61 @@ var (
l2Cli *ethclient.Client l2Cli *ethclient.Client
// block trace // block trace
wrappedBlock1 *types.WrappedBlock wrappedBlock1 *bridgeTypes.WrappedBlock
wrappedBlock2 *types.WrappedBlock wrappedBlock2 *bridgeTypes.WrappedBlock
) )
func setupEnv(t *testing.T) (err error) { func setupEnv(t *testing.T) (err error) {
// Load config. // Load config.
cfg, err = config.NewConfig("../config.json") cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err) assert.NoError(t, err)
base.RunImages(t) base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint() cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint() cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = base.DBConfig cfg.DBConfig = &config.DBConfig{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
// Create l2geth client. // Create l2geth client.
l2Cli, err = base.L2Client() l2Cli, err = base.L2Client()
assert.NoError(t, err) assert.NoError(t, err)
templateBlockTrace1, err := os.ReadFile("../../common/testdata/blockTrace_02.json") templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
if err != nil { if err != nil {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock1 = &types.WrappedBlock{} wrappedBlock1 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil { if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err return err
} }
templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json") templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
if err != nil { if err != nil {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock2 = &types.WrappedBlock{} wrappedBlock2 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil { if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err return err
} }
return err return err
} }
func setupDB(t *testing.T) *gorm.DB {
db, err := utils.InitDB(cfg.DBConfig)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
return db
}
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
base = docker.NewDockerApp() base = docker.NewDockerApp()

View File

@@ -0,0 +1,314 @@
package orm
import (
"context"
"encoding/json"
"errors"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
bridgeTypes "scroll-tech/bridge/internal/types"
)
// BlockBatch is the GORM model for one row of the "block_batch" table: a
// batch of consecutive L2 blocks together with its proving, rollup and
// gas-oracle state.
type BlockBatch struct {
	db *gorm.DB `gorm:"column:-"` // injected DB handle; excluded from column mapping

	Hash             string `json:"hash" gorm:"column:hash"`                           // batch hash (hex), primary lookup key used by the queries below
	Index            uint64 `json:"index" gorm:"column:index"`                         // monotonically increasing batch index
	StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"` // first L2 block in the batch
	StartBlockHash   string `json:"start_block_hash" gorm:"column:start_block_hash"`
	EndBlockNumber   uint64 `json:"end_block_number" gorm:"column:end_block_number"` // last L2 block in the batch
	EndBlockHash     string `json:"end_block_hash" gorm:"column:end_block_hash"`
	ParentHash       string `json:"parent_hash" gorm:"column:parent_hash"` // hash of the previous batch
	StateRoot        string `json:"state_root" gorm:"column:state_root"`   // post-batch state root
	TotalTxNum       uint64 `json:"total_tx_num" gorm:"column:total_tx_num"`
	TotalL1TxNum     uint64 `json:"total_l1_tx_num" gorm:"column:total_l1_tx_num"`
	TotalL2Gas       uint64 `json:"total_l2_gas" gorm:"column:total_l2_gas"`
	// Proving lifecycle; int values map to types.ProvingStatus.
	ProvingStatus int    `json:"proving_status" gorm:"column:proving_status;default:1"`
	Proof         []byte `json:"proof" gorm:"column:proof"` // JSON-encoded message.AggProof
	ProofTimeSec  uint64 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:0"`
	// Rollup lifecycle; int values map to types.RollupStatus.
	RollupStatus   int    `json:"rollup_status" gorm:"column:rollup_status;default:1"`
	CommitTxHash   string `json:"commit_tx_hash" gorm:"column:commit_tx_hash;default:NULL"`
	OracleStatus   int    `json:"oracle_status" gorm:"column:oracle_status;default:1"` // maps to types.GasOracleStatus
	OracleTxHash   string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
	FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
	// Lifecycle timestamps; pointers so NULL is representable.
	CreatedAt        time.Time  `json:"created_at" gorm:"column:created_at;default:CURRENT_TIMESTAMP()"`
	ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
	ProvedAt         *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
	CommittedAt      *time.Time `json:"committed_at" gorm:"column:committed_at;default:NULL"`
	FinalizedAt      *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`
}
// NewBlockBatch creates a BlockBatch ORM instance bound to the given DB handle.
func NewBlockBatch(db *gorm.DB) *BlockBatch {
	return &BlockBatch{db: db}
}
// TableName returns the table name GORM should use for this model.
func (*BlockBatch) TableName() string {
	return "block_batch"
}
// GetBatchCount returns the total number of rows in the block_batch table.
func (o *BlockBatch) GetBatchCount() (int64, error) {
	var total int64
	err := o.db.Model(&BlockBatch{}).Count(&total).Error
	if err != nil {
		return 0, err
	}
	return total, nil
}
// GetBlockBatches queries block batches matching the given field filters,
// applying the requested ORDER BY clauses and, when limit is non-zero, a LIMIT.
func (o *BlockBatch) GetBlockBatches(fields map[string]interface{}, orderByList []string, limit int) ([]BlockBatch, error) {
	query := o.db
	// Each entry becomes an additional WHERE condition.
	for column, value := range fields {
		query = query.Where(column, value)
	}
	// Each entry becomes an additional ORDER BY clause.
	for _, clause := range orderByList {
		query = query.Order(clause)
	}
	// limit == 0 means "no limit".
	if limit != 0 {
		query = query.Limit(limit)
	}
	var results []BlockBatch
	if err := query.Find(&results).Error; err != nil {
		return nil, err
	}
	return results, nil
}
// GetBlockBatchesHashByRollupStatus returns the hashes of up to `limit`
// batches whose rollup_status equals `status`, ordered by ascending batch
// index.
func (o *BlockBatch) GetBlockBatchesHashByRollupStatus(status types.RollupStatus, limit int) ([]string, error) {
	var blockBatches []BlockBatch
	err := o.db.Select("hash").Where("rollup_status", int(status)).Order("index ASC").Limit(limit).Find(&blockBatches).Error
	if err != nil {
		return nil, err
	}
	// Pre-size the result: exactly one hash per fetched row.
	hashes := make([]string, 0, len(blockBatches))
	for _, v := range blockBatches {
		hashes = append(hashes, v.Hash)
	}
	return hashes, nil
}
// GetVerifiedProofByHash loads the JSON-encoded aggregated proof of the batch
// identified by hash, provided that batch's proving status is "verified".
func (o *BlockBatch) GetVerifiedProofByHash(hash string) (*message.AggProof, error) {
	row := o.db.Model(&BlockBatch{}).
		Select("proof").
		Where("hash", hash).
		Where("proving_status", int(types.ProvingTaskVerified)).
		Row()
	if err := row.Err(); err != nil {
		return nil, err
	}
	var raw []byte
	if err := row.Scan(&raw); err != nil {
		return nil, err
	}
	// The proof column stores message.AggProof serialized as JSON.
	proof := new(message.AggProof)
	if err := json.Unmarshal(raw, proof); err != nil {
		return nil, err
	}
	return proof, nil
}
// GetLatestBatch returns the batch with the highest index.
// because we will `initializeGenesis()` when we start the `L2Watcher`, so a batch must exist.
// NOTE(review): gorm.ErrRecordNotFound is deliberately swallowed; on an empty
// table this returns a zero-value BlockBatch with a nil error — confirm all
// callers rely on the genesis batch existing and handle that case.
func (o *BlockBatch) GetLatestBatch() (*BlockBatch, error) {
	var blockBatch BlockBatch
	err := o.db.Order("index DESC").Limit(1).First(&blockBatch).Error
	// Only propagate real errors; "record not found" falls through on purpose.
	if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
		return nil, err
	}
	return &blockBatch, nil
}
// GetLatestBatchByRollupStatus returns the highest-index batch whose rollup
// status is one of the given statuses. Unlike GetLatestBatch, a missing row
// surfaces as gorm.ErrRecordNotFound to the caller.
func (o *BlockBatch) GetLatestBatchByRollupStatus(rollupStatuses []types.RollupStatus) (*BlockBatch, error) {
	// Convert to the int representation stored in the rollup_status column.
	tmpRollupStatus := make([]int, 0, len(rollupStatuses))
	for _, v := range rollupStatuses {
		tmpRollupStatus = append(tmpRollupStatus, int(v))
	}
	var blockBatch BlockBatch
	err := o.db.Where("rollup_status IN (?)", tmpRollupStatus).Order("index DESC").Limit(1).First(&blockBatch).Error
	if err != nil {
		return nil, err
	}
	return &blockBatch, nil
}
// GetRollupStatusByHashList returns one rollup status per input hash, in the
// same order as `hashes`. Hashes with no matching row yield the zero-value
// types.RollupStatus so the result length always equals len(hashes).
func (o *BlockBatch) GetRollupStatusByHashList(hashes []string) ([]types.RollupStatus, error) {
	if len(hashes) == 0 {
		return nil, nil
	}
	var blockBatches []BlockBatch
	err := o.db.Select("hash, rollup_status").Where("hash IN (?)", hashes).Find(&blockBatches).Error
	if err != nil {
		return nil, err
	}
	// Index fetched rows by hash, then emit results in input order; this keeps
	// positions aligned even when some hashes are absent from the table.
	statusMap := make(map[string]types.RollupStatus, len(hashes))
	for _, batch := range blockBatches {
		statusMap[batch.Hash] = types.RollupStatus(batch.RollupStatus)
	}
	statuses := make([]types.RollupStatus, 0, len(hashes))
	for _, hash := range hashes {
		statuses = append(statuses, statusMap[hash])
	}
	return statuses, nil
}
// InsertBlockBatchByBatchData inserts one block_batch row built from the given
// BatchData. When tx is non-nil the insert runs inside that transaction;
// otherwise the orm's own handle is used. Returns the number of rows inserted.
func (o *BlockBatch) InsertBlockBatchByBatchData(tx *gorm.DB, batchData *bridgeTypes.BatchData) (int64, error) {
	// Prefer the caller-supplied transaction handle when present.
	db := o.db
	if tx != nil {
		db = tx
	}
	blocks := batchData.Batch.Blocks
	last := len(blocks) - 1
	row := BlockBatch{
		Hash:             batchData.Hash().Hex(),
		Index:            batchData.Batch.BatchIndex,
		StartBlockNumber: blocks[0].BlockNumber,
		StartBlockHash:   blocks[0].BlockHash.Hex(),
		EndBlockNumber:   blocks[last].BlockNumber,
		EndBlockHash:     blocks[last].BlockHash.Hex(),
		ParentHash:       batchData.Batch.ParentBatchHash.Hex(),
		StateRoot:        batchData.Batch.NewStateRoot.Hex(),
		TotalTxNum:       batchData.TotalTxNum,
		TotalL1TxNum:     batchData.TotalL1TxNum,
		TotalL2Gas:       batchData.TotalL2Gas,
		CreatedAt:        time.Now(),
	}
	result := db.Create(&row)
	if result.Error != nil {
		log.Error("failed to insert block batch by batchData", "err", result.Error)
		return 0, result.Error
	}
	return result.RowsAffected, nil
}
// UpdateProvingStatus sets the proving status of the batch identified by hash,
// and maintains the related timestamp columns for the status transition.
func (o *BlockBatch) UpdateProvingStatus(hash string, status types.ProvingStatus) error {
	updates := map[string]interface{}{
		"proving_status": int(status),
	}
	switch status {
	case types.ProvingTaskAssigned:
		// Record when a prover picked up the task.
		updates["prover_assigned_at"] = time.Now()
	case types.ProvingTaskUnassigned:
		// Clear the assignment timestamp when the task is released.
		updates["prover_assigned_at"] = nil
	case types.ProvingTaskProved, types.ProvingTaskVerified:
		updates["proved_at"] = time.Now()
	default:
	}
	return o.db.Model(&BlockBatch{}).Where("hash", hash).Updates(updates).Error
}
// UpdateRollupStatus sets the rollup status of the batch identified by hash,
// recording committed_at / finalized_at timestamps for the matching states.
func (o *BlockBatch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error {
	updateFields := make(map[string]interface{})
	updateFields["rollup_status"] = int(status)
	switch status {
	case types.RollupCommitted:
		updateFields["committed_at"] = time.Now()
	case types.RollupFinalized:
		updateFields["finalized_at"] = time.Now()
	}
	// Chain WithContext before Model for consistency with the sibling
	// Update* methods in this file.
	if err := o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
		return err
	}
	return nil
}
// UpdateSkippedBatches moves committed batches whose proving was skipped or
// failed into the "finalization skipped" rollup state, returning how many
// rows were updated.
func (o *BlockBatch) UpdateSkippedBatches() (int64, error) {
	skippedProvingStatuses := []interface{}{
		int(types.ProvingTaskSkipped),
		int(types.ProvingTaskFailed),
	}
	result := o.db.Model(&BlockBatch{}).
		Where("rollup_status", int(types.RollupCommitted)).
		Where("proving_status IN (?)", skippedProvingStatuses).
		Update("rollup_status", int(types.RollupFinalizationSkipped))
	if err := result.Error; err != nil {
		return 0, err
	}
	return result.RowsAffected, nil
}
// UpdateCommitTxHashAndRollupStatus stores the commit transaction hash and the
// new rollup status for the batch identified by hash, stamping committed_at
// when the batch transitions to the committed state.
func (o *BlockBatch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error {
	updates := map[string]interface{}{
		"commit_tx_hash": commitTxHash,
		"rollup_status":  int(status),
	}
	if status == types.RollupCommitted {
		updates["committed_at"] = time.Now()
	}
	return o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updates).Error
}
// UpdateFinalizeTxHashAndRollupStatus stores the finalize transaction hash and
// the new rollup status for the batch identified by hash, stamping
// finalized_at when the batch transitions to the finalized state.
func (o *BlockBatch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error {
	updates := map[string]interface{}{
		"finalize_tx_hash": finalizeTxHash,
		"rollup_status":    int(status),
	}
	if status == types.RollupFinalized {
		updates["finalized_at"] = time.Now()
	}
	return o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updates).Error
}
// UpdateL2GasOracleStatusAndOracleTxHash stores the gas-oracle status and the
// oracle transaction hash for the batch identified by hash.
func (o *BlockBatch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
	updates := map[string]interface{}{
		"oracle_status":  int(status),
		"oracle_tx_hash": txHash,
	}
	return o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updates).Error
}
// UpdateProofByHash stores the JSON-encoded aggregated proof and the proving
// duration (seconds) for the batch identified by hash.
// for unit test
func (o *BlockBatch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
	encoded, marshalErr := json.Marshal(proof)
	if marshalErr != nil {
		return marshalErr
	}
	updates := map[string]interface{}{
		"proof":          encoded,
		"proof_time_sec": proofTimeSec,
	}
	updateErr := o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updates).Error
	if updateErr != nil {
		log.Error("failed to update proof", "err", updateErr)
	}
	return updateErr
}

View File

@@ -0,0 +1,155 @@
package orm
import (
"encoding/json"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/bridge/internal/types"
)
// BlockTrace is the GORM model for one row of the "block_trace" table: an L2
// block header summary plus its full execution trace serialized as JSON.
// NOTE(review): unlike BlockBatch, the gorm tags here omit the "column:" key
// (e.g. `gorm:"number"`) — presumably relying on default snake-case column
// mapping; verify against the migration schema.
type BlockTrace struct {
	db *gorm.DB `gorm:"column:-"` // injected DB handle; excluded from column mapping

	Number         uint64 `json:"number" gorm:"number"`                // L2 block number
	Hash           string `json:"hash" gorm:"hash"`                    // block hash (hex)
	ParentHash     string `json:"parent_hash" gorm:"parent_hash"`
	Trace          string `json:"trace" gorm:"column:trace"`           // JSON-encoded types.WrappedBlock
	BatchHash      string `json:"batch_hash" gorm:"batch_hash;default:NULL"` // NULL until the block is assigned to a batch
	TxNum          uint64 `json:"tx_num" gorm:"tx_num"`
	GasUsed        uint64 `json:"gas_used" gorm:"gas_used"`
	BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
}
// NewBlockTrace create an blockTraceOrm instance
func NewBlockTrace(db *gorm.DB) *BlockTrace {
return &BlockTrace{db: db}
}
// TableName define the BlockTrace table name
func (*BlockTrace) TableName() string {
return "block_trace"
}
// GetL2BlocksLatestHeight returns the highest L2 block number stored in the
// table, or -1 when the table is empty.
func (o *BlockTrace) GetL2BlocksLatestHeight() (int64, error) {
	row := o.db.Model(&BlockTrace{}).Select("COALESCE(MAX(number), -1)").Row()
	if err := row.Err(); err != nil {
		return -1, err
	}
	var latest int64
	if err := row.Scan(&latest); err != nil {
		return -1, err
	}
	return latest, nil
}
// GetL2WrappedBlocks returns the wrapped blocks whose rows match the given
// field/value filters, decoded from the stored JSON trace column.
//
// Bug fix: a trace that failed to unmarshal previously caused a silent `break`,
// returning a truncated result set with a nil error; it now returns the error.
func (o *BlockTrace) GetL2WrappedBlocks(fields map[string]interface{}) ([]*types.WrappedBlock, error) {
	var blockTraces []BlockTrace
	db := o.db.Select("trace")
	for key, value := range fields {
		db = db.Where(key, value)
	}
	if err := db.Find(&blockTraces).Error; err != nil {
		return nil, err
	}
	var wrappedBlocks []*types.WrappedBlock
	for _, v := range blockTraces {
		var wrappedBlock types.WrappedBlock
		if err := json.Unmarshal([]byte(v.Trace), &wrappedBlock); err != nil {
			return nil, err
		}
		wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
	}
	return wrappedBlocks, nil
}
// GetL2BlockInfos returns block rows (header fields only, no trace) matching
// the given field filters, ordered by orderByList and capped at limit
// (0 means no limit).
func (o *BlockTrace) GetL2BlockInfos(fields map[string]interface{}, orderByList []string, limit int) ([]BlockTrace, error) {
	query := o.db.Select("number, hash, parent_hash, batch_hash, tx_num, gas_used, block_timestamp")
	for col, val := range fields {
		query = query.Where(col, val)
	}
	for _, clause := range orderByList {
		query = query.Order(clause)
	}
	if limit != 0 {
		query = query.Limit(limit)
	}
	var rows []BlockTrace
	if err := query.Find(&rows).Error; err != nil {
		return nil, err
	}
	return rows, nil
}
// GetUnbatchedL2Blocks returns block rows not yet assigned to a batch
// (batch_hash IS NULL), filtered by fields, ordered by orderByList and capped
// at limit (0 means no limit).
//
// Bug fix: the orderByList and limit parameters were previously accepted but
// silently ignored; they are now applied, matching GetL2BlockInfos.
func (o *BlockTrace) GetUnbatchedL2Blocks(fields map[string]interface{}, orderByList []string, limit int) ([]BlockTrace, error) {
	var unbatchedBlockTraces []BlockTrace
	db := o.db.Select("number, hash, parent_hash, batch_hash, tx_num, gas_used, block_timestamp").Where("batch_hash is NULL")
	for key, value := range fields {
		db = db.Where(key, value)
	}
	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}
	if limit != 0 {
		db = db.Limit(limit)
	}
	if err := db.Find(&unbatchedBlockTraces).Error; err != nil {
		return nil, err
	}
	return unbatchedBlockTraces, nil
}
// InsertWrappedBlocks persists the given wrapped blocks into block_trace.
// Each row stores the indexed header fields plus the whole block JSON-encoded
// in the trace column.
func (o *BlockTrace) InsertWrappedBlocks(blocks []*types.WrappedBlock) error {
	// Guard against an empty input, consistent with InsertL1Blocks and
	// SaveL1Messages (gorm's Create rejects an empty slice).
	if len(blocks) == 0 {
		return nil
	}
	blockTraces := make([]BlockTrace, 0, len(blocks))
	for _, block := range blocks {
		hash := block.Header.Hash().String()
		data, err := json.Marshal(block)
		if err != nil {
			log.Error("failed to marshal block", "hash", hash, "err", err)
			return err
		}
		blockTraces = append(blockTraces, BlockTrace{
			Number:         block.Header.Number.Uint64(),
			Hash:           hash,
			ParentHash:     block.Header.ParentHash.String(),
			Trace:          string(data),
			TxNum:          uint64(len(block.Transactions)),
			GasUsed:        block.Header.GasUsed,
			BlockTimestamp: block.Header.Time,
		})
	}
	if err := o.db.Create(&blockTraces).Error; err != nil {
		log.Error("failed to insert blockTraces", "err", err)
		return err
	}
	return nil
}
// UpdateBatchHashForL2Blocks sets batch_hash on every block whose number is in
// numbers. When tx is non-nil the update runs inside that transaction,
// otherwise it uses the orm's own connection.
func (o *BlockTrace) UpdateBatchHashForL2Blocks(tx *gorm.DB, numbers []uint64, batchHash string) error {
	db := o.db
	if tx != nil {
		db = tx
	}
	return db.Model(&BlockTrace{}).Where("number IN (?)", numbers).Update("batch_hash", batchHash).Error
}

View File

@@ -0,0 +1,38 @@
package orm
import (
"errors"
"gorm.io/gorm"
bridgeTypes "scroll-tech/bridge/internal/types"
)
// AddBatchInfoToDB inserts the batch information to the BlockBatch table and
// updates the batch_hash in all blocks included in the batch. Both writes run
// in a single transaction so the batch row and the block back-references stay
// consistent.
func AddBatchInfoToDB(db *gorm.DB, batchData *bridgeTypes.BatchData) error {
	blockBatch := NewBlockBatch(db)
	blockTrace := NewBlockTrace(db)
	return db.Transaction(func(tx *gorm.DB) error {
		rowsAffected, dbTxErr := blockBatch.InsertBlockBatchByBatchData(tx, batchData)
		if dbTxErr != nil {
			return dbTxErr
		}
		if rowsAffected != 1 {
			return errors.New("the InsertBlockBatchByBatchData affected row is not 1")
		}
		blockIDs := make([]uint64, len(batchData.Batch.Blocks))
		for i, block := range batchData.Batch.Blocks {
			blockIDs[i] = block.BlockNumber
		}
		return blockTrace.UpdateBatchHashForL2Blocks(tx, blockIDs, batchData.Hash().Hex())
	})
}

View File

@@ -0,0 +1,87 @@
package orm
import (
"context"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L1Block is the persisted form of an observed L1 block: header metadata plus
// the import / gas-oracle relay bookkeeping for that block.
type L1Block struct {
	db *gorm.DB `gorm:"column:-"` // gorm handle; excluded from the table schema

	Number    uint64 `json:"number" gorm:"column:number"`
	Hash      string `json:"hash" gorm:"column:hash"`
	HeaderRLP string `json:"header_rlp" gorm:"column:header_rlp"`
	BaseFee   uint64 `json:"base_fee" gorm:"column:base_fee"`
	// Status fields default to 1; per the migration comments 1 corresponds to
	// "pending" in the undefined/pending/importing/imported/failed enums.
	BlockStatus     int    `json:"block_status" gorm:"column:block_status;default:1"`
	ImportTxHash    string `json:"import_tx_hash" gorm:"column:import_tx_hash;default:NULL"`
	GasOracleStatus int    `json:"oracle_status" gorm:"column:oracle_status;default:1"`
	OracleTxHash    string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
}

// NewL1Block create an l1Block instance
func NewL1Block(db *gorm.DB) *L1Block {
	return &L1Block{db: db}
}

// TableName define the L1Block table name
func (*L1Block) TableName() string {
	return "l1_block"
}
// GetLatestL1BlockHeight returns the highest L1 block number stored in the
// table, or 0 when the table is empty.
func (l *L1Block) GetLatestL1BlockHeight() (uint64, error) {
	row := l.db.Model(&L1Block{}).Select("COALESCE(MAX(number), 0)").Row()
	if err := row.Err(); err != nil {
		return 0, err
	}
	var latest uint64
	if err := row.Scan(&latest); err != nil {
		return 0, err
	}
	return latest, nil
}
// GetL1Blocks returns the stored L1 blocks matching the given field filters,
// ordered by ascending block number.
func (l *L1Block) GetL1Blocks(fields map[string]interface{}) ([]L1Block, error) {
	query := l.db
	for col, val := range fields {
		query = query.Where(col, val)
	}
	var l1Blocks []L1Block
	if err := query.Order("number ASC").Find(&l1Blocks).Error; err != nil {
		return nil, err
	}
	return l1Blocks, nil
}
// InsertL1Blocks batch-inserts the given L1 blocks. An empty input is a no-op.
func (l *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {
	if len(blocks) == 0 {
		return nil
	}
	if err := l.db.WithContext(ctx).Create(&blocks).Error; err != nil {
		log.Error("failed to insert L1 Blocks", "err", err)
		return err
	}
	return nil
}
// UpdateL1GasOracleStatusAndOracleTxHash updates the gas oracle status and the
// oracle transaction hash of the L1 block identified by blockHash.
func (l *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error {
	updates := map[string]interface{}{
		"oracle_status":  int(status),
		"oracle_tx_hash": txHash,
	}
	return l.db.WithContext(ctx).Model(&L1Block{}).Where("hash", blockHash).Updates(updates).Error
}

View File

@@ -0,0 +1,124 @@
package orm
import (
"context"
"database/sql"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L1Message is the persisted form of a layer1->layer2 bridge message observed
// on L1, keyed by its message-queue index and message hash.
type L1Message struct {
	db *gorm.DB `gorm:"column:-"` // gorm handle; excluded from the table schema

	QueueIndex uint64 `json:"queue_index" gorm:"column:queue_index"`
	MsgHash    string `json:"msg_hash" gorm:"column:msg_hash"`
	Height     uint64 `json:"height" gorm:"column:height"` // L1 block height the message was seen at
	GasLimit   uint64 `json:"gas_limit" gorm:"column:gas_limit"`
	Sender     string `json:"sender" gorm:"column:sender"`
	Target     string `json:"target" gorm:"column:target"`
	Value      string `json:"value" gorm:"column:value"`
	Calldata   string `json:"calldata" gorm:"column:calldata"`
	Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash"`
	// Layer2Hash is NULL until the message is relayed on L2
	// (see UpdateLayer1StatusAndLayer2Hash).
	Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:NULL"`
	// Status defaults to 1; per the migration comment the enum is
	// undefined, pending, submitted, confirmed, failed, expired, relay_failed.
	Status int `json:"status" gorm:"column:status;default:1"`
}

// NewL1Message create an L1MessageOrm instance
func NewL1Message(db *gorm.DB) *L1Message {
	return &L1Message{db: db}
}

// TableName define the L1Message table name
func (*L1Message) TableName() string {
	return "l1_message"
}
// GetLayer1LatestWatchedHeight returns the latest L1 height stored in the
// table, or -1 when no messages have been recorded yet.
// @note It's not correct, since we may don't have message in some blocks.
// But it will only be called at start, some redundancy is acceptable.
func (m *L1Message) GetLayer1LatestWatchedHeight() (int64, error) {
	var maxHeight sql.NullInt64
	if result := m.db.Model(&L1Message{}).Select("MAX(height)").Scan(&maxHeight); result.Error != nil {
		return -1, result.Error
	}
	// MAX over an empty table is NULL, reported as the -1 sentinel.
	if !maxHeight.Valid {
		return -1, nil
	}
	return maxHeight.Int64, nil
}
// GetL1MessagesByStatus returns up to limit messages with the given status,
// ordered by ascending queue index (i.e. oldest first).
func (m *L1Message) GetL1MessagesByStatus(status types.MsgStatus, limit uint64) ([]L1Message, error) {
	var msgs []L1Message
	query := m.db.Where("status", int(status)).Order("queue_index ASC").Limit(int(limit))
	if err := query.Find(&msgs).Error; err != nil {
		return nil, err
	}
	return msgs, nil
}
// GetL1MessageByQueueIndex fetch message by queue_index
// for unit test
func (m *L1Message) GetL1MessageByQueueIndex(queueIndex uint64) (*L1Message, error) {
	var msg L1Message
	err := m.db.Where("queue_index", queueIndex).First(&msg).Error
	if err != nil {
		// gorm returns ErrRecordNotFound when no row matches.
		return nil, err
	}
	return &msg, nil
}

// GetL1MessageByMsgHash fetch message by msg_hash
// for unit test
func (m *L1Message) GetL1MessageByMsgHash(msgHash string) (*L1Message, error) {
	var msg L1Message
	err := m.db.Where("msg_hash", msgHash).First(&msg).Error
	if err != nil {
		// gorm returns ErrRecordNotFound when no row matches.
		return nil, err
	}
	return &msg, nil
}
// SaveL1Messages batch-inserts the given layer1 messages. An empty input is a
// no-op. On failure the queue indices and heights of the whole batch are
// logged to aid debugging.
func (m *L1Message) SaveL1Messages(ctx context.Context, messages []*L1Message) error {
	if len(messages) == 0 {
		return nil
	}
	insertErr := m.db.WithContext(ctx).Create(&messages).Error
	if insertErr != nil {
		queueIndices := make([]uint64, 0, len(messages))
		heights := make([]uint64, 0, len(messages))
		for _, msg := range messages {
			queueIndices = append(queueIndices, msg.QueueIndex)
			heights = append(heights, msg.Height)
		}
		log.Error("failed to insert l1Messages", "queueIndices", queueIndices, "heights", heights, "err", insertErr)
	}
	return insertErr
}
// UpdateLayer1Status updates message status, given message hash
func (m *L1Message) UpdateLayer1Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
	if err := m.db.Model(&L1Message{}).WithContext(ctx).Where("msg_hash", msgHash).Update("status", int(status)).Error; err != nil {
		return err
	}
	return nil
}

// UpdateLayer1StatusAndLayer2Hash updates message status and layer2 transaction hash, given message hash
func (m *L1Message) UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer2Hash string) error {
	// Both columns are written in one UPDATE so status and relay tx hash
	// cannot get out of sync.
	updateFields := map[string]interface{}{
		"status":      int(status),
		"layer2_hash": layer2Hash,
	}
	if err := m.db.Model(&L1Message{}).WithContext(ctx).Where("msg_hash", msgHash).Updates(updateFields).Error; err != nil {
		return err
	}
	return nil
}

View File

@@ -0,0 +1,127 @@
package orm
import (
"context"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L2Message is the persisted form of a layer2->layer1 bridge message observed
// on L2, keyed by its nonce and message hash.
type L2Message struct {
	db *gorm.DB `gorm:"column:-"` // gorm handle; excluded from the table schema

	Nonce      uint64 `json:"nonce" gorm:"column:nonce"`
	MsgHash    string `json:"msg_hash" gorm:"column:msg_hash"`
	Height     uint64 `json:"height" gorm:"column:height"` // L2 block height the message was seen at
	Sender     string `json:"sender" gorm:"column:sender"`
	Value      string `json:"value" gorm:"column:value"`
	Target     string `json:"target" gorm:"column:target"`
	Calldata   string `json:"calldata" gorm:"column:calldata"`
	Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash"`
	// Layer1Hash is NULL until the message is relayed on L1
	// (see UpdateLayer2StatusAndLayer1Hash).
	Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash;default:NULL"`
	Proof      string `json:"proof" gorm:"column:proof;default:NULL"`
	// Status defaults to 1; per the migration comment the enum is
	// undefined, pending, submitted, confirmed, failed, expired, relay_failed.
	Status int `json:"status" gorm:"column:status;default:1"`
}

// NewL2Message create an L2Message instance
func NewL2Message(db *gorm.DB) *L2Message {
	return &L2Message{db: db}
}

// TableName define the L2Message table name
func (*L2Message) TableName() string {
	return "l2_message"
}
// GetL2Messages returns the messages matching the given field filters, ordered
// by orderByList and capped at limit (0 means no limit).
func (m *L2Message) GetL2Messages(fields map[string]interface{}, orderByList []string, limit int) ([]L2Message, error) {
	query := m.db
	for col, val := range fields {
		query = query.Where(col, val)
	}
	for _, clause := range orderByList {
		query = query.Order(clause)
	}
	if limit != 0 {
		query = query.Limit(limit)
	}
	var msgs []L2Message
	if err := query.Find(&msgs).Error; err != nil {
		return nil, err
	}
	return msgs, nil
}
// GetLayer2LatestWatchedHeight returns the latest L2 height stored in the
// table, or -1 when no messages have been recorded yet.
// @note It's not correct, since we may don't have message in some blocks.
// But it will only be called at start, some redundancy is acceptable.
func (m *L2Message) GetLayer2LatestWatchedHeight() (int64, error) {
	result := m.db.Model(&L2Message{}).Select("COALESCE(MAX(height), -1)").Row()
	if result.Err() != nil {
		return -1, result.Err()
	}
	var maxNumber int64
	if err := result.Scan(&maxNumber); err != nil {
		// Bug fix: return the -1 "no data" sentinel on scan failure, for
		// consistency with the success path and the sibling helpers
		// (GetL2BlocksLatestHeight, GetLayer1LatestWatchedHeight); it
		// previously returned 0.
		return -1, err
	}
	return maxNumber, nil
}
// GetL2MessageByNonce fetches the message with the given nonce.
// for unit test
func (m *L2Message) GetL2MessageByNonce(nonce uint64) (*L2Message, error) {
	var msg L2Message
	if err := m.db.Where("nonce", nonce).First(&msg).Error; err != nil {
		return nil, err
	}
	return &msg, nil
}
// SaveL2Messages batch-inserts the given layer2 messages. An empty input is a
// no-op. On failure the nonces and heights of the whole batch are logged to
// aid debugging.
func (m *L2Message) SaveL2Messages(ctx context.Context, messages []L2Message) error {
	if len(messages) == 0 {
		return nil
	}
	insertErr := m.db.WithContext(ctx).Create(&messages).Error
	if insertErr != nil {
		nonces := make([]uint64, 0, len(messages))
		heights := make([]uint64, 0, len(messages))
		for _, msg := range messages {
			nonces = append(nonces, msg.Nonce)
			heights = append(heights, msg.Height)
		}
		log.Error("failed to insert layer2Messages", "nonces", nonces, "heights", heights, "err", insertErr)
	}
	return insertErr
}
// UpdateLayer2Status updates message status, given message hash
func (m *L2Message) UpdateLayer2Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
	err := m.db.Model(&L2Message{}).WithContext(ctx).Where("msg_hash", msgHash).Update("status", int(status)).Error
	if err != nil {
		return err
	}
	return nil
}

// UpdateLayer2StatusAndLayer1Hash updates message status and layer1 transaction hash, given message hash
func (m *L2Message) UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer1Hash string) error {
	// Both columns are written in one UPDATE so status and relay tx hash
	// cannot get out of sync.
	updateFields := map[string]interface{}{
		"status":      int(status),
		"layer1_hash": layer1Hash,
	}
	err := m.db.Model(&L2Message{}).WithContext(ctx).Where("msg_hash", msgHash).Updates(updateFields).Error
	if err != nil {
		return err
	}
	return nil
}

View File

@@ -0,0 +1,61 @@
package migrate
import (
"database/sql"
"embed"
"os"
"strconv"
"github.com/pressly/goose/v3"
)
//go:embed migrations/*.sql
var embedMigrations embed.FS // SQL migrations compiled into the binary

// MigrationsDir migration dir
const MigrationsDir string = "migrations"

func init() {
	// Read migrations from the embedded FS rather than the working directory.
	goose.SetBaseFS(embedMigrations)
	// Sequential numbering; migration table name is scroll-specific so it does
	// not collide with other goose users sharing the database.
	goose.SetSequential(true)
	goose.SetTableName("scroll_migrations")
	// SQL statement logging is opt-in via the LOG_SQL_MIGRATIONS env var;
	// a parse failure simply leaves it disabled.
	verbose, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))
	goose.SetVerbose(verbose)
}

// Migrate applies all pending migrations, tolerating out-of-order (missing)
// ones.
func Migrate(db *sql.DB) error {
	return goose.Up(db, MigrationsDir, goose.WithAllowMissing())
}

// Rollback rolls back to the given version, or by a single step when version
// is nil.
func Rollback(db *sql.DB, version *int64) error {
	if version != nil {
		return goose.DownTo(db, MigrationsDir, *version)
	}
	return goose.Down(db, MigrationsDir)
}

// ResetDB rolls everything back to version 0 and re-applies all migrations.
func ResetDB(db *sql.DB) error {
	// new(int64) yields a pointer to 0, i.e. "roll back to the beginning".
	if err := Rollback(db, new(int64)); err != nil {
		return err
	}
	return Migrate(db)
}

// Current get current version
func Current(db *sql.DB) (int64, error) {
	return goose.GetDBVersion(db)
}

// Status is normal or not
func Status(db *sql.DB) error {
	return goose.Version(db, MigrationsDir)
}

// Create a new migration folder
func Create(db *sql.DB, name, migrationType string) error {
	return goose.Create(db, MigrationsDir, name, migrationType)
}

View File

@@ -0,0 +1,86 @@
package migrate
import (
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/database"
)
var (
	base *docker.App // dockerized postgres used by all subtests
	pgDB *sqlx.DB    // connection into the test database
)

// initEnv starts the database container and opens a connection to it.
func initEnv(t *testing.T) error {
	// Start db container.
	base.RunDBImage(t)
	// Create db orm handler.
	factory, err := database.NewOrmFactory(base.DBConfig)
	if err != nil {
		return err
	}
	pgDB = factory.GetDB()
	return nil
}

// TestMigrate runs the migration subtests in a fixed order; later subtests
// depend on the database state left behind by earlier ones.
func TestMigrate(t *testing.T) {
	base = docker.NewDockerApp()
	if err := initEnv(t); err != nil {
		t.Fatal(err)
	}
	t.Run("testCurrent", testCurrent)
	t.Run("testStatus", testStatus)
	t.Run("testResetDB", testResetDB)
	t.Run("testMigrate", testMigrate)
	t.Run("testRollback", testRollback)
	t.Cleanup(func() {
		base.Free()
	})
}

// testCurrent: a fresh database starts at migration version 0.
func testCurrent(t *testing.T) {
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, 0, int(cur))
}

// testStatus: goose can report migration status without error.
func testStatus(t *testing.T) {
	status := Status(pgDB.DB)
	assert.NoError(t, status)
}

// testResetDB: a full reset applies every migration file.
func testResetDB(t *testing.T) {
	assert.NoError(t, ResetDB(pgDB.DB))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	// total number of tables.
	assert.Equal(t, 5, int(cur))
}

// testMigrate: migrating an up-to-date database is a no-op that keeps a
// positive version.
func testMigrate(t *testing.T) {
	assert.NoError(t, Migrate(pgDB.DB))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, cur > 0)
}

// testRollback: rolling back with a nil version steps down exactly one
// migration.
func testRollback(t *testing.T) {
	version, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, version > 0)
	assert.NoError(t, Rollback(pgDB.DB, nil))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, cur+1 == version)
}

View File

@@ -0,0 +1,38 @@
-- +goose Up
-- +goose StatementBegin
-- block_trace stores one row per L2 block: indexed header fields plus the
-- full JSON execution trace. batch_hash stays NULL until the block is
-- assigned to a batch (see block_batch).
-- TODO: use foreign key for batch_id?
-- TODO: why tx_num is bigint?
create table block_trace
(
    number          BIGINT  NOT NULL,
    hash            VARCHAR NOT NULL,
    parent_hash     VARCHAR NOT NULL,
    trace           JSON    NOT NULL,
    batch_hash      VARCHAR DEFAULT NULL,
    tx_num          INTEGER NOT NULL,
    gas_used        BIGINT  NOT NULL,
    block_timestamp NUMERIC NOT NULL
);

create unique index block_trace_hash_uindex
    on block_trace (hash);
create unique index block_trace_number_uindex
    on block_trace (number);
create unique index block_trace_parent_uindex
    on block_trace (number, parent_hash);
create unique index block_trace_parent_hash_uindex
    on block_trace (hash, parent_hash);
-- non-unique: many blocks share a batch
create index block_trace_batch_hash_index
    on block_trace (batch_hash);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists block_trace;
-- +goose StatementEnd

View File

@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin
-- l1_message stores layer1->layer2 bridge messages, keyed by queue index and
-- message hash. layer2_hash is back-filled once the message is relayed on L2.
create table l1_message
(
    queue_index BIGINT  NOT NULL,
    msg_hash    VARCHAR NOT NULL,
    height      BIGINT  NOT NULL,
    gas_limit   BIGINT  NOT NULL,
    sender      VARCHAR NOT NULL,
    target      VARCHAR NOT NULL,
    value       VARCHAR NOT NULL,
    calldata    TEXT    NOT NULL,
    layer1_hash VARCHAR NOT NULL,
    layer2_hash VARCHAR DEFAULT NULL,
    status      INTEGER DEFAULT 1,
    created_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l1_message_hash_uindex
    on l1_message (msg_hash);
create unique index l1_message_nonce_uindex
    on l1_message (queue_index);
create index l1_message_height_index
    on l1_message (height);

-- Keep updated_at current on every row update.
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
   NEW.updated_at = CURRENT_TIMESTAMP;
   RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l1_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l1_message;
-- +goose StatementEnd

View File

@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin
-- l2_message stores layer2->layer1 bridge messages, keyed by nonce and
-- message hash. layer1_hash and proof are back-filled as the message is
-- proven and relayed on L1.
create table l2_message
(
    nonce       BIGINT  NOT NULL,
    msg_hash    VARCHAR NOT NULL,
    height      BIGINT  NOT NULL,
    sender      VARCHAR NOT NULL,
    target      VARCHAR NOT NULL,
    value       VARCHAR NOT NULL,
    calldata    TEXT    NOT NULL,
    layer2_hash VARCHAR NOT NULL,
    layer1_hash VARCHAR DEFAULT NULL,
    proof       TEXT    DEFAULT NULL,
    status      INTEGER DEFAULT 1,
    created_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l2_message_hash_uindex
    on l2_message (msg_hash);
create unique index l2_message_nonce_uindex
    on l2_message (nonce);
create index l2_message_height_index
    on l2_message (height);

-- Keep updated_at current on every row update (CREATE OR REPLACE: the same
-- shared function is also defined by the l1_message migration).
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
   NEW.updated_at = CURRENT_TIMESTAMP;
   RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l2_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l2_message;
-- +goose StatementEnd

View File

@@ -0,0 +1,49 @@
-- +goose Up
-- +goose StatementBegin
-- block_batch stores one row per rollup batch: block range, roots, proving
-- and rollup lifecycle status, the aggregated proof, and per-stage
-- timestamps.
create table block_batch
(
    hash               VARCHAR NOT NULL,
    index              BIGINT  NOT NULL,
    start_block_number BIGINT  NOT NULL,
    start_block_hash   VARCHAR NOT NULL,
    end_block_number   BIGINT  NOT NULL,
    end_block_hash     VARCHAR NOT NULL,
    parent_hash        VARCHAR NOT NULL,
    state_root         VARCHAR NOT NULL,
    total_tx_num       BIGINT  NOT NULL,
    total_l1_tx_num    BIGINT  NOT NULL,
    total_l2_gas       BIGINT  NOT NULL,
    proving_status     INTEGER DEFAULT 1,
    proof              BYTEA   DEFAULT NULL,
    proof_time_sec     INTEGER DEFAULT 0,
    rollup_status      INTEGER DEFAULT 1,
    commit_tx_hash     VARCHAR DEFAULT NULL,
    finalize_tx_hash   VARCHAR DEFAULT NULL,
    oracle_status      INTEGER DEFAULT 1,
    oracle_tx_hash     VARCHAR DEFAULT NULL,
    created_at         TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- lifecycle timestamps, set as the batch advances through each stage
    prover_assigned_at TIMESTAMP(0) DEFAULT NULL,
    proved_at          TIMESTAMP(0) DEFAULT NULL,
    committed_at       TIMESTAMP(0) DEFAULT NULL,
    finalized_at       TIMESTAMP(0) DEFAULT NULL
);

comment
on column block_batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';

comment
on column block_batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped, commit_failed, finalize_failed';

comment
on column block_batch.oracle_status is 'undefined, pending, importing, imported, failed';

create unique index block_batch_hash_uindex
    on block_batch (hash);
create unique index block_batch_index_uindex
    on block_batch (index);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists block_batch;
-- +goose StatementEnd

View File

@@ -0,0 +1,33 @@
-- +goose Up
-- +goose StatementBegin
-- l1_block stores observed L1 block headers (RLP-encoded) together with the
-- import and gas-oracle relay status for each block.
create table l1_block
(
    number         BIGINT  NOT NULL,
    hash           VARCHAR NOT NULL,
    header_rlp     TEXT    NOT NULL,
    base_fee       BIGINT  NOT NULL,
    block_status   INTEGER DEFAULT 1,
    import_tx_hash VARCHAR DEFAULT NULL,
    oracle_status  INTEGER DEFAULT 1,
    oracle_tx_hash VARCHAR DEFAULT NULL
);

comment
on column l1_block.block_status is 'undefined, pending, importing, imported, failed';

comment
on column l1_block.oracle_status is 'undefined, pending, importing, imported, failed';

create unique index l1_block_hash_uindex
    on l1_block (hash);
create unique index l1_block_number_uindex
    on l1_block (number);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l1_block;
-- +goose StatementEnd

View File

@@ -0,0 +1,236 @@
package types
import (
"bufio"
"bytes"
"encoding/binary"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
abi "scroll-tech/bridge/abi"
)
// PublicInputHashConfig is the configuration of how to compute the public input hash.
type PublicInputHashConfig struct {
	// MaxTxNum is the tx-slot count the hash preimage is padded to.
	MaxTxNum int `json:"max_tx_num"`
	// PaddingTxHash fills the unused tx slots.
	PaddingTxHash common.Hash `json:"padding_tx_hash"`
}

// Defaults used by BatchData.Hash when no PublicInputHashConfig is set.
const defaultMaxTxNum = 44

var defaultPaddingTxHash = [32]byte{}
// BatchData contains info of batch to be committed.
type BatchData struct {
	Batch        abi.IScrollChainBatch
	TxHashes     []common.Hash // hashes of all L2 txs in the batch, in order
	TotalTxNum   uint64
	TotalL1TxNum uint64
	TotalL2Gas   uint64

	// cache for the BatchHash; computed lazily by Hash().
	hash *common.Hash
	// The config to compute the public input hash, or the block hash.
	// If it is nil, the hash calculation will use `defaultMaxTxNum` and `defaultPaddingTxHash`.
	piCfg *PublicInputHashConfig
}
// Timestamp returns the timestamp of the first block in the batch, or 0 when
// the batch contains no blocks.
func (b *BatchData) Timestamp() uint64 {
	blocks := b.Batch.Blocks
	if len(blocks) > 0 {
		return blocks[0].Timestamp
	}
	return 0
}
// Hash calculates the hash of this batch, caching the result for later calls.
// The preimage layout (roots, per-block contexts, tx hashes, padding) and the
// big-endian field encodings are consensus-critical: they must match the
// on-chain public-input hash computation exactly.
func (b *BatchData) Hash() *common.Hash {
	if b.hash != nil {
		return b.hash
	}
	// scratch buffer reused for all fixed-width integer encodings
	buf := make([]byte, 8)
	hasher := crypto.NewKeccakState()

	// 1. hash PrevStateRoot, NewStateRoot, WithdrawTrieRoot
	// @todo: panic on error here.
	_, _ = hasher.Write(b.Batch.PrevStateRoot[:])
	_, _ = hasher.Write(b.Batch.NewStateRoot[:])
	_, _ = hasher.Write(b.Batch.WithdrawTrieRoot[:])

	// 2. hash all block contexts
	for _, block := range b.Batch.Blocks {
		// write BlockHash & ParentHash
		_, _ = hasher.Write(block.BlockHash[:])
		_, _ = hasher.Write(block.ParentHash[:])
		// write BlockNumber
		binary.BigEndian.PutUint64(buf, block.BlockNumber)
		_, _ = hasher.Write(buf)
		// write Timestamp
		binary.BigEndian.PutUint64(buf, block.Timestamp)
		_, _ = hasher.Write(buf)
		// write BaseFee as a 32-byte big-endian value; a nil BaseFee hashes
		// as all zeroes
		var baseFee [32]byte
		if block.BaseFee != nil {
			baseFee = newByte32FromBytes(block.BaseFee.Bytes())
		}
		_, _ = hasher.Write(baseFee[:])
		// write GasLimit
		binary.BigEndian.PutUint64(buf, block.GasLimit)
		_, _ = hasher.Write(buf)
		// write NumTransactions (2 bytes)
		binary.BigEndian.PutUint16(buf[:2], block.NumTransactions)
		_, _ = hasher.Write(buf[:2])
		// write NumL1Messages (2 bytes)
		binary.BigEndian.PutUint16(buf[:2], block.NumL1Messages)
		_, _ = hasher.Write(buf[:2])
	}

	// 3. add all tx hashes
	for _, txHash := range b.TxHashes {
		_, _ = hasher.Write(txHash[:])
	}

	// 4. append empty tx hash up to MaxTxNum so the preimage has a fixed
	// size regardless of how many txs the batch contains
	maxTxNum := defaultMaxTxNum
	paddingTxHash := common.Hash(defaultPaddingTxHash)
	if b.piCfg != nil {
		maxTxNum = b.piCfg.MaxTxNum
		paddingTxHash = b.piCfg.PaddingTxHash
	}
	for i := len(b.TxHashes); i < maxTxNum; i++ {
		_, _ = hasher.Write(paddingTxHash[:])
	}

	b.hash = new(common.Hash)
	_, _ = hasher.Read(b.hash[:])
	return b.hash
}
// NewBatchData creates a BatchData given the parent batch information and the traces of the blocks
// included in this batch. It fills in the per-block contexts, concatenates the
// length-prefixed RLP encoding of every L2 transaction, and links the batch to
// its parent via index, parent hash and previous state root.
func NewBatchData(parentBatch *BatchInfo, blocks []*WrappedBlock, piCfg *PublicInputHashConfig) *BatchData {
	batchData := new(BatchData)
	batch := &batchData.Batch

	// set BatchIndex, ParentBatchHash
	batch.BatchIndex = parentBatch.Index + 1
	batch.ParentBatchHash = common.HexToHash(parentBatch.Hash)
	batch.Blocks = make([]abi.IScrollChainBlockContext, len(blocks))

	var batchTxDataBuf bytes.Buffer
	batchTxDataWriter := bufio.NewWriter(&batchTxDataBuf)

	for i, block := range blocks {
		batchData.TotalTxNum += uint64(len(block.Transactions))
		batchData.TotalL2Gas += block.Header.GasUsed

		// set baseFee to 0 when it's nil in the block header
		baseFee := block.Header.BaseFee
		if baseFee == nil {
			baseFee = big.NewInt(0)
		}

		batch.Blocks[i] = abi.IScrollChainBlockContext{
			BlockHash:       block.Header.Hash(),
			ParentHash:      block.Header.ParentHash,
			BlockNumber:     block.Header.Number.Uint64(),
			Timestamp:       block.Header.Time,
			BaseFee:         baseFee,
			GasLimit:        block.Header.GasLimit,
			NumTransactions: uint16(len(block.Transactions)),
			NumL1Messages:   0, // TODO: currently use 0, will re-enable after we use l2geth to include L1 messages
		}

		// fill in RLP-encoded transactions: each entry is a 4-byte big-endian
		// length prefix followed by the tx's binary encoding
		for _, txData := range block.Transactions {
			// NOTE(review): decode/marshal errors are silently ignored here —
			// a malformed tx would be encoded as empty data; confirm inputs
			// are pre-validated upstream.
			data, _ := hexutil.Decode(txData.Data)
			// right now we only support legacy tx
			tx := types.NewTx(&types.LegacyTx{
				Nonce:    txData.Nonce,
				To:       txData.To,
				Value:    txData.Value.ToInt(),
				Gas:      txData.Gas,
				GasPrice: txData.GasPrice.ToInt(),
				Data:     data,
				V:        txData.V.ToInt(),
				R:        txData.R.ToInt(),
				S:        txData.S.ToInt(),
			})
			rlpTxData, _ := tx.MarshalBinary()
			var txLen [4]byte
			binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
			_, _ = batchTxDataWriter.Write(txLen[:])
			_, _ = batchTxDataWriter.Write(rlpTxData)
			batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
		}

		// PrevStateRoot comes from the parent batch, anchored at the first block
		if i == 0 {
			batch.PrevStateRoot = common.HexToHash(parentBatch.StateRoot)
		}

		// set NewStateRoot & WithdrawTrieRoot from the last block
		if i == len(blocks)-1 {
			batch.NewStateRoot = block.Header.Root
			batch.WithdrawTrieRoot = block.WithdrawTrieRoot
		}
	}

	if err := batchTxDataWriter.Flush(); err != nil {
		panic("Buffered I/O flush failed")
	}

	batch.L2Transactions = batchTxDataBuf.Bytes()
	batchData.piCfg = piCfg

	return batchData
}
// NewGenesisBatchData generates the batch that contains the genesis block.
// Panics if the supplied trace is not for block number 0.
func NewGenesisBatchData(genesisBlockTrace *WrappedBlock) *BatchData {
	header := genesisBlockTrace.Header
	if header.Number.Uint64() != 0 {
		panic("invalid genesis block trace: block number is not 0")
	}

	batchData := new(BatchData)
	batch := &batchData.Batch

	// fill in batch information
	batch.BatchIndex = 0
	batch.Blocks = make([]abi.IScrollChainBlockContext, 1)
	batch.NewStateRoot = header.Root
	// PrevStateRoot, WithdrawTrieRoot, ParentBatchHash should all be 0
	// L2Transactions should be empty

	// fill in block context
	batch.Blocks[0] = abi.IScrollChainBlockContext{
		BlockHash:       header.Hash(),
		ParentHash:      header.ParentHash,
		BlockNumber:     header.Number.Uint64(),
		Timestamp:       header.Time,
		BaseFee:         header.BaseFee,
		GasLimit:        header.GasLimit,
		NumTransactions: 0,
		NumL1Messages:   0,
	}

	return batchData
}
// newByte32FromBytes right-aligns the big-endian bytes of b into a 32-byte
// array, keeping only the least-significant 32 bytes when b is longer.
func newByte32FromBytes(b []byte) [32]byte {
	var out [32]byte
	if n := len(b); n > 32 {
		b = b[n-32:]
	}
	copy(out[32-len(b):], b)
	return out
}

View File

@@ -0,0 +1,90 @@
package types
import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
abi "scroll-tech/bridge/abi"
)
// TestBatchHash pins the batch public-input hash against golden values: one
// for a single legacy tx, and one after swapping in a different tx (which
// must change the hash).
func TestBatchHash(t *testing.T) {
	txBytes := common.Hex2Bytes("02f8710582fd14808506e38dccc9825208944d496ccc28058b1d74b7a19541663e21154f9c848801561db11e24a43380c080a0d890606d7a35b2ab0f9b866d62c092d5b163f3e6a55537ae1485aac08c3f8ff7a023997be2d32f53e146b160fff0ba81e81dbb4491c865ab174d15c5b3d28c41ae")
	tx := new(gethTypes.Transaction)
	if err := tx.UnmarshalBinary(txBytes); err != nil {
		t.Fatalf("invalid tx hex string: %s", err)
	}

	batchData := new(BatchData)
	batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
	// small padding config so the golden hashes stay cheap to compute
	batchData.piCfg = &PublicInputHashConfig{
		MaxTxNum:      4,
		PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
	}

	batch := &batchData.Batch
	batch.PrevStateRoot = common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000cafe")

	block := abi.IScrollChainBlockContext{
		BlockNumber:     51966,
		Timestamp:       123456789,
		BaseFee:         new(big.Int).SetUint64(0),
		GasLimit:        10000000000000000,
		NumTransactions: 1,
		NumL1Messages:   0,
	}
	batch.Blocks = append(batch.Blocks, block)

	hash := batchData.Hash()
	assert.Equal(t, *hash, common.HexToHash("0xa9f2ca3175794f91226a410ba1e60fff07a405c957562675c4149b77e659d805"))

	// use a different tx hash
	txBytes = common.Hex2Bytes("f8628001830f424094000000000000000000000000000000000000bbbb8080820a97a064e07cd8f939e2117724bdcbadc80dda421381cbc2a1f4e0d093d9cc5c5cf68ea03e264227f80852d88743cd9e43998f2746b619180366a87e4531debf9c3fa5dc")
	tx = new(gethTypes.Transaction)
	if err := tx.UnmarshalBinary(txBytes); err != nil {
		t.Fatalf("invalid tx hex string: %s", err)
	}
	batchData.TxHashes[0] = tx.Hash()

	batchData.hash = nil // clear the cache so Hash() recomputes
	assert.Equal(t, *batchData.Hash(), common.HexToHash("0x398cb22bbfa1665c1b342b813267538a4c933d7f92d8bd9184aba0dd1122987b"))
}
// TestNewGenesisBatch builds the genesis batch from a fixed genesis block
// header and pins both the header hash and the resulting batch hash.
func TestNewGenesisBatch(t *testing.T) {
	genesisBlock := &gethTypes.Header{
		UncleHash:   common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		Root:        common.HexToHash("0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5"),
		TxHash:      common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
		ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
		Difficulty:  big.NewInt(1),
		Number:      big.NewInt(0),
		GasLimit:    940000000,
		GasUsed:     0,
		Time:        1639724192,
		Extra:       common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000214f8d488aa9ebf83e30bad45fb8f9c8ee2509f5511caff794753d07e9dfb218cfc233bb62d2c57022783094e1a7edb6f069f8424bb68496a0926b130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
		BaseFee:     big.NewInt(1000000000),
	}
	// Sanity-check the fixture header itself before hashing a batch built from it.
	assert.Equal(
		t,
		genesisBlock.Hash().Hex(),
		"0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
		"wrong genesis block header",
	)
	blockTrace := &WrappedBlock{genesisBlock, nil, common.Hash{}}
	batchData := NewGenesisBatchData(blockTrace)
	t.Log(batchData.Batch.Blocks[0])
	// Pin the public-input config so the expected batch hash stays stable.
	batchData.piCfg = &PublicInputHashConfig{
		MaxTxNum:      25,
		PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
	}
	assert.Equal(
		t,
		batchData.Hash().Hex(),
		"0x65cf210e30f75cf8fd198df124255f73bc08d6324759e828a784fa938e7ac43d",
		"wrong genesis batch hash",
	)
}

View File

@@ -0,0 +1,21 @@
package types
import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
)
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
	Header *types.Header `json:"header"`
	// Transactions is only used to recover types.Transactions; the `from`
	// field of types.TransactionData would otherwise be missing.
	Transactions []*types.TransactionData `json:"transactions"`
	// WithdrawTrieRoot is the withdraw trie root recorded for this block
	// (omitted from JSON when zero).
	WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}
// BatchInfo contains the BlockBatch's main info.
type BatchInfo struct {
	// Index is the batch index.
	Index uint64 `json:"index"`
	// Hash is the batch hash, hex-encoded.
	Hash string `json:"hash"`
	// StateRoot is the state root after the batch, hex-encoded.
	StateRoot string `json:"state_root"`
}

View File

@@ -0,0 +1,56 @@
package utils
import (
"context"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
)
type ethClient interface {
BlockNumber(ctx context.Context) (uint64, error)
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
}
// GetLatestConfirmedBlockNumber resolves a confirmation parameter to a
// concrete block number on the given client.
//
// Supported values of confirm:
//   - rpc.SafeBlockNumber / rpc.FinalizedBlockNumber: query the header at the
//     corresponding tag and return its number.
//   - rpc.LatestBlockNumber: return the client's current head number.
//   - a non-negative integer N: return head-N (the newest block with at least
//     N confirmations), or 0 if the chain is shorter than N blocks.
//
// Any other value yields an error.
func GetLatestConfirmedBlockNumber(ctx context.Context, client ethClient, confirm rpc.BlockNumber) (uint64, error) {
	switch {
	case confirm == rpc.SafeBlockNumber || confirm == rpc.FinalizedBlockNumber:
		// "safe"/"finalized" are negative sentinel values understood by the
		// node; pass the sentinel through as the block-number argument.
		header, err := client.HeaderByNumber(ctx, big.NewInt(int64(confirm)))
		if err != nil {
			return 0, err
		}
		if !header.Number.IsInt64() {
			return 0, fmt.Errorf("received invalid block confirm: %v", header.Number)
		}
		return header.Number.Uint64(), nil
	case confirm == rpc.LatestBlockNumber:
		return client.BlockNumber(ctx)
	case confirm.Int64() >= 0: // If it's a positive integer, consider it as a certain confirm value.
		number, err := client.BlockNumber(ctx)
		if err != nil {
			return 0, err
		}
		cfmNum := uint64(confirm.Int64())
		if number >= cfmNum {
			return number - cfmNum, nil
		}
		// The chain is shorter than the requested confirmation depth.
		return 0, nil
	default:
		return 0, fmt.Errorf("unknown confirmation type: %v", confirm)
	}
}

View File

@@ -0,0 +1,134 @@
package utils
import (
"context"
"encoding/json"
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/go-ethereum/common/math"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
)
var (
	// tests enumerates JSON encodings of rpc.BlockNumber together with the
	// expected decode result; mustFail marks inputs that must be rejected.
	tests = []struct {
		input    string
		mustFail bool
		expected rpc.BlockNumber
	}{
		{`"0x"`, true, rpc.BlockNumber(0)},
		{`"0x0"`, false, rpc.BlockNumber(0)},
		{`"0X1"`, false, rpc.BlockNumber(1)},
		{`"0x00"`, true, rpc.BlockNumber(0)},
		{`"0x01"`, true, rpc.BlockNumber(0)},
		{`"0x1"`, false, rpc.BlockNumber(1)},
		{`"0x12"`, false, rpc.BlockNumber(18)},
		{`"0x7fffffffffffffff"`, false, rpc.BlockNumber(math.MaxInt64)},
		{`"0x8000000000000000"`, true, rpc.BlockNumber(0)},
		{"0", true, rpc.BlockNumber(0)},
		{`"ff"`, true, rpc.BlockNumber(0)},
		{`"safe"`, false, rpc.SafeBlockNumber},
		{`"finalized"`, false, rpc.FinalizedBlockNumber},
		{`"pending"`, false, rpc.PendingBlockNumber},
		{`"latest"`, false, rpc.LatestBlockNumber},
		{`"earliest"`, false, rpc.EarliestBlockNumber},
		{`someString`, true, rpc.BlockNumber(0)},
		{`""`, true, rpc.BlockNumber(0)},
		{``, true, rpc.BlockNumber(0)},
	}
)
// TestUnmarshalJSON decodes every fixture input into an rpc.BlockNumber and
// checks both the error expectation and the decoded value.
func TestUnmarshalJSON(t *testing.T) {
	for i, tc := range tests {
		var got rpc.BlockNumber
		err := json.Unmarshal([]byte(tc.input), &got)
		if tc.mustFail && err == nil {
			t.Errorf("Test %d should fail", i)
			continue
		}
		if !tc.mustFail && err != nil {
			t.Errorf("Test %d should pass but got err: %v", i, err)
			continue
		}
		// On a rejected input, got stays at its zero value and must still
		// match the fixture's expected value.
		if got != tc.expected {
			t.Errorf("Test %d got unexpected value, want %d, got %d", i, tc.expected, got)
		}
	}
}
// TestMarshalJSON checks that a BlockNumber decoded from a valid fixture
// input re-encodes to the same JSON as marshalling the expected value
// directly (a decode/encode round-trip).
func TestMarshalJSON(t *testing.T) {
	for i, test := range tests {
		if test.mustFail {
			// Inputs that cannot be decoded have nothing to round-trip.
			continue
		}
		var num rpc.BlockNumber
		err := json.Unmarshal([]byte(test.input), &num)
		assert.NoError(t, err)
		want, err := json.Marshal(test.expected)
		assert.NoError(t, err)
		got, err := json.Marshal(&num)
		assert.NoError(t, err)
		// Compare the JSON encodings (and report them) rather than the
		// numeric values, since tags like "safe" encode as strings.
		if string(want) != string(got) {
			t.Errorf("Test %d got unexpected JSON, want %s, got %s", i, want, got)
		}
	}
}
// MockEthClient is a stub ethClient whose head block number is fixed to val.
type MockEthClient struct {
	// val is the mocked head ("latest") block number.
	val uint64
}

// BlockNumber returns the configured head block number and never errors.
func (e MockEthClient) BlockNumber(ctx context.Context) (uint64, error) {
	return e.val, nil
}

// HeaderByNumber returns a header whose number mimics a node's handling of
// the special tags: "latest" maps to the head, "safe" to head-6, and
// "finalized" to head-12 (all clamped at zero); any other requested number
// is echoed back unchanged.
func (e MockEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
	var blockNumber int64
	switch number.Int64() {
	case int64(rpc.LatestBlockNumber):
		blockNumber = int64(e.val)
	case int64(rpc.SafeBlockNumber):
		blockNumber = int64(e.val) - 6
	case int64(rpc.FinalizedBlockNumber):
		blockNumber = int64(e.val) - 12
	default:
		blockNumber = number.Int64()
	}
	// Clamp so short chains never yield a negative block number.
	if blockNumber < 0 {
		blockNumber = 0
	}
	return &types.Header{Number: new(big.Int).SetInt64(blockNumber)}, nil
}
// TestGetLatestConfirmedBlockNumber runs GetLatestConfirmedBlockNumber
// against a mock client over a table of (head, confirmation) pairs.
func TestGetLatestConfirmedBlockNumber(t *testing.T) {
	ctx := context.Background()
	client := MockEthClient{}
	for _, tc := range []struct {
		blockNumber    uint64
		confirmation   rpc.BlockNumber
		expectedResult uint64
	}{
		{5, 6, 0},
		{7, 6, 1},
		{10, 2, 8},
		{0, 1, 0},
		{3, 0, 3},
		{15, 15, 0},
		{16, rpc.SafeBlockNumber, 10},
		{22, rpc.FinalizedBlockNumber, 10},
		{10, rpc.LatestBlockNumber, 10},
		{5, rpc.SafeBlockNumber, 0},
		{11, rpc.FinalizedBlockNumber, 0},
	} {
		client.val = tc.blockNumber
		confirmed, err := GetLatestConfirmedBlockNumber(ctx, &client, tc.confirmation)
		assert.NoError(t, err)
		assert.Equal(t, tc.expectedResult, confirmed)
	}
}

View File

@@ -0,0 +1,43 @@
package utils
import (
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"scroll-tech/bridge/internal/config"
)
// InitDB opens a postgres-backed gorm handle using config.DSN, applies the
// connection-pool limits from the config, and pings the database to verify
// connectivity before returning.
func InitDB(config *config.DBConfig) (*gorm.DB, error) {
	db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
		// Logs every statement at Info level; NOTE(review): this is verbose
		// for production use — confirm it is intended outside development.
		Logger: logger.Default.LogMode(logger.Info),
	})
	if err != nil {
		return nil, err
	}
	// Reach the underlying *sql.DB to configure the connection pool.
	sqlDB, err := db.DB()
	if err != nil {
		return nil, err
	}
	sqlDB.SetMaxOpenConns(config.MaxOpenNum)
	sqlDB.SetMaxIdleConns(config.MaxIdleNum)
	// Fail fast if the database is unreachable.
	if err = sqlDB.Ping(); err != nil {
		return nil, err
	}
	return db, nil
}
// CloseDB closes the underlying *sql.DB of the given gorm handler.
// Notice: the db handler should only be closed when the program exits,
// since the connection pool is shared by all sessions derived from db.
func CloseDB(db *gorm.DB) error {
	sqlDB, err := db.DB()
	if err != nil {
		return err
	}
	// Return Close's error directly instead of an if/return-nil dance.
	return sqlDB.Close()
}

View File

@@ -0,0 +1,65 @@
package utils
import (
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
bridgeAbi "scroll-tech/bridge/abi"
)
// Keccak2 computes the keccak256 hash of the concatenation of two bytes32 values.
func Keccak2(a common.Hash, b common.Hash) common.Hash {
	// crypto.Keccak256 is variadic and hashes the concatenation of its
	// arguments, so no intermediate append/copy is needed.
	return common.BytesToHash(crypto.Keccak256(a.Bytes(), b.Bytes()))
}
// ComputeMessageHash computes the cross-domain message hash: the keccak256 of
// the ABI-encoded relayMessage(sender, target, value, messageNonce, message)
// calldata of the L2 Scroll messenger.
func ComputeMessageHash(
	sender common.Address,
	target common.Address,
	value *big.Int,
	messageNonce *big.Int,
	message []byte,
) common.Hash {
	// NOTE(review): the Pack error is deliberately discarded — this assumes
	// L2ScrollMessengerABI contains relayMessage with exactly these argument
	// types; a mismatch would silently hash empty calldata. Confirm upstream.
	data, _ := bridgeAbi.L2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
	return common.BytesToHash(crypto.Keccak256(data))
}
// BufferToUint256Le convert bytes array to uint256 array assuming little-endian
func BufferToUint256Le(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
v := big.NewInt(0)
shft := big.NewInt(1)
for j := 0; j < 32; j++ {
v = new(big.Int).Add(v, new(big.Int).Mul(shft, big.NewInt(int64(buffer[i*32+j]))))
shft = new(big.Int).Mul(shft, big.NewInt(256))
}
buffer256[i] = v
}
return buffer256
}
// UnpackLog unpacks a retrieved log into the provided output structure. It
// verifies that the log's first topic matches the requested event's
// signature, decodes the non-indexed data (if any) into out, and parses the
// indexed arguments from the remaining topics.
// @todo: add unit test.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
	// A log carries the event signature as topic 0; an empty topic list
	// cannot match any named event, and indexing it would panic.
	if len(log.Topics) == 0 {
		return fmt.Errorf("event signature mismatch")
	}
	if log.Topics[0] != c.Events[event].ID {
		return fmt.Errorf("event signature mismatch")
	}
	if len(log.Data) > 0 {
		if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
			return err
		}
	}
	// Indexed inputs are encoded in topics[1:], in declaration order.
	var indexed abi.Arguments
	for _, arg := range c.Events[event].Inputs {
		if arg.Indexed {
			indexed = append(indexed, arg)
		}
	}
	return abi.ParseTopics(out, indexed, log.Topics[1:])
}

View File

@@ -0,0 +1,49 @@
package utils
import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
// TestKeccak2 pins Keccak2 against three known keccak256 digests of two
// concatenated 32-byte words.
func TestKeccak2(t *testing.T) {
	cases := []struct{ a, b, want string }{
		{
			"0x0000000000000000000000000000000000000000000000000000000000000000",
			"0x0000000000000000000000000000000000000000000000000000000000000000",
			"0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5",
		},
		{
			"0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5",
			"0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5",
			"0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30",
		},
		{
			"0x0000000000000000000000000000000000000000000000000000000000000001",
			"0x0000000000000000000000000000000000000000000000000000000000000002",
			"0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0",
		},
	}
	for _, c := range cases {
		hash := Keccak2(common.HexToHash(c.a), common.HexToHash(c.b))
		if hash != common.HexToHash(c.want) {
			t.Fatalf("Invalid keccak, want %s, got %s", c.want, hash.Hex())
		}
	}
}
// TestComputeMessageHash pins the cross-domain message hash for a fixed
// (sender, target, value, nonce, payload) tuple.
func TestComputeMessageHash(t *testing.T) {
	hash := ComputeMessageHash(
		common.HexToAddress("0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63"),
		common.HexToAddress("0x4592D8f8D7B001e72Cb26A73e4Fa1806a51aC79d"),
		big.NewInt(0),
		big.NewInt(1),
		[]byte("testbridgecontract"),
	)
	assert.Equal(t, "0xda253c04595a49017bb54b1b46088c69752b5ad2f0c47971ac76b8b25abec202", hash.String())
}
// TestBufferToUint256Le decodes a single 32-byte little-endian word whose
// value is 1.
func TestBufferToUint256Le(t *testing.T) {
	// Little-endian: the least-significant byte comes first.
	input := make([]byte, 32)
	input[0] = 0x01
	assert.Equal(t, []*big.Int{big.NewInt(1)}, BufferToUint256Le(input))
}

View File

@@ -1,11 +0,0 @@
package relayer
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
defaultL2MessageRelayMinGasLimit = 200000
)

545
bridge/testdata/blockTrace_02.json vendored Normal file
View File

@@ -0,0 +1,545 @@
{
"withdrawTrieRoot": "0x0000000000000000000000000000000000000000",
"coinbase": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"header": {
"parentHash": "0xe17f08d25ef61a8ee12aa29704b901345a597f5e45a9a0f603ae0f70845b54dc",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"transactionsRoot": "0x3057754c197f33e1fe799e996db6232b5257412feea05b3c1754738f0b33fe32",
"receiptsRoot": "0xd95b673818fa493deec414e01e610d97ee287c9421c8eff4102b1647c1a184e4",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x2",
"number": "0x2",
"gasLimit": "0x355418d1e8184",
"gasUsed": "0xa410",
"timestamp": "0x63807b2a",
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000004b54a94f0df14333e63c8a13dfe6097c1a08b5fd2c225a8dc0f199dae245aead55d6f774a980a0c925be407748d56a14106afda7ddc1dec342e7ee3b0d58a8df01",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x1de9",
"hash": "0xc7b6c7022c8386cdaf6fcd3d4f8d03dce257ae3664a072fdce511ecefce73ad0"
},
"transactions": [
{
"type": 0,
"nonce": 0,
"txHash": "0xb2febc1213baec968f6575789108e175273b8da8f412468098893084229f1542",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514",
"s": "0x34cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b"
},
{
"type": 0,
"nonce": 1,
"txHash": "0xe6ac2ffc543d07f1e280912a2abe3aa659bf83773740681151297ada1bb211dd",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xf039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316",
"s": "0x5a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1"
}
],
"storageTrace": {
"rootBefore": "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
"rootAfter": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"proofs": {
"0x01bae6BF68E9A03Fb2bc0615b1bf0d69ce9411eD": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0xc0c4C8bAEA3f6Acb49b6E1fb9e2ADEcEeaCB0cA2": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
]
}
},
"executionResults": [
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
},
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
}
],
"mptwitness": [
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
},
{
"pathPart": "0x3",
"root": "0xaf16fd780a8c7616b95b20da69f4ff26e0253238e996f9516445d6d6bf92b725",
"path": [
{
"value": "0x5bbe97e7e66485b203f9dfea64eb7fa7df06959b12cbde2beba14f8f91133a13",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
},
{
"value": "0x2e591357b02ab3117c35ad94a4e1a724fdbd95d6463da1f6c8017e6d000ecf02",
"sibling": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
{
"value": "0x794953bb5d8aa00f90383ff435ce2ea58e30e1da1061e69455c38496766ec10f",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
}
]
}

12877
bridge/testdata/blockTrace_03.json vendored Normal file

File diff suppressed because one or more lines are too long

View File

@@ -9,11 +9,15 @@ import (
"github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm"
bcmd "scroll-tech/bridge/cmd"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/common/docker" "scroll-tech/common/docker"
bcmd "scroll-tech/bridge/cmd"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
"scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/mock_bridge"
) )
var ( var (
@@ -41,9 +45,24 @@ var (
l2MessengerAddress common.Address l2MessengerAddress common.Address
) )
func setupDB(t *testing.T) *gorm.DB {
cfg := &config.DBConfig{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
db, err := utils.InitDB(cfg)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
return db
}
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
base = docker.NewDockerApp() base = docker.NewDockerApp()
bridgeApp = bcmd.NewBridgeApp(base, "../config.json") bridgeApp = bcmd.NewBridgeApp(base, "../conf/config.json")
m.Run() m.Run()
bridgeApp.Free() bridgeApp.Free()
base.Free() base.Free()

View File

@@ -6,24 +6,22 @@ import (
"testing" "testing"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
"scroll-tech/database" bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/database/migrate" "scroll-tech/bridge/internal/utils"
) )
func testImportL1GasPrice(t *testing.T) { func testImportL1GasPrice(t *testing.T) {
// Create db handler and reset db. db := setupDB(t)
db, err := database.NewOrmFactory(base.DBConfig) defer utils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t) prepareContracts(t)
@@ -45,48 +43,39 @@ func testImportL1GasPrice(t *testing.T) {
err = l1Watcher.FetchBlockHeader(number) err = l1Watcher.FetchBlockHeader(number)
assert.NoError(t, err) assert.NoError(t, err)
l1BlockOrm := orm.NewL1Block(db)
// check db status // check db status
latestBlockHeight, err := db.GetLatestL1BlockHeight() latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, number, latestBlockHeight) assert.Equal(t, number, latestBlockHeight)
blocks, err := db.GetL1BlockInfos(map[string]interface{}{ blocks, err := l1BlockOrm.GetL1Blocks(map[string]interface{}{"number": latestBlockHeight})
"number": latestBlockHeight,
})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(blocks), 1) assert.Equal(t, len(blocks), 1)
assert.Equal(t, blocks[0].GasOracleStatus, types.GasOraclePending) assert.Empty(t, blocks[0].OracleTxHash)
assert.Equal(t, blocks[0].OracleTxHash.Valid, false) assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)
// relay gas price // relay gas price
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
blocks, err = db.GetL1BlockInfos(map[string]interface{}{ blocks, err = l1BlockOrm.GetL1Blocks(map[string]interface{}{"number": latestBlockHeight})
"number": latestBlockHeight,
})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(blocks), 1) assert.Equal(t, len(blocks), 1)
assert.Equal(t, blocks[0].GasOracleStatus, types.GasOracleImporting) assert.NotEmpty(t, blocks[0].OracleTxHash)
assert.Equal(t, blocks[0].OracleTxHash.Valid, true) assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOracleImporting)
} }
func testImportL2GasPrice(t *testing.T) { func testImportL2GasPrice(t *testing.T) {
// Create db handler and reset db. db := setupDB(t)
db, err := database.NewOrmFactory(base.DBConfig) defer utils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t) prepareContracts(t)
l2Cfg := bridgeApp.Config.L2Config l2Cfg := bridgeApp.Config.L2Config
// Create L2Relayer
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig) l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
// add fake blocks // add fake blocks
traces := []*types.WrappedBlock{ traces := []*bridgeTypes.WrappedBlock{
{ {
Header: &geth_types.Header{ Header: &gethTypes.Header{
Number: big.NewInt(1), Number: big.NewInt(1),
ParentHash: common.Hash{}, ParentHash: common.Hash{},
Difficulty: big.NewInt(0), Difficulty: big.NewInt(0),
@@ -96,32 +85,35 @@ func testImportL2GasPrice(t *testing.T) {
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
}, },
} }
assert.NoError(t, db.InsertWrappedBlocks(traces))
parentBatch := &types.BlockBatch{ blockTraceOrm := orm.NewBlockTrace(db)
assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(traces))
parentBatch := &bridgeTypes.BatchInfo{
Index: 0, Index: 0,
Hash: "0x0000000000000000000000000000000000000000", Hash: "0x0000000000000000000000000000000000000000",
} }
batchData := types.NewBatchData(parentBatch, []*types.WrappedBlock{ batchData := bridgeTypes.NewBatchData(parentBatch, []*bridgeTypes.WrappedBlock{traces[0]}, l2Cfg.BatchProposerConfig.PublicInputConfig)
traces[0], blockBatchOrm := orm.NewBlockBatch(db)
}, l2Cfg.BatchProposerConfig.PublicInputConfig) err = db.Transaction(func(tx *gorm.DB) error {
_, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData)
// add fake batch if dbTxErr != nil {
dbTx, err := db.Beginx() return dbTxErr
}
return nil
})
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
assert.NoError(t, dbTx.Commit())
// check db status // check db status
batch, err := db.GetLatestBatch() batch, err := blockBatchOrm.GetLatestBatch()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, batch.OracleStatus, types.GasOraclePending) assert.Empty(t, batch.OracleTxHash)
assert.Equal(t, batch.OracleTxHash.Valid, false) assert.Equal(t, types.GasOracleStatus(batch.OracleStatus), types.GasOraclePending)
// relay gas price // relay gas price
l2Relayer.ProcessGasPriceOracle() l2Relayer.ProcessGasPriceOracle()
batch, err = db.GetLatestBatch() batch, err = blockBatchOrm.GetLatestBatch()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, batch.OracleStatus, types.GasOracleImporting) assert.NotEmpty(t, batch.OracleTxHash)
assert.Equal(t, batch.OracleTxHash.Valid, true) assert.Equal(t, types.GasOracleStatus(batch.OracleStatus), types.GasOracleImporting)
} }

View File

@@ -13,19 +13,15 @@ import (
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
"scroll-tech/database" "scroll-tech/bridge/internal/utils"
"scroll-tech/database/migrate"
) )
func testRelayL1MessageSucceed(t *testing.T) { func testRelayL1MessageSucceed(t *testing.T) {
// Create db handler and reset db. db := setupDB(t)
db, err := database.NewOrmFactory(base.DBConfig) defer utils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t) prepareContracts(t)
@@ -56,21 +52,22 @@ func testRelayL1MessageSucceed(t *testing.T) {
// l1 watch process events // l1 watch process events
l1Watcher.FetchContractEvent() l1Watcher.FetchContractEvent()
l1MessageOrm := orm.NewL1Message(db)
// check db status // check db status
msg, err := db.GetL1MessageByQueueIndex(nonce.Uint64()) msg, err := l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgPending) assert.Equal(t, types.MsgStatus(msg.Status), types.MsgPending)
assert.Equal(t, msg.Target, l2Auth.From.String()) assert.Equal(t, msg.Target, l2Auth.From.String())
// process l1 messages // process l1 messages
l1Relayer.ProcessSavedEvents() l1Relayer.ProcessSavedEvents()
msg, err = db.GetL1MessageByQueueIndex(nonce.Uint64())
l1Message, err := l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgSubmitted) assert.NotEmpty(t, l1Message.Layer2Hash)
relayTxHash, err := db.GetRelayL1MessageTxHash(nonce.Uint64()) assert.Equal(t, types.MsgStatus(l1Message.Status), types.MsgSubmitted)
assert.NoError(t, err)
assert.Equal(t, true, relayTxHash.Valid) relayTx, _, err := l2Client.TransactionByHash(context.Background(), common.HexToHash(l1Message.Layer2Hash))
relayTx, _, err := l2Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
assert.NoError(t, err) assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l2Client, relayTx) relayTxReceipt, err := bind.WaitMined(context.Background(), l2Client, relayTx)
assert.NoError(t, err) assert.NoError(t, err)
@@ -78,7 +75,7 @@ func testRelayL1MessageSucceed(t *testing.T) {
// fetch message relayed events // fetch message relayed events
l2Watcher.FetchContractEvent() l2Watcher.FetchContractEvent()
msg, err = db.GetL1MessageByQueueIndex(nonce.Uint64()) msg, err = l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgConfirmed) assert.Equal(t, types.MsgStatus(msg.Status), types.MsgConfirmed)
} }

View File

@@ -2,30 +2,30 @@ package tests
import ( import (
"context" "context"
"errors"
"math/big" "math/big"
"testing" "testing"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind" "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/bridge/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
"scroll-tech/database" bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/database/migrate" "scroll-tech/bridge/internal/utils"
) )
func testRelayL2MessageSucceed(t *testing.T) { func testRelayL2MessageSucceed(t *testing.T) {
// Create db handler and reset db. db := setupDB(t)
db, err := database.NewOrmFactory(base.DBConfig) defer utils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t) prepareContracts(t)
@@ -50,24 +50,27 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx) sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
assert.NoError(t, err) assert.NoError(t, err)
if sendReceipt.Status != geth_types.ReceiptStatusSuccessful || err != nil { if sendReceipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed") t.Fatalf("Call failed")
} }
// l2 watch process events // l2 watch process events
l2Watcher.FetchContractEvent() l2Watcher.FetchContractEvent()
l2MessageOrm := orm.NewL2Message(db)
blockTraceOrm := orm.NewBlockTrace(db)
blockBatchOrm := orm.NewBlockBatch(db)
// check db status // check db status
msg, err := db.GetL2MessageByNonce(nonce.Uint64()) msg, err := l2MessageOrm.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgPending) assert.Equal(t, types.MsgStatus(msg.Status), types.MsgPending)
assert.Equal(t, msg.Sender, l2Auth.From.String()) assert.Equal(t, msg.Sender, l2Auth.From.String())
assert.Equal(t, msg.Target, l1Auth.From.String()) assert.Equal(t, msg.Target, l1Auth.From.String())
// add fake blocks // add fake blocks
traces := []*types.WrappedBlock{ traces := []*bridgeTypes.WrappedBlock{
{ {
Header: &geth_types.Header{ Header: &gethTypes.Header{
Number: sendReceipt.BlockNumber, Number: sendReceipt.BlockNumber,
ParentHash: common.Hash{}, ParentHash: common.Hash{},
Difficulty: big.NewInt(0), Difficulty: big.NewInt(0),
@@ -77,46 +80,56 @@ func testRelayL2MessageSucceed(t *testing.T) {
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
}, },
} }
assert.NoError(t, db.InsertWrappedBlocks(traces)) assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(traces))
parentBatch := &types.BlockBatch{ parentBatch := &bridgeTypes.BatchInfo{
Index: 0, Index: 0,
Hash: "0x0000000000000000000000000000000000000000", Hash: "0x0000000000000000000000000000000000000000",
} }
batchData := types.NewBatchData(parentBatch, []*types.WrappedBlock{ batchData := bridgeTypes.NewBatchData(parentBatch, []*bridgeTypes.WrappedBlock{traces[0]}, l2Cfg.BatchProposerConfig.PublicInputConfig)
traces[0],
}, l2Cfg.BatchProposerConfig.PublicInputConfig)
batchHash := batchData.Hash().String() batchHash := batchData.Hash().String()
// add fake batch // add fake batch
dbTx, err := db.Beginx() err = db.Transaction(func(tx *gorm.DB) error {
rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, blockIDs, batchHash)
if dbTxErr != nil {
return dbTxErr
}
return nil
})
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
err = db.SetBatchHashForL2BlocksInDBTx(dbTx, blockIDs, batchHash)
assert.NoError(t, err)
assert.NoError(t, dbTx.Commit())
// add dummy proof // add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} proof := &message.AggProof{
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100) FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
assert.NoError(t, err) assert.NoError(t, err)
err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified) err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err) assert.NoError(t, err)
// process pending batch and check status // process pending batch and check status
l2Relayer.SendCommitTx([]*types.BatchData{batchData}) assert.NoError(t, l2Relayer.SendCommitTx([]*bridgeTypes.BatchData{batchData}))
status, err := db.GetRollupStatus(batchHash)
blockBatches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupCommitting, status) assert.Equal(t, 1, len(blockBatches))
commitTxHash, err := db.GetCommitTxHash(batchHash) assert.NotEmpty(t, blockBatches[0].CommitTxHash)
assert.NoError(t, err) assert.Equal(t, types.RollupCommitting, types.RollupStatus(blockBatches[0].RollupStatus))
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String)) commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(blockBatches[0].CommitTxHash))
assert.NoError(t, err) assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx) commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err) assert.NoError(t, err)
@@ -125,19 +138,21 @@ func testRelayL2MessageSucceed(t *testing.T) {
// fetch CommitBatch rollup events // fetch CommitBatch rollup events
err = l1Watcher.FetchContractEvent() err = l1Watcher.FetchContractEvent()
assert.NoError(t, err) assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash) statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupCommitted, status) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitted, statuses[0])
// process committed batch and check status // process committed batch and check status
l2Relayer.ProcessCommittedBatches() l2Relayer.ProcessCommittedBatches()
status, err = db.GetRollupStatus(batchHash)
blockBatchWithFinalizeTxHash, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizing, status) assert.Equal(t, 1, len(blockBatchWithFinalizeTxHash))
finalizeTxHash, err := db.GetFinalizeTxHash(batchHash) assert.NotEmpty(t, blockBatchWithFinalizeTxHash[0].FinalizeTxHash)
assert.NoError(t, err) assert.Equal(t, types.RollupFinalizing, types.RollupStatus(blockBatchWithFinalizeTxHash[0].RollupStatus))
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String)) finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(blockBatchWithFinalizeTxHash[0].FinalizeTxHash))
assert.NoError(t, err) assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx) finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err) assert.NoError(t, err)
@@ -146,19 +161,21 @@ func testRelayL2MessageSucceed(t *testing.T) {
// fetch FinalizeBatch events // fetch FinalizeBatch events
err = l1Watcher.FetchContractEvent() err = l1Watcher.FetchContractEvent()
assert.NoError(t, err) assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash) statuses, err = blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupFinalized, status) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalized, statuses[0])
// process l2 messages // process l2 messages
l2Relayer.ProcessSavedEvents() l2Relayer.ProcessSavedEvents()
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
l2Messages, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"nonce": nonce.Uint64()}, nil, 1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgSubmitted) assert.Equal(t, 1, len(l2Messages))
relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64()) assert.NotEmpty(t, l2Messages[0].Layer1Hash)
assert.NoError(t, err) assert.Equal(t, types.MsgStatus(l2Messages[0].Status), types.MsgSubmitted)
assert.Equal(t, true, relayTxHash.Valid)
relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String)) relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(l2Messages[0].Layer1Hash))
assert.NoError(t, err) assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx) relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx)
assert.NoError(t, err) assert.NoError(t, err)
@@ -167,7 +184,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
// fetch message relayed events // fetch message relayed events
err = l1Watcher.FetchContractEvent() err = l1Watcher.FetchContractEvent()
assert.NoError(t, err) assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64()) msg, err = l2MessageOrm.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgConfirmed) assert.Equal(t, types.MsgStatus(msg.Status), types.MsgConfirmed)
} }

View File

@@ -2,30 +2,29 @@ package tests
import ( import (
"context" "context"
"errors"
"math/big" "math/big"
"testing" "testing"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind" "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/utils" "scroll-tech/common/types/message"
"scroll-tech/bridge/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
"scroll-tech/database" bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/database/migrate" "scroll-tech/bridge/internal/utils"
) )
func testCommitBatchAndFinalizeBatch(t *testing.T) { func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create db handler and reset db. db := setupDB(t)
db, err := database.NewOrmFactory(base.DBConfig) defer utils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t) prepareContracts(t)
@@ -38,58 +37,73 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
l1Cfg := bridgeApp.Config.L1Config l1Cfg := bridgeApp.Config.L1Config
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db) l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
blockTraceOrm := orm.NewBlockTrace(db)
// add some blocks to db // add some blocks to db
var wrappedBlocks []*types.WrappedBlock var wrappedBlocks []*bridgeTypes.WrappedBlock
var parentHash common.Hash var parentHash common.Hash
for i := 1; i <= 10; i++ { for i := 1; i <= 10; i++ {
header := geth_types.Header{ header := gethTypes.Header{
Number: big.NewInt(int64(i)), Number: big.NewInt(int64(i)),
ParentHash: parentHash, ParentHash: parentHash,
Difficulty: big.NewInt(0), Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0), BaseFee: big.NewInt(0),
} }
wrappedBlocks = append(wrappedBlocks, &types.WrappedBlock{ wrappedBlocks = append(wrappedBlocks, &bridgeTypes.WrappedBlock{
Header: &header, Header: &header,
Transactions: nil, Transactions: nil,
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
}) })
parentHash = header.Hash() parentHash = header.Hash()
} }
assert.NoError(t, db.InsertWrappedBlocks(wrappedBlocks)) assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(wrappedBlocks))
parentBatch := &types.BlockBatch{ parentBatch := &bridgeTypes.BatchInfo{
Index: 0, Index: 0,
Hash: "0x0000000000000000000000000000000000000000", Hash: "0x0000000000000000000000000000000000000000",
} }
batchData := types.NewBatchData(parentBatch, []*types.WrappedBlock{
tmpWrapBlocks := []*bridgeTypes.WrappedBlock{
wrappedBlocks[0], wrappedBlocks[0],
wrappedBlocks[1], wrappedBlocks[1],
}, l2Cfg.BatchProposerConfig.PublicInputConfig) }
batchData := bridgeTypes.NewBatchData(parentBatch, tmpWrapBlocks, l2Cfg.BatchProposerConfig.PublicInputConfig)
batchHash := batchData.Hash().String() batchHash := batchData.Hash().String()
// add one batch to db blockBatchOrm := orm.NewBlockBatch(db)
dbTx, err := db.Beginx() err = db.Transaction(func(tx *gorm.DB) error {
rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, blockIDs, batchHash)
if dbTxErr != nil {
return dbTxErr
}
return nil
})
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
err = db.SetBatchHashForL2BlocksInDBTx(dbTx, blockIDs, batchHash)
assert.NoError(t, err)
assert.NoError(t, dbTx.Commit())
// process pending batch and check status // process pending batch and check status
assert.NoError(t, l2Relayer.SendCommitTx([]*types.BatchData{batchData})) assert.NoError(t, l2Relayer.SendCommitTx([]*bridgeTypes.BatchData{batchData}))
status, err := db.GetRollupStatus(batchHash) blockBatches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupCommitting, status) assert.Equal(t, 1, len(blockBatches))
commitTxHash, err := db.GetCommitTxHash(batchHash) assert.NotEmpty(t, true, blockBatches[0].CommitTxHash)
assert.NoError(t, err) assert.NotEmpty(t, true, blockBatches[0].RollupStatus)
assert.Equal(t, true, commitTxHash.Valid) assert.Equal(t, types.RollupStatus(blockBatches[0].RollupStatus), types.RollupCommitting)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(blockBatches[0].CommitTxHash))
assert.NoError(t, err) assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx) commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err) assert.NoError(t, err)
@@ -98,33 +112,35 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// fetch rollup events // fetch rollup events
err = l1Watcher.FetchContractEvent() err = l1Watcher.FetchContractEvent()
assert.NoError(t, err) assert.NoError(t, err)
ok := utils.TryTimes(20, func() bool { statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
status, err = db.GetRollupStatus(batchHash) assert.NoError(t, err)
return err == nil && status == types.RollupCommitted assert.Equal(t, 1, len(statuses))
}) assert.Equal(t, types.RollupCommitted, statuses[0])
assert.True(t, ok)
// add dummy proof // add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} proof := &message.AggProof{
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100) FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
assert.NoError(t, err) assert.NoError(t, err)
err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified) err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err) assert.NoError(t, err)
// process committed batch and check status // process committed batch and check status
l2Relayer.ProcessCommittedBatches() l2Relayer.ProcessCommittedBatches()
ok = utils.TryTimes(20, func() bool { statuses, err = blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
status, err = db.GetRollupStatus(batchHash)
return err == nil && status == types.RollupFinalizing
})
assert.True(t, ok)
finalizeTxHash, err := db.GetFinalizeTxHash(batchHash)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid) assert.Equal(t, 1, len(statuses))
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String)) assert.Equal(t, types.RollupFinalizing, statuses[0])
blockBatches, err = blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 1)
assert.NoError(t, err)
assert.Equal(t, 1, len(blockBatches))
assert.NotEmpty(t, blockBatches[0].FinalizeTxHash)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(blockBatches[0].FinalizeTxHash))
assert.NoError(t, err) assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx) finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err) assert.NoError(t, err)
@@ -133,7 +149,8 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// fetch rollup events // fetch rollup events
err = l1Watcher.FetchContractEvent() err = l1Watcher.FetchContractEvent()
assert.NoError(t, err) assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash) statuses, err = blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupFinalized, status) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalized, statuses[0])
} }

View File

@@ -1,199 +0,0 @@
package watcher
import (
"context"
"fmt"
"math"
"strings"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/common/types"
)
func testBatchProposerProposeBatch(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
p := &BatchProposer{
batchGasThreshold: 1000,
batchTxNumThreshold: 10,
batchTimeSec: 300,
commitCalldataSizeLimit: 500,
orm: db,
}
patchGuard := gomonkey.ApplyMethodFunc(p.orm, "GetL2WrappedBlocks", func(fields map[string]interface{}, args ...string) ([]*types.WrappedBlock, error) {
hash, _ := fields["hash"].(string)
if hash == "blockWithLongData" {
longData := strings.Repeat("0", 1000)
return []*types.WrappedBlock{{
Transactions: []*geth_types.TransactionData{{
Data: longData,
}},
}}, nil
}
return []*types.WrappedBlock{{
Transactions: []*geth_types.TransactionData{{
Data: "short",
}},
}}, nil
})
defer patchGuard.Reset()
patchGuard.ApplyPrivateMethod(p, "createBatchForBlocks", func(*BatchProposer, []*types.BlockInfo) error {
return nil
})
block1 := &types.BlockInfo{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix()) - 200}
block2 := &types.BlockInfo{Number: 2, GasUsed: 200, TxNum: 2, BlockTimestamp: uint64(time.Now().Unix())}
block3 := &types.BlockInfo{Number: 3, GasUsed: 300, TxNum: 11, BlockTimestamp: uint64(time.Now().Unix())}
block4 := &types.BlockInfo{Number: 4, GasUsed: 1001, TxNum: 3, BlockTimestamp: uint64(time.Now().Unix())}
blockOutdated := &types.BlockInfo{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Add(-400 * time.Second).Unix())}
blockWithLongData := &types.BlockInfo{Hash: "blockWithLongData", Number: 5, GasUsed: 500, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix())}
testCases := []struct {
description string
blocks []*types.BlockInfo
expectedRes bool
}{
{"Empty block list", []*types.BlockInfo{}, false},
{"Single block exceeding gas threshold", []*types.BlockInfo{block4}, true},
{"Single block exceeding transaction number threshold", []*types.BlockInfo{block3}, true},
{"Multiple blocks meeting thresholds", []*types.BlockInfo{block1, block2, block3}, true},
{"Multiple blocks not meeting thresholds", []*types.BlockInfo{block1, block2}, false},
{"Outdated and valid block", []*types.BlockInfo{blockOutdated, block2}, true},
{"Single block with long data", []*types.BlockInfo{blockWithLongData}, true},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
assert.Equal(t, tc.expectedRes, p.proposeBatch(tc.blocks), "Failed on: %s", tc.description)
})
}
}
func testBatchProposerBatchGeneration(t *testing.T) {
	// Set up a fresh ORM factory and wipe any leftover state.
	db, err := database.NewOrmFactory(cfg.DBConfig)
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
	subCtx, cancel := context.WithCancel(context.Background())
	defer func() {
		cancel()
		db.Close()
	}()
	// Seed the database with a single wrapped block.
	assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock1}))
	l2cfg := cfg.L2Config
	watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
	loopToFetchEvent(subCtx, watcher)
	latestBatch, err := db.GetLatestBatch()
	assert.NoError(t, err)
	// Build the batch we expect the proposer to create on top of the latest one.
	expectedBatch := types.NewBatchData(&types.BlockBatch{
		Index:     0,
		Hash:      latestBatch.Hash,
		StateRoot: latestBatch.StateRoot,
	}, []*types.WrappedBlock{wrappedBlock1}, nil)
	// NOTE: local renamed to l2Relayer to avoid shadowing the relayer package.
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
	assert.NoError(t, err)
	proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		ProofGenerationFreq: 1,
		BatchGasThreshold:   3000000,
		BatchTxNumThreshold: 135,
		BatchTimeSec:        1,
		BatchBlocksLimit:    100,
	}, l2Relayer, db)
	proposer.TryProposeBatch()
	// Every L2 block should now belong to a batch...
	infos, err := db.GetUnbatchedL2Blocks(map[string]interface{}{},
		fmt.Sprintf("order by number ASC LIMIT %d", 100))
	assert.NoError(t, err)
	assert.Equal(t, 0, len(infos))
	// ...and a record for the expected batch hash should exist.
	exist, err := db.BatchRecordExist(expectedBatch.Hash().Hex())
	assert.NoError(t, err)
	assert.True(t, exist)
}
func testBatchProposerGracefulRestart(t *testing.T) {
	// Fresh DB handle with a clean schema.
	db, err := database.NewOrmFactory(cfg.DBConfig)
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
	defer db.Close()
	// NOTE: local renamed to l2Relayer to avoid shadowing the relayer package.
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
	assert.NoError(t, err)
	// Seed the database with the second wrapped block.
	assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock2}))
	// Build two chained batches: batchData2 references batchData1 as its parent.
	batchData1 := types.NewBatchData(&types.BlockBatch{
		Index:     0,
		Hash:      common.Hash{}.String(),
		StateRoot: common.Hash{}.String(),
	}, []*types.WrappedBlock{wrappedBlock1}, nil)
	parentBatch2 := &types.BlockBatch{
		Index:     batchData1.Batch.BatchIndex,
		Hash:      batchData1.Hash().Hex(),
		StateRoot: batchData1.Batch.NewStateRoot.String(),
	}
	batchData2 := types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil)
	// Persist both batches and link their blocks inside a single transaction.
	dbTx, err := db.Beginx()
	assert.NoError(t, err)
	assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
	assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData2))
	assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx,
		[]uint64{batchData1.Batch.Blocks[0].BlockNumber}, batchData1.Hash().Hex()))
	assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx,
		[]uint64{batchData2.Batch.Blocks[0].BlockNumber}, batchData2.Hash().Hex()))
	assert.NoError(t, dbTx.Commit())
	// Finalize the first batch; only the second one should remain pending.
	assert.NoError(t, db.UpdateRollupStatus(context.Background(), batchData1.Hash().Hex(), types.RollupFinalized))
	pending, err := db.GetPendingBatches(math.MaxInt32)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(pending))
	assert.Equal(t, batchData2.Hash().Hex(), pending[0])
	// Constructing the proposer should replay the pending batch
	// (exercises p.recoverBatchDataBuffer()).
	_ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		ProofGenerationFreq: 1,
		BatchGasThreshold:   3000000,
		BatchTxNumThreshold: 135,
		BatchTimeSec:        1,
		BatchBlocksLimit:    100,
	}, l2Relayer, db)
	pending, err = db.GetPendingBatches(math.MaxInt32)
	assert.NoError(t, err)
	assert.Equal(t, 0, len(pending))
	exist, err := db.BatchRecordExist(batchData2.Hash().Hex())
	assert.NoError(t, err)
	assert.True(t, exist)
}

View File

@@ -37,7 +37,7 @@ require (
github.com/go-sql-driver/mysql v1.7.0 // indirect github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect
@@ -45,7 +45,7 @@ require (
github.com/hashicorp/go-bexpr v0.1.10 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.0 // indirect github.com/holiman/uint256 v1.2.2 // indirect
github.com/huin/goupnp v1.0.3 // indirect github.com/huin/goupnp v1.0.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect github.com/influxdata/influxdb v1.8.3 // indirect

View File

@@ -151,8 +151,9 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -187,8 +188,8 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=

View File

@@ -233,31 +233,30 @@ const (
// BlockBatch is structure of stored block_batch // BlockBatch is structure of stored block_batch
type BlockBatch struct { type BlockBatch struct {
Hash string `json:"hash" db:"hash"` Hash string `json:"hash" db:"hash"`
Index uint64 `json:"index" db:"index"` Index uint64 `json:"index" db:"index"`
ParentHash string `json:"parent_hash" db:"parent_hash"` ParentHash string `json:"parent_hash" db:"parent_hash"`
StartBlockNumber uint64 `json:"start_block_number" db:"start_block_number"` StartBlockNumber uint64 `json:"start_block_number" db:"start_block_number"`
StartBlockHash string `json:"start_block_hash" db:"start_block_hash"` StartBlockHash string `json:"start_block_hash" db:"start_block_hash"`
EndBlockNumber uint64 `json:"end_block_number" db:"end_block_number"` EndBlockNumber uint64 `json:"end_block_number" db:"end_block_number"`
EndBlockHash string `json:"end_block_hash" db:"end_block_hash"` EndBlockHash string `json:"end_block_hash" db:"end_block_hash"`
StateRoot string `json:"state_root" db:"state_root"` StateRoot string `json:"state_root" db:"state_root"`
TotalTxNum uint64 `json:"total_tx_num" db:"total_tx_num"` TotalTxNum uint64 `json:"total_tx_num" db:"total_tx_num"`
TotalL1TxNum uint64 `json:"total_l1_tx_num" db:"total_l1_tx_num"` TotalL1TxNum uint64 `json:"total_l1_tx_num" db:"total_l1_tx_num"`
TotalL2Gas uint64 `json:"total_l2_gas" db:"total_l2_gas"` TotalL2Gas uint64 `json:"total_l2_gas" db:"total_l2_gas"`
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"` ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
Proof []byte `json:"proof" db:"proof"` Proof []byte `json:"proof" db:"proof"`
InstanceCommitments []byte `json:"instance_commitments" db:"instance_commitments"` ProofTimeSec uint64 `json:"proof_time_sec" db:"proof_time_sec"`
ProofTimeSec uint64 `json:"proof_time_sec" db:"proof_time_sec"` RollupStatus RollupStatus `json:"rollup_status" db:"rollup_status"`
RollupStatus RollupStatus `json:"rollup_status" db:"rollup_status"` OracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"`
OracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"` CommitTxHash sql.NullString `json:"commit_tx_hash" db:"commit_tx_hash"`
CommitTxHash sql.NullString `json:"commit_tx_hash" db:"commit_tx_hash"` FinalizeTxHash sql.NullString `json:"finalize_tx_hash" db:"finalize_tx_hash"`
FinalizeTxHash sql.NullString `json:"finalize_tx_hash" db:"finalize_tx_hash"` OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"`
OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"` CreatedAt *time.Time `json:"created_at" db:"created_at"`
CreatedAt *time.Time `json:"created_at" db:"created_at"` ProverAssignedAt *time.Time `json:"prover_assigned_at" db:"prover_assigned_at"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" db:"prover_assigned_at"` ProvedAt *time.Time `json:"proved_at" db:"proved_at"`
ProvedAt *time.Time `json:"proved_at" db:"proved_at"` CommittedAt *time.Time `json:"committed_at" db:"committed_at"`
CommittedAt *time.Time `json:"committed_at" db:"committed_at"` FinalizedAt *time.Time `json:"finalized_at" db:"finalized_at"`
FinalizedAt *time.Time `json:"finalized_at" db:"finalized_at"`
} }
// AggTask is a wrapper type around db AggProveTask type. // AggTask is a wrapper type around db AggProveTask type.
@@ -269,6 +268,6 @@ type AggTask struct {
EndBatchHash string `json:"end_batch_hash" db:"end_batch_hash"` EndBatchHash string `json:"end_batch_hash" db:"end_batch_hash"`
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"` ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
Proof []byte `json:"proof" db:"proof"` Proof []byte `json:"proof" db:"proof"`
CreatedTime *time.Time `json:"created_time" db:"created_time"` CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedTime *time.Time `json:"updated_time" db:"updated_time"` UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
} }

View File

@@ -4,6 +4,8 @@ import (
"crypto/ecdsa" "crypto/ecdsa"
"crypto/rand" "crypto/rand"
"encoding/hex" "encoding/hex"
"errors"
"fmt"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil" "github.com/scroll-tech/go-ethereum/common/hexutil"
@@ -205,7 +207,7 @@ type TaskMsg struct {
// For decentralization, basic rollers will get block hashes from the coordinator. So that they can refer to the block hashes and fetch traces locally. Only applicable for basic rollers. // For decentralization, basic rollers will get block hashes from the coordinator. So that they can refer to the block hashes and fetch traces locally. Only applicable for basic rollers.
BlockHashes []common.Hash `json:"block_hashes,omitempty"` BlockHashes []common.Hash `json:"block_hashes,omitempty"`
// Only applicable for aggregator rollers. // Only applicable for aggregator rollers.
SubProofs [][]byte `json:"sub_proofs,omitempty"` SubProofs []*AggProof `json:"sub_proofs,omitempty"`
} }
// ProofDetail is the message received from rollers that contains zk proof, the status of // ProofDetail is the message received from rollers that contains zk proof, the status of
@@ -237,3 +239,26 @@ type AggProof struct {
Vk []byte `json:"vk"` Vk []byte `json:"vk"`
BlockCount uint `json:"block_count"` BlockCount uint `json:"block_count"`
} }
// SanityCheck checks whether an AggProof is in a legal format
// TODO: change to check Proof&Instance when upgrading to snark verifier v0.4
func (ap *AggProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
if len(ap.Proof) == 0 {
return errors.New("proof not ready")
}
if len(ap.FinalPair) == 0 {
return errors.New("final_pair not ready")
}
if len(ap.Proof)%32 != 0 {
return fmt.Errorf("proof buffer has wrong length, expected: 32, got: %d", len(ap.Proof))
}
if len(ap.FinalPair)%32 != 0 {
return fmt.Errorf("final_pair buffer has wrong length, expected: 32, got: %d", len(ap.FinalPair))
}
return nil
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug" "runtime/debug"
) )
var tag = "v3.1.1" var tag = "v3.3.1"
var commit = func() string { var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok { if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -25,7 +25,7 @@ import {Whitelist} from "../../src/L2/predeploys/Whitelist.sol";
contract DeployL1BridgeContracts is Script { contract DeployL1BridgeContracts is Script {
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY"); uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2"); uint32 CHAIN_ID_L2 = uint32(vm.envUint("CHAIN_ID_L2"));
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR"); address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR"); address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");

View File

@@ -154,7 +154,7 @@ contract EnforcedTxGateway is OwnableUpgradeable, ReentrancyGuardUpgradeable, Pa
} }
// append transaction // append transaction
IL1MessageQueue(messageQueue).appendEnforcedTransaction(_sender, _target, _value, _gasLimit, _data); IL1MessageQueue(_messageQueue).appendEnforcedTransaction(_sender, _target, _value, _gasLimit, _data);
// refund fee to `_refundAddress` // refund fee to `_refundAddress`
unchecked { unchecked {

View File

@@ -10,6 +10,7 @@ import {BatchHeaderV0Codec} from "../../libraries/codec/BatchHeaderV0Codec.sol";
import {ChunkCodec} from "../../libraries/codec/ChunkCodec.sol"; import {ChunkCodec} from "../../libraries/codec/ChunkCodec.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol"; import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
// solhint-disable no-inline-assembly
// solhint-disable reason-string // solhint-disable reason-string
/// @title ScrollChain /// @title ScrollChain
@@ -39,7 +40,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
*************/ *************/
/// @notice The chain id of the corresponding layer 2 chain. /// @notice The chain id of the corresponding layer 2 chain.
uint256 public immutable layer2ChainId; uint32 public immutable layer2ChainId;
/************* /*************
* Variables * * Variables *
@@ -83,7 +84,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
* Constructor * * Constructor *
***************/ ***************/
constructor(uint256 _chainId) { constructor(uint32 _chainId) {
layer2ChainId = _chainId; layer2ChainId = _chainId;
} }
@@ -295,7 +296,9 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
require(finalizedStateRoots[_batchIndex] == bytes32(0), "batch already verified"); require(finalizedStateRoots[_batchIndex] == bytes32(0), "batch already verified");
// compute public input hash // compute public input hash
bytes32 _publicInputHash = keccak256(abi.encode(_prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash)); bytes32 _publicInputHash = keccak256(
abi.encodePacked(layer2ChainId, _prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash)
);
// verify batch // verify batch
IRollupVerifier(verifier).verifyAggregateProof(_aggrProof, _publicInputHash); IRollupVerifier(verifier).verifyAggregateProof(_aggrProof, _publicInputHash);

View File

@@ -2,6 +2,8 @@
pragma solidity ^0.8.0; pragma solidity ^0.8.0;
// solhint-disable no-inline-assembly
/// @dev Below is the encoding for `BatchHeader` V0, total 89 + ceil(l1MessagePopped / 256) * 32 bytes. /// @dev Below is the encoding for `BatchHeader` V0, total 89 + ceil(l1MessagePopped / 256) * 32 bytes.
/// ```text /// ```text
/// * Field Bytes Type Index Comments /// * Field Bytes Type Index Comments

View File

@@ -0,0 +1,36 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol";
import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";
contract Fallback is Ownable {
using SafeERC20 for IERC20;
/// @notice Withdraw stucked token from this contract.
/// @param _token The address of token to withdraw, use `address(0)` if withdraw ETH.
/// @param _amount The amount of token to withdraw.
/// @param _recipient The address of receiver.
function withdraw(
address _token,
uint256 _amount,
address _recipient
) external onlyOwner {
if (_token == address(0)) {
(bool _success, ) = _recipient.call{value: _amount}("");
require(_success, "transfer ETH failed");
} else {
IERC20(_token).safeTransfer(_recipient, _amount);
}
}
/// @notice Execute an arbitrary message.
/// @param _target The address of contract to call.
/// @param _data The calldata passed to target contract.
function execute(address _target, bytes calldata _data) external payable onlyOwner {
(bool _success, ) = _target.call{value: msg.value}(_data);
require(_success, "call failed");
}
}

View File

@@ -1,4 +1,4 @@
.PHONY: lint docker clean coordinator .PHONY: lint docker clean coordinator mock_coordinator
IMAGE_NAME=coordinator IMAGE_NAME=coordinator
IMAGE_VERSION=latest IMAGE_VERSION=latest
@@ -25,6 +25,9 @@ libzkp:
coordinator: libzkp ## Builds the Coordinator instance. coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
mock_coordinator: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator ./cmd
test-verifier: libzkp test-verifier: libzkp
go test -tags ffi -timeout 0 -v ./verifier go test -tags ffi -timeout 0 -v ./verifier

View File

@@ -14,7 +14,9 @@ require (
require ( require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect github.com/google/uuid v1.3.0 // indirect
github.com/holiman/uint256 v1.2.2 // indirect
github.com/huin/goupnp v1.0.3 // indirect github.com/huin/goupnp v1.0.3 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/kr/pretty v0.3.1 // indirect github.com/kr/pretty v0.3.1 // indirect

View File

@@ -31,7 +31,8 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -42,7 +43,8 @@ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=

View File

@@ -359,7 +359,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
// store proof content // store proof content
if msg.Type == message.BasicProve { if msg.Type == message.BasicProve {
if dbErr = m.orm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof.Proof, msg.Proof.FinalPair, proofTimeSec); dbErr != nil { if dbErr = m.orm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
log.Error("failed to store basic proof into db", "error", dbErr) log.Error("failed to store basic proof into db", "error", dbErr)
return dbErr return dbErr
} }

View File

@@ -13,8 +13,8 @@ create table l1_message
layer1_hash VARCHAR NOT NULL, layer1_hash VARCHAR NOT NULL,
layer2_hash VARCHAR DEFAULT NULL, layer2_hash VARCHAR DEFAULT NULL,
status INTEGER DEFAULT 1, status INTEGER DEFAULT 1,
created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
); );
comment comment
@@ -32,7 +32,7 @@ create index l1_message_height_index
CREATE OR REPLACE FUNCTION update_timestamp() CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$ RETURNS TRIGGER AS $$
BEGIN BEGIN
NEW.updated_time = CURRENT_TIMESTAMP; NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW; RETURN NEW;
END; END;
$$ language 'plpgsql'; $$ language 'plpgsql';

View File

@@ -13,8 +13,8 @@ create table l2_message
layer1_hash VARCHAR DEFAULT NULL, layer1_hash VARCHAR DEFAULT NULL,
proof TEXT DEFAULT NULL, proof TEXT DEFAULT NULL,
status INTEGER DEFAULT 1, status INTEGER DEFAULT 1,
created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
); );
comment comment
@@ -32,7 +32,7 @@ create index l2_message_height_index
CREATE OR REPLACE FUNCTION update_timestamp() CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$ RETURNS TRIGGER AS $$
BEGIN BEGIN
NEW.updated_time = CURRENT_TIMESTAMP; NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW; RETURN NEW;
END; END;
$$ language 'plpgsql'; $$ language 'plpgsql';

View File

@@ -16,7 +16,6 @@ create table block_batch
total_l2_gas BIGINT NOT NULL, total_l2_gas BIGINT NOT NULL,
proving_status INTEGER DEFAULT 1, proving_status INTEGER DEFAULT 1,
proof BYTEA DEFAULT NULL, proof BYTEA DEFAULT NULL,
instance_commitments BYTEA DEFAULT NULL,
proof_time_sec INTEGER DEFAULT 0, proof_time_sec INTEGER DEFAULT 0,
rollup_status INTEGER DEFAULT 1, rollup_status INTEGER DEFAULT 1,
commit_tx_hash VARCHAR DEFAULT NULL, commit_tx_hash VARCHAR DEFAULT NULL,

View File

@@ -10,8 +10,8 @@ create table agg_task
end_batch_hash VARCHAR NOT NULL, end_batch_hash VARCHAR NOT NULL,
proving_status SMALLINT DEFAULT 1, proving_status SMALLINT DEFAULT 1,
proof BYTEA DEFAULT NULL, proof BYTEA DEFAULT NULL,
created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP updated_at TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP
); );
create unique index agg_task_hash_uindex create unique index agg_task_hash_uindex
@@ -21,7 +21,7 @@ create unique index agg_task_hash_uindex
CREATE OR REPLACE FUNCTION update_timestamp() CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$ RETURNS TRIGGER AS $$
BEGIN BEGIN
NEW.updated_time = CURRENT_TIMESTAMP; NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW; RETURN NEW;
END; END;
$$ language 'plpgsql'; $$ language 'plpgsql';

View File

@@ -20,7 +20,7 @@ func NewAggTaskOrm(db *sqlx.DB) AggTaskOrm {
return &aggTaskOrm{db: db} return &aggTaskOrm{db: db}
} }
func (a *aggTaskOrm) GetSubProofsByAggTaskID(id string) ([][]byte, error) { func (a *aggTaskOrm) GetSubProofsByAggTaskID(id string) ([]*message.AggProof, error) {
var ( var (
startIdx uint64 startIdx uint64
endIdx uint64 endIdx uint64
@@ -34,14 +34,20 @@ func (a *aggTaskOrm) GetSubProofsByAggTaskID(id string) ([][]byte, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
var subProofs [][]byte var subProofs []*message.AggProof
for rows.Next() { for rows.Next() {
var proofByt []byte var proofByt []byte
err = rows.Scan(&proofByt) err = rows.Scan(&proofByt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
subProofs = append(subProofs, proofByt)
var proof message.AggProof
if err := json.Unmarshal(proofByt, &proof); err != nil {
return nil, err
}
subProofs = append(subProofs, &proof)
} }
return subProofs, nil return subProofs, nil
} }

View File

@@ -3,6 +3,7 @@ package orm
import ( import (
"context" "context"
"database/sql" "database/sql"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"strings" "strings"
@@ -12,6 +13,7 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message"
) )
type blockBatchOrm struct { type blockBatchOrm struct {
@@ -62,22 +64,31 @@ func (o *blockBatchOrm) GetProvingStatusByHash(hash string) (types.ProvingStatus
return status, nil return status, nil
} }
func (o *blockBatchOrm) GetVerifiedProofAndInstanceCommitmentsByHash(hash string) ([]byte, []byte, error) { func (o *blockBatchOrm) GetVerifiedProofByHash(hash string) (*message.AggProof, error) {
var proof []byte var proofBytes []byte
var instanceCommitments []byte row := o.db.QueryRow(`SELECT proof FROM block_batch WHERE hash = $1 and proving_status = $2`, hash, types.ProvingTaskVerified)
row := o.db.QueryRow(`SELECT proof, instance_commitments FROM block_batch WHERE hash = $1 and proving_status = $2`, hash, types.ProvingTaskVerified) if err := row.Scan(&proofBytes); err != nil {
return nil, err
if err := row.Scan(&proof, &instanceCommitments); err != nil {
return nil, nil, err
} }
return proof, instanceCommitments, nil
var proof message.AggProof
if err := json.Unmarshal(proofBytes, &proof); err != nil {
return nil, err
}
return &proof, nil
} }
func (o *blockBatchOrm) UpdateProofByHash(ctx context.Context, hash string, proof, instanceCommitments []byte, proofTimeSec uint64) error { func (o *blockBatchOrm) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
}
db := o.db db := o.db
if _, err := db.ExecContext(ctx, if _, err := db.ExecContext(ctx,
db.Rebind(`UPDATE block_batch set proof = ?, instance_commitments = ?, proof_time_sec = ? where hash = ?;`), db.Rebind(`UPDATE block_batch set proof = ?, proof_time_sec = ? where hash = ?;`),
proof, instanceCommitments, proofTimeSec, hash, proofBytes, proofTimeSec, hash,
); err != nil { ); err != nil {
log.Error("failed to update proof", "err", err) log.Error("failed to update proof", "err", err)
} }

View File

@@ -4,11 +4,11 @@ import (
"context" "context"
"database/sql" "database/sql"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
) )
// L1BlockOrm l1_block operation interface // L1BlockOrm l1_block operation interface
@@ -50,7 +50,7 @@ type SessionInfoOrm interface {
type AggTaskOrm interface { type AggTaskOrm interface {
GetAssignedAggTasks() ([]*types.AggTask, error) GetAssignedAggTasks() ([]*types.AggTask, error)
GetUnassignedAggTasks() ([]*types.AggTask, error) GetUnassignedAggTasks() ([]*types.AggTask, error)
GetSubProofsByAggTaskID(id string) ([][]byte, error) GetSubProofsByAggTaskID(id string) ([]*message.AggProof, error)
InsertAggTask(id string, startBatchIndex uint64, startBatchHash string, endBatchIndex uint64, endBatchHash string) error InsertAggTask(id string, startBatchIndex uint64, startBatchHash string, endBatchIndex uint64, endBatchHash string) error
UpdateAggTaskStatus(aggTaskID string, status types.ProvingStatus) error UpdateAggTaskStatus(aggTaskID string, status types.ProvingStatus) error
UpdateProofForAggTask(aggTaskID string, proof *message.AggProof) error UpdateProofForAggTask(aggTaskID string, proof *message.AggProof) error
@@ -60,8 +60,8 @@ type AggTaskOrm interface {
type BlockBatchOrm interface { type BlockBatchOrm interface {
GetBlockBatches(fields map[string]interface{}, args ...string) ([]*types.BlockBatch, error) GetBlockBatches(fields map[string]interface{}, args ...string) ([]*types.BlockBatch, error)
GetProvingStatusByHash(hash string) (types.ProvingStatus, error) GetProvingStatusByHash(hash string) (types.ProvingStatus, error)
GetVerifiedProofAndInstanceCommitmentsByHash(hash string) ([]byte, []byte, error) GetVerifiedProofByHash(hash string) (*message.AggProof, error)
UpdateProofByHash(ctx context.Context, hash string, proof, instanceCommitments []byte, proofTimeSec uint64) error UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error
UpdateProvingStatus(hash string, status types.ProvingStatus) error UpdateProvingStatus(hash string, status types.ProvingStatus) error
ResetProvingStatusFor(before types.ProvingStatus) error ResetProvingStatusFor(before types.ProvingStatus) error
NewBatchInDBTx(dbTx *sqlx.Tx, batchData *types.BatchData) error NewBatchInDBTx(dbTx *sqlx.Tx, batchData *types.BatchData) error

View File

@@ -73,8 +73,11 @@ var (
}, },
} }
proof1 = []byte{1} proof1 = &message.AggProof{
subProofs = [][]byte{proof1} Proof: []byte{1},
FinalPair: []byte{2},
}
subProofs = []*message.AggProof{proof1}
aggTask1 = &types.AggTask{ID: "test-agg-1"} aggTask1 = &types.AggTask{ID: "test-agg-1"}
aggTask2 = &types.AggTask{ID: "test-agg-2"} aggTask2 = &types.AggTask{ID: "test-agg-2"}
@@ -344,7 +347,7 @@ func testOrmBlockBatch(t *testing.T) {
provingStatus, err := ormBatch.GetProvingStatusByHash(batchHash1) provingStatus, err := ormBatch.GetProvingStatusByHash(batchHash1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskUnassigned, provingStatus) assert.Equal(t, types.ProvingTaskUnassigned, provingStatus)
err = ormBatch.UpdateProofByHash(context.Background(), batchHash1, proof1, []byte{2}, 1200) err = ormBatch.UpdateProofByHash(context.Background(), batchHash1, proof1, 1200)
assert.NoError(t, err) assert.NoError(t, err)
err = ormBatch.UpdateProvingStatus(batchHash1, types.ProvingTaskVerified) err = ormBatch.UpdateProvingStatus(batchHash1, types.ProvingTaskVerified)
assert.NoError(t, err) assert.NoError(t, err)
@@ -489,7 +492,7 @@ func testOrmAggTask(t *testing.T) {
provingStatus, err := ormBatch.GetProvingStatusByHash(batchHash1) provingStatus, err := ormBatch.GetProvingStatusByHash(batchHash1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskUnassigned, provingStatus) assert.Equal(t, types.ProvingTaskUnassigned, provingStatus)
err = ormBatch.UpdateProofByHash(context.Background(), batchHash1, proof1, []byte{2}, 1200) err = ormBatch.UpdateProofByHash(context.Background(), batchHash1, proof1, 1200)
assert.NoError(t, err) assert.NoError(t, err)
err = ormBatch.UpdateProvingStatus(batchHash1, types.ProvingTaskVerified) err = ormBatch.UpdateProvingStatus(batchHash1, types.ProvingTaskVerified)
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -250,6 +250,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gotestyourself/gotestyourself v1.4.0 h1:CDSlSIuRL/Fsc72Ln5lMybtrCvSRDddsHsDRG/nP7Rg=
github.com/gotestyourself/gotestyourself v1.4.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gotestyourself/gotestyourself v1.4.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
@@ -278,7 +279,6 @@ github.com/iris-contrib/pongo2 v0.0.1 h1:zGP7pW51oi5eQZMIlGA3I+FHY9/HOQWDB+572yi
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys=
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y=
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w= github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w=
@@ -411,7 +411,6 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZL
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344 h1:m+8fKfQwCAy1QjzINvKe/pYtLjo2dl59x2w9YSEJxuY= github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344 h1:m+8fKfQwCAy1QjzINvKe/pYtLjo2dl59x2w9YSEJxuY=
github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8= github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
@@ -432,7 +431,6 @@ github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6 h1:YdYsPAZ2pC6Tow/n
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=
github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=
github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg=
@@ -448,7 +446,6 @@ golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E= golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
@@ -456,7 +453,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNT
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
@@ -466,16 +462,13 @@ golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -488,7 +481,6 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
@@ -499,7 +491,6 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
@@ -527,6 +518,7 @@ gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUk
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v1.4.0 h1:BjtEgfuw8Qyd+jPvQz8CfoxiO/UjFEidWinwEXZiWv0=
gotest.tools v1.4.0/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools v1.4.0/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o= honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o=

View File

@@ -29,12 +29,12 @@ libzkp:
roller: libzkp ## Build the Roller instance. roller: libzkp ## Build the Roller instance.
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/roller ./cmd GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/roller ./cmd
mock_roller: ## Build the mocked Roller instance.
GOBIN=$(PWD)/build/bin go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/roller ./cmd
gpu-roller: libzkp ## Build the GPU Roller instance. gpu-roller: libzkp ## Build the GPU Roller instance.
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -tags gpu -o $(PWD)/build/bin/roller ./cmd GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -tags gpu -o $(PWD)/build/bin/roller ./cmd
mock_roller:
GOBIN=$(PWD)/build/bin go build -tags mock_prover -o $(PWD)/build/bin/roller $(PWD)/cmd
test-prover: libzkp test-prover: libzkp
go test -tags ffi -timeout 0 -v ./prover go test -tags ffi -timeout 0 -v ./prover

View File

@@ -15,18 +15,21 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.2 // indirect
github.com/huin/goupnp v1.0.3 // indirect github.com/huin/goupnp v1.0.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/kr/pretty v0.3.1 // indirect github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect github.com/rivo/uniseg v0.4.4 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect
@@ -41,7 +44,7 @@ require (
github.com/yusufpapurcu/wmi v1.2.2 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.9.0 // indirect golang.org/x/crypto v0.9.0 // indirect
golang.org/x/sys v0.8.0 // indirect golang.org/x/sys v0.8.0 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect golang.org/x/time v0.3.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect

View File

@@ -18,8 +18,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0= github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -29,15 +29,17 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
@@ -59,11 +61,13 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -131,12 +135,13 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@@ -3,10 +3,13 @@
package prover package prover
import ( import (
"scroll-tech/common/types/message" "math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/core/types"
"scroll-tech/common/types/message"
"scroll-tech/roller/config" "scroll-tech/roller/config"
) )
@@ -22,9 +25,10 @@ func NewProver(cfg *config.ProverConfig) (*Prover, error) {
// Prove call rust ffi to generate proof, if first failed, try again. // Prove call rust ffi to generate proof, if first failed, try again.
func (p *Prover) Prove(taskID string, traces []*types.BlockTrace) (*message.AggProof, error) { func (p *Prover) Prove(taskID string, traces []*types.BlockTrace) (*message.AggProof, error) {
_empty := common.BigToHash(big.NewInt(0))
return &message.AggProof{ return &message.AggProof{
Proof: []byte{}, Proof: _empty[:],
Instance: []byte{}, Instance: _empty[:],
FinalPair: []byte{}, FinalPair: _empty[:],
}, nil }, nil
} }

View File

@@ -14,8 +14,10 @@ require (
github.com/deckarep/golang-set v1.8.0 // indirect github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.2 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/kr/pretty v0.3.1 // indirect github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect

View File

@@ -9,12 +9,12 @@ github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsP
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=

View File

@@ -36,7 +36,7 @@ var (
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
base = docker.NewDockerApp() base = docker.NewDockerApp()
bridgeApp = bcmd.NewBridgeApp(base, "../../bridge/config.json") bridgeApp = bcmd.NewBridgeApp(base, "../../bridge/conf/config.json")
coordinatorApp = capp.NewCoordinatorApp(base, "../../coordinator/config.json") coordinatorApp = capp.NewCoordinatorApp(base, "../../coordinator/config.json")
rollerApp = rapp.NewRollerApp(base, "../../roller/config.json", coordinatorApp.WSEndpoint()) rollerApp = rapp.NewRollerApp(base, "../../roller/config.json", coordinatorApp.WSEndpoint())
m.Run() m.Run()