Compare commits

...

8 Commits

Author SHA1 Message Date
georgehao
12ae3d48f7 feat: update db sql show 2023-05-19 10:08:20 +08:00
georgehao
0380c754ce feat: update 2023-05-18 15:53:39 +08:00
georgehao
75276bc111 feat: update 2023-05-18 15:51:49 +08:00
georgehao
e956f6b3ea feat: all done +1 2023-05-18 15:50:55 +08:00
georgehao
a71ede0eb1 feat: all is done 2023-05-18 11:08:38 +08:00
georgehao
0e22f0d75f feat: change string to error 2023-05-17 17:07:33 +08:00
georgehao
d7be94049d feat: fix all error 2023-05-17 16:25:26 +08:00
georgehao
dd420574a2 feat: refactor 2023-05-16 22:10:11 +08:00
78 changed files with 16980 additions and 1552 deletions

View File

@@ -1,114 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up event-watcher app info.
app = cli.NewApp()
app.Action = action
app.Name = "event-watcher"
app.Usage = "The Scroll Event Watcher"
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `event-watcher-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.EventWatcherApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
return err
}
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)
go cutils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
log.Error("Failed to fetch bridge contract", "err", loopErr)
}
})
// Start l2 watcher process
go cutils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finish start all l2 functions
log.Info("Start event-watcher successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run event watcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,7 +1,104 @@
package main package main
import "scroll-tech/bridge/cmd/event_watcher/app" import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/metrics"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/utils"
)
var app *cli.App
func init() {
// Set up event-watcher app info.
app = cli.NewApp()
app.Action = action
app.Name = "event-watcher"
app.Usage = "The Scroll Event Watcher"
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `event-watcher-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.EventWatcherApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := utils.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err := utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
return err
}
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
go cutils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
log.Error("Failed to fetch bridge contract", "err", loopErr)
}
})
// Start l2 watcher process
go cutils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finish start all l2 functions
log.Info("Start event-watcher successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
func main() { func main() {
app.Run() if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
} }

View File

@@ -1,136 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up gas-oracle app info.
app = cli.NewApp()
app.Action = action
app.Name = "gas-oracle"
app.Usage = "The Scroll Gas Oracle"
app.Description = "Scroll Gas Oracle."
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `gas-oracle-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.GasOracleApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
return err
}
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1 watcher process
go cutils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
if loopErr = l1watcher.FetchBlockHeader(number); loopErr != nil {
log.Error("Failed to fetch L1 block header", "lastest", number, "err", loopErr)
}
})
// Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finish start all message relayer functions
log.Info("Start gas-oracle successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run message_relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,7 +1,126 @@
package main package main
import "scroll-tech/bridge/cmd/gas_oracle/app" import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/metrics"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/utils"
)
var app *cli.App
func init() {
// Set up gas-oracle app info.
app = cli.NewApp()
app.Action = action
app.Name = "gas-oracle"
app.Usage = "The Scroll Gas Oracle"
app.Description = "Scroll Gas Oracle."
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `gas-oracle-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.GasOracleApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := utils.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err := utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
return err
}
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1 watcher process
go cutils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
if loopErr = l1watcher.FetchBlockHeader(number); loopErr != nil {
log.Error("Failed to fetch L1 block header", "lastest", number, "err", loopErr)
}
})
// Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finish start all message relayer functions
log.Info("Start gas-oracle successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
func main() { func main() {
app.Run() if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
} }

View File

@@ -11,7 +11,7 @@ import (
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
) )
// MockApp mockApp-test client manager. // MockApp mockApp-test client manager.

View File

@@ -1,118 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up message-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "message-relayer"
app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `message-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.MessageRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)
// Start l2relayer process
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessSavedEvents)
// Finish start all message relayer functions
log.Info("Start message-relayer successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run message_relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,7 +1,108 @@
package main package main
import "scroll-tech/bridge/cmd/msg_relayer/app" import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/metrics"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/utils"
)
var app *cli.App
func init() {
// Set up message-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "message-relayer"
app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `message-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.MessageRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := utils.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err := utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)
// Start l2relayer process
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessSavedEvents)
// Finish start all message relayer functions
log.Info("Start message-relayer successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
func main() { func main() {
app.Run() if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
} }

View File

@@ -1,133 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up rollup-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "rollup-relayer"
app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `rollup-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.RollupRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, l2relayer, ormFactory)
if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)
// Watcher loop to fetch missing blocks
go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
l2watcher.TryFetchRunningMissingBlocks(ctx, number)
})
// Batch proposer loop
go cutils.Loop(subCtx, 2*time.Second, func() {
batchProposer.TryProposeBatch()
batchProposer.TryCommitBatches()
})
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run rollup relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,7 +1,123 @@
package main package main
import "scroll-tech/bridge/cmd/rollup_relayer/app" import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/metrics"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/utils"
)
var app *cli.App
func init() {
// Set up rollup-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "rollup-relayer"
app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `rollup-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.RollupRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := utils.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err := utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, l2relayer, db)
if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
// Watcher loop to fetch missing blocks
go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
l2watcher.TryFetchRunningMissingBlocks(ctx, number)
})
// Batch proposer loop
go cutils.Loop(subCtx, 2*time.Second, func() {
batchProposer.TryProposeBatch()
batchProposer.TryCommitBatches()
})
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
func main() { func main() {
app.Run() if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
} }

View File

@@ -90,6 +90,8 @@
"driver_name": "postgres", "driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable", "dsn": "postgres://admin:123456@localhost/test?sslmode=disable",
"maxOpenNum": 200, "maxOpenNum": 200,
"maxIdleNum": 20 "maxIdleNum": 20,
"slow_sql_threshold": "3s",
"is_show_sql": true
} }
} }

View File

@@ -0,0 +1,38 @@
-- +goose Up
-- +goose StatementBegin
-- TODO: use foreign key for batch_id?
-- TODO: why tx_num is bigint?
create table block_trace
(
number BIGINT NOT NULL,
hash VARCHAR NOT NULL,
parent_hash VARCHAR NOT NULL,
trace JSON NOT NULL,
batch_hash VARCHAR DEFAULT NULL,
tx_num INTEGER NOT NULL,
gas_used BIGINT NOT NULL,
block_timestamp NUMERIC NOT NULL
);
create unique index block_trace_hash_uindex
on block_trace (hash);
create unique index block_trace_number_uindex
on block_trace (number);
create unique index block_trace_parent_uindex
on block_trace (number, parent_hash);
create unique index block_trace_parent_hash_uindex
on block_trace (hash, parent_hash);
create index block_trace_batch_hash_index
on block_trace (batch_hash);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists block_trace;
-- +goose StatementEnd

View File

@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin
create table l1_message
(
queue_index BIGINT NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
gas_limit BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
calldata TEXT NOT NULL,
layer1_hash VARCHAR NOT NULL,
layer2_hash VARCHAR DEFAULT NULL,
status INTEGER DEFAULT 1,
created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);
comment
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';
create unique index l1_message_hash_uindex
on l1_message (msg_hash);
create unique index l1_message_nonce_uindex
on l1_message (queue_index);
create index l1_message_height_index
on l1_message (height);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_time = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l1_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_message;
-- +goose StatementEnd

View File

@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin
create table l2_message
(
nonce BIGINT NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
calldata TEXT NOT NULL,
layer2_hash VARCHAR NOT NULL,
layer1_hash VARCHAR DEFAULT NULL,
proof TEXT DEFAULT NULL,
status INTEGER DEFAULT 1,
created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);
comment
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';
create unique index l2_message_hash_uindex
on l2_message (msg_hash);
create unique index l2_message_nonce_uindex
on l2_message (nonce);
create index l2_message_height_index
on l2_message (height);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_time = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l2_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l2_message;
-- +goose StatementEnd

View File

@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin
create table block_batch
(
hash VARCHAR NOT NULL,
index BIGINT NOT NULL,
start_block_number BIGINT NOT NULL,
start_block_hash VARCHAR NOT NULL,
end_block_number BIGINT NOT NULL,
end_block_hash VARCHAR NOT NULL,
parent_hash VARCHAR NOT NULL,
state_root VARCHAR NOT NULL,
total_tx_num BIGINT NOT NULL,
total_l1_tx_num BIGINT NOT NULL,
total_l2_gas BIGINT NOT NULL,
proving_status INTEGER DEFAULT 1,
proof BYTEA DEFAULT NULL,
instance_commitments BYTEA DEFAULT NULL,
proof_time_sec INTEGER DEFAULT 0,
rollup_status INTEGER DEFAULT 1,
commit_tx_hash VARCHAR DEFAULT NULL,
finalize_tx_hash VARCHAR DEFAULT NULL,
oracle_status INTEGER DEFAULT 1,
oracle_tx_hash VARCHAR DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
prover_assigned_at TIMESTAMP(0) DEFAULT NULL,
proved_at TIMESTAMP(0) DEFAULT NULL,
committed_at TIMESTAMP(0) DEFAULT NULL,
finalized_at TIMESTAMP(0) DEFAULT NULL
);
comment
on column block_batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';
comment
on column block_batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped, commit_failed, finalize_failed';
comment
on column block_batch.oracle_status is 'undefined, pending, importing, imported, failed';
create unique index block_batch_hash_uindex
on block_batch (hash);
create unique index block_batch_index_uindex
on block_batch (index);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists block_batch;
-- +goose StatementEnd

View File

@@ -0,0 +1,33 @@
-- +goose Up
-- +goose StatementBegin
-- l1_block: one row per observed L1 block, storing the RLP-encoded header
-- and base fee alongside import and gas-oracle lifecycle tracking.
create table l1_block
(
number BIGINT NOT NULL,
hash VARCHAR NOT NULL,
header_rlp TEXT NOT NULL,
base_fee BIGINT NOT NULL,
block_status INTEGER DEFAULT 1,
import_tx_hash VARCHAR DEFAULT NULL,
oracle_status INTEGER DEFAULT 1,
oracle_tx_hash VARCHAR DEFAULT NULL
);
-- Status columns store integers; the comments below list the enum names.
-- NOTE(review): presumably the integer is the list position ('undefined' = 0,
-- so DEFAULT 1 = 'pending') -- confirm against the Go types package.
comment
on column l1_block.block_status is 'undefined, pending, importing, imported, failed';
comment
on column l1_block.oracle_status is 'undefined, pending, importing, imported, failed';
-- An L1 block is uniquely identified by either its hash or its number.
create unique index l1_block_hash_uindex
on l1_block (hash);
create unique index l1_block_number_uindex
on l1_block (number);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_block;
-- +goose StatementEnd

View File

@@ -26,6 +26,11 @@ require (
github.com/gopherjs/gopherjs v1.17.2 // indirect github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/gorilla/websocket v1.5.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.14 // indirect github.com/iden3/go-iden3-crypto v0.0.14 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/mattn/go-isatty v0.0.18 // indirect github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect
@@ -50,4 +55,6 @@ require (
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
gorm.io/driver/postgres v1.5.0 // indirect
gorm.io/gorm v1.25.1 // indirect
) )

View File

@@ -15,6 +15,7 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -48,17 +49,31 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/iden3/go-iden3-crypto v0.0.14 h1:HQnFchY735JRNQxof6n/Vbyon4owj4+Ku+LNAamWV6c= github.com/iden3/go-iden3-crypto v0.0.14 h1:HQnFchY735JRNQxof6n/Vbyon4owj4+Ku+LNAamWV6c=
github.com/iden3/go-iden3-crypto v0.0.14/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/iden3/go-iden3-crypto v0.0.14/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
@@ -87,6 +102,7 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
@@ -108,8 +124,11 @@ github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobt
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -122,33 +141,60 @@ github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bC
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
@@ -158,5 +204,10 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64=
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8= modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=

View File

@@ -4,15 +4,13 @@ import (
"encoding/json" "encoding/json"
"os" "os"
"path/filepath" "path/filepath"
"scroll-tech/database"
) )
// Config load configuration items. // Config load configuration items.
type Config struct { type Config struct {
L1Config *L1Config `json:"l1_config"` L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"` L2Config *L2Config `json:"l2_config"`
DBConfig *database.DBConfig `json:"db_config"` DBConfig *DBConfig `json:"db_config"`
} }
// NewConfig returns a new instance of Config. // NewConfig returns a new instance of Config.

View File

@@ -0,0 +1,37 @@
package config
import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"time"
)
// DBConfig db config
type DBConfig struct {
// data source name
DSN string `json:"dsn"`
DriverName string `json:"driver_name"`
MaxOpenNum int `json:"maxOpenNum"`
MaxIdleNum int `json:"maxIdleNum"`
SlowSqlThreshold time.Duration `json:"slow_sql_threshold"`
ShowSql bool `json:"show_sql"`
}
// NewDBConfig returns a new instance of Config.
func NewDBConfig(file string) (*DBConfig, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &DBConfig{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
return cfg, nil
}

View File

@@ -5,7 +5,7 @@ import (
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"scroll-tech/common/types" "scroll-tech/bridge/internal/types"
) )
// L2Config loads l2geth configuration items. // L2Config loads l2geth configuration items.

View File

@@ -6,27 +6,25 @@ import (
"math/big" "math/big"
// not sure if this will make problems when relay with l1geth // not sure if this will make problems when relay with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics" gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/types"
bridge_abi "scroll-tech/bridge/abi" bridgeAbi "scroll-tech/bridge/internal/abi"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/sender" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
) )
var ( var (
bridgeL1MsgsRelayedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/relayed/total", metrics.ScrollRegistry) bridgeL1MsgsRelayedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/total", metrics.ScrollRegistry)
bridgeL1MsgsRelayedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry) bridgeL1MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
) )
// Layer1Relayer is responsible for // Layer1Relayer is responsible for
@@ -38,7 +36,6 @@ var (
type Layer1Relayer struct { type Layer1Relayer struct {
ctx context.Context ctx context.Context
db database.OrmFactory
cfg *config.RelayerConfig cfg *config.RelayerConfig
// channel used to communicate with transaction sender // channel used to communicate with transaction sender
@@ -53,10 +50,13 @@ type Layer1Relayer struct {
lastGasPrice uint64 lastGasPrice uint64
minGasPrice uint64 minGasPrice uint64
gasPriceDiff uint64 gasPriceDiff uint64
l1MessageOrm *orm.L1Message
l1Block *orm.L1Block
} }
// NewLayer1Relayer will return a new instance of Layer1RelayerClient // NewLayer1Relayer will return a new instance of Layer1RelayerClient
func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer1Relayer, error) { func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys) messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil { if err != nil {
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey) addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
@@ -88,21 +88,21 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
} }
l1Relayer := &Layer1Relayer{ l1Relayer := &Layer1Relayer{
ctx: ctx, cfg: cfg,
db: db, ctx: ctx,
l1MessageOrm: orm.NewL1Message(db),
l1Block: orm.NewL1Block(db),
messageSender: messageSender, messageSender: messageSender,
l2MessengerABI: bridge_abi.L2ScrollMessengerABI, l2MessengerABI: bridgeAbi.L2ScrollMessengerABI,
gasOracleSender: gasOracleSender, gasOracleSender: gasOracleSender,
l1GasOracleABI: bridge_abi.L1GasPriceOracleABI, l1GasOracleABI: bridgeAbi.L1GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay, minGasLimitForMessageRelay: minGasLimitForMessageRelay,
minGasPrice: minGasPrice, minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff, gasPriceDiff: gasPriceDiff,
cfg: cfg,
} }
go l1Relayer.handleConfirmLoop(ctx) go l1Relayer.handleConfirmLoop(ctx)
@@ -112,7 +112,7 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain // ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
func (r *Layer1Relayer) ProcessSavedEvents() { func (r *Layer1Relayer) ProcessSavedEvents() {
// msgs are sorted by nonce in increasing order // msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL1MessagesByStatus(types.MsgPending, 100) msgs, err := r.l1MessageOrm.GetL1MessagesByStatus(types.MsgPending, 100)
if err != nil { if err != nil {
log.Error("Failed to fetch unprocessed L1 messages", "err", err) log.Error("Failed to fetch unprocessed L1 messages", "err", err)
return return
@@ -123,7 +123,7 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
} }
for _, msg := range msgs { for _, msg := range msgs {
if err = r.processSavedEvent(msg); err != nil { if err = r.processSavedEvent(&msg); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) { if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err) log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
} }
@@ -132,15 +132,15 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
} }
} }
func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error { func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
calldata := common.Hex2Bytes(msg.Calldata) calldata := common.Hex2Bytes(msg.Calldata)
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), calldata, r.minGasLimitForMessageRelay) hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), calldata, r.minGasLimitForMessageRelay)
if err != nil && err.Error() == "execution reverted: Message expired" { if err != nil && errors.Is(err, ErrExecutionRevertedMessageExpired) {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired) return r.l1MessageOrm.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
} }
if err != nil && err.Error() == "execution reverted: Message was already successfully executed" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed) if err != nil && errors.Is(err, ErrExecutionRevertedAlreadySuccessExecuted) {
return r.l1MessageOrm.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
} }
if err != nil { if err != nil {
return err return err
@@ -148,7 +148,7 @@ func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error {
bridgeL1MsgsRelayedTotalCounter.Inc(1) bridgeL1MsgsRelayedTotalCounter.Inc(1)
log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash) log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
err = r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String()) err = r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
if err != nil { if err != nil {
log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.msgHash", msg.MsgHash, "msg.height", msg.Height, "err", err) log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.msgHash", msg.MsgHash, "msg.height", msg.Height, "err", err)
} }
@@ -157,13 +157,13 @@ func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error {
// ProcessGasPriceOracle imports gas price to layer2 // ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() { func (r *Layer1Relayer) ProcessGasPriceOracle() {
latestBlockHeight, err := r.db.GetLatestL1BlockHeight() latestBlockHeight, err := r.l1Block.GetLatestL1BlockHeight()
if err != nil { if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err) log.Warn("Failed to fetch latest L1 block height from db", "err", err)
return return
} }
blocks, err := r.db.GetL1BlockInfos(map[string]interface{}{ blocks, err := r.l1Block.GetL1BlockInfos(map[string]interface{}{
"number": latestBlockHeight, "number": latestBlockHeight,
}) })
if err != nil { if err != nil {
@@ -176,7 +176,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
} }
block := blocks[0] block := blocks[0]
if block.GasOracleStatus == types.GasOraclePending { if types.GasOracleStatus(block.GasOracleStatus) == types.GasOraclePending {
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
// last is undefine or (block.BaseFee >= minGasPrice && exceed diff) // last is undefine or (block.BaseFee >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (block.BaseFee >= r.minGasPrice && (block.BaseFee >= r.lastGasPrice+expectedDelta || block.BaseFee <= r.lastGasPrice-expectedDelta)) { if r.lastGasPrice == 0 || (block.BaseFee >= r.minGasPrice && (block.BaseFee >= r.lastGasPrice+expectedDelta || block.BaseFee <= r.lastGasPrice-expectedDelta)) {
@@ -195,7 +195,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return return
} }
err = r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String()) err = r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
if err != nil { if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err) log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return return
@@ -214,14 +214,14 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case cfm := <-r.messageSender.ConfirmChan(): case cfm := <-r.messageSender.ConfirmChan():
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1) bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
if !cfm.IsSuccessful { if !cfm.IsSuccessful {
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String()) err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err) log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
} }
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm) log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else { } else {
// @todo handle db error // @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String()) err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err) log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
} }
@@ -230,14 +230,14 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case cfm := <-r.gasOracleSender.ConfirmChan(): case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful { if !cfm.IsSuccessful {
// @discuss: maybe make it pending again? // @discuss: maybe make it pending again?
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String()) err := r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err) log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
} }
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm) log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else { } else {
// @todo handle db error // @todo handle db error
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String()) err := r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err) log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
} }

View File

@@ -5,18 +5,19 @@ import (
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/bridge/sender" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/database" "scroll-tech/bridge/internal/orm/migrate"
bridgeUtils "scroll-tech/bridge/internal/utils"
) )
var ( var (
templateL1Message = []*types.L1Message{ templateL1Message = []*orm.L1Message{
{ {
QueueIndex: 1, QueueIndex: 1,
MsgHash: "msg_hash1", MsgHash: "msg_hash1",
@@ -42,52 +43,51 @@ var (
} }
) )
func setupL1RelayerDB(t *testing.T) *gorm.DB {
db, err := bridgeUtils.InitDB(cfg.DBConfig)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
return db
}
// testCreateNewRelayer test create new relayer instance and stop // testCreateNewRelayer test create new relayer instance and stop
func testCreateNewL1Relayer(t *testing.T) { func testCreateNewL1Relayer(t *testing.T) {
// Create db handler and reset db. db := setupL1RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig) relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, relayer) assert.NotNil(t, relayer)
} }
func testL1RelayerProcessSaveEvents(t *testing.T) { func testL1RelayerProcessSaveEvents(t *testing.T) {
// Create db handler and reset db. db := setupL1RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err) l1MessageOrm := orm.NewL1Message(db)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig) relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, db.SaveL1Messages(context.Background(), templateL1Message)) assert.NoError(t, l1MessageOrm.SaveL1Messages(context.Background(), templateL1Message))
relayer.ProcessSavedEvents() relayer.ProcessSavedEvents()
msg1, err := db.GetL1MessageByQueueIndex(1) msg1, err := l1MessageOrm.GetL1MessageByQueueIndex(1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg1.Status, types.MsgSubmitted) assert.Equal(t, msg1.Status, types.MsgSubmitted)
msg2, err := db.GetL1MessageByQueueIndex(2) msg2, err := l1MessageOrm.GetL1MessageByQueueIndex(2)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg2.Status, types.MsgSubmitted) assert.Equal(t, msg2.Status, types.MsgSubmitted)
} }
func testL1RelayerMsgConfirm(t *testing.T) { func testL1RelayerMsgConfirm(t *testing.T) {
// Set up the database and defer closing it. db := setupL1RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db)
l1Messages := []*orm.L1Message{
{MsgHash: "msg-1", QueueIndex: 0},
{MsgHash: "msg-2", QueueIndex: 1},
}
err := l1MessageOrm.SaveL1Messages(context.Background(), l1Messages)
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
assert.NoError(t, db.SaveL1Messages(context.Background(),
[]*types.L1Message{
{MsgHash: "msg-1", QueueIndex: 0},
{MsgHash: "msg-2", QueueIndex: 1},
}))
// Create and set up the Layer1 Relayer. // Create and set up the Layer1 Relayer.
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@@ -107,27 +107,25 @@ func testL1RelayerMsgConfirm(t *testing.T) {
// Check the database for the updated status using TryTimes. // Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool { ok := utils.TryTimes(5, func() bool {
msg1, err1 := db.GetL1MessageByMsgHash("msg-1") msg1, err1 := l1MessageOrm.GetL1MessageByMsgHash("msg-1")
msg2, err2 := db.GetL1MessageByMsgHash("msg-2") msg2, err2 := l1MessageOrm.GetL1MessageByMsgHash("msg-2")
return err1 == nil && msg1.Status == types.MsgConfirmed && return err1 == nil && types.MsgStatus(msg1.Status) == types.MsgConfirmed &&
err2 == nil && msg2.Status == types.MsgRelayFailed err2 == nil && types.MsgStatus(msg2.Status) == types.MsgRelayFailed
}) })
assert.True(t, ok) assert.True(t, ok)
} }
func testL1RelayerGasOracleConfirm(t *testing.T) { func testL1RelayerGasOracleConfirm(t *testing.T) {
// Set up the database and defer closing it. db := setupL1RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err) l1BlockOrm := orm.NewL1Block(db)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l1Block := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1},
}
// Insert test data. // Insert test data.
assert.NoError(t, db.InsertL1Blocks(context.Background(), assert.NoError(t, l1BlockOrm.InsertL1Blocks(context.Background(), l1Block))
[]*types.L1BlockInfo{
{Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1},
}))
// Create and set up the Layer2 Relayer. // Create and set up the Layer2 Relayer.
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
@@ -148,10 +146,10 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
// Check the database for the updated status using TryTimes. // Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool { ok := utils.TryTimes(5, func() bool {
msg1, err1 := db.GetL1BlockInfos(map[string]interface{}{"hash": "gas-oracle-1"}) msg1, err1 := l1BlockOrm.GetL1BlockInfos(map[string]interface{}{"hash": "gas-oracle-1"})
msg2, err2 := db.GetL1BlockInfos(map[string]interface{}{"hash": "gas-oracle-2"}) msg2, err2 := l1BlockOrm.GetL1BlockInfos(map[string]interface{}{"hash": "gas-oracle-2"})
return err1 == nil && len(msg1) == 1 && msg1[0].GasOracleStatus == types.GasOracleImported && return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported &&
err2 == nil && len(msg2) == 1 && msg2[0].GasOracleStatus == types.GasOracleFailed err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleFailed
}) })
assert.True(t, ok) assert.True(t, ok)
} }

View File

@@ -3,9 +3,10 @@ package relayer
import ( import (
"context" "context"
"errors" "errors"
"fmt" "gorm.io/gorm"
"math/big" "math/big"
"runtime" "runtime"
"scroll-tech/bridge/internal/orm"
"sync" "sync"
"github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/accounts/abi"
@@ -13,29 +14,28 @@ import (
"github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics" gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"modernc.org/mathutil" "modernc.org/mathutil"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/database" bridgeAbi "scroll-tech/bridge/internal/abi"
"scroll-tech/bridge/internal/config"
bridge_abi "scroll-tech/bridge/abi" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/config" bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/sender" "scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/utils"
) )
var ( var (
bridgeL2MsgsRelayedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/relayed/total", metrics.ScrollRegistry) bridgeL2MsgsRelayedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry) bridgeL2BatchesFinalizedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry) bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
bridgeL2MsgsRelayedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/relayed/confirmed/total", metrics.ScrollRegistry) bridgeL2MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry) bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry) bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesSkippedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry) bridgeL2BatchesSkippedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
) )
// Layer2Relayer is responsible for // Layer2Relayer is responsible for
@@ -49,7 +49,10 @@ type Layer2Relayer struct {
l2Client *ethclient.Client l2Client *ethclient.Client
db database.OrmFactory blockBatchOrm *orm.BlockBatch
blockTraceOrm *orm.BlockTrace
l2MessageOrm *orm.L2Message
cfg *config.RelayerConfig cfg *config.RelayerConfig
messageSender *sender.Sender messageSender *sender.Sender
@@ -81,7 +84,7 @@ type Layer2Relayer struct {
} }
// NewLayer2Relayer will return a new instance of Layer2RelayerClient // NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) { func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
// @todo use different sender for relayer, block commit and proof finalize // @todo use different sender for relayer, block commit and proof finalize
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys) messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil { if err != nil {
@@ -118,18 +121,21 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db databa
layer2Relayer := &Layer2Relayer{ layer2Relayer := &Layer2Relayer{
ctx: ctx, ctx: ctx,
db: db,
blockBatchOrm: orm.NewBlockBatch(db),
l2MessageOrm: orm.NewL2Message(db),
blockTraceOrm: orm.NewBlockTrace(db),
l2Client: l2Client, l2Client: l2Client,
messageSender: messageSender, messageSender: messageSender,
l1MessengerABI: bridge_abi.L1ScrollMessengerABI, l1MessengerABI: bridgeAbi.L1ScrollMessengerABI,
rollupSender: rollupSender, rollupSender: rollupSender,
l1RollupABI: bridge_abi.ScrollChainABI, l1RollupABI: bridgeAbi.ScrollChainABI,
gasOracleSender: gasOracleSender, gasOracleSender: gasOracleSender,
l2GasOracleABI: bridge_abi.L2GasPriceOracleABI, l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay, minGasLimitForMessageRelay: minGasLimitForMessageRelay,
@@ -149,19 +155,23 @@ const processMsgLimit = 100
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain // ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
func (r *Layer2Relayer) ProcessSavedEvents() { func (r *Layer2Relayer) ProcessSavedEvents() {
batch, err := r.db.GetLatestFinalizedBatch() batch, err := r.blockBatchOrm.GetLatestBatchByRollupStatus([]types.RollupStatus{types.RollupFinalized})
if err != nil { if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err) log.Error("GetLatestFinalizedBatch failed", "err", err)
return return
} }
// msgs are sorted by nonce in increasing order // msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2Messages( fields := map[string]interface{}{
map[string]interface{}{"status": types.MsgPending}, "status": types.MsgPending,
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber), "height<=": batch.EndBlockNumber,
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit), }
) orderByList := []string{
"nonce ASC",
}
limit := processMsgLimit
msgs, err := r.l2MessageOrm.GetL2Messages(fields, orderByList, limit)
if err != nil { if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err) log.Error("Failed to fetch unprocessed L2 messages", "err", err)
return return
@@ -177,7 +187,7 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
for _, msg := range msgs[:size] { for _, msg := range msgs[:size] {
msg := msg msg := msg
g.Go(func() error { g.Go(func() error {
return r.processSavedEvent(msg) return r.processSavedEvent(&msg)
}) })
} }
if err := g.Wait(); err != nil { if err := g.Wait(); err != nil {
@@ -189,24 +199,23 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
} }
} }
func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error { func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
// @todo fetch merkle proof from l2geth // @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height) log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
// Get the block info that contains the message // Get the block info that contains the message
blockInfos, err := r.db.GetL2BlockInfos(map[string]interface{}{"number": msg.Height}) blockInfos, err := r.blockTraceOrm.GetL2BlockInfos(map[string]interface{}{"number": msg.Height}, nil, 0)
if err != nil { if err != nil {
log.Error("Failed to GetL2BlockInfos from DB", "number", msg.Height) log.Error("Failed to GetL2BlockInfos from DB", "number", msg.Height)
} }
blockInfo := blockInfos[0] if len(blockInfos) == 0 {
if !blockInfo.BatchHash.Valid { return errors.New("get block trace len is 0, exit")
log.Error("Block has not been batched yet", "number", blockInfo.Number, "msg.nonce", msg.Nonce)
return nil
} }
blockInfo := blockInfos[0]
// TODO: rebuild the withdraw trie to generate the merkle proof // TODO: rebuild the withdraw trie to generate the merkle proof
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{ proof := bridgeAbi.IL1ScrollMessengerL2MessageProof{
BatchHash: common.HexToHash(blockInfo.BatchHash.String), BatchHash: common.HexToHash(blockInfo.BatchHash),
MerkleProof: make([]byte, 0), MerkleProof: make([]byte, 0),
} }
from := common.HexToAddress(msg.Sender) from := common.HexToAddress(msg.Sender)
@@ -227,11 +236,11 @@ func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
} }
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, r.minGasLimitForMessageRelay) hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, r.minGasLimitForMessageRelay)
if err != nil && err.Error() == "execution reverted: Message expired" { if err != nil && errors.Is(err, ErrExecutionRevertedMessageExpired) {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired) return r.l2MessageOrm.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired)
} }
if err != nil && err.Error() == "execution reverted: Message was already successfully executed" { if err != nil && errors.Is(err, ErrExecutionRevertedAlreadySuccessExecuted) {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed) return r.l2MessageOrm.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
} }
if err != nil { if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) { if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
@@ -244,7 +253,7 @@ func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
// save status in db // save status in db
// @todo handle db error // @todo handle db error
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String()) err = r.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
if err != nil { if err != nil {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err) log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err return err
@@ -255,57 +264,59 @@ func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
// ProcessGasPriceOracle imports gas price to layer1 // ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() { func (r *Layer2Relayer) ProcessGasPriceOracle() {
batch, err := r.db.GetLatestBatch() batch, err := r.blockBatchOrm.GetLatestBatch()
if err != nil { if err != nil {
log.Error("Failed to GetLatestBatch", "err", err) log.Error("Failed to GetLatestBatch", "err", err)
return return
} }
if batch.OracleStatus == types.GasOraclePending { if types.GasOracleStatus(batch.OracleStatus) != types.GasOraclePending {
suggestGasPrice, err := r.l2Client.SuggestGasPrice(r.ctx) return
}
suggestGasPrice, err := r.l2Client.SuggestGasPrice(r.ctx)
if err != nil {
log.Error("Failed to fetch SuggestGasPrice from l2geth", "err", err)
return
}
suggestGasPriceUint64 := uint64(suggestGasPrice.Int64())
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
// last is undefine or (suggestGasPriceUint64 >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64 <= r.lastGasPrice-expectedDelta)) {
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
if err != nil { if err != nil {
log.Error("Failed to fetch SuggestGasPrice from l2geth", "err", err) log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
return return
} }
suggestGasPriceUint64 := uint64(suggestGasPrice.Int64())
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
// last is undefine or (suggestGasPriceUint64 >= minGasPrice && exceed diff) hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64 <= r.lastGasPrice-expectedDelta)) { if err != nil {
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice) if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
if err != nil { log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
return
} }
return
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
}
return
}
err = r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
return
}
r.lastGasPrice = suggestGasPriceUint64
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
} }
err = r.blockBatchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
return
}
r.lastGasPrice = suggestGasPriceUint64
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
} }
} }
// SendCommitTx sends commitBatches tx to L1. // SendCommitTx sends commitBatches tx to L1.
func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error { func (r *Layer2Relayer) SendCommitTx(batchData []*bridgeTypes.BatchData) error {
if len(batchData) == 0 { if len(batchData) == 0 {
log.Error("SendCommitTx receives empty batch") log.Error("SendCommitTx receives empty batch")
return nil return nil
} }
// pack calldata // pack calldata
commitBatches := make([]bridge_abi.IScrollChainBatch, len(batchData)) commitBatches := make([]bridgeAbi.IScrollChainBatch, len(batchData))
for i, batch := range batchData { for i, batch := range batchData {
commitBatches[i] = batch.Batch commitBatches[i] = batch.Batch
} }
@@ -341,7 +352,7 @@ func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error {
batchHashes := make([]string, len(batchData)) batchHashes := make([]string, len(batchData))
for i, batch := range batchData { for i, batch := range batchData {
batchHashes[i] = batch.Hash().Hex() batchHashes[i] = batch.Hash().Hex()
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHashes[i], txHash.String(), types.RollupCommitting) err = r.blockBatchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHashes[i], txHash.String(), types.RollupCommitting)
if err != nil { if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batchHashes[i], "index", batch.Batch.BatchIndex, "err", err) log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batchHashes[i], "index", batch.Batch.BatchIndex, "err", err)
} }
@@ -353,7 +364,7 @@ func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error {
// ProcessCommittedBatches submit proof to layer 1 rollup contract // ProcessCommittedBatches submit proof to layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() { func (r *Layer2Relayer) ProcessCommittedBatches() {
// set skipped batches in a single db operation // set skipped batches in a single db operation
if count, err := r.db.UpdateSkippedBatches(); err != nil { if count, err := r.blockBatchOrm.UpdateSkippedBatches(); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err) log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway // continue anyway
} else if count > 0 { } else if count > 0 {
@@ -362,7 +373,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
} }
// batches are sorted by batch index in increasing order // batches are sorted by batch index in increasing order
batchHashes, err := r.db.GetCommittedBatches(1) batchHashes, err := r.blockBatchOrm.GetCommittedBatches(1)
if err != nil { if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err) log.Error("Failed to fetch committed L2 batches", "err", err)
return return
@@ -373,7 +384,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
hash := batchHashes[0] hash := batchHashes[0]
// @todo add support to relay multiple batches // @todo add support to relay multiple batches
batches, err := r.db.GetBlockBatches(map[string]interface{}{"hash": hash}, "LIMIT 1") batches, err := r.blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": hash}, nil, 1)
if err != nil { if err != nil {
log.Error("Failed to fetch committed L2 batch", "hash", hash, "err", err) log.Error("Failed to fetch committed L2 batch", "hash", hash, "err", err)
return return
@@ -384,33 +395,31 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
} }
batch := batches[0] batch := batches[0]
status := batch.ProvingStatus status := types.ProvingStatus(batch.ProvingStatus)
switch status { switch status {
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned: case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
// The proof for this block is not ready yet. // The proof for this block is not ready yet.
return return
case types.ProvingTaskProved: case types.ProvingTaskProved:
// It's an intermediate state. The roller manager received the proof but has not verified // It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified. // the proof yet. We don't roll up the proof until it's verified.
return return
case types.ProvingTaskFailed, types.ProvingTaskSkipped: case types.ProvingTaskFailed, types.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake // note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
if err = r.blockBatchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err) log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
} }
case types.ProvingTaskVerified: case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash) log.Info("Start to roll up zk proof", "hash", hash)
success := false success := false
previousBatch, err := r.db.GetLatestFinalizingOrFinalizedBatch() rollupStatues := []types.RollupStatus{
types.RollupFinalizing,
types.RollupFinalized,
}
previousBatch, err := r.blockBatchOrm.GetLatestBatchByRollupStatus(rollupStatues)
// skip submitting proof // skip submitting proof
if err == nil && uint64(batch.CreatedAt.Sub(*previousBatch.CreatedAt).Seconds()) < r.cfg.FinalizeBatchIntervalSec { if err == nil && uint64(batch.CreatedAt.Sub(previousBatch.CreatedAt).Seconds()) < r.cfg.FinalizeBatchIntervalSec {
log.Info( log.Info(
"Not enough time passed, skipping", "Not enough time passed, skipping",
"hash", hash, "hash", hash,
@@ -420,7 +429,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
"lastFinalizingCreatedAt", previousBatch.CreatedAt, "lastFinalizingCreatedAt", previousBatch.CreatedAt,
) )
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil { if err = r.blockBatchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err) log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
} else { } else {
success = true success = true
@@ -439,13 +448,13 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// TODO: need to revisit this and have a more fine-grained error handling // TODO: need to revisit this and have a more fine-grained error handling
if !success { if !success {
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash) log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash)
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil { if err = r.blockBatchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err) log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
} }
} }
}() }()
proofBuffer, icBuffer, err := r.db.GetVerifiedProofAndInstanceCommitmentsByHash(hash) proofBuffer, icBuffer, err := r.blockBatchOrm.GetVerifiedProofAndInstanceCommitmentsByHash(hash)
if err != nil { if err != nil {
log.Warn("fetch get proof by hash failed", "hash", hash, "err", err) log.Warn("fetch get proof by hash failed", "hash", hash, "err", err)
return return
@@ -485,7 +494,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
log.Info("finalizeBatchWithProof in layer1", "batch_hash", hash, "tx_hash", hash) log.Info("finalizeBatchWithProof in layer1", "batch_hash", hash, "tx_hash", hash)
// record and sync with db, @todo handle db error // record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing) err = r.blockBatchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
if err != nil { if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", hash, "err", err) log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", hash, "err", err)
} }
@@ -512,7 +521,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation) log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
} }
// @todo handle db error // @todo handle db error
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), status, confirmation.TxHash.String()) err := r.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), status, confirmation.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err) log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
} }
@@ -533,7 +542,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
} }
for _, batchHash := range batchHashes { for _, batchHash := range batchHashes {
// @todo handle db error // @todo handle db error
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), status) err := r.blockBatchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), status)
if err != nil { if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_hash", batchHash, "err", err) log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_hash", batchHash, "err", err)
} }
@@ -553,7 +562,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation) log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
} }
// @todo handle db error // @todo handle db error
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status) err := r.blockBatchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
if err != nil { if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", batchHash.(string), "err", err) log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", batchHash.(string), "err", err)
} }
@@ -575,14 +584,14 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
case cfm := <-r.gasOracleSender.ConfirmChan(): case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful { if !cfm.IsSuccessful {
// @discuss: maybe make it pending again? // @discuss: maybe make it pending again?
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String()) err := r.blockBatchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err) log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
} }
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm) log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
} else { } else {
// @todo handle db error // @todo handle db error
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String()) err := r.blockBatchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil { if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err) log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
} }

View File

@@ -3,26 +3,29 @@ package relayer
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"gorm.io/gorm"
"math/big" "math/big"
"os" "os"
"strconv" "strconv"
"testing" "testing"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/bridge/sender" "scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/database" "scroll-tech/bridge/internal/orm/migrate"
"scroll-tech/database/migrate" bridgeTypes "scroll-tech/bridge/internal/types"
bridgeUtils "scroll-tech/bridge/internal/utils"
) )
var ( var (
templateL2Message = []*types.L2Message{ templateL2Message = []orm.L2Message{
{ {
Nonce: 1, Nonce: 1,
Height: 1, Height: 1,
@@ -35,144 +38,170 @@ var (
} }
) )
func testCreateNewRelayer(t *testing.T) { func setupL2RelayerDB(t *testing.T) *gorm.DB {
// Create db handler and reset db. db, err := bridgeUtils.InitDB(cfg.DBConfig)
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB)) sqlDB, err := db.DB()
defer db.Close() assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
return db
}
func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t)
defer bridgeUtils.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, relayer) assert.NotNil(t, relayer)
} }
func testL2RelayerProcessSaveEvents(t *testing.T) { func testL2RelayerProcessSaveEvents(t *testing.T) {
// Create db handler and reset db. db := setupL2RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
err = db.SaveL2Messages(context.Background(), templateL2Message) l2MessageOrm := orm.NewL2Message(db)
err = l2MessageOrm.SaveL2Messages(context.Background(), templateL2Message)
assert.NoError(t, err) assert.NoError(t, err)
traces := []*types.WrappedBlock{ traces := []*bridgeTypes.WrappedBlock{
{ {
Header: &geth_types.Header{ Header: &gethTypes.Header{
Number: big.NewInt(int64(templateL2Message[0].Height)), Number: big.NewInt(int64(templateL2Message[0].Height)),
}, },
Transactions: nil, Transactions: nil,
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
}, },
{ {
Header: &geth_types.Header{ Header: &gethTypes.Header{
Number: big.NewInt(int64(templateL2Message[0].Height + 1)), Number: big.NewInt(int64(templateL2Message[0].Height + 1)),
}, },
Transactions: nil, Transactions: nil,
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
}, },
} }
assert.NoError(t, db.InsertWrappedBlocks(traces))
parentBatch1 := &types.BlockBatch{ blockTraceOrm := orm.NewBlockTrace(db)
assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(traces))
blockBatchOrm := orm.NewBlockBatch(db)
parentBatch1 := &orm.BlockBatch{
Index: 0, Index: 0,
Hash: common.Hash{}.Hex(), Hash: common.Hash{}.Hex(),
StateRoot: common.Hash{}.Hex(), StateRoot: common.Hash{}.Hex(),
} }
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil) batchData1 := bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
batchHash := batchData1.Hash().Hex() batchHash := batchData1.Hash().Hex()
assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{1}, batchHash)) err = db.Transaction(func(tx *gorm.DB) error {
assert.NoError(t, dbTx.Commit()) rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData1)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, []uint64{1}, batchHash)
if dbTxErr != nil {
return dbTxErr
}
return nil
})
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchHash, types.RollupFinalized) err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupFinalized)
assert.NoError(t, err) assert.NoError(t, err)
relayer.ProcessSavedEvents() relayer.ProcessSavedEvents()
msg, err := db.GetL2MessageByNonce(templateL2Message[0].Nonce) msg, err := l2MessageOrm.GetL2MessageByNonce(templateL2Message[0].Nonce)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.MsgSubmitted, msg.Status) assert.Equal(t, types.MsgSubmitted, msg.Status)
} }
func testL2RelayerProcessCommittedBatches(t *testing.T) { func testL2RelayerProcessCommittedBatches(t *testing.T) {
// Create db handler and reset db. db := setupL2RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
parentBatch1 := &types.BlockBatch{ parentBatch1 := &orm.BlockBatch{
Index: 0, Index: 0,
Hash: common.Hash{}.Hex(), Hash: common.Hash{}.Hex(),
StateRoot: common.Hash{}.Hex(), StateRoot: common.Hash{}.Hex(),
} }
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
dbTx, err := db.Beginx() blockBatchOrm := orm.NewBlockBatch(db)
assert.NoError(t, err) batchData1 := bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
batchHash := batchData1.Hash().Hex() batchHash := batchData1.Hash().Hex()
err = dbTx.Commit() err = db.Transaction(func(tx *gorm.DB) error {
rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData1)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
return nil
})
assert.NoError(t, err) assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted) err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
assert.NoError(t, err) assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100) err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err) assert.NoError(t, err)
err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified) err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err) assert.NoError(t, err)
relayer.ProcessCommittedBatches() relayer.ProcessCommittedBatches()
status, err := db.GetRollupStatus(batchHash) statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizing, status) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
} }
func testL2RelayerSkipBatches(t *testing.T) { func testL2RelayerSkipBatches(t *testing.T) {
// Create db handler and reset db. db := setupL2RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig) relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
blockBatchOrm := orm.NewBlockBatch(db)
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string { createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string {
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchData := genBatchData(t, index) batchData := genBatchData(t, index)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData)) err = db.Transaction(func(tx *gorm.DB) error {
batchHash := batchData.Hash().Hex() rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData)
err = dbTx.Commit() if dbTxErr != nil {
assert.NoError(t, err) return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
return nil
})
err = db.UpdateRollupStatus(context.Background(), batchHash, rollupStatus) batchHash := batchData.Hash().Hex()
assert.NoError(t, err)
err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, rollupStatus)
assert.NoError(t, err) assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100) err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err) assert.NoError(t, err)
err = db.UpdateProvingStatus(batchHash, provingStatus) err = blockBatchOrm.UpdateProvingStatus(batchHash, provingStatus)
assert.NoError(t, err) assert.NoError(t, err)
return batchHash return batchHash
} }
@@ -196,29 +225,30 @@ func testL2RelayerSkipBatches(t *testing.T) {
relayer.ProcessCommittedBatches() relayer.ProcessCommittedBatches()
for _, id := range skipped { for _, id := range skipped {
status, err := db.GetRollupStatus(id) statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{id})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizationSkipped, status) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
} }
for _, id := range notSkipped { for _, id := range notSkipped {
status, err := db.GetRollupStatus(id) statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{id})
assert.NoError(t, err) assert.NoError(t, err)
assert.NotEqual(t, types.RollupFinalizationSkipped, status) assert.Equal(t, 1, len(statuses))
assert.NotEqual(t, types.RollupFinalizationSkipped, statuses[0])
} }
} }
func testL2RelayerMsgConfirm(t *testing.T) { func testL2RelayerMsgConfirm(t *testing.T) {
// Set up the database and defer closing it. db := setupL2RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
l2MessageOrm := orm.NewL2Message(db)
insertL2Mssages := []orm.L2Message{
{MsgHash: "msg-1", Nonce: 0},
{MsgHash: "msg-2", Nonce: 1},
}
err := l2MessageOrm.SaveL2Messages(context.Background(), insertL2Mssages)
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
assert.NoError(t, db.SaveL2Messages(context.Background(), []*types.L2Message{
{MsgHash: "msg-1", Nonce: 0}, {MsgHash: "msg-2", Nonce: 1},
}))
// Create and set up the Layer2 Relayer. // Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
@@ -241,32 +271,46 @@ func testL2RelayerMsgConfirm(t *testing.T) {
// Check the database for the updated status using TryTimes. // Check the database for the updated status using TryTimes.
assert.True(t, utils.TryTimes(5, func() bool { assert.True(t, utils.TryTimes(5, func() bool {
msg1, err1 := db.GetL2MessageByMsgHash("msg-1") fields1 := map[string]interface{}{"msg_hash": "msg-1"}
msg2, err2 := db.GetL2MessageByMsgHash("msg-2") msg1, err1 := l2MessageOrm.GetL2Messages(fields1, nil, 0)
return err1 == nil && msg1.Status == types.MsgConfirmed && if len(msg1) != 1 {
err2 == nil && msg2.Status == types.MsgRelayFailed return false
}
fields2 := map[string]interface{}{"msg_hash": "msg-2"}
msg2, err2 := l2MessageOrm.GetL2Messages(fields2, nil, 0)
if len(msg2) != 1 {
return false
}
return err1 == nil && types.MsgStatus(msg1[0].Status) == types.MsgConfirmed &&
err2 == nil && types.MsgStatus(msg2[0].Status) == types.MsgRelayFailed
})) }))
} }
func testL2RelayerRollupConfirm(t *testing.T) { func testL2RelayerRollupConfirm(t *testing.T) {
// Set up the database and defer closing it. db := setupL2RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data. // Insert test data.
batches := make([]*types.BatchData, 6) batches := make([]*bridgeTypes.BatchData, 6)
for i := 0; i < 6; i++ { for i := 0; i < 6; i++ {
batches[i] = genBatchData(t, uint64(i)) batches[i] = genBatchData(t, uint64(i))
} }
dbTx, err := db.Beginx() blockBatchOrm := orm.NewBlockBatch(db)
err := db.Transaction(func(tx *gorm.DB) error {
for _, batch := range batches {
rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batch)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
}
return nil
})
assert.NoError(t, err) assert.NoError(t, err)
for _, batch := range batches {
assert.NoError(t, db.NewBatchInDBTx(dbTx, batch))
}
assert.NoError(t, dbTx.Commit())
// Create and set up the Layer2 Relayer. // Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
@@ -310,8 +354,8 @@ func testL2RelayerRollupConfirm(t *testing.T) {
} }
for i, batch := range batches[:6] { for i, batch := range batches[:6] {
batchInDB, err := db.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()}) batchInDB, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()}, nil, 0)
if err != nil || len(batchInDB) != 1 || batchInDB[0].RollupStatus != expectedStatuses[i] { if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] {
return false return false
} }
} }
@@ -321,24 +365,30 @@ func testL2RelayerRollupConfirm(t *testing.T) {
} }
func testL2RelayerGasOracleConfirm(t *testing.T) { func testL2RelayerGasOracleConfirm(t *testing.T) {
// Set up the database and defer closing it. db := setupL2RelayerDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer bridgeUtils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data. // Insert test data.
batches := make([]*types.BatchData, 2) batches := make([]*bridgeTypes.BatchData, 2)
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
batches[i] = genBatchData(t, uint64(i)) batches[i] = genBatchData(t, uint64(i))
} }
dbTx, err := db.Beginx() blockBatchOrm := orm.NewBlockBatch(db)
err := db.Transaction(func(tx *gorm.DB) error {
for _, batch := range batches {
rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batch)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
}
return nil
})
assert.NoError(t, err) assert.NoError(t, err)
for _, batch := range batches {
assert.NoError(t, db.NewBatchInDBTx(dbTx, batch))
}
assert.NoError(t, dbTx.Commit())
// Create and set up the Layer2 Relayer. // Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
@@ -360,8 +410,8 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
ok := utils.TryTimes(5, func() bool { ok := utils.TryTimes(5, func() bool {
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleFailed} expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleFailed}
for i, batch := range batches { for i, batch := range batches {
gasOracle, err := db.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()}) gasOracle, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()}, nil, 0)
if err != nil || len(gasOracle) != 1 || gasOracle[0].OracleStatus != expectedStatuses[i] { if err != nil || len(gasOracle) != 1 || types.GasOracleStatus(gasOracle[0].OracleStatus) != expectedStatuses[i] {
return false return false
} }
} }
@@ -370,17 +420,17 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
assert.True(t, ok) assert.True(t, ok)
} }
func genBatchData(t *testing.T, index uint64) *types.BatchData { func genBatchData(t *testing.T, index uint64) *bridgeTypes.BatchData {
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json") templateBlockTrace, err := os.ReadFile("../../../testdata/blockTrace_02.json")
assert.NoError(t, err) assert.NoError(t, err)
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock := &types.WrappedBlock{} wrappedBlock := &bridgeTypes.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock) err = json.Unmarshal(templateBlockTrace, wrappedBlock)
assert.NoError(t, err) assert.NoError(t, err)
wrappedBlock.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16)) wrappedBlock.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16))
parentBatch := &types.BlockBatch{ parentBatch := &orm.BlockBatch{
Index: index, Index: index,
Hash: "0x0000000000000000000000000000000000000000", Hash: "0x0000000000000000000000000000000000000000",
} }
return types.NewBatchData(parentBatch, []*types.WrappedBlock{wrappedBlock}, nil) return bridgeTypes.NewBatchData(parentBatch, []*bridgeTypes.WrappedBlock{wrappedBlock}, nil)
} }

View File

@@ -0,0 +1,18 @@
package relayer
import "errors"
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
defaultL2MessageRelayMinGasLimit = 200000
)
var (
ErrExecutionRevertedMessageExpired = errors.New("execution reverted: Message expired")
ErrExecutionRevertedAlreadySuccessExecuted = errors.New("execution reverted: Message was already successfully executed")
)

View File

@@ -10,9 +10,10 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
) )
var ( var (
@@ -25,12 +26,12 @@ var (
l2Cli *ethclient.Client l2Cli *ethclient.Client
// block trace // block trace
wrappedBlock1 *types.WrappedBlock wrappedBlock1 *bridgeTypes.WrappedBlock
wrappedBlock2 *types.WrappedBlock wrappedBlock2 *bridgeTypes.WrappedBlock
// batch data // batch data
batchData1 *types.BatchData batchData1 *bridgeTypes.BatchData
batchData2 *types.BatchData batchData2 *bridgeTypes.BatchData
) )
func setupEnv(t *testing.T) (err error) { func setupEnv(t *testing.T) (err error) {
@@ -42,43 +43,48 @@ func setupEnv(t *testing.T) (err error) {
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint() cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint() cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = base.DBConfig cfg.DBConfig = &bridgeTypes.DBConfig{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
// Create l2geth client. // Create l2geth client.
l2Cli, err = base.L2Client() l2Cli, err = base.L2Client()
assert.NoError(t, err) assert.NoError(t, err)
templateBlockTrace1, err := os.ReadFile("../../common/testdata/blockTrace_02.json") templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
if err != nil { if err != nil {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock1 = &types.WrappedBlock{} wrappedBlock1 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil { if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err return err
} }
parentBatch1 := &types.BlockBatch{ parentBatch1 := &orm.BlockBatch{
Index: 0, Index: 0,
Hash: "0x0cc6b102c2924402c14b2e3a19baccc316252bfdc44d9ec62e942d34e39ec729", Hash: "0x0cc6b102c2924402c14b2e3a19baccc316252bfdc44d9ec62e942d34e39ec729",
StateRoot: "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d", StateRoot: "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
} }
batchData1 = types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil) batchData1 = bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)
templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json") templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
if err != nil { if err != nil {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock2 = &types.WrappedBlock{} wrappedBlock2 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil { if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err return err
} }
parentBatch2 := &types.BlockBatch{ parentBatch2 := &orm.BlockBatch{
Index: batchData1.Batch.BatchIndex, Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(), Hash: batchData1.Hash().Hex(),
StateRoot: batchData1.Batch.NewStateRoot.String(), StateRoot: batchData1.Batch.NewStateRoot.String(),
} }
batchData2 = types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil) batchData2 = bridgeTypes.NewBatchData(parentBatch2, []*bridgeTypes.WrappedBlock{wrappedBlock2}, nil)
log.Info("batchHash", "batchhash1", batchData1.Hash().Hex(), "batchhash2", batchData2.Hash().Hex()) log.Info("batchHash", "batchhash1", batchData1.Hash().Hex(), "batchhash2", batchData2.Hash().Hex())

View File

@@ -17,8 +17,8 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/utils" "scroll-tech/bridge/internal/utils"
) )
const ( const (

View File

@@ -9,8 +9,6 @@ import (
"testing" "testing"
"time" "time"
"golang.org/x/sync/errgroup"
cmap "github.com/orcaman/concurrent-map" cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/core/types"
@@ -18,10 +16,11 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
) )
const TXBatch = 50 const TXBatch = 50

View File

@@ -8,68 +8,32 @@ import (
"time" "time"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics" gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
bridgeAbi "scroll-tech/bridge/internal/abi"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/database"
bridgeabi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
) )
var ( var (
bridgeL2BatchesGasOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/gas/over/threshold/total", metrics.ScrollRegistry) bridgeL2BatchesGasOverThresholdTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/gas/over/threshold/total", metrics.ScrollRegistry)
bridgeL2BatchesTxsOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/txs/over/threshold/total", metrics.ScrollRegistry) bridgeL2BatchesTxsOverThresholdTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/txs/over/threshold/total", metrics.ScrollRegistry)
bridgeL2BatchesBlocksCreatedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/blocks/created/total", metrics.ScrollRegistry) bridgeL2BatchesBlocksCreatedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/blocks/created/total", metrics.ScrollRegistry)
bridgeL2BatchesCommitsSentTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/commits/sent/total", metrics.ScrollRegistry) bridgeL2BatchesCommitsSentTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/commits/sent/total", metrics.ScrollRegistry)
bridgeL2BatchesTxsCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/txs/created/per/batch", metrics.ScrollRegistry) bridgeL2BatchesTxsCreatedPerBatchGauge = gethMetrics.NewRegisteredGauge("bridge/l2/batches/txs/created/per/batch", metrics.ScrollRegistry)
bridgeL2BatchesGasCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/gas/created/per/batch", metrics.ScrollRegistry) bridgeL2BatchesGasCreatedPerBatchGauge = gethMetrics.NewRegisteredGauge("bridge/l2/batches/gas/created/per/batch", metrics.ScrollRegistry)
) )
// AddBatchInfoToDB inserts the batch information to the BlockBatch table and updates the batch_hash
// in all blocks included in the batch.
func AddBatchInfoToDB(db database.OrmFactory, batchData *types.BatchData) error {
dbTx, err := db.Beginx()
if err != nil {
return err
}
var dbTxErr error
defer func() {
if dbTxErr != nil {
if err := dbTx.Rollback(); err != nil {
log.Error("dbTx.Rollback()", "err", err)
}
}
}()
if dbTxErr = db.NewBatchInDBTx(dbTx, batchData); dbTxErr != nil {
return dbTxErr
}
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
if dbTxErr = db.SetBatchHashForL2BlocksInDBTx(dbTx, blockIDs, batchData.Hash().Hex()); dbTxErr != nil {
return dbTxErr
}
dbTxErr = dbTx.Commit()
return dbTxErr
}
// BatchProposer sends batches commit transactions to relayer. // BatchProposer sends batches commit transactions to relayer.
type BatchProposer struct { type BatchProposer struct {
mutex sync.Mutex mutex sync.Mutex
ctx context.Context
ctx context.Context db *gorm.DB
orm database.OrmFactory
batchTimeSec uint64 batchTimeSec uint64
batchGasThreshold uint64 batchGasThreshold uint64
@@ -81,18 +45,23 @@ type BatchProposer struct {
commitCalldataMinSize uint64 commitCalldataMinSize uint64
proofGenerationFreq uint64 proofGenerationFreq uint64
batchDataBuffer []*types.BatchData batchDataBuffer []*bridgeTypes.BatchData
relayer *relayer.Layer2Relayer relayer *relayer.Layer2Relayer
piCfg *types.PublicInputHashConfig blockBatchOrm *orm.BlockBatch
blockTraceOrm *orm.BlockTrace
piCfg *bridgeTypes.PublicInputHashConfig
} }
// NewBatchProposer will return a new instance of BatchProposer. // NewBatchProposer will return a new instance of BatchProposer.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *relayer.Layer2Relayer, orm database.OrmFactory) *BatchProposer { func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *relayer.Layer2Relayer, db *gorm.DB) *BatchProposer {
p := &BatchProposer{ p := &BatchProposer{
mutex: sync.Mutex{}, mutex: sync.Mutex{},
ctx: ctx, ctx: ctx,
orm: orm, db: db,
blockBatchOrm: orm.NewBlockBatch(db),
blockTraceOrm: orm.NewBlockTrace(db),
batchTimeSec: cfg.BatchTimeSec, batchTimeSec: cfg.BatchTimeSec,
batchGasThreshold: cfg.BatchGasThreshold, batchGasThreshold: cfg.BatchGasThreshold,
batchTxNumThreshold: cfg.BatchTxNumThreshold, batchTxNumThreshold: cfg.BatchTxNumThreshold,
@@ -117,7 +86,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, rela
func (p *BatchProposer) recoverBatchDataBuffer() { func (p *BatchProposer) recoverBatchDataBuffer() {
// batches are sorted by batch index in increasing order // batches are sorted by batch index in increasing order
batchHashes, err := p.orm.GetPendingBatches(math.MaxInt32) batchHashes, err := p.blockBatchOrm.GetPendingBatches(math.MaxInt32)
if err != nil { if err != nil {
log.Crit("Failed to fetch pending L2 batches", "err", err) log.Crit("Failed to fetch pending L2 batches", "err", err)
} }
@@ -127,17 +96,17 @@ func (p *BatchProposer) recoverBatchDataBuffer() {
log.Info("Load pending batches into batchDataBuffer") log.Info("Load pending batches into batchDataBuffer")
// helper function to cache and get BlockBatch from DB // helper function to cache and get BlockBatch from DB
blockBatchCache := make(map[string]*types.BlockBatch) blockBatchCache := make(map[string]orm.BlockBatch)
getBlockBatch := func(batchHash string) (*types.BlockBatch, error) { getBlockBatch := func(batchHash string) (*orm.BlockBatch, error) {
if blockBatch, ok := blockBatchCache[batchHash]; ok { if blockBatch, ok := blockBatchCache[batchHash]; ok {
return blockBatch, nil return &blockBatch, nil
} }
blockBatches, err := p.orm.GetBlockBatches(map[string]interface{}{"hash": batchHash}) blockBatches, err := p.blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 0)
if err != nil || len(blockBatches) == 0 { if err != nil || len(blockBatches) == 0 {
return nil, err return nil, err
} }
blockBatchCache[batchHash] = blockBatches[0] blockBatchCache[batchHash] = blockBatches[0]
return blockBatches[0], nil return &blockBatches[0], nil
} }
// recover the in-memory batchData from DB // recover the in-memory batchData from DB
@@ -155,23 +124,26 @@ func (p *BatchProposer) recoverBatchDataBuffer() {
continue continue
} }
blockInfos, err := p.orm.GetL2BlockInfos( whereFileds := map[string]interface{}{
map[string]interface{}{"batch_hash": batchHash}, "batch_hash": batchHash,
"order by number ASC", }
) orderByList := []string{
"number ASC",
}
blockTraces, err := p.blockTraceOrm.GetL2BlockInfos(whereFileds, orderByList, 0)
if err != nil { if err != nil {
log.Error("could not GetL2BlockInfos", "batch_hash", batchHash, "error", err) log.Error("could not GetL2BlockInfos", "batch_hash", batchHash, "error", err)
continue continue
} }
if len(blockInfos) != int(blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1) { if len(blockTraces) != int(blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1) {
log.Error("the number of block info retrieved from DB mistmatches the batch info in the DB", log.Error("the number of block info retrieved from DB mistmatches the batch info in the DB",
"len(blockInfos)", len(blockInfos), "len(blockInfos)", len(blockTraces),
"expected", blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1) "expected", blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1)
continue continue
} }
batchData, err := p.generateBatchData(parentBatch, blockInfos) batchData, err := p.generateBatchData(parentBatch, blockTraces)
if err != nil { if err != nil {
continue continue
} }
@@ -192,16 +164,14 @@ func (p *BatchProposer) TryProposeBatch() {
defer p.mutex.Unlock() defer p.mutex.Unlock()
for p.getBatchDataBufferSize() < p.batchDataBufferSizeLimit { for p.getBatchDataBufferSize() < p.batchDataBufferSizeLimit {
blocks, err := p.orm.GetUnbatchedL2Blocks( orderBy := []string{"number ASC"}
map[string]interface{}{}, blockTraces, err := p.blockTraceOrm.GetUnbatchedL2Blocks(map[string]interface{}{}, orderBy, int(p.batchBlocksLimit))
fmt.Sprintf("order by number ASC LIMIT %d", p.batchBlocksLimit),
)
if err != nil { if err != nil {
log.Error("failed to get unbatched blocks", "err", err) log.Error("failed to get unbatched blocks", "err", err)
return return
} }
batchCreated := p.proposeBatch(blocks) batchCreated := p.proposeBatch(blockTraces)
// while size of batchDataBuffer < commitCalldataMinSize, // while size of batchDataBuffer < commitCalldataMinSize,
// proposer keeps fetching and porposing batches. // proposer keeps fetching and porposing batches.
@@ -230,7 +200,7 @@ func (p *BatchProposer) TryCommitBatches() {
commit := false commit := false
calldataByteLen := uint64(0) calldataByteLen := uint64(0)
for ; index < len(p.batchDataBuffer); index++ { for ; index < len(p.batchDataBuffer); index++ {
calldataByteLen += bridgeabi.GetBatchCalldataLength(&p.batchDataBuffer[index].Batch) calldataByteLen += bridgeAbi.GetBatchCalldataLength(&p.batchDataBuffer[index].Batch)
if calldataByteLen > p.commitCalldataSizeLimit { if calldataByteLen > p.commitCalldataSizeLimit {
commit = true commit = true
if index == 0 { if index == 0 {
@@ -262,13 +232,13 @@ func (p *BatchProposer) TryCommitBatches() {
} }
} }
func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool { func (p *BatchProposer) proposeBatch(blockTraces []orm.BlockTrace) bool {
if len(blocks) == 0 { if len(blockTraces) == 0 {
return false return false
} }
approximatePayloadSize := func(hash string) (uint64, error) { approximatePayloadSize := func(hash string) (uint64, error) {
traces, err := p.orm.GetL2WrappedBlocks(map[string]interface{}{"hash": hash}) traces, err := p.blockTraceOrm.GetL2WrappedBlocks(map[string]interface{}{"hash": hash})
if err != nil { if err != nil {
return 0, err return 0, err
} }
@@ -282,46 +252,46 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
return uint64(size), nil return uint64(size), nil
} }
firstSize, err := approximatePayloadSize(blocks[0].Hash) firstSize, err := approximatePayloadSize(blockTraces[0].Hash)
if err != nil { if err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err) log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err)
return false return false
} }
if firstSize > p.commitCalldataSizeLimit { if firstSize > p.commitCalldataSizeLimit {
log.Warn("oversized payload even for only 1 block", "height", blocks[0].Number, "size", firstSize) log.Warn("oversized payload even for only 1 block", "height", blockTraces[0].Number, "size", firstSize)
// note: we should probably fail here once we can ensure this will not happen // note: we should probably fail here once we can ensure this will not happen
if err := p.createBatchForBlocks(blocks[:1]); err != nil { if err := p.createBatchForBlocks(blockTraces[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err) log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err)
return false return false
} }
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum)) bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blockTraces[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed)) bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blockTraces[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1) bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
return true return true
} }
if blocks[0].GasUsed > p.batchGasThreshold { if blockTraces[0].GasUsed > p.batchGasThreshold {
bridgeL2BatchesGasOverThresholdTotalCounter.Inc(1) bridgeL2BatchesGasOverThresholdTotalCounter.Inc(1)
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed) log.Warn("gas overflow even for only 1 block", "height", blockTraces[0].Number, "gas", blockTraces[0].GasUsed)
if err := p.createBatchForBlocks(blocks[:1]); err != nil { if err := p.createBatchForBlocks(blockTraces[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err) log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err)
} else { } else {
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum)) bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blockTraces[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed)) bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blockTraces[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1) bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
} }
return true return true
} }
if blocks[0].TxNum > p.batchTxNumThreshold { if blockTraces[0].TxNum > p.batchTxNumThreshold {
bridgeL2BatchesTxsOverThresholdTotalCounter.Inc(1) bridgeL2BatchesTxsOverThresholdTotalCounter.Inc(1)
log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum) log.Warn("too many txs even for only 1 block", "height", blockTraces[0].Number, "tx_num", blockTraces[0].TxNum)
if err := p.createBatchForBlocks(blocks[:1]); err != nil { if err := p.createBatchForBlocks(blockTraces[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err) log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err)
} else { } else {
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum)) bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blockTraces[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed)) bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blockTraces[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1) bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
} }
return true return true
@@ -330,7 +300,7 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
var gasUsed, txNum, payloadSize uint64 var gasUsed, txNum, payloadSize uint64
reachThreshold := false reachThreshold := false
// add blocks into batch until reach batchGasThreshold // add blocks into batch until reach batchGasThreshold
for i, block := range blocks { for i, block := range blockTraces {
size, err := approximatePayloadSize(block.Hash) size, err := approximatePayloadSize(block.Hash)
if err != nil { if err != nil {
log.Error("failed to create batch", "number", block.Number, "err", err) log.Error("failed to create batch", "number", block.Number, "err", err)
@@ -338,7 +308,7 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
} }
if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) || (payloadSize+size > p.commitCalldataSizeLimit) { if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) || (payloadSize+size > p.commitCalldataSizeLimit) {
blocks = blocks[:i] blockTraces = blockTraces[:i]
reachThreshold = true reachThreshold = true
break break
} }
@@ -350,23 +320,23 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
// if too few gas gathered, but we don't want to halt, we then check the first block in the batch: // if too few gas gathered, but we don't want to halt, we then check the first block in the batch:
// if it's not old enough we will skip proposing the batch, // if it's not old enough we will skip proposing the batch,
// otherwise we will still propose a batch // otherwise we will still propose a batch
if !reachThreshold && blocks[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) { if !reachThreshold && blockTraces[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) {
return false return false
} }
if err := p.createBatchForBlocks(blocks); err != nil { if err := p.createBatchForBlocks(blockTraces); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err) log.Error("failed to create batch", "from", blockTraces[0].Number, "to", blockTraces[len(blockTraces)-1].Number, "err", err)
} else { } else {
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(txNum)) bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(txNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(gasUsed)) bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(gasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(int64(len(blocks))) bridgeL2BatchesBlocksCreatedTotalCounter.Inc(int64(len(blockTraces)))
} }
return true return true
} }
func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error { func (p *BatchProposer) createBatchForBlocks(blocks []orm.BlockTrace) error {
lastBatch, err := p.orm.GetLatestBatch() lastBatch, err := p.blockBatchOrm.GetLatestBatch()
if err != nil { if err != nil {
// We should not receive sql.ErrNoRows error. The DB should have the batch entry that contains the genesis block. // We should not receive sql.ErrNoRows error. The DB should have the batch entry that contains the genesis block.
return err return err
@@ -378,7 +348,7 @@ func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error {
return err return err
} }
if err := AddBatchInfoToDB(p.orm, batchData); err != nil { if err := orm.AddBatchInfoToDB(p.db, batchData); err != nil {
log.Error("addBatchInfoToDB failed", "BatchHash", batchData.Hash(), "error", err) log.Error("addBatchInfoToDB failed", "BatchHash", batchData.Hash(), "error", err)
return err return err
} }
@@ -387,22 +357,28 @@ func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error {
return nil return nil
} }
func (p *BatchProposer) generateBatchData(parentBatch *types.BlockBatch, blocks []*types.BlockInfo) (*types.BatchData, error) { func (p *BatchProposer) generateBatchData(parentBatch *orm.BlockBatch, blocks []orm.BlockTrace) (*bridgeTypes.BatchData, error) {
var wrappedBlocks []*types.WrappedBlock var wrappedBlocks []*bridgeTypes.WrappedBlock
for _, block := range blocks { for _, block := range blocks {
trs, err := p.orm.GetL2WrappedBlocks(map[string]interface{}{"hash": block.Hash}) trs, err := p.blockTraceOrm.GetL2WrappedBlocks(map[string]interface{}{"hash": block.Hash})
if err != nil || len(trs) != 1 { if err != nil || len(trs) != 1 {
log.Error("Failed to GetBlockTraces", "hash", block.Hash, "err", err) log.Error("Failed to GetBlockTraces", "hash", block.Hash, "err", err)
return nil, err return nil, err
} }
wrappedBlocks = append(wrappedBlocks, trs[0])
tmpWrappedBlock := bridgeTypes.WrappedBlock{
Header: trs[0].Header,
Transactions: trs[0].Transactions,
WithdrawTrieRoot: trs[0].WithdrawTrieRoot,
}
wrappedBlocks = append(wrappedBlocks, &tmpWrappedBlock)
} }
return types.NewBatchData(parentBatch, wrappedBlocks, p.piCfg), nil return bridgeTypes.NewBatchData(parentBatch, wrappedBlocks, p.piCfg), nil
} }
func (p *BatchProposer) getBatchDataBufferSize() (size uint64) { func (p *BatchProposer) getBatchDataBufferSize() (size uint64) {
for _, batchData := range p.batchDataBuffer { for _, batchData := range p.batchDataBuffer {
size += bridgeabi.GetBatchCalldataLength(&batchData.Batch) size += bridgeAbi.GetBatchCalldataLength(&batchData.Batch)
} }
return return
} }

View File

@@ -0,0 +1,206 @@
package watcher
import (
"context"
"math"
"strings"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/common"
gethTtypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
bridgeUtils "scroll-tech/bridge/internal/utils"
)
func testBatchProposerProposeBatch(t *testing.T) {
db := setupDB(t)
defer bridgeUtils.CloseDB(db)
p := &BatchProposer{
batchGasThreshold: 1000,
batchTxNumThreshold: 10,
batchTimeSec: 300,
commitCalldataSizeLimit: 500,
}
patchGuard := gomonkey.ApplyMethodFunc(db, "GetL2WrappedBlocks", func(fields map[string]interface{}, args ...string) ([]*types.WrappedBlock, error) {
hash, _ := fields["hash"].(string)
if hash == "blockWithLongData" {
longData := strings.Repeat("0", 1000)
return []*types.WrappedBlock{{
Transactions: []*gethTtypes.TransactionData{{
Data: longData,
}},
}}, nil
}
return []*types.WrappedBlock{{
Transactions: []*gethTtypes.TransactionData{{
Data: "short",
}},
}}, nil
})
defer patchGuard.Reset()
patchGuard.ApplyPrivateMethod(p, "createBatchForBlocks", func(*BatchProposer, []*types.BlockInfo) error {
return nil
})
block1 := orm.BlockTrace{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix()) - 200}
block2 := orm.BlockTrace{Number: 2, GasUsed: 200, TxNum: 2, BlockTimestamp: uint64(time.Now().Unix())}
block3 := orm.BlockTrace{Number: 3, GasUsed: 300, TxNum: 11, BlockTimestamp: uint64(time.Now().Unix())}
block4 := orm.BlockTrace{Number: 4, GasUsed: 1001, TxNum: 3, BlockTimestamp: uint64(time.Now().Unix())}
blockOutdated := orm.BlockTrace{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Add(-400 * time.Second).Unix())}
blockWithLongData := orm.BlockTrace{Hash: "blockWithLongData", Number: 5, GasUsed: 500, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix())}
testCases := []struct {
description string
blocks []orm.BlockTrace
expectedRes bool
}{
{"Empty block list", []orm.BlockTrace{}, false},
{"Single block exceeding gas threshold", []orm.BlockTrace{block4}, true},
{"Single block exceeding transaction number threshold", []orm.BlockTrace{block3}, true},
{"Multiple blocks meeting thresholds", []orm.BlockTrace{block1, block2, block3}, true},
{"Multiple blocks not meeting thresholds", []orm.BlockTrace{block1, block2}, false},
{"Outdated and valid block", []orm.BlockTrace{blockOutdated, block2}, true},
{"Single block with long data", []orm.BlockTrace{blockWithLongData}, true},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
assert.Equal(t, tc.expectedRes, p.proposeBatch(tc.blocks), "Failed on: %s", tc.description)
})
}
}
func testBatchProposerBatchGeneration(t *testing.T) {
db := setupDB(t)
subCtx, cancel := context.WithCancel(context.Background())
defer func() {
bridgeUtils.CloseDB(db)
cancel()
}()
blockTraceOrm := orm.NewBlockTrace(db)
// Insert traces into db.
assert.NoError(t, blockTraceOrm.InsertWrappedBlocks([]*bridgeTypes.WrappedBlock{wrappedBlock1}))
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
loopToFetchEvent(subCtx, wc)
blockBatchOrm := orm.NewBlockBatch(db)
batch, err := blockBatchOrm.GetLatestBatch()
assert.NoError(t, err)
// Create a new batch.
batchData := bridgeTypes.NewBatchData(&orm.BlockBatch{
Index: 0,
Hash: batch.Hash,
StateRoot: batch.StateRoot,
}, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, relayer, db)
proposer.TryProposeBatch()
infos, err := blockTraceOrm.GetUnbatchedL2Blocks(map[string]interface{}{}, []string{"number ASC"}, 100)
assert.NoError(t, err)
assert.Equal(t, 0, len(infos))
batches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchData.Hash().Hex()}, nil, 1)
assert.NoError(t, err)
assert.Equal(t, 1, len(batches))
}
func testBatchProposerGracefulRestart(t *testing.T) {
db := setupDB(t)
defer bridgeUtils.CloseDB(db)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
blockTraceOrm := orm.NewBlockTrace(db)
// Insert traces into db.
assert.NoError(t, blockTraceOrm.InsertWrappedBlocks([]*bridgeTypes.WrappedBlock{wrappedBlock2}))
// Insert block batch into db.
insertBlockBatch := &orm.BlockBatch{
Index: 0,
Hash: common.Hash{}.String(),
StateRoot: common.Hash{}.String(),
}
wrapperBlock := []*bridgeTypes.WrappedBlock{wrappedBlock1}
batchData1 := bridgeTypes.NewBatchData(insertBlockBatch, wrapperBlock, nil)
parentBatch2 := &orm.BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
StateRoot: batchData1.Batch.NewStateRoot.String(),
}
batchData2 := bridgeTypes.NewBatchData(parentBatch2, []*bridgeTypes.WrappedBlock{wrappedBlock2}, nil)
blockBatchOrm := orm.NewBlockBatch(db)
err = db.Transaction(func(tx *gorm.DB) error {
_, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData1)
if dbTxErr != nil {
return dbTxErr
}
_, dbTxErr = blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData2)
if dbTxErr != nil {
return dbTxErr
}
numbers1 := []uint64{batchData1.Batch.Blocks[0].BlockNumber}
hash1 := batchData1.Hash().Hex()
dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, numbers1, hash1)
if dbTxErr != nil {
return dbTxErr
}
numbers2 := []uint64{batchData2.Batch.Blocks[0].BlockNumber}
hash2 := batchData2.Hash().Hex()
dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, numbers2, hash2)
if dbTxErr != nil {
return dbTxErr
}
return nil
})
assert.NoError(t, err)
err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchData1.Hash().Hex(), types.RollupFinalized)
assert.NoError(t, err)
batchHashes, err := blockBatchOrm.GetPendingBatches(math.MaxInt32)
assert.NoError(t, err)
assert.Equal(t, 1, len(batchHashes))
assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])
// test p.recoverBatchDataBuffer().
_ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, relayer, db)
batchHashes, err = blockBatchOrm.GetPendingBatches(math.MaxInt32)
assert.NoError(t, err)
assert.Equal(t, 0, len(batchHashes))
batches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchData2.Hash().Hex()}, nil, 1)
assert.NoError(t, err)
assert.Equal(t, 1, len(batches))
}

View File

@@ -1,6 +1,8 @@
package watcher package watcher
import "github.com/scroll-tech/go-ethereum/common" import (
"github.com/scroll-tech/go-ethereum/common"
)
const contractEventsBlocksFetchLimit = int64(10) const contractEventsBlocksFetchLimit = int64(10)

View File

@@ -2,33 +2,31 @@ package watcher
import ( import (
"context" "context"
"math/big"
geth "github.com/scroll-tech/go-ethereum" geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics" gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"math/big"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/database" bridgeAbi "scroll-tech/bridge/internal/abi"
"scroll-tech/bridge/internal/orm"
bridge_abi "scroll-tech/bridge/abi" "scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/utils"
) )
var ( var (
bridgeL1MsgsSyncHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry) bridgeL1MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
bridgeL1MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL1MsgsSentEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry) bridgeL1MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRelayedEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/relayed/events/total", metrics.ScrollRegistry) bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRollupEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
) )
type rollupEvent struct { type rollupEvent struct {
@@ -39,9 +37,12 @@ type rollupEvent struct {
// L1WatcherClient will listen for smart contract events from Eth L1. // L1WatcherClient will listen for smart contract events from Eth L1.
type L1WatcherClient struct { type L1WatcherClient struct {
ctx context.Context ctx context.Context
client *ethclient.Client client *ethclient.Client
db database.OrmFactory l1MessageOrm *orm.L1Message
l2MessageOrm *orm.L2Message
l1BlockOrm *orm.L1Block
l1BatchOrm *orm.BlockBatch
// The number of new blocks to wait for a block to be confirmed // The number of new blocks to wait for a block to be confirmed
confirmations rpc.BlockNumber confirmations rpc.BlockNumber
@@ -62,17 +63,19 @@ type L1WatcherClient struct {
} }
// NewL1WatcherClient returns a new instance of L1WatcherClient. // NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *L1WatcherClient { func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB) *L1WatcherClient {
savedHeight, err := db.GetLayer1LatestWatchedHeight() l1MessageOrm := orm.NewL1Message(db)
savedHeight, err := l1MessageOrm.GetLayer1LatestWatchedHeight()
if err != nil { if err != nil {
log.Warn("Failed to fetch height from db", "err", err) log.Warn("Failed to fetch height from db", "err", err)
savedHeight = 0 savedHeight = 0
} }
if savedHeight < int64(startHeight) { if savedHeight < startHeight {
savedHeight = int64(startHeight) savedHeight = startHeight
} }
savedL1BlockHeight, err := db.GetLatestL1BlockHeight() l1BlockOrm := orm.NewL1Block(db)
savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight()
if err != nil { if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err) log.Warn("Failed to fetch latest L1 block height from db", "err", err)
savedL1BlockHeight = 0 savedL1BlockHeight = 0
@@ -84,17 +87,20 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
return &L1WatcherClient{ return &L1WatcherClient{
ctx: ctx, ctx: ctx,
client: client, client: client,
db: db, l1MessageOrm: l1MessageOrm,
l1BlockOrm: l1BlockOrm,
l1BatchOrm: orm.NewBlockBatch(db),
l2MessageOrm: orm.NewL2Message(db),
confirmations: confirmations, confirmations: confirmations,
messengerAddress: messengerAddress, messengerAddress: messengerAddress,
messengerABI: bridge_abi.L1ScrollMessengerABI, messengerABI: bridgeAbi.L1ScrollMessengerABI,
messageQueueAddress: messageQueueAddress, messageQueueAddress: messageQueueAddress,
messageQueueABI: bridge_abi.L1MessageQueueABI, messageQueueABI: bridgeAbi.L1MessageQueueABI,
scrollChainAddress: scrollChainAddress, scrollChainAddress: scrollChainAddress,
scrollChainABI: bridge_abi.ScrollChainABI, scrollChainABI: bridgeAbi.ScrollChainABI,
processedMsgHeight: uint64(savedHeight), processedMsgHeight: uint64(savedHeight),
processedBlockHeight: savedL1BlockHeight, processedBlockHeight: savedL1BlockHeight,
@@ -130,17 +136,17 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
toBlock = fromBlock + contractEventsBlocksFetchLimit - 1 toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
} }
var blocks []*types.L1BlockInfo var blocks []orm.L1Block
var err error var err error
height := fromBlock height := fromBlock
for ; height <= toBlock; height++ { for ; height <= toBlock; height++ {
var block *geth_types.Header var block *gethTypes.Header
block, err = w.client.HeaderByNumber(w.ctx, big.NewInt(height)) block, err = w.client.HeaderByNumber(w.ctx, big.NewInt(height))
if err != nil { if err != nil {
log.Warn("Failed to get block", "height", height, "err", err) log.Warn("Failed to get block", "height", height, "err", err)
break break
} }
blocks = append(blocks, &types.L1BlockInfo{ blocks = append(blocks, orm.L1Block{
Number: uint64(height), Number: uint64(height),
Hash: block.Hash().String(), Hash: block.Hash().String(),
BaseFee: block.BaseFee.Uint64(), BaseFee: block.BaseFee.Uint64(),
@@ -154,7 +160,7 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
toBlock = height - 1 toBlock = height - 1
// insert succeed blocks // insert succeed blocks
err = w.db.InsertL1Blocks(w.ctx, blocks) err = w.l1BlockOrm.InsertL1Blocks(w.ctx, blocks)
if err != nil { if err != nil {
log.Warn("Failed to insert L1 block to db", "fromBlock", fromBlock, "toBlock", toBlock, "err", err) log.Warn("Failed to insert L1 block to db", "fromBlock", fromBlock, "toBlock", toBlock, "err", err)
return err return err
@@ -198,11 +204,11 @@ func (w *L1WatcherClient) FetchContractEvent() error {
Topics: make([][]common.Hash, 1), Topics: make([][]common.Hash, 1),
} }
query.Topics[0] = make([]common.Hash, 5) query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = bridge_abi.L1QueueTransactionEventSignature query.Topics[0][0] = bridgeAbi.L1QueueTransactionEventSignature
query.Topics[0][1] = bridge_abi.L1RelayedMessageEventSignature query.Topics[0][1] = bridgeAbi.L1RelayedMessageEventSignature
query.Topics[0][2] = bridge_abi.L1FailedRelayedMessageEventSignature query.Topics[0][2] = bridgeAbi.L1FailedRelayedMessageEventSignature
query.Topics[0][3] = bridge_abi.L1CommitBatchEventSignature query.Topics[0][3] = bridgeAbi.L1CommitBatchEventSignature
query.Topics[0][4] = bridge_abi.L1FinalizeBatchEventSignature query.Topics[0][4] = bridgeAbi.L1FinalizeBatchEventSignature
logs, err := w.client.FilterLogs(w.ctx, query) logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil { if err != nil {
@@ -234,7 +240,7 @@ func (w *L1WatcherClient) FetchContractEvent() error {
for _, event := range rollupEvents { for _, event := range rollupEvents {
batchHashes = append(batchHashes, event.batchHash.String()) batchHashes = append(batchHashes, event.batchHash.String())
} }
statuses, err := w.db.GetRollupStatusByHashList(batchHashes) statuses, err := w.l1BatchOrm.GetRollupStatusByHashList(batchHashes)
if err != nil { if err != nil {
log.Error("Failed to GetRollupStatusByHashList", "err", err) log.Error("Failed to GetRollupStatusByHashList", "err", err)
return err return err
@@ -250,9 +256,9 @@ func (w *L1WatcherClient) FetchContractEvent() error {
// only update when db status is before event status // only update when db status is before event status
if event.status > status { if event.status > status {
if event.status == types.RollupFinalized { if event.status == types.RollupFinalized {
err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status) err = w.l1BatchOrm.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
} else if event.status == types.RollupCommitted { } else if event.status == types.RollupCommitted {
err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status) err = w.l1BatchOrm.UpdateCommitTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
} }
if err != nil { if err != nil {
log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err) log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
@@ -270,13 +276,13 @@ func (w *L1WatcherClient) FetchContractEvent() error {
} else { } else {
msgStatus = types.MsgFailed msgStatus = types.MsgFailed
} }
if err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil { if err = w.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err) log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err return err
} }
} }
if err = w.db.SaveL1Messages(w.ctx, sentMessageEvents); err != nil { if err = w.l1MessageOrm.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
return err return err
} }
@@ -287,17 +293,16 @@ func (w *L1WatcherClient) FetchContractEvent() error {
return nil return nil
} }
func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) { func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
// Need use contract abi to parse event Log // Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up // Can only be tested after we have our contracts set up
var l1Messages []*orm.L1Message
var l1Messages []*types.L1Message
var relayedMessages []relayedMessage var relayedMessages []relayedMessage
var rollupEvents []rollupEvent var rollupEvents []rollupEvent
for _, vLog := range logs { for _, vLog := range logs {
switch vLog.Topics[0] { switch vLog.Topics[0] {
case bridge_abi.L1QueueTransactionEventSignature: case bridgeAbi.L1QueueTransactionEventSignature:
event := bridge_abi.L1QueueTransactionEvent{} event := bridgeAbi.L1QueueTransactionEvent{}
err := utils.UnpackLog(w.messageQueueABI, &event, "QueueTransaction", vLog) err := utils.UnpackLog(w.messageQueueABI, &event, "QueueTransaction", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer1 QueueTransaction event", "err", err) log.Warn("Failed to unpack layer1 QueueTransaction event", "err", err)
@@ -306,7 +311,7 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
msgHash := common.BytesToHash(crypto.Keccak256(event.Data)) msgHash := common.BytesToHash(crypto.Keccak256(event.Data))
l1Messages = append(l1Messages, &types.L1Message{ l1Messages = append(l1Messages, &orm.L1Message{
QueueIndex: event.QueueIndex.Uint64(), QueueIndex: event.QueueIndex.Uint64(),
MsgHash: msgHash.String(), MsgHash: msgHash.String(),
Height: vLog.BlockNumber, Height: vLog.BlockNumber,
@@ -317,8 +322,8 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
GasLimit: event.GasLimit.Uint64(), GasLimit: event.GasLimit.Uint64(),
Layer1Hash: vLog.TxHash.Hex(), Layer1Hash: vLog.TxHash.Hex(),
}) })
case bridge_abi.L1RelayedMessageEventSignature: case bridgeAbi.L1RelayedMessageEventSignature:
event := bridge_abi.L1RelayedMessageEvent{} event := bridgeAbi.L1RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog) err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer1 RelayedMessage event", "err", err) log.Warn("Failed to unpack layer1 RelayedMessage event", "err", err)
@@ -330,8 +335,8 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
txHash: vLog.TxHash, txHash: vLog.TxHash,
isSuccessful: true, isSuccessful: true,
}) })
case bridge_abi.L1FailedRelayedMessageEventSignature: case bridgeAbi.L1FailedRelayedMessageEventSignature:
event := bridge_abi.L1FailedRelayedMessageEvent{} event := bridgeAbi.L1FailedRelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog) err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer1 FailedRelayedMessage event", "err", err) log.Warn("Failed to unpack layer1 FailedRelayedMessage event", "err", err)
@@ -343,8 +348,8 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
txHash: vLog.TxHash, txHash: vLog.TxHash,
isSuccessful: false, isSuccessful: false,
}) })
case bridge_abi.L1CommitBatchEventSignature: case bridgeAbi.L1CommitBatchEventSignature:
event := bridge_abi.L1CommitBatchEvent{} event := bridgeAbi.L1CommitBatchEvent{}
err := utils.UnpackLog(w.scrollChainABI, &event, "CommitBatch", vLog) err := utils.UnpackLog(w.scrollChainABI, &event, "CommitBatch", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer1 CommitBatch event", "err", err) log.Warn("Failed to unpack layer1 CommitBatch event", "err", err)
@@ -356,8 +361,8 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
txHash: vLog.TxHash, txHash: vLog.TxHash,
status: types.RollupCommitted, status: types.RollupCommitted,
}) })
case bridge_abi.L1FinalizeBatchEventSignature: case bridgeAbi.L1FinalizeBatchEventSignature:
event := bridge_abi.L1FinalizeBatchEvent{} event := bridgeAbi.L1FinalizeBatchEvent{}
err := utils.UnpackLog(w.scrollChainABI, &event, "FinalizeBatch", vLog) err := utils.UnpackLog(w.scrollChainABI, &event, "FinalizeBatch", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err) log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err)

View File

@@ -3,6 +3,7 @@ package watcher
import ( import (
"context" "context"
"errors" "errors"
"gorm.io/gorm"
"math/big" "math/big"
"testing" "testing"
@@ -11,31 +12,23 @@ import (
"github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/core/types"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey" "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
commonTypes "scroll-tech/common/types" commonTypes "scroll-tech/common/types"
"scroll-tech/database" bridgeAbi "scroll-tech/bridge/internal/abi"
"scroll-tech/database/migrate" "scroll-tech/bridge/internal/utils"
) )
func setupL1Watcher(t *testing.T) (*L1WatcherClient, database.OrmFactory) { func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
db, err := database.NewOrmFactory(cfg.DBConfig) db := setupDB(t)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
client, err := ethclient.Dial(base.L1gethImg.Endpoint()) client, err := ethclient.Dial(base.L1gethImg.Endpoint())
assert.NoError(t, err) assert.NoError(t, err)
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db) watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
assert.NoError(t, watcher.FetchContractEvent()) assert.NoError(t, watcher.FetchContractEvent())
return watcher, db return watcher, db
@@ -43,13 +36,13 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, database.OrmFactory) {
func testFetchContractEvent(t *testing.T) { func testFetchContractEvent(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
assert.NoError(t, watcher.FetchContractEvent()) assert.NoError(t, watcher.FetchContractEvent())
} }
func testL1WatcherClientFetchBlockHeader(t *testing.T) { func testL1WatcherClientFetchBlockHeader(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
convey.Convey("test toBlock < fromBlock", t, func() { convey.Convey("test toBlock < fromBlock", t, func() {
var blockHeight uint64 var blockHeight uint64
if watcher.ProcessedBlockHeight() <= 0 { if watcher.ProcessedBlockHeight() <= 0 {
@@ -120,7 +113,7 @@ func testL1WatcherClientFetchBlockHeader(t *testing.T) {
func testL1WatcherClientFetchContractEvent(t *testing.T) { func testL1WatcherClientFetchContractEvent(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
watcher.SetConfirmations(rpc.SafeBlockNumber) watcher.SetConfirmations(rpc.SafeBlockNumber)
convey.Convey("get latest confirmed block number failure", t, func() { convey.Convey("get latest confirmed block number failure", t, func() {
@@ -165,14 +158,14 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
convey.Convey("parse bridge event logs failure", t, func() { convey.Convey("parse bridge event logs failure", t, func() {
targetErr := errors.New("parse log failure") targetErr := errors.New("parse log failure")
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []geth_types.Log) ([]*commonTypes.L1Message, []relayedMessage, []rollupEvent, error) { patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []gethTypes.Log) ([]*commonTypes.L1Message, []relayedMessage, []rollupEvent, error) {
return nil, nil, nil, targetErr return nil, nil, nil, targetErr
}) })
err := watcher.FetchContractEvent() err := watcher.FetchContractEvent()
assert.Equal(t, err.Error(), targetErr.Error()) assert.Equal(t, err.Error(), targetErr.Error())
}) })
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []geth_types.Log) ([]*commonTypes.L1Message, []relayedMessage, []rollupEvent, error) { patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []gethTypes.Log) ([]*commonTypes.L1Message, []relayedMessage, []rollupEvent, error) {
rollupEvents := []rollupEvent{ rollupEvents := []rollupEvent{
{ {
batchHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), batchHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
@@ -289,11 +282,11 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) { func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
logs := []geth_types.Log{ logs := []gethTypes.Log{
{ {
Topics: []common.Hash{bridge_abi.L1QueueTransactionEventSignature}, Topics: []common.Hash{bridgeAbi.L1QueueTransactionEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -315,7 +308,7 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
convey.Convey("L1QueueTransactionEventSignature success", t, func() { convey.Convey("L1QueueTransactionEventSignature success", t, func() {
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1QueueTransactionEvent) tmpOut := out.(*bridgeAbi.L1QueueTransactionEvent)
tmpOut.QueueIndex = big.NewInt(100) tmpOut.QueueIndex = big.NewInt(100)
tmpOut.Data = []byte("test data") tmpOut.Data = []byte("test data")
tmpOut.Sender = common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30") tmpOut.Sender = common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
@@ -337,11 +330,11 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
logs := []geth_types.Log{ logs := []gethTypes.Log{
{ {
Topics: []common.Hash{bridge_abi.L1RelayedMessageEventSignature}, Topics: []common.Hash{bridgeAbi.L1RelayedMessageEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -364,7 +357,7 @@ func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
convey.Convey("L1RelayedMessageEventSignature success", t, func() { convey.Convey("L1RelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1RelayedMessageEvent) tmpOut := out.(*bridgeAbi.L1RelayedMessageEvent)
tmpOut.MessageHash = msgHash tmpOut.MessageHash = msgHash
return nil return nil
}) })
@@ -381,11 +374,10 @@ func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
logs := []gethTypes.Log{
logs := []geth_types.Log{
{ {
Topics: []common.Hash{bridge_abi.L1FailedRelayedMessageEventSignature}, Topics: []common.Hash{bridgeAbi.L1FailedRelayedMessageEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -408,7 +400,7 @@ func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T)
convey.Convey("L1FailedRelayedMessageEventSignature success", t, func() { convey.Convey("L1FailedRelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1FailedRelayedMessageEvent) tmpOut := out.(*bridgeAbi.L1FailedRelayedMessageEvent)
tmpOut.MessageHash = msgHash tmpOut.MessageHash = msgHash
return nil return nil
}) })
@@ -425,11 +417,10 @@ func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T)
func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) { func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
logs := []gethTypes.Log{
logs := []geth_types.Log{
{ {
Topics: []common.Hash{bridge_abi.L1CommitBatchEventSignature}, Topics: []common.Hash{bridgeAbi.L1CommitBatchEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -452,7 +443,7 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
convey.Convey("L1CommitBatchEventSignature success", t, func() { convey.Convey("L1CommitBatchEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1CommitBatchEvent) tmpOut := out.(*bridgeAbi.L1CommitBatchEvent)
tmpOut.BatchHash = msgHash tmpOut.BatchHash = msgHash
return nil return nil
}) })
@@ -470,11 +461,10 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) { func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t) watcher, db := setupL1Watcher(t)
defer db.Close() defer utils.CloseDB(db)
logs := []gethTypes.Log{
logs := []geth_types.Log{
{ {
Topics: []common.Hash{bridge_abi.L1FinalizeBatchEventSignature}, Topics: []common.Hash{bridgeAbi.L1FinalizeBatchEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -497,7 +487,7 @@ func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
convey.Convey("L1FinalizeBatchEventSignature success", t, func() { convey.Convey("L1FinalizeBatchEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1FinalizeBatchEvent) tmpOut := out.(*bridgeAbi.L1FinalizeBatchEvent)
tmpOut.BatchHash = msgHash tmpOut.BatchHash = msgHash
return nil return nil
}) })

View File

@@ -4,37 +4,37 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"gorm.io/gorm"
"math/big" "math/big"
"scroll-tech/bridge/internal/orm"
geth "github.com/scroll-tech/go-ethereum" geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil" "github.com/scroll-tech/go-ethereum/common/hexutil"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event" "github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics" gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/database" bridgeAbi "scroll-tech/bridge/internal/abi"
bridgeTypes "scroll-tech/bridge/internal/types"
bridge_abi "scroll-tech/bridge/abi" "scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/utils"
) )
// Metrics // Metrics
var ( var (
bridgeL2MsgsSyncHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry) bridgeL2MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry) bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry) bridgeL2BlocksFetchedGapGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL2MsgsSentEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry) bridgeL2MsgsAppendEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry)
bridgeL2MsgsAppendEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry) bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
bridgeL2MsgsRelayedEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
) )
// L2WatcherClient provide APIs which support others to subscribe to various event from l2geth // L2WatcherClient provide APIs which support others to subscribe to various event from l2geth
@@ -44,7 +44,11 @@ type L2WatcherClient struct {
*ethclient.Client *ethclient.Client
orm database.OrmFactory db *gorm.DB
blockBatchOrm *orm.BlockBatch
blockTrace *orm.BlockTrace
l1MessageOrm *orm.L1Message
l2MessageOrm *orm.L2Message
confirmations rpc.BlockNumber confirmations rpc.BlockNumber
@@ -62,25 +66,31 @@ type L2WatcherClient struct {
} }
// NewL2WatcherClient take a l2geth instance to generate a l2watcherclient instance // NewL2WatcherClient take a l2geth instance to generate a l2watcherclient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, orm database.OrmFactory) *L2WatcherClient { func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
savedHeight, err := orm.GetLayer2LatestWatchedHeight() l2MessageOrm := orm.NewL2Message(db)
savedHeight, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
if err != nil { if err != nil {
log.Warn("fetch height from db failed", "err", err) log.Warn("fetch height from db failed", "err", err)
savedHeight = 0 savedHeight = 0
} }
w := L2WatcherClient{ w := L2WatcherClient{
ctx: ctx, ctx: ctx,
Client: client, db: db,
orm: orm, Client: client,
processedMsgHeight: uint64(savedHeight),
blockBatchOrm: orm.NewBlockBatch(db),
blockTrace: orm.NewBlockTrace(db),
l1MessageOrm: orm.NewL1Message(db),
l2MessageOrm: l2MessageOrm,
processedMsgHeight: savedHeight,
confirmations: confirmations, confirmations: confirmations,
messengerAddress: messengerAddress, messengerAddress: messengerAddress,
messengerABI: bridge_abi.L2ScrollMessengerABI, messengerABI: bridgeAbi.L2ScrollMessengerABI,
messageQueueAddress: messageQueueAddress, messageQueueAddress: messageQueueAddress,
messageQueueABI: bridge_abi.L2MessageQueueABI, messageQueueABI: bridgeAbi.L2MessageQueueABI,
withdrawTrieRootSlot: withdrawTrieRootSlot, withdrawTrieRootSlot: withdrawTrieRootSlot,
stopped: 0, stopped: 0,
@@ -95,7 +105,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
} }
func (w *L2WatcherClient) initializeGenesis() error { func (w *L2WatcherClient) initializeGenesis() error {
if count, err := w.orm.GetBatchCount(); err != nil { if count, err := w.blockBatchOrm.GetBatchCount(); err != nil {
return fmt.Errorf("failed to get batch count: %v", err) return fmt.Errorf("failed to get batch count: %v", err)
} else if count > 0 { } else if count > 0 {
log.Info("genesis already imported") log.Info("genesis already imported")
@@ -109,21 +119,25 @@ func (w *L2WatcherClient) initializeGenesis() error {
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String()) log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
blockTrace := &types.WrappedBlock{Header: genesis, Transactions: nil, WithdrawTrieRoot: common.Hash{}} blockTrace := &bridgeTypes.WrappedBlock{
batchData := types.NewGenesisBatchData(blockTrace) Header: genesis,
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
}
batchData := bridgeTypes.NewGenesisBatchData(blockTrace)
if err = AddBatchInfoToDB(w.orm, batchData); err != nil { if err = orm.AddBatchInfoToDB(w.db, batchData); err != nil {
log.Error("failed to add batch info to DB", "BatchHash", batchData.Hash(), "error", err) log.Error("failed to add batch info to DB", "BatchHash", batchData.Hash(), "error", err)
return err return err
} }
batchHash := batchData.Hash().Hex() batchHash := batchData.Hash().Hex()
if err = w.orm.UpdateProvingStatus(batchHash, types.ProvingTaskProved); err != nil { if err = w.blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskProved); err != nil {
return fmt.Errorf("failed to update genesis batch proving status: %v", err) return fmt.Errorf("failed to update genesis batch proving status: %v", err)
} }
if err = w.orm.UpdateRollupStatus(w.ctx, batchHash, types.RollupFinalized); err != nil { if err = w.blockBatchOrm.UpdateRollupStatus(w.ctx, batchHash, types.RollupFinalized); err != nil {
return fmt.Errorf("failed to update genesis batch rollup status: %v", err) return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
} }
@@ -139,7 +153,7 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(ctx context.Context, bloc
// Get newest block in DB. must have blocks at that time. // Get newest block in DB. must have blocks at that time.
// Don't use "block_trace" table "trace" column's BlockTrace.Number, // Don't use "block_trace" table "trace" column's BlockTrace.Number,
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped // because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
heightInDB, err := w.orm.GetL2BlocksLatestHeight() heightInDB, err := w.blockTrace.GetL2BlocksLatestHeight()
if err != nil { if err != nil {
log.Error("failed to GetL2BlocksLatestHeight", "err", err) log.Error("failed to GetL2BlocksLatestHeight", "err", err)
return return
@@ -168,11 +182,11 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(ctx context.Context, bloc
} }
} }
func txsToTxsData(txs geth_types.Transactions) []*geth_types.TransactionData { func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
txsData := make([]*geth_types.TransactionData, len(txs)) txsData := make([]*gethTypes.TransactionData, len(txs))
for i, tx := range txs { for i, tx := range txs {
v, r, s := tx.RawSignatureValues() v, r, s := tx.RawSignatureValues()
txsData[i] = &geth_types.TransactionData{ txsData[i] = &gethTypes.TransactionData{
Type: tx.Type(), Type: tx.Type(),
TxHash: tx.Hash().String(), TxHash: tx.Hash().String(),
Nonce: tx.Nonce(), Nonce: tx.Nonce(),
@@ -192,8 +206,7 @@ func txsToTxsData(txs geth_types.Transactions) []*geth_types.TransactionData {
} }
func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error { func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var blocks []*types.WrappedBlock var blocks []*bridgeTypes.WrappedBlock
for number := from; number <= to; number++ { for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number) log.Debug("retrieving block", "height", number)
block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number))) block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number)))
@@ -208,7 +221,7 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number) return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number)
} }
blocks = append(blocks, &types.WrappedBlock{ blocks = append(blocks, &bridgeTypes.WrappedBlock{
Header: block.Header(), Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()), Transactions: txsToTxsData(block.Transactions()),
WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot), WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot),
@@ -216,7 +229,7 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
} }
if len(blocks) > 0 { if len(blocks) > 0 {
if err := w.orm.InsertWrappedBlocks(blocks); err != nil { if err := w.blockTrace.InsertWrappedBlocks(blocks); err != nil {
return fmt.Errorf("failed to batch insert BlockTraces: %v", err) return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
} }
} }
@@ -257,10 +270,10 @@ func (w *L2WatcherClient) FetchContractEvent() {
Topics: make([][]common.Hash, 1), Topics: make([][]common.Hash, 1),
} }
query.Topics[0] = make([]common.Hash, 4) query.Topics[0] = make([]common.Hash, 4)
query.Topics[0][0] = bridge_abi.L2SentMessageEventSignature query.Topics[0][0] = bridgeAbi.L2SentMessageEventSignature
query.Topics[0][1] = bridge_abi.L2RelayedMessageEventSignature query.Topics[0][1] = bridgeAbi.L2RelayedMessageEventSignature
query.Topics[0][2] = bridge_abi.L2FailedRelayedMessageEventSignature query.Topics[0][2] = bridgeAbi.L2FailedRelayedMessageEventSignature
query.Topics[0][3] = bridge_abi.L2AppendMessageEventSignature query.Topics[0][3] = bridgeAbi.L2AppendMessageEventSignature
logs, err := w.FilterLogs(w.ctx, query) logs, err := w.FilterLogs(w.ctx, query)
if err != nil { if err != nil {
@@ -295,13 +308,13 @@ func (w *L2WatcherClient) FetchContractEvent() {
} else { } else {
msgStatus = types.MsgFailed msgStatus = types.MsgFailed
} }
if err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil { if err = w.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err) log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return return
} }
} }
if err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil { if err = w.l2MessageOrm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
log.Error("failed to save l2 messages", "err", err) log.Error("failed to save l2 messages", "err", err)
return return
} }
@@ -311,18 +324,18 @@ func (w *L2WatcherClient) FetchContractEvent() {
} }
} }
func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L2Message, []relayedMessage, error) { func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]orm.L2Message, []relayedMessage, error) {
// Need use contract abi to parse event Log // Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up // Can only be tested after we have our contracts set up
var l2Messages []*types.L2Message var l2Messages []orm.L2Message
var relayedMessages []relayedMessage var relayedMessages []relayedMessage
var lastAppendMsgHash common.Hash var lastAppendMsgHash common.Hash
var lastAppendMsgNonce uint64 var lastAppendMsgNonce uint64
for _, vLog := range logs { for _, vLog := range logs {
switch vLog.Topics[0] { switch vLog.Topics[0] {
case bridge_abi.L2SentMessageEventSignature: case bridgeAbi.L2SentMessageEventSignature:
event := bridge_abi.L2SentMessageEvent{} event := bridgeAbi.L2SentMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "SentMessage", vLog) err := utils.UnpackLog(w.messengerABI, &event, "SentMessage", vLog)
if err != nil { if err != nil {
log.Error("failed to unpack layer2 SentMessage event", "err", err) log.Error("failed to unpack layer2 SentMessage event", "err", err)
@@ -350,7 +363,7 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
return l2Messages, relayedMessages, errors.New(errMsg) return l2Messages, relayedMessages, errors.New(errMsg)
} }
l2Messages = append(l2Messages, &types.L2Message{ l2Messages = append(l2Messages, orm.L2Message{
Nonce: event.MessageNonce.Uint64(), Nonce: event.MessageNonce.Uint64(),
MsgHash: computedMsgHash.String(), MsgHash: computedMsgHash.String(),
Height: vLog.BlockNumber, Height: vLog.BlockNumber,
@@ -360,8 +373,8 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
Calldata: common.Bytes2Hex(event.Message), Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(), Layer2Hash: vLog.TxHash.Hex(),
}) })
case bridge_abi.L2RelayedMessageEventSignature: case bridgeAbi.L2RelayedMessageEventSignature:
event := bridge_abi.L2RelayedMessageEvent{} event := bridgeAbi.L2RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog) err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err) log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err)
@@ -373,8 +386,8 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
txHash: vLog.TxHash, txHash: vLog.TxHash,
isSuccessful: true, isSuccessful: true,
}) })
case bridge_abi.L2FailedRelayedMessageEventSignature: case bridgeAbi.L2FailedRelayedMessageEventSignature:
event := bridge_abi.L2FailedRelayedMessageEvent{} event := bridgeAbi.L2FailedRelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog) err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err) log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err)
@@ -386,8 +399,8 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.
txHash: vLog.TxHash, txHash: vLog.TxHash,
isSuccessful: false, isSuccessful: false,
}) })
case bridge_abi.L2AppendMessageEventSignature: case bridgeAbi.L2AppendMessageEventSignature:
event := bridge_abi.L2AppendMessageEvent{} event := bridgeAbi.L2AppendMessageEvent{}
err := utils.UnpackLog(w.messageQueueABI, &event, "AppendMessage", vLog) err := utils.UnpackLog(w.messageQueueABI, &event, "AppendMessage", vLog)
if err != nil { if err != nil {
log.Warn("Failed to unpack layer2 AppendMessage event", "err", err) log.Warn("Failed to unpack layer2 AppendMessage event", "err", err)

View File

@@ -4,7 +4,9 @@ import (
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"errors" "errors"
"gorm.io/gorm"
"math/big" "math/big"
"scroll-tech/bridge/internal/orm"
"strconv" "strconv"
"testing" "testing"
"time" "time"
@@ -13,7 +15,7 @@ import (
"github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind" "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey" "github.com/smartystreets/goconvey/convey"
@@ -21,42 +23,29 @@ import (
"scroll-tech/common/types" "scroll-tech/common/types"
bridge_abi "scroll-tech/bridge/abi" bridgeAbi "scroll-tech/bridge/internal/abi"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/mock_bridge" "scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"
"scroll-tech/bridge/utils"
cutils "scroll-tech/common/utils" cutils "scroll-tech/common/utils"
"scroll-tech/database"
"scroll-tech/database/migrate"
) )
func setupL2Watcher(t *testing.T) *L2WatcherClient { func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
db, err := database.NewOrmFactory(cfg.DBConfig) db := setupDB(t)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2cfg := cfg.L2Config l2cfg := cfg.L2Config
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db) watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
return watcher return watcher, db
} }
func testCreateNewWatcherAndStop(t *testing.T) { func testCreateNewWatcherAndStop(t *testing.T) {
// Create db handler and reset db. wc, db := setupL2Watcher(t)
l2db, err := database.NewOrmFactory(cfg.DBConfig) subCtx, cancel := context.WithCancel(context.Background())
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() { defer func() {
cancel() cancel()
l2db.Close() defer utils.CloseDB(db)
}() }()
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, l2db)
loopToFetchEvent(subCtx, wc) loopToFetchEvent(subCtx, wc)
l1cfg := cfg.L1Config l1cfg := cfg.L1Config
@@ -79,20 +68,13 @@ func testCreateNewWatcherAndStop(t *testing.T) {
} }
func testMonitorBridgeContract(t *testing.T) { func testMonitorBridgeContract(t *testing.T) {
// Create db handler and reset db. wc, db := setupL2Watcher(t)
db, err := database.NewOrmFactory(cfg.DBConfig) subCtx, cancel := context.WithCancel(context.Background())
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() { defer func() {
cancel() cancel()
db.Close() defer utils.CloseDB(db)
}() }()
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
loopToFetchEvent(subCtx, wc) loopToFetchEvent(subCtx, wc)
previousHeight, err := l2Cli.BlockNumber(context.Background()) previousHeight, err := l2Cli.BlockNumber(context.Background())
@@ -117,7 +99,7 @@ func testMonitorBridgeContract(t *testing.T) {
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit) tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err) assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx) receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil { if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed") t.Fatalf("Call failed")
} }
@@ -127,34 +109,30 @@ func testMonitorBridgeContract(t *testing.T) {
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit) tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err) assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx) receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil { if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed") t.Fatalf("Call failed")
} }
l2MessageOrm := orm.NewL2Message(db)
// check if we successfully stored events // check if we successfully stored events
assert.True(t, cutils.TryTimes(10, func() bool { assert.True(t, cutils.TryTimes(10, func() bool {
height, err := db.GetLayer2LatestWatchedHeight() height, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
return err == nil && height > int64(previousHeight) return err == nil && height > previousHeight
})) }))
// check l1 messages. // check l1 messages.
assert.True(t, cutils.TryTimes(10, func() bool { assert.True(t, cutils.TryTimes(10, func() bool {
msgs, err := db.GetL2Messages(map[string]interface{}{"status": types.MsgPending}) msgs, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"status": types.MsgPending}, nil, 0)
return err == nil && len(msgs) == 2 return err == nil && len(msgs) == 2
})) }))
} }
func testFetchMultipleSentMessageInOneBlock(t *testing.T) { func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
// Create db handler and reset db. _, db := setupL2Watcher(t)
db, err := database.NewOrmFactory(cfg.DBConfig) subCtx, cancel := context.WithCancel(context.Background())
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() { defer func() {
cancel() cancel()
db.Close() defer utils.CloseDB(db)
}() }()
previousHeight, err := l2Cli.BlockNumber(context.Background()) // shallow the global previousHeight previousHeight, err := l2Cli.BlockNumber(context.Background()) // shallow the global previousHeight
@@ -172,8 +150,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
// Call mock_bridge instance sendMessage to trigger emit events multiple times // Call mock_bridge instance sendMessage to trigger emit events multiple times
numTransactions := 4 numTransactions := 4
var tx *geth_types.Transaction var tx *gethTypes.Transaction
for i := 0; i < numTransactions; i++ { for i := 0; i < numTransactions; i++ {
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63") addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr) nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr)
@@ -188,7 +165,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
} }
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx) receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil { if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed") t.Fatalf("Call failed")
} }
@@ -204,28 +181,26 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit) tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err) assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx) receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil { if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed") t.Fatalf("Call failed")
} }
l2MessageOrm := orm.NewL2Message(db)
// check if we successfully stored events // check if we successfully stored events
assert.True(t, cutils.TryTimes(10, func() bool { assert.True(t, cutils.TryTimes(10, func() bool {
height, err := db.GetLayer2LatestWatchedHeight() height, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
return err == nil && height > int64(previousHeight) return err == nil && height > previousHeight
})) }))
assert.True(t, cutils.TryTimes(10, func() bool { assert.True(t, cutils.TryTimes(10, func() bool {
msgs, err := db.GetL2Messages(map[string]interface{}{"status": types.MsgPending}) msgs, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"status": types.MsgPending}, nil, 0)
return err == nil && len(msgs) == 5 return err == nil && len(msgs) == 5
})) }))
} }
func testFetchRunningMissingBlocks(t *testing.T) { func testFetchRunningMissingBlocks(t *testing.T) {
// Create db handler and reset db. _, db := setupL2Watcher(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer utils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0]) auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
@@ -235,6 +210,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx) address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err) assert.NoError(t, err)
blockTraceOrm := orm.NewBlockTrace(db)
ok := cutils.TryTimes(10, func() bool { ok := cutils.TryTimes(10, func() bool {
latestHeight, err := l2Cli.BlockNumber(context.Background()) latestHeight, err := l2Cli.BlockNumber(context.Background())
if err != nil { if err != nil {
@@ -242,13 +218,13 @@ func testFetchRunningMissingBlocks(t *testing.T) {
} }
wc := prepareWatcherClient(l2Cli, db, address) wc := prepareWatcherClient(l2Cli, db, address)
wc.TryFetchRunningMissingBlocks(context.Background(), latestHeight) wc.TryFetchRunningMissingBlocks(context.Background(), latestHeight)
fetchedHeight, err := db.GetL2BlocksLatestHeight() fetchedHeight, err := blockTraceOrm.GetL2BlocksLatestHeight()
return err == nil && uint64(fetchedHeight) == latestHeight return err == nil && fetchedHeight == latestHeight
}) })
assert.True(t, ok) assert.True(t, ok)
} }
func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *L2WatcherClient { func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB, contractAddr common.Address) *L2WatcherClient {
confirmations := rpc.LatestBlockNumber confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db) return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
} }
@@ -268,11 +244,13 @@ func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
} }
func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
watcher := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
logs := []geth_types.Log{ defer utils.CloseDB(db)
logs := []gethTypes.Log{
{ {
Topics: []common.Hash{ Topics: []common.Hash{
bridge_abi.L2SentMessageEventSignature, bridgeAbi.L2SentMessageEventSignature,
}, },
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
@@ -281,7 +259,7 @@ func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
convey.Convey("unpack SentMessage log failure", t, func() { convey.Convey("unpack SentMessage log failure", t, func() {
targetErr := errors.New("UnpackLog SentMessage failure") targetErr := errors.New("UnpackLog SentMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr return targetErr
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
@@ -298,8 +276,8 @@ func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
tmpValue := big.NewInt(1000) tmpValue := big.NewInt(1000)
tmpMessageNonce := big.NewInt(100) tmpMessageNonce := big.NewInt(100)
tmpMessage := []byte("test for L2SentMessageEventSignature") tmpMessage := []byte("test for L2SentMessageEventSignature")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridge_abi.L2SentMessageEvent) tmpOut := out.(*bridgeAbi.L2SentMessageEvent)
tmpOut.Sender = tmpSendAddr tmpOut.Sender = tmpSendAddr
tmpOut.Value = tmpValue tmpOut.Value = tmpValue
tmpOut.Target = tmpTargetAddr tmpOut.Target = tmpTargetAddr
@@ -317,10 +295,12 @@ func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
} }
func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
watcher := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
logs := []geth_types.Log{ defer utils.CloseDB(db)
logs := []gethTypes.Log{
{ {
Topics: []common.Hash{bridge_abi.L2RelayedMessageEventSignature}, Topics: []common.Hash{bridgeAbi.L2RelayedMessageEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -328,7 +308,7 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
convey.Convey("unpack RelayedMessage log failure", t, func() { convey.Convey("unpack RelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog RelayedMessage failure") targetErr := errors.New("UnpackLog RelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr return targetErr
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
@@ -341,8 +321,8 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
convey.Convey("L2RelayedMessageEventSignature success", t, func() { convey.Convey("L2RelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridge_abi.L2RelayedMessageEvent) tmpOut := out.(*bridgeAbi.L2RelayedMessageEvent)
tmpOut.MessageHash = msgHash tmpOut.MessageHash = msgHash
return nil return nil
}) })
@@ -357,10 +337,12 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
} }
func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) {
watcher := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
logs := []geth_types.Log{ defer utils.CloseDB(db)
logs := []gethTypes.Log{
{ {
Topics: []common.Hash{bridge_abi.L2FailedRelayedMessageEventSignature}, Topics: []common.Hash{bridgeAbi.L2FailedRelayedMessageEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -368,7 +350,7 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
convey.Convey("unpack FailedRelayedMessage log failure", t, func() { convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog FailedRelayedMessage failure") targetErr := errors.New("UnpackLog FailedRelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr return targetErr
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
@@ -381,8 +363,8 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
convey.Convey("L2FailedRelayedMessageEventSignature success", t, func() { convey.Convey("L2FailedRelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridge_abi.L2FailedRelayedMessageEvent) tmpOut := out.(*bridgeAbi.L2FailedRelayedMessageEvent)
tmpOut.MessageHash = msgHash tmpOut.MessageHash = msgHash
return nil return nil
}) })
@@ -397,10 +379,11 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
} }
func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
watcher := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
logs := []geth_types.Log{ defer utils.CloseDB(db)
logs := []gethTypes.Log{
{ {
Topics: []common.Hash{bridge_abi.L2AppendMessageEventSignature}, Topics: []common.Hash{bridgeAbi.L2AppendMessageEventSignature},
BlockNumber: 100, BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
}, },
@@ -408,7 +391,7 @@ func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
convey.Convey("unpack AppendMessage log failure", t, func() { convey.Convey("unpack AppendMessage log failure", t, func() {
targetErr := errors.New("UnpackLog AppendMessage failure") targetErr := errors.New("UnpackLog AppendMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr return targetErr
}) })
defer patchGuard.Reset() defer patchGuard.Reset()
@@ -421,8 +404,8 @@ func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
convey.Convey("L2AppendMessageEventSignature success", t, func() { convey.Convey("L2AppendMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridge_abi.L2AppendMessageEvent) tmpOut := out.(*bridgeAbi.L2AppendMessageEvent)
tmpOut.MessageHash = msgHash tmpOut.MessageHash = msgHash
tmpOut.Index = big.NewInt(100) tmpOut.Index = big.NewInt(100)
return nil return nil

View File

@@ -7,11 +7,14 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
var ( var (
@@ -24,8 +27,8 @@ var (
l2Cli *ethclient.Client l2Cli *ethclient.Client
// block trace // block trace
wrappedBlock1 *types.WrappedBlock wrappedBlock1 *bridgeTypes.WrappedBlock
wrappedBlock2 *types.WrappedBlock wrappedBlock2 *bridgeTypes.WrappedBlock
) )
func setupEnv(t *testing.T) (err error) { func setupEnv(t *testing.T) (err error) {
@@ -37,34 +40,48 @@ func setupEnv(t *testing.T) (err error) {
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint() cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint() cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = base.DBConfig cfg.DBConfig = &bridgeTypes.DBConfig{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
// Create l2geth client. // Create l2geth client.
l2Cli, err = base.L2Client() l2Cli, err = base.L2Client()
assert.NoError(t, err) assert.NoError(t, err)
templateBlockTrace1, err := os.ReadFile("../../common/testdata/blockTrace_02.json") templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
if err != nil { if err != nil {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock1 = &types.WrappedBlock{} wrappedBlock1 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil { if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err return err
} }
templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json") templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
if err != nil { if err != nil {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock2 = &types.WrappedBlock{} wrappedBlock2 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil { if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err return err
} }
return err return err
} }
func setupDB(t *testing.T) *gorm.DB {
db, err := utils.InitDB(cfg.DBConfig)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
return db
}
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
base = docker.NewDockerApp() base = docker.NewDockerApp()

View File

@@ -0,0 +1,300 @@
package orm
import (
	"context"
	"errors"
	"time"

	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/types"

	bridgeTypes "scroll-tech/bridge/internal/types"
)
// BlockBatch is the GORM model for the "block_batch" table. It carries the
// batch boundaries (start/end block), aggregate counters, proving and rollup
// state machines with their timestamps, and the related transaction hashes.
//
// NOTE(review): the struct tags use the bare form `gorm:"hash"` instead of
// `gorm:"column:hash"`. GORM's default snake_case naming appears to map each
// field to the same column anyway — confirm, or switch to the `column:` form.
type BlockBatch struct {
	db *gorm.DB `gorm:"-"` // ORM handle injected by NewBlockBatch; not a column

	Hash                string    `json:"hash" gorm:"hash"`
	Index               uint64    `json:"index" gorm:"index"`
	StartBlockNumber    uint64    `json:"start_block_number" gorm:"start_block_number"`
	StartBlockHash      string    `json:"start_block_hash" gorm:"start_block_hash"`
	EndBlockNumber      uint64    `json:"end_block_number" gorm:"end_block_number"`
	EndBlockHash        string    `json:"end_block_hash" gorm:"end_block_hash"`
	ParentHash          string    `json:"parent_hash" gorm:"parent_hash"`
	StateRoot           string    `json:"state_root" gorm:"state_root"`
	TotalTxNum          uint64    `json:"total_tx_num" gorm:"total_tx_num"`
	TotalL1TxNum        uint64    `json:"total_l1_tx_num" gorm:"total_l1_tx_num"`
	TotalL2Gas          uint64    `json:"total_l2_gas" gorm:"total_l2_gas"`
	ProvingStatus       int       `json:"proving_status" gorm:"proving_status"` // stores types.ProvingStatus values
	Proof               string    `json:"proof" gorm:"proof"`
	InstanceCommitments string    `json:"instance_commitments" gorm:"instance_commitments"`
	ProofTimeSec        uint64    `json:"proof_time_sec" gorm:"proof_time_sec"`
	RollupStatus        int       `json:"rollup_status" gorm:"rollup_status"` // stores types.RollupStatus values
	CommitTxHash        string    `json:"commit_tx_hash" gorm:"commit_tx_hash"`
	OracleStatus        int       `json:"oracle_status" gorm:"oracle_status"`
	OracleTxHash        string    `json:"oracle_tx_hash" gorm:"oracle_tx_hash"`
	FinalizeTxHash      string    `json:"finalize_tx_hash" gorm:"finalize_tx_hash"`
	CreatedAt           time.Time `json:"created_at" gorm:"created_at"`
	ProverAssignedAt    time.Time `json:"prover_assigned_at" gorm:"prover_assigned_at"`
	ProvedAt            time.Time `json:"proved_at" gorm:"proved_at"`
	CommittedAt         time.Time `json:"committed_at" gorm:"committed_at"`
	FinalizedAt         time.Time `json:"finalized_at" gorm:"finalized_at"`
}
// NewBlockBatch returns a BlockBatch ORM accessor bound to the given db handle.
func NewBlockBatch(db *gorm.DB) *BlockBatch {
	o := new(BlockBatch)
	o.db = db
	return o
}
// TableName returns the database table backing the BlockBatch model
// ("block_batch"); GORM calls this to resolve the table name.
func (*BlockBatch) TableName() string {
	return "block_batch"
}
// GetBatchCount returns the total number of rows in the block_batch table.
func (o *BlockBatch) GetBatchCount() (int64, error) {
	var total int64
	err := o.db.Model(&BlockBatch{}).Count(&total).Error
	if err != nil {
		return 0, err
	}
	return total, nil
}
// GetBlockBatches returns block batches matching the given equality
// conditions in fields, ordered by each entry of orderByList, limited to
// limit rows (limit == 0 means no limit).
func (o *BlockBatch) GetBlockBatches(fields map[string]interface{}, orderByList []string, limit int) ([]BlockBatch, error) {
	var blockBatches []BlockBatch
	db := o.db
	for key, value := range fields {
		// GORM chain methods return a new *gorm.DB; the result must be
		// re-assigned or the condition is silently dropped (the original
		// code discarded it, returning unfiltered rows).
		db = db.Where(key, value)
	}
	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}
	if limit != 0 {
		db = db.Limit(limit)
	}
	if err := db.Find(&blockBatches).Error; err != nil {
		return nil, err
	}
	return blockBatches, nil
}
// GetVerifiedProofAndInstanceCommitmentsByHash returns the proof and instance
// commitments of the batch with the given hash, restricted to batches whose
// proving status is verified.
func (o *BlockBatch) GetVerifiedProofAndInstanceCommitmentsByHash(hash string) ([]byte, []byte, error) {
	var blockBatch BlockBatch
	// The original chain had Where("proving_status") with no value, so the
	// "verified" restriction promised by the function name was never applied.
	err := o.db.Select("proof, instance_commitments").
		Where("hash", hash).
		Where("proving_status", types.ProvingTaskVerified).
		Find(&blockBatch).Error
	if err != nil {
		return nil, nil, err
	}
	return []byte(blockBatch.Proof), []byte(blockBatch.InstanceCommitments), nil
}
// GetPendingBatches returns the hashes of up to limit batches whose rollup
// status is pending, ordered by batch index ascending.
func (o *BlockBatch) GetPendingBatches(limit int) ([]string, error) {
	var blockBatches []BlockBatch
	// The original chain never called Find, so the query was never executed
	// and an empty slice was always returned.
	err := o.db.Select("hash").
		Where("rollup_status", types.RollupPending).
		Order("index ASC").
		Limit(limit).
		Find(&blockBatches).Error
	if err != nil {
		return nil, err
	}
	var hashes []string
	for _, v := range blockBatches {
		hashes = append(hashes, v.Hash)
	}
	return hashes, nil
}
// GetLatestBatch returns the batch whose index is the maximum in the table.
// Need to optimize the query.
func (o *BlockBatch) GetLatestBatch() (*BlockBatch, error) {
	var latest BlockBatch
	maxIndex := o.db.Table("block_batch").Select("max(index)")
	if err := o.db.Where("index", maxIndex).Find(&latest).Error; err != nil {
		return nil, err
	}
	return &latest, nil
}
// GetLatestBatchByRollupStatus returns the highest-index batch whose rollup
// status is one of rollupStatuses.
func (o *BlockBatch) GetLatestBatchByRollupStatus(rollupStatuses []types.RollupStatus) (*BlockBatch, error) {
	var latest BlockBatch
	maxIndex := o.db.Table("block_batch").
		Select("max(index)").
		Where("rollup_status IN (?)", rollupStatuses)
	if err := o.db.Where("index", maxIndex).Find(&latest).Error; err != nil {
		return nil, err
	}
	return &latest, nil
}
// GetCommittedBatches returns the hashes of up to limit batches whose rollup
// status is committed, ordered by batch index ascending.
func (o *BlockBatch) GetCommittedBatches(limit int) ([]string, error) {
	var blockBatches []BlockBatch
	// The original chain never called Find, so the query was never executed
	// and an empty slice was always returned.
	err := o.db.Select("hash").
		Where("rollup_status", types.RollupCommitted).
		Order("index ASC").
		Limit(limit).
		Find(&blockBatches).Error
	if err != nil {
		return nil, err
	}
	var hashes []string
	for _, v := range blockBatches {
		hashes = append(hashes, v.Hash)
	}
	return hashes, nil
}
// GetRollupStatusByHashList returns the rollup statuses of the batches whose
// hash is in hashes; an empty input yields (nil, nil) without querying.
//
// NOTE(review): statuses follow database row order, not the order of the
// input hashes, and hashes with no matching row are silently absent —
// confirm callers do not rely on positional correspondence.
func (o *BlockBatch) GetRollupStatusByHashList(hashes []string) ([]types.RollupStatus, error) {
	if len(hashes) == 0 {
		return nil, nil
	}
	var rows []BlockBatch
	if err := o.db.Select("hash, rollup_status").Where("hash IN (?)", hashes).Find(&rows).Error; err != nil {
		return nil, err
	}
	var statuses []types.RollupStatus
	for i := range rows {
		statuses = append(statuses, types.RollupStatus(rows[i].RollupStatus))
	}
	return statuses, nil
}
// InsertBlockBatchByBatchData inserts one block_batch row derived from
// batchData. When tx is non-nil the insert runs inside that transaction,
// otherwise the default connection is used. Returns the rows-affected count.
func (o *BlockBatch) InsertBlockBatchByBatchData(tx *gorm.DB, batchData *bridgeTypes.BatchData) (int64, error) {
	db := o.db
	if tx != nil {
		db = tx
	}
	blocks := batchData.Batch.Blocks
	lastBlock := blocks[len(blocks)-1]
	record := BlockBatch{
		Hash:             batchData.Hash().Hex(),
		Index:            batchData.Batch.BatchIndex,
		StartBlockNumber: blocks[0].BlockNumber,
		StartBlockHash:   blocks[0].BlockHash.Hex(),
		EndBlockNumber:   lastBlock.BlockNumber,
		EndBlockHash:     lastBlock.BlockHash.Hex(),
		ParentHash:       batchData.Batch.ParentBatchHash.Hex(),
		StateRoot:        batchData.Batch.NewStateRoot.Hex(),
		TotalTxNum:       batchData.TotalTxNum,
		TotalL1TxNum:     batchData.TotalL1TxNum,
		TotalL2Gas:       batchData.TotalL2Gas,
		CreatedAt:        time.Now(),
	}
	result := db.Create(&record)
	if result.Error != nil {
		log.Error("failed to insert block batch by batchData", "err", result.Error)
		return 0, result.Error
	}
	return result.RowsAffected, nil
}
// UpdateProvingStatus sets the proving status of the batch identified by hash,
// also stamping prover_assigned_at / proved_at for the relevant transitions.
func (o *BlockBatch) UpdateProvingStatus(hash string, status types.ProvingStatus) error {
	updates := map[string]interface{}{"proving_status": status}
	switch status {
	case types.ProvingTaskAssigned:
		updates["prover_assigned_at"] = time.Now()
	case types.ProvingTaskProved, types.ProvingTaskVerified:
		updates["proved_at"] = time.Now()
	}
	return o.db.Model(&BlockBatch{}).Where("hash", hash).Updates(updates).Error
}
// UpdateRollupStatus sets the rollup status of the batch identified by hash,
// also stamping committed_at / finalized_at for the relevant transitions.
func (o *BlockBatch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error {
	updates := map[string]interface{}{"rollup_status": status}
	switch status {
	case types.RollupCommitted:
		updates["committed_at"] = time.Now()
	case types.RollupFinalized:
		updates["finalized_at"] = time.Now()
	}
	return o.db.Model(&BlockBatch{}).WithContext(ctx).Where("hash", hash).Updates(updates).Error
}
// UpdateSkippedBatches marks committed batches whose proving was skipped or
// failed with RollupFinalizationSkipped, returning the number of rows changed.
func (o *BlockBatch) UpdateSkippedBatches() (int64, error) {
	skippedStatuses := []interface{}{
		types.ProvingTaskSkipped,
		types.ProvingTaskFailed,
	}
	result := o.db.Model(&BlockBatch{}).
		Where("rollup_status", types.RollupCommitted).
		Where("proving_status IN (?)", skippedStatuses).
		Update("rollup_status", types.RollupFinalizationSkipped)
	if result.Error != nil {
		return 0, result.Error
	}
	return result.RowsAffected, nil
}
// UpdateCommitTxHashAndRollupStatus records the commit transaction hash and
// the new rollup status; committed_at is stamped when status is RollupCommitted.
func (o *BlockBatch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error {
	updates := map[string]interface{}{
		"commit_tx_hash": commitTxHash,
		"rollup_status":  status,
	}
	if status == types.RollupCommitted {
		updates["committed_at"] = time.Now()
	}
	return o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updates).Error
}
// UpdateFinalizeTxHashAndRollupStatus records the finalize transaction hash and
// the new rollup status; finalized_at is stamped when status is RollupFinalized.
func (o *BlockBatch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error {
	updates := map[string]interface{}{
		"finalize_tx_hash": finalizeTxHash,
		"rollup_status":    status,
	}
	if status == types.RollupFinalized {
		updates["finalized_at"] = time.Now()
	}
	return o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updates).Error
}
// UpdateL2GasOracleStatusAndOracleTxHash records the gas-oracle status and the
// oracle transaction hash for the batch identified by hash.
func (o *BlockBatch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
	updates := map[string]interface{}{
		"oracle_status":  status,
		"oracle_tx_hash": txHash,
	}
	return o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updates).Error
}
// UpdateProofByHash stores the proof, instance commitments and proving time
// for the batch identified by hash.
// for unit test
func (o *BlockBatch) UpdateProofByHash(ctx context.Context, hash string, proof, instanceCommitments []byte, proofTimeSec uint64) error {
	updates := map[string]interface{}{
		"proof":                proof,
		"instance_commitments": instanceCommitments,
		"proof_time_sec":       proofTimeSec,
	}
	err := o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updates).Error
	if err != nil {
		log.Error("failed to update proof", "err", err)
	}
	return err
}

View File

@@ -0,0 +1,151 @@
package orm
import (
"encoding/json"
"errors"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/bridge/internal/types"
)
// BlockTrace is the ORM model for the block_trace table: one row per L2 block,
// with the JSON-encoded wrapped block stored in Trace.
type BlockTrace struct {
	db *gorm.DB `gorm:"-"` // connection handle, not a column

	Number     uint64 `json:"number" db:"number"`
	Hash       string `json:"hash" db:"hash"`
	ParentHash string `json:"parent_hash" db:"parent_hash"`
	// NOTE(review): the tag `gorm:"trace"` looks malformed — gorm expects
	// `gorm:"column:trace"`; the default naming maps Trace to "trace" anyway,
	// so behavior is likely unaffected. Confirm and normalize the tags.
	Trace          string `json:"trace" gorm:"trace"`
	BatchHash      string `json:"batch_hash" db:"batch_hash"` // NULL until the block is assigned to a batch
	TxNum          uint64 `json:"tx_num" db:"tx_num"`
	GasUsed        uint64 `json:"gas_used" db:"gas_used"`
	BlockTimestamp uint64 `json:"block_timestamp" db:"block_timestamp"`
}
// NewBlockTrace returns a BlockTrace ORM handle bound to db.
func NewBlockTrace(db *gorm.DB) *BlockTrace {
	orm := &BlockTrace{db: db}
	return orm
}
// TableName defines the BlockTrace table name.
func (*BlockTrace) TableName() string {
	return "block_trace"
}
// GetL2BlocksLatestHeight returns the highest block number stored in the
// block_trace table, or 0 when the table is empty.
func (o *BlockTrace) GetL2BlocksLatestHeight() (uint64, error) {
	// Bug fix: the aggregate was previously selected into a BlockTrace struct,
	// but the result column ("COALESCE(MAX(number), -1)") maps to no field, so
	// Number was never populated (and -1 cannot scan into a uint64 anyway).
	// Scan the raw row instead.
	var latest int64
	row := o.db.Model(&BlockTrace{}).Select("COALESCE(MAX(number), -1)").Row()
	if err := row.Scan(&latest); err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return 0, nil
		}
		return 0, err
	}
	if latest < 0 {
		// Empty table; preserve the original contract of returning 0.
		return 0, nil
	}
	return uint64(latest), nil
}
// GetL2WrappedBlocks decodes and returns the wrapped blocks whose rows match
// the given column=value filters.
func (o *BlockTrace) GetL2WrappedBlocks(fields map[string]interface{}) ([]*types.WrappedBlock, error) {
	var blockTraces []BlockTrace
	db := o.db.Select("trace")
	for key, value := range fields {
		db = db.Where(key, value)
	}
	if err := db.Find(&blockTraces).Error; err != nil {
		return nil, err
	}
	wrappedBlocks := make([]*types.WrappedBlock, 0, len(blockTraces))
	for _, v := range blockTraces {
		var wrappedBlock types.WrappedBlock
		// Bug fix: a corrupted trace previously aborted the loop with `break`,
		// silently returning a truncated slice and a nil error. Surface the
		// decode failure to the caller instead.
		if err := json.Unmarshal([]byte(v.Trace), &wrappedBlock); err != nil {
			return nil, err
		}
		wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
	}
	return wrappedBlocks, nil
}
// GetL2BlockInfos returns block metadata rows (everything except the trace)
// matching the column=value filters, ordered by orderByList and capped at
// limit (0 means no cap).
func (o *BlockTrace) GetL2BlockInfos(fields map[string]interface{}, orderByList []string, limit int) ([]BlockTrace, error) {
	db := o.db.Select("number, hash, parent_hash, batch_hash, tx_num, gas_used, block_timestamp")
	for key, value := range fields {
		db = db.Where(key, value)
	}
	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}
	if limit != 0 {
		db = db.Limit(limit)
	}
	var blockTraces []BlockTrace
	if err := db.Find(&blockTraces).Error; err != nil {
		return nil, err
	}
	return blockTraces, nil
}
// GetUnbatchedL2Blocks returns block metadata rows whose batch_hash is still
// NULL, filtered by the column=value map, ordered by orderByList and capped at
// limit (0 means no cap).
func (o *BlockTrace) GetUnbatchedL2Blocks(fields map[string]interface{}, orderByList []string, limit int) ([]BlockTrace, error) {
	var unbatchedBlockTraces []BlockTrace
	db := o.db.Select("number, hash, parent_hash, batch_hash, tx_num, gas_used, block_timestamp").Where("batch_hash is NULL")
	for key, value := range fields {
		db = db.Where(key, value)
	}
	// Bug fix: orderByList and limit were accepted but silently ignored,
	// unlike the sibling GetL2BlockInfos.
	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}
	if limit != 0 {
		db = db.Limit(limit)
	}
	if err := db.Find(&unbatchedBlockTraces).Error; err != nil {
		return nil, err
	}
	return unbatchedBlockTraces, nil
}
// InsertWrappedBlocks persists one block_trace row per wrapped block.
func (o *BlockTrace) InsertWrappedBlocks(blocks []*types.WrappedBlock) error {
	if len(blocks) == 0 {
		return nil
	}
	// Bug fix: the slice was previously created with make([]BlockTrace,
	// len(blocks)) and then appended to, so len(blocks) zero-value rows were
	// inserted ahead of the real ones. Allocate capacity only.
	blockTraces := make([]BlockTrace, 0, len(blocks))
	for _, block := range blocks {
		hash := block.Header.Hash().String()
		data, err := json.Marshal(block)
		if err != nil {
			log.Error("failed to marshal block", "hash", hash, "err", err)
			return err
		}
		blockTraces = append(blockTraces, BlockTrace{
			Number:         block.Header.Number.Uint64(),
			Hash:           hash,
			ParentHash:     block.Header.ParentHash.String(),
			Trace:          string(data),
			TxNum:          uint64(len(block.Transactions)),
			GasUsed:        block.Header.GasUsed,
			BlockTimestamp: block.Header.Time,
		})
	}
	if err := o.db.Create(&blockTraces).Error; err != nil {
		log.Error("failed to insert blockTraces", "err", err)
		return err
	}
	return nil
}
// UpdateBatchHashForL2Blocks sets batch_hash on every block_trace row whose
// number is in numbers, inside tx when one is supplied.
func (o *BlockTrace) UpdateBatchHashForL2Blocks(tx *gorm.DB, numbers []uint64, batchHash string) error {
	db := o.db
	if tx != nil {
		db = tx
	}
	return db.Model(&BlockTrace{}).Where("number IN (?)", numbers).Update("batch_hash", batchHash).Error
}

View File

@@ -0,0 +1,38 @@
package orm
import (
"errors"
"gorm.io/gorm"
bridgeTypes "scroll-tech/bridge/internal/types"
)
// AddBatchInfoToDB inserts the batch information to the BlockBatch table and
// updates the batch_hash in all blocks included in the batch, atomically
// inside one database transaction.
func AddBatchInfoToDB(db *gorm.DB, batchData *bridgeTypes.BatchData) error {
	blockBatchOrm := NewBlockBatch(db)
	blockTraceOrm := NewBlockTrace(db)
	return db.Transaction(func(tx *gorm.DB) error {
		rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData)
		if dbTxErr != nil {
			return dbTxErr
		}
		// Exactly one row must have been written for the batch.
		if rowsAffected != 1 {
			return errors.New("the InsertBlockBatchByBatchData affected row is not 1")
		}
		blockIDs := make([]uint64, len(batchData.Batch.Blocks))
		for i, block := range batchData.Batch.Blocks {
			blockIDs[i] = block.BlockNumber
		}
		return blockTraceOrm.UpdateBatchHashForL2Blocks(tx, blockIDs, batchData.Hash().Hex())
	})
}

View File

@@ -0,0 +1,83 @@
package orm
import (
"context"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L1Block is the ORM model for the l1_block table: one row per imported L1
// block header, with its import/oracle bookkeeping.
type L1Block struct {
	db *gorm.DB `gorm:"-"` // connection handle, not a column

	Number          uint64 `json:"number" gorm:"number"`
	Hash            string `json:"hash" gorm:"hash"`
	HeaderRLP       string `json:"header_rlp" gorm:"header_rlp"`
	BaseFee         uint64 `json:"base_fee" gorm:"base_fee"`
	BlockStatus     int    `json:"block_status" gorm:"block_status"`
	GasOracleStatus int    `json:"oracle_status" gorm:"oracle_status"`
	ImportTxHash    string `json:"import_tx_hash" gorm:"import_tx_hash"`
	OracleTxHash    string `json:"oracle_tx_hash" gorm:"oracle_tx_hash"`
}
// NewL1Block returns an L1Block ORM handle bound to db.
func NewL1Block(db *gorm.DB) *L1Block {
	orm := &L1Block{db: db}
	return orm
}
// TableName defines the L1Block table name.
func (*L1Block) TableName() string {
	return "l1_block"
}
// GetLatestL1BlockHeight returns the highest L1 block number stored in the
// l1_block table, or 0 when the table is empty.
func (l *L1Block) GetLatestL1BlockHeight() (uint64, error) {
	// Bug fix: the aggregate was previously selected into an L1Block struct,
	// but the result column ("COALESCE(MAX(number), 0)") maps to no field, so
	// Number was never populated. Scan the raw row instead.
	var number uint64
	row := l.db.Model(&L1Block{}).Select("COALESCE(MAX(number), 0)").Row()
	if err := row.Scan(&number); err != nil {
		return 0, err
	}
	return number, nil
}
// GetL1BlockInfos returns l1_block rows matching the column=value filters,
// ordered by block number ascending.
func (l *L1Block) GetL1BlockInfos(fields map[string]interface{}) ([]L1Block, error) {
	db := l.db.Select("number, hash, header_rlp, base_fee, block_status, oracle_status, import_tx_hash, oracle_tx_hash")
	for key, value := range fields {
		db = db.Where(key, value)
	}
	db = db.Order("number ASC")
	var l1Blocks []L1Block
	if err := db.Find(&l1Blocks).Error; err != nil {
		return nil, err
	}
	return l1Blocks, nil
}
// InsertL1Blocks batch-inserts the given l1_block rows; a no-op for an empty
// slice.
func (l *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {
	if len(blocks) == 0 {
		return nil
	}
	if err := l.db.WithContext(ctx).Create(&blocks).Error; err != nil {
		log.Error("failed to insert L1 Blocks", "err", err)
		return err
	}
	return nil
}
// UpdateL1GasOracleStatusAndOracleTxHash records the gas-oracle status and the
// oracle transaction hash for the L1 block identified by blockHash.
func (l *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error {
	updates := map[string]interface{}{
		"oracle_status":  status,
		"oracle_tx_hash": txHash,
	}
	return l.db.WithContext(ctx).Model(&L1Block{}).Where("hash", blockHash).Updates(updates).Error
}

View File

@@ -0,0 +1,127 @@
package orm
import (
"context"
"errors"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L1Message is the ORM model of a stored layer1 bridge message (one row per
// L1->L2 message, keyed by queue index / message hash).
type L1Message struct {
	db *gorm.DB `gorm:"-"` // connection handle, not a column

	QueueIndex uint64 `json:"queue_index" gorm:"queue_index"`
	MsgHash    string `json:"msg_hash" gorm:"msg_hash"`
	Height     uint64 `json:"height" gorm:"height"` // L1 block height the message was observed at
	GasLimit   uint64 `json:"gas_limit" gorm:"gas_limit"`
	Sender     string `json:"sender" gorm:"sender"`
	Target     string `json:"target" gorm:"target"`
	Value      string `json:"value" gorm:"value"`
	Calldata   string `json:"calldata" gorm:"calldata"`
	Layer1Hash string `json:"layer1_hash" gorm:"layer1_hash"`
	Layer2Hash string `json:"layer2_hash" gorm:"layer2_hash"` // relay tx hash on L2, empty until relayed
	Status     int    `json:"status" gorm:"status"`

	CreatedTime time.Time `json:"created_time" gorm:"created_time"`
	UpdatedTime time.Time `json:"updated_time" gorm:"updated_time"`
}
// NewL1Message returns an L1Message ORM handle bound to db.
func NewL1Message(db *gorm.DB) *L1Message {
	orm := &L1Message{db: db}
	return orm
}
// TableName defines the L1Message table name.
func (*L1Message) TableName() string {
	// Bug fix: this previously returned "l2_message", but the L1Message
	// columns (queue_index, gas_limit, layer1_hash NOT NULL) match the
	// l1_message migration — the L1/L2 table names were swapped.
	return "l1_message"
}
// GetLayer1LatestWatchedHeight returns the latest L1 height stored in the
// table, or 0 when the table is empty.
// @note It's not correct, since we may don't have message in some blocks.
// But it will only be called at start, some redundancy is acceptable.
func (m *L1Message) GetLayer1LatestWatchedHeight() (uint64, error) {
	// Bug fixes: (1) the previous error check was inverted — it returned the
	// error only when it WAS ErrRecordNotFound and swallowed every real error;
	// (2) the aggregate column ("MAX(height)") maps to no struct field, so
	// Height was never populated. Scan the raw row instead.
	var height uint64
	row := m.db.Model(&L1Message{}).Select("COALESCE(MAX(height), 0)").Row()
	if err := row.Scan(&height); err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return 0, nil
		}
		return 0, err
	}
	return height, nil
}
// GetL1MessagesByStatus returns up to limit messages with the given status,
// ordered by queue index ascending.
func (m *L1Message) GetL1MessagesByStatus(status types.MsgStatus, limit uint64) ([]L1Message, error) {
	const selectFields = "queue_index, msg_hash, height, sender, target, value, calldata, layer1_hash, status"
	var msgs []L1Message
	if err := m.db.Select(selectFields).Where("status", status).Order("queue_index ASC").Limit(int(limit)).Find(&msgs).Error; err != nil {
		return nil, err
	}
	return msgs, nil
}
// GetL1MessageByQueueIndex fetches a single message by queue_index.
// for unit test
func (m *L1Message) GetL1MessageByQueueIndex(queueIndex uint64) (*L1Message, error) {
	const selectFields = "queue_index, msg_hash, height, sender, target, value, calldata, layer1_hash, layer2_hash, status"
	var msg L1Message
	if err := m.db.Select(selectFields).Where("queue_index", queueIndex).First(&msg).Error; err != nil {
		return nil, err
	}
	return &msg, nil
}
// GetL1MessageByMsgHash fetches a single message by msg_hash.
// for unit test
func (m *L1Message) GetL1MessageByMsgHash(msgHash string) (*L1Message, error) {
	const selectFields = "queue_index, msg_hash, height, sender, target, value, gas_limit, calldata, layer1_hash, status"
	var msg L1Message
	if err := m.db.Select(selectFields).Where("msg_hash", msgHash).First(&msg).Error; err != nil {
		return nil, err
	}
	return &msg, nil
}
// SaveL1Messages batch-inserts the given layer1 messages; a no-op for an empty
// slice. On failure the queue indices and heights of the whole batch are
// logged to aid debugging.
func (m *L1Message) SaveL1Messages(ctx context.Context, messages []*L1Message) error {
	if len(messages) == 0 {
		return nil
	}
	if err := m.db.WithContext(ctx).Create(&messages).Error; err != nil {
		queueIndices := make([]uint64, 0, len(messages))
		heights := make([]uint64, 0, len(messages))
		for _, msg := range messages {
			queueIndices = append(queueIndices, msg.QueueIndex)
			heights = append(heights, msg.Height)
		}
		log.Error("failed to insert l1Messages", "queueIndices", queueIndices, "heights", heights, "err", err)
		return err
	}
	return nil
}
// UpdateLayer1Status updates the message status, given the message hash.
func (m *L1Message) UpdateLayer1Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
	return m.db.Model(&L1Message{}).WithContext(ctx).Where("msg_hash", msgHash).Update("status", status).Error
}
// UpdateLayer1StatusAndLayer2Hash updates the message status and the layer2
// relay transaction hash, given the message hash.
func (m *L1Message) UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer2Hash string) error {
	updates := map[string]interface{}{
		"status":      status,
		"layer2_hash": layer2Hash,
	}
	return m.db.Model(&L1Message{}).WithContext(ctx).Where("msg_hash", msgHash).Updates(updates).Error
}

View File

@@ -0,0 +1,128 @@
package orm
import (
"context"
"errors"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L2Message is the ORM model of a stored layer2 bridge message (one row per
// L2->L1 message, keyed by nonce / message hash).
type L2Message struct {
	db *gorm.DB `gorm:"-"` // connection handle, not a column

	Nonce      uint64 `json:"nonce" gorm:"nonce"`
	MsgHash    string `json:"msg_hash" gorm:"msg_hash"`
	Height     uint64 `json:"height" gorm:"height"` // L2 block height the message was observed at
	Sender     string `json:"sender" gorm:"sender"`
	Value      string `json:"value" gorm:"value"`
	Target     string `json:"target" gorm:"target"`
	Calldata   string `json:"calldata" gorm:"calldata"`
	Layer2Hash string `json:"layer2_hash" gorm:"layer2_hash"`
	Layer1Hash string `json:"layer1_hash" gorm:"layer1_hash"` // relay tx hash on L1, empty until relayed
	Proof      string `json:"proof" gorm:"proof"`
	Status     int    `json:"status" gorm:"status"`

	CreatedTime time.Time `json:"created_time" gorm:"created_time"`
	UpdatedTime time.Time `json:"updated_time" gorm:"updated_time"`
}
// NewL2Message returns an L2Message ORM handle bound to db.
func NewL2Message(db *gorm.DB) *L2Message {
	orm := &L2Message{db: db}
	return orm
}
// TableName defines the L2Message table name.
func (*L2Message) TableName() string {
	// Bug fix: this previously returned "l1_message", but the L2Message
	// columns (nonce, proof, layer2_hash NOT NULL) match the l2_message
	// migration — the L1/L2 table names were swapped.
	return "l2_message"
}
// GetL2Messages returns messages matching the column=value filters, ordered by
// orderByList and capped at limit (0 means no cap).
func (m *L2Message) GetL2Messages(fields map[string]interface{}, orderByList []string, limit int) ([]L2Message, error) {
	db := m.db.Select("nonce, msg_hash, height, sender, target, value, calldata, layer2_hash")
	for key, value := range fields {
		db = db.Where(key, value)
	}
	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}
	if limit != 0 {
		db = db.Limit(limit)
	}
	var l2MsgList []L2Message
	if err := db.Find(&l2MsgList).Error; err != nil {
		return nil, err
	}
	return l2MsgList, nil
}
// GetLayer2LatestWatchedHeight returns the latest L2 height stored in the
// table, or 0 when the table is empty.
// @note It's not correct, since we may don't have message in some blocks.
// But it will only be called at start, some redundancy is acceptable.
func (m *L2Message) GetLayer2LatestWatchedHeight() (uint64, error) {
	// Bug fix: the aggregate column ("COALESCE(MAX(height), -1)") maps to no
	// struct field, so Height was never populated (and -1 cannot scan into a
	// uint64 anyway). Scan the raw row instead.
	var height int64
	row := m.db.Model(&L2Message{}).Select("COALESCE(MAX(height), -1)").Row()
	if err := row.Scan(&height); err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return 0, nil
		}
		return 0, err
	}
	if height < 0 {
		// Empty table; preserve the original contract of returning 0.
		return 0, nil
	}
	return uint64(height), nil
}
// GetL2MessageByNonce fetches a single message by nonce.
// for unit test
func (m *L2Message) GetL2MessageByNonce(nonce uint64) (*L2Message, error) {
	const selectFields = "nonce, msg_hash, height, sender, target, value, calldata, layer2_hash, status"
	var msg L2Message
	if err := m.db.Select(selectFields).Where("nonce", nonce).First(&msg).Error; err != nil {
		return nil, err
	}
	return &msg, nil
}
// SaveL2Messages batch-inserts the given layer2 messages; a no-op for an empty
// slice. On failure the nonces and heights of the whole batch are logged to
// aid debugging.
func (m *L2Message) SaveL2Messages(ctx context.Context, messages []L2Message) error {
	if len(messages) == 0 {
		return nil
	}
	if err := m.db.WithContext(ctx).Create(&messages).Error; err != nil {
		nonces := make([]uint64, 0, len(messages))
		heights := make([]uint64, 0, len(messages))
		for _, msg := range messages {
			nonces = append(nonces, msg.Nonce)
			heights = append(heights, msg.Height)
		}
		log.Error("failed to insert layer2Messages", "nonces", nonces, "heights", heights, "err", err)
		return err
	}
	return nil
}
// UpdateLayer2Status updates the message status, given the message hash.
func (m *L2Message) UpdateLayer2Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
	return m.db.Model(&L2Message{}).WithContext(ctx).Where("msg_hash", msgHash).Update("status", status).Error
}
// UpdateLayer2StatusAndLayer1Hash updates the message status and the layer1
// relay transaction hash, given the message hash.
func (m *L2Message) UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer1Hash string) error {
	updates := map[string]interface{}{
		"status":      status,
		"layer1_hash": layer1Hash,
	}
	return m.db.Model(&L2Message{}).WithContext(ctx).Where("msg_hash", msgHash).Updates(updates).Error
}

View File

@@ -0,0 +1,62 @@
package migrate
import (
"database/sql"
"embed"
"os"
"strconv"
"github.com/pressly/goose/v3"
)
// embedMigrations holds the SQL migration files compiled into the binary.
//go:embed migrations/*.sql
var embedMigrations embed.FS

// MigrationsDir is the directory (inside the embedded FS) goose reads from.
const MigrationsDir string = "migrations"

// init wires goose to the embedded migrations: sequential numbering, a custom
// version table, and optional SQL logging via the LOG_SQL_MIGRATIONS env var.
func init() {
	goose.SetBaseFS(embedMigrations)
	goose.SetSequential(true)
	goose.SetTableName("scroll_migrations")

	// Parse errors deliberately fall back to verbose=false.
	verbose, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))
	goose.SetVerbose(verbose)
}
// Migrate applies all pending migrations to db, tolerating missing
// (out-of-order) versions.
func Migrate(db *sql.DB) error {
	return goose.Up(db, MigrationsDir, goose.WithAllowMissing())
}
// Rollback rolls the schema back: to the given version when one is provided,
// otherwise down by a single migration.
func Rollback(db *sql.DB, version *int64) error {
	if version == nil {
		return goose.Down(db, MigrationsDir)
	}
	return goose.DownTo(db, MigrationsDir, *version)
}
// ResetDB rolls the schema all the way back to version 0 and then re-applies
// every migration.
func ResetDB(db *sql.DB) error {
	if err := Rollback(db, new(int64)); err != nil {
		return err
	}
	return Migrate(db)
}
// Current returns the schema version currently recorded in the database.
func Current(db *sql.DB) (int64, error) {
	return goose.GetDBVersion(db)
}
// Status reports (via goose) the current migration version of db, returning an
// error when it cannot be determined.
func Status(db *sql.DB) error {
	return goose.Version(db, MigrationsDir)
}
// Create scaffolds a new migration file of the given type (sql/go) with the
// given name in the migrations directory.
func Create(db *sql.DB, name, migrationType string) error {
	return goose.Create(db, MigrationsDir, name, migrationType)
}

View File

@@ -0,0 +1,86 @@
package migrate
import (
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/database"
)
var (
base *docker.App
pgDB *sqlx.DB
)
// initEnv starts the dockerized database and opens the shared test connection.
func initEnv(t *testing.T) error {
	// Start db container.
	base.RunDBImage(t)

	// Create db orm handler.
	factory, err := database.NewOrmFactory(base.DBConfig)
	if err != nil {
		return err
	}
	pgDB = factory.GetDB()
	return nil
}
// TestMigrate drives the migration sub-tests against one shared database
// container. The sub-tests are order-dependent: testCurrent expects a fresh
// schema (version 0), testResetDB migrates up, and testRollback relies on the
// version left behind by testMigrate.
func TestMigrate(t *testing.T) {
	base = docker.NewDockerApp()

	if err := initEnv(t); err != nil {
		t.Fatal(err)
	}

	t.Run("testCurrent", testCurrent)
	t.Run("testStatus", testStatus)
	t.Run("testResetDB", testResetDB)
	t.Run("testMigrate", testMigrate)
	t.Run("testRollback", testRollback)

	t.Cleanup(func() {
		base.Free()
	})
}
// testCurrent checks that a fresh database reports schema version 0.
func testCurrent(t *testing.T) {
	version, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, 0, int(version))
}
func testStatus(t *testing.T) {
status := Status(pgDB.DB)
assert.NoError(t, status)
}
// testResetDB resets the schema and checks that all migrations were applied.
func testResetDB(t *testing.T) {
	assert.NoError(t, ResetDB(pgDB.DB))
	version, err := Current(pgDB.DB)
	assert.NoError(t, err)
	// total number of tables.
	assert.Equal(t, 7, int(version))
}
// testMigrate applies pending migrations and checks the version advanced past 0.
func testMigrate(t *testing.T) {
	assert.NoError(t, Migrate(pgDB.DB))
	version, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, version > 0)
}
// testRollback rolls back one migration and checks the version decreased by one.
func testRollback(t *testing.T) {
	before, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, before > 0)

	assert.NoError(t, Rollback(pgDB.DB, nil))

	after, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, after+1 == before)
}

View File

@@ -0,0 +1,38 @@
-- +goose Up
-- +goose StatementBegin
-- Creates the block_trace table: one row per L2 block, holding the raw JSON
-- trace and the hash of the batch the block is assigned to (NULL until
-- batched).
-- TODO: use foreign key for batch_id?
-- TODO: why tx_num is bigint?
create table block_trace
(
    number          BIGINT  NOT NULL,
    hash            VARCHAR NOT NULL,
    parent_hash     VARCHAR NOT NULL,
    trace           JSON    NOT NULL,
    batch_hash      VARCHAR DEFAULT NULL,
    tx_num          INTEGER NOT NULL,
    gas_used        BIGINT  NOT NULL,
    block_timestamp NUMERIC NOT NULL
);

create unique index block_trace_hash_uindex
    on block_trace (hash);

create unique index block_trace_number_uindex
    on block_trace (number);

create unique index block_trace_parent_uindex
    on block_trace (number, parent_hash);

create unique index block_trace_parent_hash_uindex
    on block_trace (hash, parent_hash);

-- Non-unique: many blocks share one batch.
create index block_trace_batch_hash_index
    on block_trace (batch_hash);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists block_trace;
-- +goose StatementEnd

View File

@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin
-- Creates the l1_message table: one row per L1->L2 bridge message, keyed by
-- queue_index / msg_hash, with an auto-maintained updated_time trigger.
create table l1_message
(
    queue_index  BIGINT  NOT NULL,
    msg_hash     VARCHAR NOT NULL,
    height       BIGINT  NOT NULL,
    gas_limit    BIGINT  NOT NULL,
    sender       VARCHAR NOT NULL,
    target       VARCHAR NOT NULL,
    value        VARCHAR NOT NULL,
    calldata     TEXT    NOT NULL,
    layer1_hash  VARCHAR NOT NULL,
    layer2_hash  VARCHAR DEFAULT NULL,
    status       INTEGER DEFAULT 1,
    created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
    on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l1_message_hash_uindex
    on l1_message (msg_hash);

create unique index l1_message_nonce_uindex
    on l1_message (queue_index);

create index l1_message_height_index
    on l1_message (height);

-- Keeps updated_time current on every UPDATE.
CREATE OR REPLACE FUNCTION update_timestamp()
    RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_time = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
    ON l1_message FOR EACH ROW EXECUTE PROCEDURE
    update_timestamp();
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l1_message;
-- +goose StatementEnd

View File

@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin
-- Creates the l2_message table: one row per L2->L1 bridge message, keyed by
-- nonce / msg_hash, with an auto-maintained updated_time trigger.
create table l2_message
(
    nonce        BIGINT  NOT NULL,
    msg_hash     VARCHAR NOT NULL,
    height       BIGINT  NOT NULL,
    sender       VARCHAR NOT NULL,
    target       VARCHAR NOT NULL,
    value        VARCHAR NOT NULL,
    calldata     TEXT    NOT NULL,
    layer2_hash  VARCHAR NOT NULL,
    layer1_hash  VARCHAR DEFAULT NULL,
    proof        TEXT    DEFAULT NULL,
    status       INTEGER DEFAULT 1,
    created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
    on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l2_message_hash_uindex
    on l2_message (msg_hash);

create unique index l2_message_nonce_uindex
    on l2_message (nonce);

create index l2_message_height_index
    on l2_message (height);

-- Keeps updated_time current on every UPDATE (shared helper, CREATE OR REPLACE
-- makes re-definition across migrations safe).
CREATE OR REPLACE FUNCTION update_timestamp()
    RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_time = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
    ON l2_message FOR EACH ROW EXECUTE PROCEDURE
    update_timestamp();
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l2_message;
-- +goose StatementEnd

View File

@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin
-- Creates the block_batch table: one row per rollup batch, tracking proving,
-- rollup and gas-oracle state plus the associated lifecycle timestamps.
create table block_batch
(
    hash                 VARCHAR NOT NULL,
    index                BIGINT  NOT NULL,
    start_block_number   BIGINT  NOT NULL,
    start_block_hash     VARCHAR NOT NULL,
    end_block_number     BIGINT  NOT NULL,
    end_block_hash       VARCHAR NOT NULL,
    parent_hash          VARCHAR NOT NULL,
    state_root           VARCHAR NOT NULL,
    total_tx_num         BIGINT  NOT NULL,
    total_l1_tx_num      BIGINT  NOT NULL,
    total_l2_gas         BIGINT  NOT NULL,
    proving_status       INTEGER DEFAULT 1,
    proof                BYTEA   DEFAULT NULL,
    instance_commitments BYTEA   DEFAULT NULL,
    proof_time_sec       INTEGER DEFAULT 0,
    rollup_status        INTEGER DEFAULT 1,
    commit_tx_hash       VARCHAR DEFAULT NULL,
    finalize_tx_hash     VARCHAR DEFAULT NULL,
    oracle_status        INTEGER DEFAULT 1,
    oracle_tx_hash       VARCHAR DEFAULT NULL,
    created_at           TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    prover_assigned_at   TIMESTAMP(0) DEFAULT NULL,
    proved_at            TIMESTAMP(0) DEFAULT NULL,
    committed_at         TIMESTAMP(0) DEFAULT NULL,
    finalized_at         TIMESTAMP(0) DEFAULT NULL
);

comment
    on column block_batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';

comment
    on column block_batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped, commit_failed, finalize_failed';

comment
    on column block_batch.oracle_status is 'undefined, pending, importing, imported, failed';

create unique index block_batch_hash_uindex
    on block_batch (hash);

create unique index block_batch_index_uindex
    on block_batch (index);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists block_batch;
-- +goose StatementEnd

View File

@@ -0,0 +1,18 @@
-- +goose Up
-- +goose StatementBegin
-- Creates the session_info table: serialized roller session state keyed by
-- task hash.
create table session_info
(
    hash         VARCHAR NOT NULL,
    rollers_info BYTEA   NOT NULL
);

create unique index session_info_hash_uindex
    on session_info (hash);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists session_info;
-- +goose StatementEnd

View File

@@ -0,0 +1,33 @@
-- +goose Up
-- +goose StatementBegin
create table l1_block
(
number BIGINT NOT NULL,
hash VARCHAR NOT NULL,
header_rlp TEXT NOT NULL,
base_fee BIGINT NOT NULL,
block_status INTEGER DEFAULT 1,
import_tx_hash VARCHAR DEFAULT NULL,
oracle_status INTEGER DEFAULT 1,
oracle_tx_hash VARCHAR DEFAULT NULL
);
comment
on column l1_block.block_status is 'undefined, pending, importing, imported, failed';
comment
on column l1_block.oracle_status is 'undefined, pending, importing, imported, failed';
create unique index l1_block_hash_uindex
on l1_block (hash);
create unique index l1_block_number_uindex
on l1_block (number);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_block;
-- +goose StatementEnd

View File

@@ -0,0 +1,38 @@
-- +goose Up
-- +goose StatementBegin
-- Creates the agg_task table: one row per aggregation proving task spanning a
-- range of batches, with an auto-maintained updated_time trigger.
create table agg_task
(
    id                VARCHAR NOT NULL,
    start_batch_index BIGINT  NOT NULL,
    start_batch_hash  VARCHAR NOT NULL,
    end_batch_index   BIGINT  NOT NULL,
    end_batch_hash    VARCHAR NOT NULL,
    proving_status    SMALLINT DEFAULT 1,
    proof             BYTEA    DEFAULT NULL,
    created_time      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_time      TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP
);

create unique index agg_task_hash_uindex
    on agg_task (id);

-- Keeps updated_time current on every UPDATE (shared helper, CREATE OR REPLACE
-- makes re-definition across migrations safe).
CREATE OR REPLACE FUNCTION update_timestamp()
    RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_time = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
    ON agg_task FOR EACH ROW EXECUTE PROCEDURE
    update_timestamp();
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists agg_task;
-- +goose StatementEnd

View File

@@ -0,0 +1,237 @@
package types
import (
"bufio"
"bytes"
"encoding/binary"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
abi "scroll-tech/bridge/internal/abi"
"scroll-tech/bridge/internal/orm"
)
// PublicInputHashConfig is the configuration of how to compute the public input hash.
type PublicInputHashConfig struct {
	// MaxTxNum is the tx-slot count the hash preimage is padded up to.
	MaxTxNum int `json:"max_tx_num"`
	// PaddingTxHash is the hash written into each unused tx slot.
	PaddingTxHash common.Hash `json:"padding_tx_hash"`
}

// defaultMaxTxNum is the padding slot count used when no config is supplied.
const defaultMaxTxNum = 44

// defaultPaddingTxHash is the zero hash used to fill unused tx slots by default.
var defaultPaddingTxHash = [32]byte{}
// BatchData contains info of batch to be committed.
type BatchData struct {
	Batch        abi.IScrollChainBatch
	TxHashes     []common.Hash // hashes of all txs in the batch, in order
	TotalTxNum   uint64
	TotalL1TxNum uint64
	TotalL2Gas   uint64

	// cache for the BatchHash; populated lazily by Hash().
	hash *common.Hash

	// The config to compute the public input hash, or the block hash.
	// If it is nil, the hash calculation will use `defaultMaxTxNum` and `defaultPaddingTxHash`.
	piCfg *PublicInputHashConfig
}
// Timestamp returns the timestamp of the first block in the BlockData, or 0
// when the batch contains no blocks.
func (b *BatchData) Timestamp() uint64 {
	blocks := b.Batch.Blocks
	if len(blocks) == 0 {
		return 0
	}
	return blocks[0].Timestamp
}
// Hash calculates the hash of this batch and caches it for later calls.
// The preimage layout (field order and widths) is consensus-critical: it must
// match the on-chain / prover computation exactly, so do not reorder writes.
func (b *BatchData) Hash() *common.Hash {
	// Return the memoized hash when it was computed before.
	if b.hash != nil {
		return b.hash
	}

	// Scratch buffer for fixed-width big-endian integer encodings.
	buf := make([]byte, 8)
	hasher := crypto.NewKeccakState()

	// 1. hash PrevStateRoot, NewStateRoot, WithdrawTrieRoot
	// @todo: panic on error here.
	_, _ = hasher.Write(b.Batch.PrevStateRoot[:])
	_, _ = hasher.Write(b.Batch.NewStateRoot[:])
	_, _ = hasher.Write(b.Batch.WithdrawTrieRoot[:])

	// 2. hash all block contexts
	for _, block := range b.Batch.Blocks {
		// write BlockHash & ParentHash
		_, _ = hasher.Write(block.BlockHash[:])
		_, _ = hasher.Write(block.ParentHash[:])
		// write BlockNumber (8 bytes, big-endian)
		binary.BigEndian.PutUint64(buf, block.BlockNumber)
		_, _ = hasher.Write(buf)
		// write Timestamp (8 bytes, big-endian)
		binary.BigEndian.PutUint64(buf, block.Timestamp)
		_, _ = hasher.Write(buf)
		// write BaseFee as a 32-byte value; a nil BaseFee hashes as 32 zero bytes
		var baseFee [32]byte
		if block.BaseFee != nil {
			baseFee = newByte32FromBytes(block.BaseFee.Bytes())
		}
		_, _ = hasher.Write(baseFee[:])
		// write GasLimit (8 bytes, big-endian)
		binary.BigEndian.PutUint64(buf, block.GasLimit)
		_, _ = hasher.Write(buf)
		// write NumTransactions (2 bytes, big-endian)
		binary.BigEndian.PutUint16(buf[:2], block.NumTransactions)
		_, _ = hasher.Write(buf[:2])
		// write NumL1Messages (2 bytes, big-endian)
		binary.BigEndian.PutUint16(buf[:2], block.NumL1Messages)
		_, _ = hasher.Write(buf[:2])
	}

	// 3. add all tx hashes
	for _, txHash := range b.TxHashes {
		_, _ = hasher.Write(txHash[:])
	}

	// 4. append empty tx hash up to MaxTxNum, taking the limits from piCfg
	// when present and the package defaults otherwise
	maxTxNum := defaultMaxTxNum
	paddingTxHash := common.Hash(defaultPaddingTxHash)
	if b.piCfg != nil {
		maxTxNum = b.piCfg.MaxTxNum
		paddingTxHash = b.piCfg.PaddingTxHash
	}
	for i := len(b.TxHashes); i < maxTxNum; i++ {
		_, _ = hasher.Write(paddingTxHash[:])
	}

	// Read the digest out of the sponge and memoize it.
	b.hash = new(common.Hash)
	_, _ = hasher.Read(b.hash[:])
	return b.hash
}
// NewBatchData creates a BatchData given the parent batch information and the traces of the blocks
// included in this batch.
//
// It fills the ABI batch struct (block contexts, state roots, the
// length-prefixed RLP-encoded L2 transactions) and the batch-level
// aggregates (TotalTxNum, TotalL2Gas, TxHashes). TotalL1TxNum is not
// populated here. Malformed trace data (bad tx hex, RLP encoding failure)
// panics rather than silently committing a corrupted batch, consistent
// with the existing Flush panic below.
func NewBatchData(parentBatch *orm.BlockBatch, blocks []*WrappedBlock, piCfg *PublicInputHashConfig) *BatchData {
	batchData := new(BatchData)
	batch := &batchData.Batch

	// Chain this batch onto its parent: next index, parent hash.
	batch.BatchIndex = parentBatch.Index + 1
	batch.ParentBatchHash = common.HexToHash(parentBatch.Hash)
	batch.Blocks = make([]abi.IScrollChainBlockContext, len(blocks))

	var batchTxDataBuf bytes.Buffer
	batchTxDataWriter := bufio.NewWriter(&batchTxDataBuf)

	for i, block := range blocks {
		batchData.TotalTxNum += uint64(len(block.Transactions))
		batchData.TotalL2Gas += block.Header.GasUsed

		// set baseFee to 0 when it's nil in the block header
		baseFee := block.Header.BaseFee
		if baseFee == nil {
			baseFee = big.NewInt(0)
		}

		batch.Blocks[i] = abi.IScrollChainBlockContext{
			BlockHash:       block.Header.Hash(),
			ParentHash:      block.Header.ParentHash,
			BlockNumber:     block.Header.Number.Uint64(),
			Timestamp:       block.Header.Time,
			BaseFee:         baseFee,
			GasLimit:        block.Header.GasLimit,
			NumTransactions: uint16(len(block.Transactions)),
			NumL1Messages:   0, // TODO: currently use 0, will re-enable after we use l2geth to include L1 messages
		}

		// fill in RLP-encoded transactions, each prefixed with a 4-byte
		// big-endian length so they can be split back apart on-chain
		for _, txData := range block.Transactions {
			data, err := hexutil.Decode(txData.Data)
			if err != nil {
				// Previously ignored; a bad trace would silently corrupt the
				// committed batch. Fail loudly instead.
				panic("invalid tx data hex string: " + err.Error())
			}
			// right now we only support legacy tx
			tx := types.NewTx(&types.LegacyTx{
				Nonce:    txData.Nonce,
				To:       txData.To,
				Value:    txData.Value.ToInt(),
				Gas:      txData.Gas,
				GasPrice: txData.GasPrice.ToInt(),
				Data:     data,
				V:        txData.V.ToInt(),
				R:        txData.R.ToInt(),
				S:        txData.S.ToInt(),
			})
			rlpTxData, err := tx.MarshalBinary()
			if err != nil {
				panic("failed to RLP-encode tx: " + err.Error())
			}
			var txLen [4]byte
			binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
			_, _ = batchTxDataWriter.Write(txLen[:])
			_, _ = batchTxDataWriter.Write(rlpTxData)
			batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
		}

		// PrevStateRoot comes from the parent batch, taken once at the first block.
		if i == 0 {
			batch.PrevStateRoot = common.HexToHash(parentBatch.StateRoot)
		}
		// set NewStateRoot & WithdrawTrieRoot from the last block
		if i == len(blocks)-1 {
			batch.NewStateRoot = block.Header.Root
			batch.WithdrawTrieRoot = block.WithdrawTrieRoot
		}
	}

	if err := batchTxDataWriter.Flush(); err != nil {
		panic("Buffered I/O flush failed")
	}

	batch.L2Transactions = batchTxDataBuf.Bytes()
	batchData.piCfg = piCfg

	return batchData
}
// NewGenesisBatchData generates the batch that contains the genesis block.
// It panics if the supplied trace is not for block number 0.
func NewGenesisBatchData(genesisBlockTrace *WrappedBlock) *BatchData {
	header := genesisBlockTrace.Header
	if header.Number.Uint64() != 0 {
		panic("invalid genesis block trace: block number is not 0")
	}

	result := new(BatchData)

	// Batch 0 holds exactly one block context. PrevStateRoot,
	// WithdrawTrieRoot and ParentBatchHash stay at their zero values, and
	// L2Transactions stays empty, for the genesis batch.
	result.Batch.BatchIndex = 0
	result.Batch.NewStateRoot = header.Root
	result.Batch.Blocks = []abi.IScrollChainBlockContext{{
		BlockHash:       header.Hash(),
		ParentHash:      header.ParentHash,
		BlockNumber:     header.Number.Uint64(),
		Timestamp:       header.Time,
		BaseFee:         header.BaseFee,
		GasLimit:        header.GasLimit,
		NumTransactions: 0,
		NumL1Messages:   0,
	}}

	return result
}
// newByte32FromBytes converts the bytes in big-endian encoding to 32 bytes in
// big-endian encoding: the input is right-aligned into the result, and inputs
// longer than 32 bytes keep only their trailing (least-significant) 32 bytes.
func newByte32FromBytes(b []byte) [32]byte {
	var result [32]byte
	if len(b) > 32 {
		b = b[len(b)-32:] // keep only the lowest 32 bytes
	}
	offset := 32 - len(b)
	for i, v := range b {
		result[offset+i] = v
	}
	return result
}

View File

@@ -0,0 +1,91 @@
package types
import (
"math/big"
"testing"
"gotest.tools/assert"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
abi "scroll-tech/bridge/abi"
)
// TestBatchHash verifies BatchData.Hash against two pre-computed batch
// hashes, and that clearing the cached hash picks up a changed tx set.
func TestBatchHash(t *testing.T) {
	rawTx := common.Hex2Bytes("02f8710582fd14808506e38dccc9825208944d496ccc28058b1d74b7a19541663e21154f9c848801561db11e24a43380c080a0d890606d7a35b2ab0f9b866d62c092d5b163f3e6a55537ae1485aac08c3f8ff7a023997be2d32f53e146b160fff0ba81e81dbb4491c865ab174d15c5b3d28c41ae")
	firstTx := new(geth_types.Transaction)
	if err := firstTx.UnmarshalBinary(rawTx); err != nil {
		t.Fatalf("invalid tx hex string: %s", err)
	}

	// One real tx hash, padded to 4 slots with a fixed padding hash.
	bd := new(BatchData)
	bd.TxHashes = []common.Hash{firstTx.Hash()}
	bd.piCfg = &PublicInputHashConfig{
		MaxTxNum:      4,
		PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
	}

	inner := &bd.Batch
	inner.PrevStateRoot = common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000cafe")
	inner.Blocks = []abi.IScrollChainBlockContext{{
		BlockNumber:     51966,
		Timestamp:       123456789,
		BaseFee:         new(big.Int).SetUint64(0),
		GasLimit:        10000000000000000,
		NumTransactions: 1,
		NumL1Messages:   0,
	}}

	assert.Equal(t, *bd.Hash(), common.HexToHash("0xa9f2ca3175794f91226a410ba1e60fff07a405c957562675c4149b77e659d805"))

	// use a different tx hash
	rawTx = common.Hex2Bytes("f8628001830f424094000000000000000000000000000000000000bbbb8080820a97a064e07cd8f939e2117724bdcbadc80dda421381cbc2a1f4e0d093d9cc5c5cf68ea03e264227f80852d88743cd9e43998f2746b619180366a87e4531debf9c3fa5dc")
	secondTx := new(geth_types.Transaction)
	if err := secondTx.UnmarshalBinary(rawTx); err != nil {
		t.Fatalf("invalid tx hex string: %s", err)
	}
	bd.TxHashes[0] = secondTx.Hash()
	bd.hash = nil // clear the cache so Hash() recomputes
	assert.Equal(t, *bd.Hash(), common.HexToHash("0x398cb22bbfa1665c1b342b813267538a4c933d7f92d8bd9184aba0dd1122987b"))
}
// TestNewGenesisBatch checks that the hard-coded genesis header hashes to the
// expected block hash, and that the genesis batch built from it produces the
// expected batch hash.
func TestNewGenesisBatch(t *testing.T) {
	genesisBlock := &geth_types.Header{
		UncleHash:   common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		Root:        common.HexToHash("0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5"),
		TxHash:      common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
		ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
		Difficulty:  big.NewInt(1),
		Number:      big.NewInt(0),
		GasLimit:    940000000,
		GasUsed:     0,
		Time:        1639724192,
		Extra:       common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000214f8d488aa9ebf83e30bad45fb8f9c8ee2509f5511caff794753d07e9dfb218cfc233bb62d2c57022783094e1a7edb6f069f8424bb68496a0926b130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
		BaseFee:     big.NewInt(1000000000),
	}
	// Sanity-check the fixture: the header above must be the known genesis block.
	assert.Equal(
		t,
		genesisBlock.Hash().Hex(),
		"0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
		"wrong genesis block header",
	)

	// Positional fields: Header, Transactions (none), WithdrawTrieRoot (zero).
	blockTrace := &WrappedBlock{genesisBlock, nil, common.Hash{}}
	batchData := NewGenesisBatchData(blockTrace)
	t.Log(batchData.Batch.Blocks[0])
	// Set the public-input config after construction; Hash() has not been
	// called yet, so there is no cached value to clear.
	batchData.piCfg = &PublicInputHashConfig{
		MaxTxNum:      25,
		PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
	}
	assert.Equal(
		t,
		batchData.Hash().Hex(),
		"0x65cf210e30f75cf8fd198df124255f73bc08d6324759e828a784fa938e7ac43d",
		"wrong genesis batch hash",
	)
}

View File

@@ -0,0 +1,14 @@
package types
import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
)
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
	// Header is the L2 block header.
	Header *types.Header `json:"header"`
	// Transactions is only used to recover types.Transactions; the From field
	// of types.TransactionData is not populated here.
	Transactions []*types.TransactionData `json:"transactions"`
	// WithdrawTrieRoot is the root of the withdraw trie after this block.
	WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}

View File

@@ -0,0 +1,54 @@
package utils
import (
"log"
"os"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"scroll-tech/bridge/internal/config"
)
// InitDB opens a gorm Postgres connection pool from the given config,
// applies the pool limits, and verifies connectivity with a ping before
// returning the handle.
//
// SQL statement logging is enabled (logger.Info) when config.ShowSql is set;
// otherwise the logger stays silent. Queries slower than
// config.SlowSqlThreshold are reported as slow SQL.
func InitDB(config *config.DBConfig) (*gorm.DB, error) {
	logLevel := logger.Silent
	if config.ShowSql {
		logLevel = logger.Info
	}
	newLogger := logger.New(
		log.New(os.Stdout, "\r\n", log.LstdFlags), // io writer
		logger.Config{
			SlowThreshold: config.SlowSqlThreshold, // Slow SQL threshold
			LogLevel:      logLevel,                // Log level
		},
	)

	db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
		Logger: newLogger,
	})
	if err != nil {
		return nil, err
	}

	sqlDB, err := db.DB()
	if err != nil {
		return nil, err
	}

	sqlDB.SetMaxOpenConns(config.MaxOpenNum)
	sqlDB.SetMaxIdleConns(config.MaxIdleNum)

	if err = sqlDB.Ping(); err != nil {
		// Don't leak the pool we just opened when the database is unreachable.
		_ = sqlDB.Close()
		return nil, err
	}
	return db, nil
}
// CloseDB closes the underlying sql.DB connection pool of the given gorm
// handle. It returns any error from retrieving the pool or from closing it
// (the original discarded the Close error and always returned nil).
func CloseDB(db *gorm.DB) error {
	sqlDB, err := db.DB()
	if err != nil {
		return err
	}
	return sqlDB.Close()
}

View File

@@ -9,7 +9,7 @@ import (
"github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/crypto"
bridgeabi "scroll-tech/bridge/abi" bridgeAbi "scroll-tech/bridge/internal/abi"
) )
// Keccak2 compute the keccack256 of two concatenations of bytes32 // Keccak2 compute the keccack256 of two concatenations of bytes32
@@ -25,7 +25,7 @@ func ComputeMessageHash(
messageNonce *big.Int, messageNonce *big.Int,
message []byte, message []byte,
) common.Hash { ) common.Hash {
data, _ := bridgeabi.L2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message) data, _ := bridgeAbi.L2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
return common.BytesToHash(crypto.Keccak256(data)) return common.BytesToHash(crypto.Keccak256(data))
} }

View File

@@ -1,11 +0,0 @@
package relayer
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
defaultL2MessageRelayMinGasLimit = 200000
)

545
bridge/testdata/blockTrace_02.json vendored Normal file
View File

@@ -0,0 +1,545 @@
{
"withdrawTrieRoot": "0x0000000000000000000000000000000000000000",
"coinbase": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"header": {
"parentHash": "0xe17f08d25ef61a8ee12aa29704b901345a597f5e45a9a0f603ae0f70845b54dc",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"transactionsRoot": "0x3057754c197f33e1fe799e996db6232b5257412feea05b3c1754738f0b33fe32",
"receiptsRoot": "0xd95b673818fa493deec414e01e610d97ee287c9421c8eff4102b1647c1a184e4",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x2",
"number": "0x2",
"gasLimit": "0x355418d1e8184",
"gasUsed": "0xa410",
"timestamp": "0x63807b2a",
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000004b54a94f0df14333e63c8a13dfe6097c1a08b5fd2c225a8dc0f199dae245aead55d6f774a980a0c925be407748d56a14106afda7ddc1dec342e7ee3b0d58a8df01",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x1de9",
"hash": "0xc7b6c7022c8386cdaf6fcd3d4f8d03dce257ae3664a072fdce511ecefce73ad0"
},
"transactions": [
{
"type": 0,
"nonce": 0,
"txHash": "0xb2febc1213baec968f6575789108e175273b8da8f412468098893084229f1542",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514",
"s": "0x34cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b"
},
{
"type": 0,
"nonce": 1,
"txHash": "0xe6ac2ffc543d07f1e280912a2abe3aa659bf83773740681151297ada1bb211dd",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xf039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316",
"s": "0x5a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1"
}
],
"storageTrace": {
"rootBefore": "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
"rootAfter": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"proofs": {
"0x01bae6BF68E9A03Fb2bc0615b1bf0d69ce9411eD": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0xc0c4C8bAEA3f6Acb49b6E1fb9e2ADEcEeaCB0cA2": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
]
}
},
"executionResults": [
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
},
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
}
],
"mptwitness": [
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
},
{
"pathPart": "0x3",
"root": "0xaf16fd780a8c7616b95b20da69f4ff26e0253238e996f9516445d6d6bf92b725",
"path": [
{
"value": "0x5bbe97e7e66485b203f9dfea64eb7fa7df06959b12cbde2beba14f8f91133a13",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
},
{
"value": "0x2e591357b02ab3117c35ad94a4e1a724fdbd95d6463da1f6c8017e6d000ecf02",
"sibling": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
{
"value": "0x794953bb5d8aa00f90383ff435ce2ea58e30e1da1061e69455c38496766ec10f",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
}
]
}

12877
bridge/testdata/blockTrace_03.json vendored Normal file

File diff suppressed because one or more lines are too long

View File

@@ -3,6 +3,7 @@ package tests
import ( import (
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"gorm.io/gorm"
"math/big" "math/big"
"testing" "testing"
@@ -14,10 +15,13 @@ import (
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/mock_bridge"
) )
var ( var (
@@ -49,6 +53,15 @@ var (
l2MessengerAddress common.Address l2MessengerAddress common.Address
) )
func setupDB(t *testing.T) *gorm.DB {
db, err := utils.InitDB(cfg.DBConfig)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
return db
}
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
base = docker.NewDockerApp() base = docker.NewDockerApp()
@@ -90,7 +103,12 @@ func setupEnv(t *testing.T) {
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint() cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
// Create db container. // Create db container.
cfg.DBConfig = base.DBConfig cfg.DBConfig = &bridgeTypes.DBConfig{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
// Create l1geth and l2geth client. // Create l1geth and l2geth client.
l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint) l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)

View File

@@ -6,24 +6,22 @@ import (
"testing" "testing"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
"scroll-tech/database" bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/database/migrate" "scroll-tech/bridge/internal/utils"
) )
func testImportL1GasPrice(t *testing.T) { func testImportL1GasPrice(t *testing.T) {
// Create db handler and reset db. db := setupDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer utils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t) prepareContracts(t)
@@ -45,48 +43,37 @@ func testImportL1GasPrice(t *testing.T) {
err = l1Watcher.FetchBlockHeader(number) err = l1Watcher.FetchBlockHeader(number)
assert.NoError(t, err) assert.NoError(t, err)
l1BlockOrm := orm.NewL1Block(db)
// check db status // check db status
latestBlockHeight, err := db.GetLatestL1BlockHeight() latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, number, latestBlockHeight) assert.Equal(t, number, latestBlockHeight)
blocks, err := db.GetL1BlockInfos(map[string]interface{}{ blocks, err := l1BlockOrm.GetL1BlockInfos(map[string]interface{}{"number": latestBlockHeight})
"number": latestBlockHeight,
})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(blocks), 1) assert.Equal(t, len(blocks), 1)
assert.Equal(t, blocks[0].GasOracleStatus, types.GasOraclePending) assert.Equal(t, blocks[0].GasOracleStatus, types.GasOraclePending)
assert.Equal(t, blocks[0].OracleTxHash.Valid, false)
// relay gas price // relay gas price
l1Relayer.ProcessGasPriceOracle() l1Relayer.ProcessGasPriceOracle()
blocks, err = db.GetL1BlockInfos(map[string]interface{}{ blocks, err = l1BlockOrm.GetL1BlockInfos(map[string]interface{}{"number": latestBlockHeight})
"number": latestBlockHeight,
})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(blocks), 1) assert.Equal(t, len(blocks), 1)
assert.Equal(t, blocks[0].GasOracleStatus, types.GasOracleImporting) assert.Equal(t, blocks[0].GasOracleStatus, types.GasOracleImporting)
assert.Equal(t, blocks[0].OracleTxHash.Valid, true)
} }
func testImportL2GasPrice(t *testing.T) { func testImportL2GasPrice(t *testing.T) {
// Create db handler and reset db. db := setupDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer utils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t) prepareContracts(t)
l2Cfg := cfg.L2Config l2Cfg := cfg.L2Config
// Create L2Relayer
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig) l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err) assert.NoError(t, err)
// add fake blocks // add fake blocks
traces := []*types.WrappedBlock{ traces := []*bridgeTypes.WrappedBlock{
{ {
Header: &geth_types.Header{ Header: &gethTypes.Header{
Number: big.NewInt(1), Number: big.NewInt(1),
ParentHash: common.Hash{}, ParentHash: common.Hash{},
Difficulty: big.NewInt(0), Difficulty: big.NewInt(0),
@@ -96,32 +83,33 @@ func testImportL2GasPrice(t *testing.T) {
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
}, },
} }
assert.NoError(t, db.InsertWrappedBlocks(traces))
parentBatch := &types.BlockBatch{ blockTraceOrm := orm.NewBlockTrace(db)
assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(traces))
parentBatch := &orm.BlockBatch{
Index: 0, Index: 0,
Hash: "0x0000000000000000000000000000000000000000", Hash: "0x0000000000000000000000000000000000000000",
} }
batchData := types.NewBatchData(parentBatch, []*types.WrappedBlock{ batchData := bridgeTypes.NewBatchData(parentBatch, []*bridgeTypes.WrappedBlock{traces[0]}, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
traces[0], blockBatchOrm := orm.NewBlockBatch(db)
}, cfg.L2Config.BatchProposerConfig.PublicInputConfig) err = db.Transaction(func(tx *gorm.DB) error {
_, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData)
// add fake batch if dbTxErr != nil {
dbTx, err := db.Beginx() return dbTxErr
}
return nil
})
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
assert.NoError(t, dbTx.Commit())
// check db status // check db status
batch, err := db.GetLatestBatch() batch, err := blockBatchOrm.GetLatestBatch()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, batch.OracleStatus, types.GasOraclePending) assert.Equal(t, batch.OracleStatus, types.GasOraclePending)
assert.Equal(t, batch.OracleTxHash.Valid, false)
// relay gas price // relay gas price
l2Relayer.ProcessGasPriceOracle() l2Relayer.ProcessGasPriceOracle()
batch, err = db.GetLatestBatch() batch, err = blockBatchOrm.GetLatestBatch()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, batch.OracleStatus, types.GasOracleImporting) assert.Equal(t, batch.OracleStatus, types.GasOracleImporting)
assert.Equal(t, batch.OracleTxHash.Valid, true)
} }

View File

@@ -13,19 +13,15 @@ import (
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
"scroll-tech/database" "scroll-tech/bridge/internal/utils"
"scroll-tech/database/migrate"
) )
func testRelayL1MessageSucceed(t *testing.T) { func testRelayL1MessageSucceed(t *testing.T) {
// Create db handler and reset db. db := setupDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer utils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t) prepareContracts(t)
@@ -56,21 +52,22 @@ func testRelayL1MessageSucceed(t *testing.T) {
// l1 watch process events // l1 watch process events
l1Watcher.FetchContractEvent() l1Watcher.FetchContractEvent()
l1MessageOrm := orm.NewL1Message(db)
// check db status // check db status
msg, err := db.GetL1MessageByQueueIndex(nonce.Uint64()) msg, err := l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgPending) assert.Equal(t, msg.Status, types.MsgPending)
assert.Equal(t, msg.Target, l2Auth.From.String()) assert.Equal(t, msg.Target, l2Auth.From.String())
// process l1 messages // process l1 messages
l1Relayer.ProcessSavedEvents() l1Relayer.ProcessSavedEvents()
msg, err = db.GetL1MessageByQueueIndex(nonce.Uint64()) msg, err = l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgSubmitted) assert.Equal(t, msg.Status, types.MsgSubmitted)
relayTxHash, err := db.GetRelayL1MessageTxHash(nonce.Uint64()) l1Message, err := l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, true, relayTxHash.Valid) assert.NotNil(t, l1Message.Layer2Hash)
relayTx, _, err := l2Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String)) relayTx, _, err := l2Client.TransactionByHash(context.Background(), common.HexToHash(l1Message.Layer2Hash))
assert.NoError(t, err) assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l2Client, relayTx) relayTxReceipt, err := bind.WaitMined(context.Background(), l2Client, relayTx)
assert.NoError(t, err) assert.NoError(t, err)
@@ -78,7 +75,7 @@ func testRelayL1MessageSucceed(t *testing.T) {
// fetch message relayed events // fetch message relayed events
l2Watcher.FetchContractEvent() l2Watcher.FetchContractEvent()
msg, err = db.GetL1MessageByQueueIndex(nonce.Uint64()) msg, err = l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgConfirmed) assert.Equal(t, msg.Status, types.MsgConfirmed)
} }

View File

@@ -2,30 +2,29 @@ package tests
import ( import (
"context" "context"
"errors"
"math/big" "math/big"
"testing" "testing"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind" "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
"scroll-tech/database" bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/database/migrate" "scroll-tech/bridge/internal/utils"
) )
func testRelayL2MessageSucceed(t *testing.T) { func testRelayL2MessageSucceed(t *testing.T) {
// Create db handler and reset db. db := setupDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer utils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t) prepareContracts(t)
@@ -50,24 +49,27 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx) sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
assert.NoError(t, err) assert.NoError(t, err)
if sendReceipt.Status != geth_types.ReceiptStatusSuccessful || err != nil { if sendReceipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed") t.Fatalf("Call failed")
} }
// l2 watch process events // l2 watch process events
l2Watcher.FetchContractEvent() l2Watcher.FetchContractEvent()
l2MessageOrm := orm.NewL2Message(db)
blockTraceOrm := orm.NewBlockTrace(db)
blockBatchOrm := orm.NewBlockBatch(db)
// check db status // check db status
msg, err := db.GetL2MessageByNonce(nonce.Uint64()) msg, err := l2MessageOrm.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgPending) assert.Equal(t, msg.Status, types.MsgPending)
assert.Equal(t, msg.Sender, l2Auth.From.String()) assert.Equal(t, msg.Sender, l2Auth.From.String())
assert.Equal(t, msg.Target, l1Auth.From.String()) assert.Equal(t, msg.Target, l1Auth.From.String())
// add fake blocks // add fake blocks
traces := []*types.WrappedBlock{ traces := []*bridgeTypes.WrappedBlock{
{ {
Header: &geth_types.Header{ Header: &gethTypes.Header{
Number: sendReceipt.BlockNumber, Number: sendReceipt.BlockNumber,
ParentHash: common.Hash{}, ParentHash: common.Hash{},
Difficulty: big.NewInt(0), Difficulty: big.NewInt(0),
@@ -77,46 +79,57 @@ func testRelayL2MessageSucceed(t *testing.T) {
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
}, },
} }
assert.NoError(t, db.InsertWrappedBlocks(traces)) assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(traces))
parentBatch := &types.BlockBatch{ parentBatch := &orm.BlockBatch{
Index: 0, Index: 0,
Hash: "0x0000000000000000000000000000000000000000", Hash: "0x0000000000000000000000000000000000000000",
} }
batchData := types.NewBatchData(parentBatch, []*types.WrappedBlock{ batchData := bridgeTypes.NewBatchData(parentBatch, []*bridgeTypes.WrappedBlock{traces[0]}, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
traces[0],
}, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
batchHash := batchData.Hash().String() batchHash := batchData.Hash().String()
// add fake batch // add fake batch
dbTx, err := db.Beginx() err = db.Transaction(func(tx *gorm.DB) error {
rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, blockIDs, batchHash)
if dbTxErr != nil {
return dbTxErr
}
return nil
})
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
err = db.SetBatchHashForL2BlocksInDBTx(dbTx, blockIDs, batchHash)
assert.NoError(t, err)
assert.NoError(t, dbTx.Commit())
// add dummy proof // add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100) err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err) assert.NoError(t, err)
err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified) err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err) assert.NoError(t, err)
// process pending batch and check status // process pending batch and check status
l2Relayer.SendCommitTx([]*types.BatchData{batchData}) l2Relayer.SendCommitTx([]*bridgeTypes.BatchData{batchData})
status, err := db.GetRollupStatus(batchHash) statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupCommitting, status) assert.Equal(t, 1, len(statuses))
commitTxHash, err := db.GetCommitTxHash(batchHash) assert.Equal(t, types.RollupCommitting, statuses[0])
blockBatches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid) assert.Equal(t, 1, len(blockBatches))
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String)) assert.NotNil(t, blockBatches[0].CommitTxHash)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(blockBatches[0].CommitTxHash))
assert.NoError(t, err) assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx) commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err) assert.NoError(t, err)
@@ -125,19 +138,24 @@ func testRelayL2MessageSucceed(t *testing.T) {
// fetch CommitBatch rollup events // fetch CommitBatch rollup events
err = l1Watcher.FetchContractEvent() err = l1Watcher.FetchContractEvent()
assert.NoError(t, err) assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash) statuses, err = blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupCommitted, status) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitted, statuses[0])
// process committed batch and check status // process committed batch and check status
l2Relayer.ProcessCommittedBatches() l2Relayer.ProcessCommittedBatches()
status, err = db.GetRollupStatus(batchHash) statuses, err = blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizing, status) assert.Equal(t, 1, len(statuses))
finalizeTxHash, err := db.GetFinalizeTxHash(batchHash) assert.Equal(t, types.RollupFinalizing, statuses[0])
blockBatchWitchFinalizeTxHash, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid) assert.Equal(t, 1, len(blockBatchWitchFinalizeTxHash))
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String)) assert.Equal(t, true, blockBatchWitchFinalizeTxHash[0].FinalizeTxHash)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(blockBatchWitchFinalizeTxHash[0].FinalizeTxHash))
assert.NoError(t, err) assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx) finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err) assert.NoError(t, err)
@@ -146,19 +164,23 @@ func testRelayL2MessageSucceed(t *testing.T) {
// fetch FinalizeBatch events // fetch FinalizeBatch events
err = l1Watcher.FetchContractEvent() err = l1Watcher.FetchContractEvent()
assert.NoError(t, err) assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash) statuses, err = blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupFinalized, status) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalized, statuses[0])
// process l2 messages // process l2 messages
l2Relayer.ProcessSavedEvents() l2Relayer.ProcessSavedEvents()
msg, err = db.GetL2MessageByNonce(nonce.Uint64()) msg, err = l2MessageOrm.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgSubmitted) assert.Equal(t, msg.Status, types.MsgSubmitted)
relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64())
l2Messages, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"nonce": nonce}, nil, 1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, true, relayTxHash.Valid) assert.Equal(t, 1, len(l2Messages))
relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String)) assert.Equal(t, true, l2Messages[0].Layer1Hash)
relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(l2Messages[0].Layer1Hash))
assert.NoError(t, err) assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx) relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx)
assert.NoError(t, err) assert.NoError(t, err)
@@ -167,7 +189,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
// fetch message relayed events // fetch message relayed events
err = l1Watcher.FetchContractEvent() err = l1Watcher.FetchContractEvent()
assert.NoError(t, err) assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64()) msg, err = l2MessageOrm.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgConfirmed) assert.Equal(t, msg.Status, types.MsgConfirmed)
} }

View File

@@ -2,29 +2,28 @@ package tests
import ( import (
"context" "context"
"errors"
"gorm.io/gorm"
"math/big" "math/big"
"testing" "testing"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind" "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
"scroll-tech/database" bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/database/migrate" "scroll-tech/bridge/internal/utils"
) )
func testCommitBatchAndFinalizeBatch(t *testing.T) { func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create db handler and reset db. db := setupDB(t)
db, err := database.NewOrmFactory(cfg.DBConfig) defer utils.CloseDB(db)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t) prepareContracts(t)
@@ -37,58 +36,76 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db) l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
blockTraceOrm := orm.NewBlockTrace(db)
// add some blocks to db // add some blocks to db
var wrappedBlocks []*types.WrappedBlock var wrappedBlocks []*bridgeTypes.WrappedBlock
var parentHash common.Hash var parentHash common.Hash
for i := 1; i <= 10; i++ { for i := 1; i <= 10; i++ {
header := geth_types.Header{ header := gethTypes.Header{
Number: big.NewInt(int64(i)), Number: big.NewInt(int64(i)),
ParentHash: parentHash, ParentHash: parentHash,
Difficulty: big.NewInt(0), Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0), BaseFee: big.NewInt(0),
} }
wrappedBlocks = append(wrappedBlocks, &types.WrappedBlock{ wrappedBlocks = append(wrappedBlocks, &bridgeTypes.WrappedBlock{
Header: &header, Header: &header,
Transactions: nil, Transactions: nil,
WithdrawTrieRoot: common.Hash{}, WithdrawTrieRoot: common.Hash{},
}) })
parentHash = header.Hash() parentHash = header.Hash()
} }
assert.NoError(t, db.InsertWrappedBlocks(wrappedBlocks)) assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(wrappedBlocks))
parentBatch := &types.BlockBatch{ parentBatch := &orm.BlockBatch{
Index: 0, Index: 0,
Hash: "0x0000000000000000000000000000000000000000", Hash: "0x0000000000000000000000000000000000000000",
} }
batchData := types.NewBatchData(parentBatch, []*types.WrappedBlock{
tmpWrapBlocks := []*bridgeTypes.WrappedBlock{
wrappedBlocks[0], wrappedBlocks[0],
wrappedBlocks[1], wrappedBlocks[1],
}, cfg.L2Config.BatchProposerConfig.PublicInputConfig) }
batchData := bridgeTypes.NewBatchData(parentBatch, tmpWrapBlocks, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
batchHash := batchData.Hash().String() batchHash := batchData.Hash().String()
// add one batch to db blockBatchOrm := orm.NewBlockBatch(db)
dbTx, err := db.Beginx() err = db.Transaction(func(tx *gorm.DB) error {
rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData)
if dbTxErr != nil {
return dbTxErr
}
if rowsAffected != 1 {
dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
return dbTxErr
}
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, blockIDs, batchHash)
if dbTxErr != nil {
return dbTxErr
}
return nil
})
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
err = db.SetBatchHashForL2BlocksInDBTx(dbTx, blockIDs, batchHash)
assert.NoError(t, err)
assert.NoError(t, dbTx.Commit())
// process pending batch and check status // process pending batch and check status
l2Relayer.SendCommitTx([]*types.BatchData{batchData}) l2Relayer.SendCommitTx([]*bridgeTypes.BatchData{batchData})
status, err := db.GetRollupStatus(batchHash) statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupCommitting, status) assert.Equal(t, 1, len(statuses))
commitTxHash, err := db.GetCommitTxHash(batchHash) assert.Equal(t, types.RollupCommitting, statuses[0])
blockBatches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid) assert.Equal(t, 1, len(blockBatches))
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String)) assert.NotNil(t, true, blockBatches[0].CommitTxHash)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(blockBatches[0].CommitTxHash))
assert.NoError(t, err) assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx) commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err) assert.NoError(t, err)
@@ -97,28 +114,33 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// fetch rollup events // fetch rollup events
err = l1Watcher.FetchContractEvent() err = l1Watcher.FetchContractEvent()
assert.NoError(t, err) assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash) statuses, err = blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupCommitted, status) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitted, statuses[0])
// add dummy proof // add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100) err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err) assert.NoError(t, err)
err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified) err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err) assert.NoError(t, err)
// process committed batch and check status // process committed batch and check status
l2Relayer.ProcessCommittedBatches() l2Relayer.ProcessCommittedBatches()
status, err = db.GetRollupStatus(batchHash) statuses, err = blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizing, status) assert.Equal(t, 1, len(statuses))
finalizeTxHash, err := db.GetFinalizeTxHash(batchHash) assert.Equal(t, types.RollupFinalizing, statuses[0])
blockBatches, err = blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 1)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid) assert.Equal(t, 1, len(blockBatches))
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String)) assert.NotNil(t, blockBatches[0].FinalizeTxHash)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(blockBatches[0].FinalizeTxHash))
assert.NoError(t, err) assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx) finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err) assert.NoError(t, err)
@@ -127,7 +149,8 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// fetch rollup events // fetch rollup events
err = l1Watcher.FetchContractEvent() err = l1Watcher.FetchContractEvent()
assert.NoError(t, err) assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash) statuses, err = blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, types.RollupFinalized, status) assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalized, statuses[0])
} }

View File

@@ -1,199 +0,0 @@
package watcher
import (
"context"
"fmt"
"math"
"strings"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/common/types"
)
func testBatchProposerProposeBatch(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
p := &BatchProposer{
batchGasThreshold: 1000,
batchTxNumThreshold: 10,
batchTimeSec: 300,
commitCalldataSizeLimit: 500,
orm: db,
}
patchGuard := gomonkey.ApplyMethodFunc(p.orm, "GetL2WrappedBlocks", func(fields map[string]interface{}, args ...string) ([]*types.WrappedBlock, error) {
hash, _ := fields["hash"].(string)
if hash == "blockWithLongData" {
longData := strings.Repeat("0", 1000)
return []*types.WrappedBlock{{
Transactions: []*geth_types.TransactionData{{
Data: longData,
}},
}}, nil
}
return []*types.WrappedBlock{{
Transactions: []*geth_types.TransactionData{{
Data: "short",
}},
}}, nil
})
defer patchGuard.Reset()
patchGuard.ApplyPrivateMethod(p, "createBatchForBlocks", func(*BatchProposer, []*types.BlockInfo) error {
return nil
})
block1 := &types.BlockInfo{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix()) - 200}
block2 := &types.BlockInfo{Number: 2, GasUsed: 200, TxNum: 2, BlockTimestamp: uint64(time.Now().Unix())}
block3 := &types.BlockInfo{Number: 3, GasUsed: 300, TxNum: 11, BlockTimestamp: uint64(time.Now().Unix())}
block4 := &types.BlockInfo{Number: 4, GasUsed: 1001, TxNum: 3, BlockTimestamp: uint64(time.Now().Unix())}
blockOutdated := &types.BlockInfo{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Add(-400 * time.Second).Unix())}
blockWithLongData := &types.BlockInfo{Hash: "blockWithLongData", Number: 5, GasUsed: 500, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix())}
testCases := []struct {
description string
blocks []*types.BlockInfo
expectedRes bool
}{
{"Empty block list", []*types.BlockInfo{}, false},
{"Single block exceeding gas threshold", []*types.BlockInfo{block4}, true},
{"Single block exceeding transaction number threshold", []*types.BlockInfo{block3}, true},
{"Multiple blocks meeting thresholds", []*types.BlockInfo{block1, block2, block3}, true},
{"Multiple blocks not meeting thresholds", []*types.BlockInfo{block1, block2}, false},
{"Outdated and valid block", []*types.BlockInfo{blockOutdated, block2}, true},
{"Single block with long data", []*types.BlockInfo{blockWithLongData}, true},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
assert.Equal(t, tc.expectedRes, p.proposeBatch(tc.blocks), "Failed on: %s", tc.description)
})
}
}
func testBatchProposerBatchGeneration(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
db.Close()
}()
// Insert traces into db.
assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock1}))
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
loopToFetchEvent(subCtx, wc)
batch, err := db.GetLatestBatch()
assert.NoError(t, err)
// Create a new batch.
batchData := types.NewBatchData(&types.BlockBatch{
Index: 0,
Hash: batch.Hash,
StateRoot: batch.StateRoot,
}, []*types.WrappedBlock{wrappedBlock1}, nil)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, relayer, db)
proposer.TryProposeBatch()
infos, err := db.GetUnbatchedL2Blocks(map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", 100))
assert.NoError(t, err)
assert.Equal(t, 0, len(infos))
exist, err := db.BatchRecordExist(batchData.Hash().Hex())
assert.NoError(t, err)
assert.Equal(t, true, exist)
}
// testBatchProposerGracefulRestart verifies that a freshly constructed
// BatchProposer recovers pending batches from the database (via its batch
// data buffer recovery): after construction, the previously pending batch
// is no longer reported by GetPendingBatches but its record still exists.
func testBatchProposerGracefulRestart(t *testing.T) {
	// Create db handler and reset db.
	db, err := database.NewOrmFactory(cfg.DBConfig)
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
	defer db.Close()

	// Named l2Relayer so the local does not shadow the relayer package.
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
	assert.NoError(t, err)

	// Insert traces into db.
	assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock2}))

	// Insert two chained block batches into db; batchData2's parent is batchData1.
	batchData1 := types.NewBatchData(&types.BlockBatch{
		Index:     0,
		Hash:      common.Hash{}.String(),
		StateRoot: common.Hash{}.String(),
	}, []*types.WrappedBlock{wrappedBlock1}, nil)
	parentBatch2 := &types.BlockBatch{
		Index:     batchData1.Batch.BatchIndex,
		Hash:      batchData1.Hash().Hex(),
		StateRoot: batchData1.Batch.NewStateRoot.String(),
	}
	batchData2 := types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil)

	dbTx, err := db.Beginx()
	assert.NoError(t, err)
	assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
	assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData2))
	assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
		batchData1.Batch.Blocks[0].BlockNumber}, batchData1.Hash().Hex()))
	assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
		batchData2.Batch.Blocks[0].BlockNumber}, batchData2.Hash().Hex()))
	assert.NoError(t, dbTx.Commit())

	// Finalize batch1; only batch2 should remain pending.
	assert.NoError(t, db.UpdateRollupStatus(context.Background(), batchData1.Hash().Hex(), types.RollupFinalized))

	batchHashes, err := db.GetPendingBatches(math.MaxInt32)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(batchHashes))
	assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])

	// Test p.recoverBatchDataBuffer(): constructing the proposer should pick
	// up the pending batch from the database.
	_ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		ProofGenerationFreq: 1,
		BatchGasThreshold:   3000000,
		BatchTxNumThreshold: 135,
		BatchTimeSec:        1,
		BatchBlocksLimit:    100,
	}, l2Relayer, db)

	// The batch is no longer pending, but its record still exists.
	batchHashes, err = db.GetPendingBatches(math.MaxInt32)
	assert.NoError(t, err)
	assert.Equal(t, 0, len(batchHashes))
	exist, err := db.BatchRecordExist(batchData2.Hash().Hex())
	assert.NoError(t, err)
	assert.Equal(t, true, exist)
}