Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-12 07:28:08 -05:00
Compare commits: 22 commits, alpha-v2.4 ... v3.0.10
| Author | SHA1 | Date |
|---|---|---|
| | 2b2cc62efe | |
| | 807b7c7f33 | |
| | 454f032c9f | |
| | d1c4fa716d | |
| | de1e40d19c | |
| | 76b5a6c751 | |
| | bad77eac2f | |
| | 5d761ad812 | |
| | 4042bea6db | |
| | de7c38a903 | |
| | 41e2d960d8 | |
| | 170bc08207 | |
| | d3fc4e1606 | |
| | 77749477db | |
| | 1a5df6f4d7 | |
| | 826280253a | |
| | d376c903af | |
| | 179c6ee978 | |
| | 165c9092da | |
| | 54c28fa512 | |
| | 3c7c41e1bb | |
| | 2962fa4b0e | |
8  .github/workflows/bridge.yml  (vendored)

@@ -66,3 +66,11 @@ jobs:
           if [ -n "$(git status --porcelain)" ]; then
             exit 1
           fi
+  # docker-build:
+  #   runs-on: ubuntu-latest
+  #   steps:
+  #     - name: Checkout code
+  #       uses: actions/checkout@v2
+  #     - name: Set up Docker Buildx
+  #       uses: docker/setup-buildx-action@v2
+  #     - run: make docker
15  .github/workflows/coordinator.yml  (vendored)

@@ -62,3 +62,18 @@ jobs:
           if [ -n "$(git status --porcelain)" ]; then
             exit 1
           fi
+  # docker-build:
+  #   runs-on: ubuntu-latest
+  #   steps:
+  #     - name: Checkout code
+  #       uses: actions/checkout@v2
+  #     - name: Set up Docker Buildx
+  #       uses: docker/setup-buildx-action@v2
+  #     - name: Build and push
+  #       uses: docker/build-push-action@v2
+  #       with:
+  #         context: .
+  #         file: ./build/dockerfiles/coordinator.Dockerfile
+  #         push: false
+  #         # cache-from: type=gha,scope=${{ github.workflow }}
+  #         # cache-to: type=gha,scope=${{ github.workflow }}
65  .github/workflows/docker.yaml  (vendored, new file)

@@ -0,0 +1,65 @@
name: Docker

on:
  push:
    tags:
      - v**

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push coordinator docker
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./build/dockerfiles/coordinator.Dockerfile
          push: true
          tags: scrolltech/coordinator:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
      - name: Build and push event_watcher docker
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./build/dockerfiles/event_watcher.Dockerfile
          push: true
          tags: scrolltech/event-watcher:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
      - name: Build and push gas_oracle docker
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./build/dockerfiles/gas_oracle.Dockerfile
          push: true
          tags: scrolltech/gas-oracle:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
      - name: Build and push msg_relayer docker
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./build/dockerfiles/msg_relayer.Dockerfile
          push: true
          tags: scrolltech/msg-relayer:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
      - name: Build and push rollup_relayer docker
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./build/dockerfiles/rollup_relayer.Dockerfile
          push: true
          tags: scrolltech/rollup-relayer:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
47  Jenkinsfile  (vendored)

@@ -28,7 +28,7 @@ pipeline {
         }
         stage('Check Bridge Compilation') {
             steps {
-                sh 'make -C bridge bridge'
+                sh 'make -C bridge bridge_bins'
             }
         }
         stage('Check Coordinator Compilation') {

@@ -42,16 +42,6 @@ pipeline {
                 sh 'make -C database db_cli'
             }
         }
-        stage('Check Bridge Docker Build') {
-            steps {
-                sh 'make -C bridge docker'
-            }
-        }
-        stage('Check Coordinator Docker Build') {
-            steps {
-                sh 'make -C coordinator docker'
-            }
-        }
         stage('Check Database Docker Build') {
             steps {
                 sh 'make -C database docker'

@@ -61,44 +51,29 @@ pipeline {
         }
         stage('Parallel Test') {
             parallel{
-                stage('Test bridge package') {
+                stage('Race test common package') {
                     steps {
-                        sh 'go test -v -coverprofile=coverage.bridge.txt -covermode=atomic -p 1 scroll-tech/bridge/...'
+                        sh 'go test -v -race -coverprofile=coverage.common.txt -covermode=atomic scroll-tech/common/...'
                     }
                 }
-                stage('Test common package') {
-                    steps {
-                        sh 'go test -v -coverprofile=coverage.common.txt -covermode=atomic -p 1 scroll-tech/common/...'
-                    }
-                }
-                stage('Test coordinator package') {
-                    steps {
-                        sh 'go test -v -coverprofile=coverage.coordinator.txt -covermode=atomic -p 1 scroll-tech/coordinator/...'
-                    }
-                }
-                stage('Test database package') {
-                    steps {
-                        sh 'go test -v -coverprofile=coverage.db.txt -covermode=atomic -p 1 scroll-tech/database/...'
-                    }
-                }
-                stage('Integration test') {
-                    steps {
-                        sh 'go test -v -tags="mock_prover mock_verifier" -coverprofile=coverage.integration.txt -covermode=atomic -p 1 scroll-tech/integration-test/...'
-                    }
-                }
                 stage('Race test bridge package') {
                     steps {
-                        sh 'go test -v -race -coverprofile=coverage.txt -covermode=atomic scroll-tech/bridge/...'
+                        sh 'go test -v -race -coverprofile=coverage.bridge.txt -covermode=atomic scroll-tech/bridge/...'
                     }
                 }
                 stage('Race test coordinator package') {
                     steps {
-                        sh 'go test -v -race -coverprofile=coverage.txt -covermode=atomic scroll-tech/coordinator/...'
+                        sh 'go test -v -race -coverprofile=coverage.coordinator.txt -covermode=atomic scroll-tech/coordinator/...'
                     }
                 }
                 stage('Race test database package') {
                     steps {
-                        sh 'go test -v -race -coverprofile=coverage.txt -covermode=atomic scroll-tech/database/...'
+                        sh 'go test -v -race -coverprofile=coverage.db.txt -covermode=atomic scroll-tech/database/...'
                     }
                 }
+                stage('Integration test') {
+                    steps {
+                        sh 'go test -v -tags="mock_prover mock_verifier" -p 1 scroll-tech/integration-test/...'
+                    }
+                }
             }
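The pipeline above replaces per-package coverage tests with dedicated `-race` stages (each writing its own coverage file) and strips coverage from the integration test. Why a race stage earns its own pass: code can run green under plain `go test` yet fail under the race detector. A minimal, self-contained illustration — this test is not from the Scroll repo:

```go
package race_test

import (
	"sync"
	"testing"
)

// TestCounterRace usually passes under plain `go test`, but
// `go test -race` reliably flags the unsynchronized writes to n.
func TestCounterRace(t *testing.T) {
	var n int
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			n++ // data race: concurrent write without a mutex or atomic
			wg.Done()
		}()
	}
	wg.Wait()
	t.Log(n)
}
```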
@@ -1,3 +1,5 @@
 # Scroll Monorepo

+[](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml) [](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/database.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/common.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml)
+
 For a more comprehensive doc, see [`docs/`](./docs).
@@ -8,8 +8,23 @@ mock_abi:
 	go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
 	go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go

-bridge: ## Builds the Bridge instance.
-	go build -o $(PWD)/build/bin/bridge ./cmd
+bridge_bins: ## Builds the Bridge bins.
+	go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
+	go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
+	go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
+	go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
+
+event_watcher: ## Builds the event_watcher bin
+	go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
+
+gas_oracle: ## Builds the gas_oracle bin
+	go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
+
+message_relayer: ## Builds the message_relayer bin
+	go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
+
+rollup_relayer: ## Builds the rollup_relayer bin
+	go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/

 test:
 	go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...

@@ -20,8 +35,14 @@ lint: ## Lint the files - used for CI
 clean: ## Empty out the bin folder
 	@rm -rf build/bin

-docker:
-	DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridge.Dockerfile
-
 docker_push:
-	docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}
+	docker push scrolltech/gas-oracle:${IMAGE_VERSION}
+	docker push scrolltech/event-watcher:${IMAGE_VERSION}
+	docker push scrolltech/rollup-relayer:${IMAGE_VERSION}
+	docker push scrolltech/msg-relayer:${IMAGE_VERSION}
+
+docker:
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/gas-oracle:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/gas_oracle.Dockerfile
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/event-watcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/event_watcher.Dockerfile
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/rollup-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/rollup_relayer.Dockerfile
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/msg-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/msg_relayer.Dockerfile
@@ -1,130 +0,0 @@ (whole file deleted)
package app

import (
	"context"
	"fmt"
	"os"
	"os/signal"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/database"

	"scroll-tech/common/metrics"
	"scroll-tech/common/utils"
	"scroll-tech/common/version"

	"scroll-tech/bridge/config"
	"scroll-tech/bridge/l1"
	"scroll-tech/bridge/l2"
)

var (
	app *cli.App
)

func init() {
	// Set up Bridge app info.
	app = cli.NewApp()

	app.Action = action
	app.Name = "bridge"
	app.Usage = "The Scroll Bridge"
	app.Version = version.Version
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Flags = append(app.Flags, apiFlags...)

	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
	}

	// Register `bridge-test` app for integration-test.
	utils.RegisterSimulation(app, "bridge-test")
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}

	// Start metrics server.
	metrics.Serve(context.Background(), ctx)

	// Init db connection.
	var ormFactory database.OrmFactory
	if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
		log.Crit("failed to init db connection", "err", err)
	}

	var (
		l1Backend *l1.Backend
		l2Backend *l2.Backend
	)
	// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
	l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
	if err != nil {
		return err
	}
	l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
	if err != nil {
		return err
	}
	defer func() {
		l1Backend.Stop()
		l2Backend.Stop()
		err = ormFactory.Close()
		if err != nil {
			log.Error("can not close ormFactory", "error", err)
		}
	}()

	// Start all modules.
	if err = l1Backend.Start(); err != nil {
		log.Crit("couldn't start l1 backend", "error", err)
	}
	if err = l2Backend.Start(); err != nil {
		log.Crit("couldn't start l2 backend", "error", err)
	}

	// Register api and start rpc service.
	if ctx.Bool(httpEnabledFlag.Name) {
		handler, addr, err := utils.StartHTTPEndpoint(
			fmt.Sprintf(
				"%s:%d",
				ctx.String(httpListenAddrFlag.Name),
				ctx.Int(httpPortFlag.Name)),
			l2Backend.APIs())
		if err != nil {
			log.Crit("Could not start RPC api", "error", err)
		}
		defer func() {
			_ = handler.Shutdown(ctx.Context)
			log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
		}()
		log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
	}

	log.Info("Start bridge successfully")

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run run bridge cmd instance.
func Run() {
	// Run the bridge.
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
@@ -1,19 +0,0 @@ (whole file deleted)
package app

import (
	"fmt"
	"testing"
	"time"

	"scroll-tech/common/cmd"
	"scroll-tech/common/version"
)

func TestRunBridge(t *testing.T) {
	bridge := cmd.NewCmd("bridge-test", "--version")
	defer bridge.WaitExit()

	// wait result
	bridge.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
	bridge.RunApp(nil)
}
@@ -1,31 +0,0 @@ (whole file deleted)
package app

import (
	"github.com/urfave/cli/v2"
)

var (
	apiFlags = []cli.Flag{
		&httpEnabledFlag,
		&httpListenAddrFlag,
		&httpPortFlag,
	}
	// httpEnabledFlag enable rpc server.
	httpEnabledFlag = cli.BoolFlag{
		Name:  "http",
		Usage: "Enable the HTTP-RPC server",
		Value: false,
	}
	// httpListenAddrFlag set the http address.
	httpListenAddrFlag = cli.StringFlag{
		Name:  "http.addr",
		Usage: "HTTP-RPC server listening interface",
		Value: "localhost",
	}
	// httpPortFlag set http.port.
	httpPortFlag = cli.IntFlag{
		Name:  "http.port",
		Usage: "HTTP-RPC server listening port",
		Value: 8290,
	}
)
114  bridge/cmd/event_watcher/app/app.go  (new file)

@@ -0,0 +1,114 @@
package app

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/database"

	"scroll-tech/common/metrics"
	"scroll-tech/common/version"

	"scroll-tech/bridge/config"
	"scroll-tech/bridge/watcher"

	cutils "scroll-tech/common/utils"
)

var (
	app *cli.App
)

func init() {
	// Set up event-watcher app info.
	app = cli.NewApp()

	app.Action = action
	app.Name = "event-watcher"
	app.Usage = "The Scroll Event Watcher"
	app.Version = version.Version
	app.Flags = append(app.Flags, cutils.CommonFlags...)
	app.Commands = []*cli.Command{}

	app.Before = func(ctx *cli.Context) error {
		return cutils.LogSetup(ctx)
	}

	// Register `event-watcher-test` app for integration-test.
	cutils.RegisterSimulation(app, "event-watcher-test")
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}

	subCtx, cancel := context.WithCancel(ctx.Context)
	// Init db connection
	var ormFactory database.OrmFactory
	if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
		log.Crit("failed to init db connection", "err", err)
	}

	defer func() {
		cancel()
		err = ormFactory.Close()
		if err != nil {
			log.Error("can not close ormFactory", "error", err)
		}
	}()

	// Start metrics server.
	metrics.Serve(subCtx, ctx)
	l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
	if err != nil {
		log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
		return err
	}

	l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
	if err != nil {
		log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
		return err
	}
	l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
	l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)

	go cutils.Loop(subCtx, 10*time.Second, func() {
		if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
			log.Error("Failed to fetch bridge contract", "err", loopErr)
		}
	})

	// Start l2 watcher process
	go cutils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
	// Finish start all l2 functions
	log.Info("Start event-watcher successfully")

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run event watcher cmd instance.
func Run() {
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
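The new binaries drive all periodic work through `cutils.Loop` and `cutils.LoopWithContext` from `scroll-tech/common/utils`. Only the call sites appear in this diff; below is a minimal sketch of what such helpers could look like, inferred purely from those call sites — the real implementation in the repo may differ:

```go
// Package utilsketch is NOT the real scroll-tech/common/utils; it is a
// reconstruction of the Loop / LoopWithContext shape inferred from the
// call sites in this diff.
package utilsketch

import (
	"context"
	"time"
)

// LoopWithContext runs f(ctx) once per period until ctx is cancelled.
func LoopWithContext(ctx context.Context, period time.Duration, f func(context.Context)) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			f(ctx)
		}
	}
}

// Loop is the context-free variant used by the event watcher above.
func Loop(ctx context.Context, period time.Duration, f func()) {
	LoopWithContext(ctx, period, func(context.Context) { f() })
}
```

Cancelling the context passed to these loops is the only stop signal the new binaries need, which is why each `action` creates `subCtx, cancel := context.WithCancel(ctx.Context)` and defers `cancel()`.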
7  bridge/cmd/event_watcher/main.go  (new file)

@@ -0,0 +1,7 @@
package main

import "scroll-tech/bridge/cmd/event_watcher/app"

func main() {
	app.Run()
}
136  bridge/cmd/gas_oracle/app/app.go  (new file)

@@ -0,0 +1,136 @@
package app

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/database"

	"scroll-tech/common/metrics"
	"scroll-tech/common/version"

	"scroll-tech/bridge/config"
	"scroll-tech/bridge/relayer"
	"scroll-tech/bridge/utils"
	"scroll-tech/bridge/watcher"

	cutils "scroll-tech/common/utils"
)

var (
	app *cli.App
)

func init() {
	// Set up gas-oracle app info.
	app = cli.NewApp()

	app.Action = action
	app.Name = "gas-oracle"
	app.Usage = "The Scroll Gas Oracle"
	app.Description = "Scroll Gas Oracle."
	app.Version = version.Version
	app.Flags = append(app.Flags, cutils.CommonFlags...)
	app.Commands = []*cli.Command{}

	app.Before = func(ctx *cli.Context) error {
		return cutils.LogSetup(ctx)
	}

	// Register `gas-oracle-test` app for integration-test.
	cutils.RegisterSimulation(app, "gas-oracle-test")
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}
	subCtx, cancel := context.WithCancel(ctx.Context)
	// Init db connection
	var ormFactory database.OrmFactory
	if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
		log.Crit("failed to init db connection", "err", err)
	}

	defer func() {
		cancel()
		err = ormFactory.Close()
		if err != nil {
			log.Error("can not close ormFactory", "error", err)
		}
	}()
	// Start metrics server.
	metrics.Serve(subCtx, ctx)

	l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
	if err != nil {
		log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
		return err
	}

	// Init l2geth connection
	l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
	if err != nil {
		log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
		return err
	}

	l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)

	l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
	if err != nil {
		log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
		return err
	}
	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
	if err != nil {
		log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
		return err
	}
	// Start l1 watcher process
	go cutils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
		number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
		if loopErr != nil {
			log.Error("failed to get block number", "err", loopErr)
			return
		}

		if loopErr = l1watcher.FetchBlockHeader(number); loopErr != nil {
			log.Error("Failed to fetch L1 block header", "lastest", number, "err", loopErr)
		}
	})

	// Start l1relayer process
	go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
	go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)

	// Finish start all message relayer functions
	log.Info("Start gas-oracle successfully")

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run message_relayer cmd instance.
func Run() {
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
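The gas-oracle loop above polls `utils.GetLatestConfirmedBlockNumber` before fetching headers, so it only reads blocks that are at least `Confirmations` deep. That helper is not part of this diff; the following is a hedged sketch of the plain numeric-confirmations case (the real helper may also support tags such as "safe" or "finalized"), using only the `BlockNumber` call that exists on go-ethereum's `ethclient.Client`:

```go
package main

import (
	"context"
	"fmt"

	"github.com/scroll-tech/go-ethereum/ethclient"
)

// latestConfirmedBlockNumber sketches the confirmations arithmetic:
// the newest block treated as final is simply head minus the depth.
func latestConfirmedBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
	head, err := client.BlockNumber(ctx)
	if err != nil {
		return 0, err
	}
	if head < confirmations {
		return 0, nil // chain is still shorter than the required depth
	}
	return head - confirmations, nil
}

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // hypothetical endpoint
	if err != nil {
		panic(err)
	}
	n, err := latestConfirmedBlockNumber(context.Background(), client, 6)
	fmt.Println(n, err)
}
```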
7  bridge/cmd/gas_oracle/main.go  (new file)

@@ -0,0 +1,7 @@
package main

import "scroll-tech/bridge/cmd/gas_oracle/app"

func main() {
	app.Run()
}
@@ -1,7 +0,0 @@ (whole file deleted)
package main

import "scroll-tech/bridge/cmd/app"

func main() {
	app.Run()
}
118  bridge/cmd/msg_relayer/app/app.go  (new file)

@@ -0,0 +1,118 @@
package app

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/database"

	"scroll-tech/common/metrics"
	"scroll-tech/common/version"

	"scroll-tech/bridge/config"
	"scroll-tech/bridge/relayer"

	cutils "scroll-tech/common/utils"
)

var (
	app *cli.App
)

func init() {
	// Set up message-relayer app info.
	app = cli.NewApp()

	app.Action = action
	app.Name = "message-relayer"
	app.Usage = "The Scroll Message Relayer"
	app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
	app.Version = version.Version
	app.Flags = append(app.Flags, cutils.CommonFlags...)
	app.Commands = []*cli.Command{}

	app.Before = func(ctx *cli.Context) error {
		return cutils.LogSetup(ctx)
	}

	// Register `message-relayer-test` app for integration-test.
	cutils.RegisterSimulation(app, "message-relayer-test")
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}
	subCtx, cancel := context.WithCancel(ctx.Context)

	// Init db connection
	var ormFactory database.OrmFactory
	if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
		log.Crit("failed to init db connection", "err", err)
	}

	defer func() {
		cancel()
		err = ormFactory.Close()
		if err != nil {
			log.Error("can not close ormFactory", "error", err)
		}
	}()

	// Start metrics server.
	metrics.Serve(subCtx, ctx)

	// Init l2geth connection
	l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
	if err != nil {
		log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
		return err
	}

	l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
	if err != nil {
		log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
		return err
	}
	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
	if err != nil {
		log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
		return err
	}

	// Start l1relayer process
	go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)

	// Start l2relayer process
	go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessSavedEvents)

	// Finish start all message relayer functions
	log.Info("Start message-relayer successfully")

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run message_relayer cmd instance.
func Run() {
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
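All four new binaries share the same shutdown shape: start background loops on a cancellable context, block on `os.Interrupt`, then let the deferred `cancel()` and `ormFactory.Close()` run. A standalone sketch of the equivalent pattern using `signal.NotifyContext` (Go 1.16+), which folds the interrupt channel into the context itself — shown here as an alternative, not as what the repo does:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
)

func main() {
	// Equivalent to the interrupt channel used by the binaries above:
	// ctx is cancelled on the first CTRL-C.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop() // restores default signal behaviour

	// ... start relayer/watcher loops with ctx here ...

	<-ctx.Done() // returns once the interrupt signal is received
	fmt.Println("interrupt received, shutting down")
}
```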
7  bridge/cmd/msg_relayer/main.go  (new file)

@@ -0,0 +1,7 @@
package main

import "scroll-tech/bridge/cmd/msg_relayer/app"

func main() {
	app.Run()
}
133  bridge/cmd/rollup_relayer/app/app.go  (new file)

@@ -0,0 +1,133 @@
package app

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/database"

	"scroll-tech/common/metrics"
	"scroll-tech/common/version"

	"scroll-tech/bridge/config"
	"scroll-tech/bridge/relayer"
	"scroll-tech/bridge/utils"
	"scroll-tech/bridge/watcher"

	cutils "scroll-tech/common/utils"
)

var (
	app *cli.App
)

func init() {
	// Set up rollup-relayer app info.
	app = cli.NewApp()

	app.Action = action
	app.Name = "rollup-relayer"
	app.Usage = "The Scroll Rollup Relayer"
	app.Version = version.Version
	app.Flags = append(app.Flags, cutils.CommonFlags...)
	app.Commands = []*cli.Command{}

	app.Before = func(ctx *cli.Context) error {
		return cutils.LogSetup(ctx)
	}
	// Register `rollup-relayer-test` app for integration-test.
	cutils.RegisterSimulation(app, "rollup-relayer-test")
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}

	subCtx, cancel := context.WithCancel(ctx.Context)

	// init db connection
	var ormFactory database.OrmFactory
	if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
		log.Crit("failed to init db connection", "err", err)
	}
	defer func() {
		cancel()
		err = ormFactory.Close()
		if err != nil {
			log.Error("can not close ormFactory", "error", err)
		}
	}()

	// Start metrics server.
	metrics.Serve(subCtx, ctx)

	// Init l2geth connection
	l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
	if err != nil {
		log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
		return err
	}

	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
	if err != nil {
		log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
		return err
	}

	batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, l2relayer, ormFactory)
	if err != nil {
		log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
		return err
	}

	l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)

	// Watcher loop to fetch missing blocks
	go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
		number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
		if loopErr != nil {
			log.Error("failed to get block number", "err", loopErr)
			return
		}
		l2watcher.TryFetchRunningMissingBlocks(ctx, number)
	})

	// Batch proposer loop
	go cutils.Loop(subCtx, 2*time.Second, func() {
		batchProposer.TryProposeBatch()
		batchProposer.TryCommitBatches()
	})

	go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)

	// Finish start all rollup relayer functions.
	log.Info("Start rollup-relayer successfully")

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run rollup relayer cmd instance.
func Run() {
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
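The batch-proposer tick above deliberately runs `TryProposeBatch` before `TryCommitBatches` in the same closure, so a batch proposed in one tick can be committed in the same pass. A standalone sketch of that composition — the types here are stubs standing in for the repo's `watcher.BatchProposer`, and `loop` is the same polling shape sketched earlier:

```go
package main

import (
	"context"
	"time"
)

// batchProposer is a stub standing in for watcher.BatchProposer.
type batchProposer struct{}

func (p *batchProposer) TryProposeBatch()  { /* seal pending blocks into a batch */ }
func (p *batchProposer) TryCommitBatches() { /* submit uncommitted batches to L1 */ }

// loop mirrors the cutils.Loop polling shape.
func loop(ctx context.Context, period time.Duration, f func()) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			f()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	p := &batchProposer{}
	loop(ctx, 2*time.Second, func() {
		p.TryProposeBatch()  // propose first...
		p.TryCommitBatches() // ...then commit in the same tick
	})
}
```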
@@ -19,7 +19,8 @@
       "escalate_multiple_den": 10,
       "max_gas_price": 10000000000,
       "tx_type": "LegacyTx",
-      "min_balance": 100000000000000000000
+      "min_balance": 100000000000000000000,
+      "pending_limit": 10
     },
     "gas_oracle_config": {
       "min_gas_price": 0,

@@ -53,7 +54,8 @@
       "escalate_multiple_den": 10,
       "max_gas_price": 10000000000,
       "tx_type": "LegacyTx",
-      "min_balance": 100000000000000000000
+      "min_balance": 100000000000000000000,
+      "pending_limit": 10
     },
     "gas_oracle_config": {
       "min_gas_price": 0,
@@ -18,6 +18,8 @@ type L2Config struct {
 	L2MessengerAddress common.Address `json:"l2_messenger_address"`
 	// The L2MessageQueue contract address deployed on layer 2 chain.
 	L2MessageQueueAddress common.Address `json:"l2_message_queue_address"`
+	// The WithdrawTrieRootSlot in L2MessageQueue contract.
+	WithdrawTrieRootSlot common.Hash `json:"withdraw_trie_root_slot,omitempty"`
 	// The relayer config
 	RelayerConfig *RelayerConfig `json:"relayer_config"`
 	// The batch_proposer config
@@ -33,6 +33,8 @@ type SenderConfig struct {
 	MinBalance *big.Int `json:"min_balance,omitempty"`
 	// The interval (in seconds) to check balance and top up sender's accounts
 	CheckBalanceTime uint64 `json:"check_balance_time"`
+	// The sender's pending count limit.
+	PendingLimit int `json:"pending_limit,omitempty"`
 }

 // RelayerConfig loads relayer configuration items.
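The new `"pending_limit": 10` entries in the config file map onto the `PendingLimit` field above via its struct tag. A minimal runnable sketch of that decoding — the struct is abbreviated to the fields shown in the hunk, not the repo's full `SenderConfig`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

// SenderConfig is trimmed to the fields from the hunk above.
type SenderConfig struct {
	MinBalance       *big.Int `json:"min_balance,omitempty"`
	CheckBalanceTime uint64   `json:"check_balance_time"`
	PendingLimit     int      `json:"pending_limit,omitempty"`
}

func main() {
	// big.Int implements json.Unmarshaler, so the 21-digit balance
	// survives decoding exactly (no float64 rounding).
	raw := `{"min_balance": 100000000000000000000, "pending_limit": 10}`

	var cfg SenderConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.MinBalance, cfg.PendingLimit) // 100000000000000000000 10
}
```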
@@ -4,6 +4,7 @@ go 1.18

 require (
 	github.com/orcaman/concurrent-map v1.0.0
+	github.com/orcaman/concurrent-map/v2 v2.0.1
 	github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04
 	github.com/stretchr/testify v1.8.2
 	github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
@@ -66,6 +66,8 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
 github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
+github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
+github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -1,55 +0,0 @@ (whole file deleted)
package l1

import (
	"context"

	"github.com/scroll-tech/go-ethereum/ethclient"

	"scroll-tech/database"

	"scroll-tech/bridge/config"
)

// Backend manage the resources and services of L1 backend.
// The backend should monitor events in layer 1 and relay transactions to layer 2
type Backend struct {
	cfg     *config.L1Config
	watcher *Watcher
	relayer *Layer1Relayer
	orm     database.OrmFactory
}

// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*Backend, error) {
	client, err := ethclient.Dial(cfg.Endpoint)
	if err != nil {
		return nil, err
	}

	relayer, err := NewLayer1Relayer(ctx, orm, cfg.RelayerConfig)
	if err != nil {
		return nil, err
	}

	watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.L1MessageQueueAddress, cfg.ScrollChainContractAddress, orm)

	return &Backend{
		cfg:     cfg,
		watcher: watcher,
		relayer: relayer,
		orm:     orm,
	}, nil
}

// Start Backend module.
func (l1 *Backend) Start() error {
	l1.watcher.Start()
	l1.relayer.Start()
	return nil
}

// Stop Backend module.
func (l1 *Backend) Stop() {
	l1.watcher.Stop()
	l1.relayer.Stop()
}
@@ -1,46 +0,0 @@ (whole file deleted)
package l1

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"scroll-tech/common/docker"

	"scroll-tech/bridge/config"
)

var (
	// config
	cfg *config.Config

	// docker consider handler.
	base *docker.App
)

func TestMain(m *testing.M) {
	base = docker.NewDockerApp()

	m.Run()

	base.Free()
}

func setupEnv(t *testing.T) {
	// Load config.
	var err error
	cfg, err = config.NewConfig("../config.json")
	assert.NoError(t, err)
	base.RunImages(t)

	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1GethEndpoint()
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
	cfg.DBConfig.DSN = base.DBEndpoint()
}

func TestL1(t *testing.T) {
	setupEnv(t)

	t.Run("testCreateNewL1Relayer", testCreateNewL1Relayer)
	t.Run("testStartWatcher", testStartWatcher)
}
@@ -1,76 +0,0 @@ (whole file deleted)
package l2

import (
	"context"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rpc"

	"scroll-tech/database"

	"scroll-tech/bridge/config"
)

// Backend manage the resources and services of L2 backend.
// The backend should monitor events in layer 2 and relay transactions to layer 1
type Backend struct {
	cfg           *config.L2Config
	watcher       *WatcherClient
	relayer       *Layer2Relayer
	batchProposer *BatchProposer
	orm           database.OrmFactory
}

// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*Backend, error) {
	client, err := ethclient.Dial(cfg.Endpoint)
	if err != nil {
		return nil, err
	}

	// Note: initialize watcher before relayer to keep DB consistent.
	// Otherwise, there will be a race condition between watcher.initializeGenesis and relayer.ProcessPendingBatches.
	watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.L2MessengerAddress, cfg.L2MessageQueueAddress, orm)

	relayer, err := NewLayer2Relayer(ctx, client, orm, cfg.RelayerConfig)
	if err != nil {
		return nil, err
	}

	batchProposer := NewBatchProposer(ctx, cfg.BatchProposerConfig, relayer, orm)

	return &Backend{
		cfg:           cfg,
		watcher:       watcher,
		relayer:       relayer,
		batchProposer: batchProposer,
		orm:           orm,
	}, nil
}

// Start Backend module.
func (l2 *Backend) Start() error {
	l2.watcher.Start()
	l2.relayer.Start()
	l2.batchProposer.Start()
	return nil
}

// Stop Backend module.
func (l2 *Backend) Stop() {
	l2.batchProposer.Stop()
	l2.relayer.Stop()
	l2.watcher.Stop()
}

// APIs collect API modules.
func (l2 *Backend) APIs() []rpc.API {
	return []rpc.API{
		{
			Namespace: "l2",
			Version:   "1.0",
			Service:   WatcherAPI(l2.watcher),
			Public:    true,
		},
	}
}
@@ -1,5 +0,0 @@ (whole file deleted)
package l2

// WatcherAPI watcher api service
type WatcherAPI interface {
}
@@ -1,10 +1,9 @@
-package l1
+package relayer

 import (
 	"context"
 	"errors"
 	"math/big"
-	"time"

 	// not sure if this will make problems when relay with l1geth

@@ -15,7 +14,6 @@ import (
 	geth_metrics "github.com/scroll-tech/go-ethereum/metrics"

 	"scroll-tech/common/types"
-	"scroll-tech/common/utils"

 	"scroll-tech/database"

@@ -31,14 +29,6 @@ var (
 	bridgeL1MsgsRelayedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
 )

-const (
-	gasPriceDiffPrecision = 1000000
-
-	defaultGasPriceDiff = 50000 // 5%
-
-	defaultMessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
-)
-
 // Layer1Relayer is responsible for
 // 1. fetch pending L1Message from db
 // 2. relay pending message to layer 2 node

@@ -53,11 +43,9 @@ type Layer1Relayer struct {

 	// channel used to communicate with transaction sender
 	messageSender  *sender.Sender
-	messageCh      <-chan *sender.Confirmation
 	l2MessengerABI *abi.ABI

 	gasOracleSender *sender.Sender
-	gasOracleCh     <-chan *sender.Confirmation
 	l1GasOracleABI  *abi.ABI

 	minGasLimitForMessageRelay uint64

@@ -65,8 +53,6 @@ type Layer1Relayer struct {
 	lastGasPrice uint64
 	minGasPrice  uint64
 	gasPriceDiff uint64
-
-	stopCh chan struct{}
 }

 // NewLayer1Relayer will return a new instance of Layer1RelayerClient

@@ -96,21 +82,19 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
 		gasPriceDiff = defaultGasPriceDiff
 	}

-	minGasLimitForMessageRelay := uint64(defaultMessageRelayMinGasLimit)
+	minGasLimitForMessageRelay := uint64(defaultL1MessageRelayMinGasLimit)
 	if cfg.MessageRelayMinGasLimit != 0 {
 		minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
 	}

-	return &Layer1Relayer{
+	l1Relayer := &Layer1Relayer{
 		ctx: ctx,
 		db:  db,

 		messageSender:  messageSender,
-		messageCh:      messageSender.ConfirmChan(),
 		l2MessengerABI: bridge_abi.L2ScrollMessengerABI,

 		gasOracleSender: gasOracleSender,
-		gasOracleCh:     gasOracleSender.ConfirmChan(),
 		l1GasOracleABI:  bridge_abi.L1GasPriceOracleABI,

 		minGasLimitForMessageRelay: minGasLimitForMessageRelay,

@@ -118,9 +102,11 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
 		minGasPrice:  minGasPrice,
 		gasPriceDiff: gasPriceDiff,

-		cfg:    cfg,
-		stopCh: make(chan struct{}),
-	}, nil
+		cfg: cfg,
+	}
+
+	go l1Relayer.handleConfirmLoop(ctx)
+	return l1Relayer, nil
 }

// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain

@@ -138,7 +124,7 @@ func (r *Layer1Relayer) ProcessSavedEvents() {

 	for _, msg := range msgs {
 		if err = r.processSavedEvent(msg); err != nil {
-			if !errors.Is(err, sender.ErrNoAvailableAccount) {
+			if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
 				log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
 			}
 			return

@@ -153,7 +139,7 @@ func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error {
 	if err != nil && err.Error() == "execution reverted: Message expired" {
 		return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
 	}
-	if err != nil && err.Error() == "execution reverted: Message successfully executed" {
+	if err != nil && err.Error() == "execution reverted: Message was already successfully executed" {
 		return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
 	}
 	if err != nil {

@@ -203,7 +189,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {

 	hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
 	if err != nil {
-		if !errors.Is(err, sender.ErrNoAvailableAccount) {
+		if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
 			log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
 		}
 		return

@@ -220,57 +206,43 @@
 	}
 }

-// Start the relayer process
-func (r *Layer1Relayer) Start() {
-	go func() {
-		ctx, cancel := context.WithCancel(r.ctx)
-
-		go utils.Loop(ctx, 2*time.Second, r.ProcessSavedEvents)
-		go utils.Loop(ctx, 2*time.Second, r.ProcessGasPriceOracle)
-
-		go func(ctx context.Context) {
-			for {
-				select {
-				case <-ctx.Done():
-					return
-				case cfm := <-r.messageCh:
-					bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
-					if !cfm.IsSuccessful {
-						log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
-					} else {
-						// @todo handle db error
-						err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
-						if err != nil {
-							log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
-						}
-						log.Info("transaction confirmed in layer2", "confirmation", cfm)
-					}
-				case cfm := <-r.gasOracleCh:
-					if !cfm.IsSuccessful {
-						// @discuss: maybe make it pending again?
-						err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
-						if err != nil {
-							log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
-						}
-						log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
-					} else {
-						// @todo handle db error
-						err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
-						if err != nil {
-							log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
-						}
-						log.Info("transaction confirmed in layer2", "confirmation", cfm)
-					}
-				}
-			}
-		}(ctx)
-
-		<-r.stopCh
-		cancel()
-	}()
-}
-
-// Stop the relayer module, for a graceful shutdown.
-func (r *Layer1Relayer) Stop() {
-	close(r.stopCh)
-}
+func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case cfm := <-r.messageSender.ConfirmChan():
+			bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
+			if !cfm.IsSuccessful {
+				err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
+				if err != nil {
+					log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
+				}
+				log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
+			} else {
+				// @todo handle db error
+				err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
+				if err != nil {
+					log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
+				}
+				log.Info("transaction confirmed in layer2", "confirmation", cfm)
+			}
+		case cfm := <-r.gasOracleSender.ConfirmChan():
+			if !cfm.IsSuccessful {
+				// @discuss: maybe make it pending again?
+				err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
+				if err != nil {
+					log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
+				}
+				log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
+			} else {
+				// @todo handle db error
+				err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
+				if err != nil {
+					log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
+				}
+				log.Info("transaction confirmed in layer2", "confirmation", cfm)
+			}
+		}
+	}
+}
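The net effect of the Layer1Relayer hunks above: the `Start`/`Stop` pair and the internal `stopCh` are gone, and the constructor spawns a single confirmation loop bound to the caller's context. Distilled to its shape — the types below are stubs, not the repo's API:

```go
package main

import (
	"context"
	"fmt"
)

type confirmation struct{ ID string }

type relayer struct {
	messageCh   <-chan confirmation
	gasOracleCh <-chan confirmation
}

// handleConfirmLoop mirrors the refactor: one select loop owns both
// confirmation channels, and context cancellation is the only stop signal.
func (r *relayer) handleConfirmLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case cfm := <-r.messageCh:
			fmt.Println("message confirmed:", cfm.ID) // update message status in DB
		case cfm := <-r.gasOracleCh:
			fmt.Println("gas oracle confirmed:", cfm.ID) // update oracle status in DB
		}
	}
}

func main() {
	msgs := make(chan confirmation)
	oracles := make(chan confirmation)
	r := &relayer{messageCh: msgs, gasOracleCh: oracles}

	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go func() { // spawned from the constructor in the real code
		r.handleConfirmLoop(ctx)
		close(done)
	}()

	msgs <- confirmation{ID: "0xabc"} // handled by the loop
	cancel()                          // the only shutdown signal needed
	<-done
}
```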
@@ -1,4 +1,4 @@
-package l1
+package relayer_test

 import (
 	"context"

@@ -8,6 +8,8 @@ import (

 	"scroll-tech/database/migrate"

+	"scroll-tech/bridge/relayer"
+
 	"scroll-tech/database"
 )

@@ -19,9 +21,7 @@ func testCreateNewL1Relayer(t *testing.T) {
 	assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
 	defer db.Close()

-	relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
+	relayer, err := relayer.NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
 	assert.NoError(t, err)
-	defer relayer.Stop()
-
-	relayer.Start()
 	assert.NotNil(t, relayer)
 }
@@ -1,4 +1,4 @@
-package l2
+package relayer

 import (
 	"context"

@@ -7,7 +7,6 @@ import (
 	"math/big"
 	"runtime"
 	"sync"
-	"time"

 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"

@@ -22,8 +21,6 @@ import (
 	"scroll-tech/common/types"
 	"scroll-tech/database"

-	cutil "scroll-tech/common/utils"
-
 	bridge_abi "scroll-tech/bridge/abi"
 	"scroll-tech/bridge/config"
 	"scroll-tech/bridge/sender"

@@ -40,14 +37,6 @@ var (
 	bridgeL2BatchesSkippedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
 )

-const (
-	gasPriceDiffPrecision = 1000000
-
-	defaultGasPriceDiff = 50000 // 5%
-
-	defaultMessageRelayMinGasLimit = 200000 // should be enough for both ERC20 and ETH relay
-)
-
 // Layer2Relayer is responsible for
 // 1. Committing and finalizing L2 blocks on L1
 // 2. Relaying messages from L2 to L1

@@ -63,15 +52,12 @@ type Layer2Relayer struct {
 	cfg *config.RelayerConfig

 	messageSender  *sender.Sender
-	messageCh      <-chan *sender.Confirmation
 	l1MessengerABI *abi.ABI

 	rollupSender *sender.Sender
-	rollupCh     <-chan *sender.Confirmation
 	l1RollupABI  *abi.ABI

 	gasOracleSender *sender.Sender
-	gasOracleCh     <-chan *sender.Confirmation
 	l2GasOracleABI  *abi.ABI

 	minGasLimitForMessageRelay uint64

@@ -91,8 +77,6 @@ type Layer2Relayer struct {
 	// A list of processing batch finalization.
 	// key(string): confirmation ID, value(string): batch hash.
 	processingFinalization sync.Map
-
-	stopCh chan struct{}
 }

 // NewLayer2Relayer will return a new instance of Layer2RelayerClient

@@ -126,27 +110,24 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db databa
 		gasPriceDiff = defaultGasPriceDiff
 	}

-	minGasLimitForMessageRelay := uint64(defaultMessageRelayMinGasLimit)
+	minGasLimitForMessageRelay := uint64(defaultL2MessageRelayMinGasLimit)
 	if cfg.MessageRelayMinGasLimit != 0 {
 		minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
 	}

-	return &Layer2Relayer{
+	layer2Relayer := &Layer2Relayer{
 		ctx: ctx,
 		db:  db,

 		l2Client: l2Client,

 		messageSender:  messageSender,
-		messageCh:      messageSender.ConfirmChan(),
 		l1MessengerABI: bridge_abi.L1ScrollMessengerABI,

 		rollupSender: rollupSender,
-		rollupCh:     rollupSender.ConfirmChan(),
 		l1RollupABI:  bridge_abi.ScrollChainABI,

 		gasOracleSender: gasOracleSender,
-		gasOracleCh:     gasOracleSender.ConfirmChan(),
 		l2GasOracleABI:  bridge_abi.L2GasPriceOracleABI,

 		minGasLimitForMessageRelay: minGasLimitForMessageRelay,

@@ -158,8 +139,9 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db databa
 		processingMessage:           sync.Map{},
 		processingBatchesCommitment: sync.Map{},
 		processingFinalization:      sync.Map{},
-		stopCh:                      make(chan struct{}),
-	}, nil
+	}
+	go layer2Relayer.handleConfirmLoop(ctx)
+	return layer2Relayer, nil
 }

 const processMsgLimit = 100

@@ -198,7 +180,7 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
 		})
 	}
 	if err := g.Wait(); err != nil {
-		if !errors.Is(err, sender.ErrNoAvailableAccount) {
+		if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
 			log.Error("failed to process l2 saved event", "err", err)
 		}
 		return

@@ -247,11 +229,11 @@ func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
 	if err != nil && err.Error() == "execution reverted: Message expired" {
 		return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired)
 	}
-	if err != nil && err.Error() == "execution reverted: Message successfully executed" {
+	if err != nil && err.Error() == "execution reverted: Message was already successfully executed" {
 		return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
 	}
 	if err != nil {
-		if !errors.Is(err, sender.ErrNoAvailableAccount) {
+		if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
 			log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
 		}
 		return err

@@ -297,7 +279,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {

 	hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
 	if err != nil {
-		if !errors.Is(err, sender.ErrNoAvailableAccount) {
+		if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
 			log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
 		}
 		return

@@ -343,7 +325,7 @@ func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error {
 	txID := crypto.Keccak256Hash(bytes).String()
 	txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
 	if err != nil {
-		if !errors.Is(err, sender.ErrNoAvailableAccount) {
+		if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
 			log.Error("Failed to send commitBatches tx to layer1 ", "err", err)
 		}
 		return err

@@ -493,7 +475,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
 	txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
 	finalizeTxHash := &txHash
 	if err != nil {
-		if !errors.Is(err, sender.ErrNoAvailableAccount) {
+		if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
 			log.Error("finalizeBatchWithProof in layer1 failed", "hash", hash, "err", err)
 		}
 		return

@@ -516,65 +498,20 @@
 	}
 }

-// Start the relayer process
-func (r *Layer2Relayer) Start() {
-	go func() {
-		ctx, cancel := context.WithCancel(r.ctx)
-		go cutil.Loop(ctx, time.Second, r.ProcessSavedEvents)
-		go cutil.Loop(ctx, time.Second, r.ProcessCommittedBatches)
-		go cutil.Loop(ctx, time.Second, r.ProcessGasPriceOracle)
-
-		go func(ctx context.Context) {
-			for {
-				select {
-				case <-ctx.Done():
-					return
-				case confirmation := <-r.messageCh:
-					r.handleConfirmation(confirmation)
-				case confirmation := <-r.rollupCh:
-					r.handleConfirmation(confirmation)
-				case cfm := <-r.gasOracleCh:
-					if !cfm.IsSuccessful {
-						// @discuss: maybe make it pending again?
-						err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
-						if err != nil {
-							log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
-						}
-						log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
-					} else {
-						// @todo handle db error
-						err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
-						if err != nil {
-							log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
-						}
-						log.Info("transaction confirmed in layer1", "confirmation", cfm)
-					}
-				}
-			}
-		}(ctx)
-
-		<-r.stopCh
-		cancel()
-	}()
-}
-
-// Stop the relayer module, for a graceful shutdown.
-func (r *Layer2Relayer) Stop() {
-	close(r.stopCh)
-}
-
 func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
-	if !confirmation.IsSuccessful {
-		log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
-		return
-	}
-
 	transactionType := "Unknown"
 	// check whether it is message relay transaction
 	if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
 		transactionType = "MessageRelay"
+		var status types.MsgStatus
+		if confirmation.IsSuccessful {
+			status = types.MsgConfirmed
+		} else {
+			status = types.MsgRelayFailed
+			log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
+		}
 		// @todo handle db error
-		err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), types.MsgConfirmed, confirmation.TxHash.String())
+		err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), status, confirmation.TxHash.String())
 		if err != nil {
 			log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
 		}

@@ -586,9 +523,16 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
 	if batchBatches, ok := r.processingBatchesCommitment.Load(confirmation.ID); ok {
 		transactionType = "BatchesCommitment"
 		batchHashes := batchBatches.([]string)
+		var status types.RollupStatus
+		if confirmation.IsSuccessful {
+			status = types.RollupCommitted
+		} else {
+			status = types.RollupCommitFailed
+			log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
+		}
 		for _, batchHash := range batchHashes {
 			// @todo handle db error
-			err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), types.RollupCommitted
|
||||
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), status)
|
||||
if err != nil {
|
||||
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_hash", batchHash, "err", err)
|
||||
}
|
||||
@@ -600,8 +544,15 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
|
||||
// check whether it is proof finalization transaction
|
||||
if batchHash, ok := r.processingFinalization.Load(confirmation.ID); ok {
|
||||
transactionType = "ProofFinalization"
|
||||
var status types.RollupStatus
|
||||
if confirmation.IsSuccessful {
|
||||
status = types.RollupFinalized
|
||||
} else {
|
||||
status = types.RollupFinalizeFailed
|
||||
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
|
||||
}
|
||||
// @todo handle db error
|
||||
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), types.RollupFinalized)
|
||||
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
|
||||
if err != nil {
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", batchHash.(string), "err", err)
|
||||
}
|
||||
@@ -610,3 +561,32 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
|
||||
}
|
||||
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case confirmation := <-r.messageSender.ConfirmChan():
|
||||
r.handleConfirmation(confirmation)
|
||||
case confirmation := <-r.rollupSender.ConfirmChan():
|
||||
r.handleConfirmation(confirmation)
|
||||
case cfm := <-r.gasOracleSender.ConfirmChan():
|
||||
if !cfm.IsSuccessful {
|
||||
// @discuss: maybe make it pending again?
|
||||
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
|
||||
}
|
||||
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
|
||||
} else {
|
||||
// @todo handle db error
|
||||
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
|
||||
}
|
||||
log.Info("transaction confirmed in layer1", "confirmation", cfm)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
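Note on the recurring change above: the relayer now treats sender.ErrFullPending exactly like sender.ErrNoAvailableAccount, as transient back-pressure to retry on the next tick rather than log as an error. A minimal caller-side sketch of that convention (illustrative only; the trySend wrapper and the one-second retry interval are assumptions, not part of this diff):

    // trySend retries while the sender is saturated or out of accounts,
    // and surfaces every other error to the caller. (sketch, not repo code)
    func trySend(s *sender.Sender, id string, to *common.Address, data []byte) (common.Hash, error) {
        for {
            hash, err := s.SendTransaction(id, to, big.NewInt(0), data, 0)
            if errors.Is(err, sender.ErrNoAvailableAccount) || errors.Is(err, sender.ErrFullPending) {
                time.Sleep(time.Second) // transient back-pressure: wait and retry
                continue
            }
            return hash, err
        }
    }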
@@ -1,4 +1,4 @@
package l2
package relayer_test

import (
    "context"
@@ -14,6 +14,8 @@ import (

    "scroll-tech/common/types"

    "scroll-tech/bridge/relayer"

    "scroll-tech/database"
    "scroll-tech/database/migrate"
)
@@ -39,11 +41,9 @@ func testCreateNewRelayer(t *testing.T) {
    assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
    defer db.Close()

    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
    relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
    assert.NoError(t, err)
    defer relayer.Stop()

    relayer.Start()
    assert.NotNil(t, relayer)
}

func testL2RelayerProcessSaveEvents(t *testing.T) {
@@ -54,27 +54,36 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
    defer db.Close()

    l2Cfg := cfg.L2Config
    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
    relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)
    defer relayer.Stop()

    err = db.SaveL2Messages(context.Background(), templateL2Message)
    assert.NoError(t, err)

    traces := []*geth_types.BlockTrace{
    traces := []*types.WrappedBlock{
        {
            Header: &geth_types.Header{
                Number: big.NewInt(int64(templateL2Message[0].Height)),
            },
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
        },
        {
            Header: &geth_types.Header{
                Number: big.NewInt(int64(templateL2Message[0].Height + 1)),
            },
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
        },
    }
    assert.NoError(t, db.InsertL2BlockTraces(traces))
    assert.NoError(t, db.InsertWrappedBlocks(traces))

    parentBatch1 := &types.BlockBatch{
        Index:     0,
        Hash:      common.Hash{}.String(),
        StateRoot: common.Hash{}.String(),
    }
    batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
    dbTx, err := db.Beginx()
    assert.NoError(t, err)
    assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
@@ -100,10 +109,15 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
    defer db.Close()

    l2Cfg := cfg.L2Config
    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
    relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)
    defer relayer.Stop()

    parentBatch1 := &types.BlockBatch{
        Index:     0,
        Hash:      common.Hash{}.String(),
        StateRoot: common.Hash{}.String(),
    }
    batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
    dbTx, err := db.Beginx()
    assert.NoError(t, err)
    assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
@@ -136,9 +150,8 @@ func testL2RelayerSkipBatches(t *testing.T) {
    defer db.Close()

    l2Cfg := cfg.L2Config
    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
    relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)
    defer relayer.Stop()

    createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string {
        dbTx, err := db.Beginx()
@@ -198,13 +211,13 @@ func genBatchData(t *testing.T, index uint64) *types.BatchData {
    templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
    assert.NoError(t, err)
    // unmarshal blockTrace
    blockTrace := &geth_types.BlockTrace{}
    err = json.Unmarshal(templateBlockTrace, blockTrace)
    wrappedBlock := &types.WrappedBlock{}
    err = json.Unmarshal(templateBlockTrace, wrappedBlock)
    assert.NoError(t, err)
    blockTrace.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16))
    wrappedBlock.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16))
    parentBatch := &types.BlockBatch{
        Index: index,
        Hash:  "0x0000000000000000000000000000000000000000",
    }
    return types.NewBatchData(parentBatch, []*geth_types.BlockTrace{blockTrace}, nil)
    return types.NewBatchData(parentBatch, []*types.WrappedBlock{wrappedBlock}, nil)
}
11
bridge/relayer/params.go
Normal file
@@ -0,0 +1,11 @@
package relayer

const (
    gasPriceDiffPrecision = 1000000

    defaultGasPriceDiff = 50000 // 5%

    defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay

    defaultL2MessageRelayMinGasLimit = 200000
)
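For orientation on these constants: a gas-price deviation is expressed as a fraction of gasPriceDiffPrecision, so defaultGasPriceDiff of 50000 out of 1000000 is 50000/1000000 = 5%. A sketch of how such a threshold check can be done in integer arithmetic (the helper below is illustrative and not part of this diff; it ignores overflow for extreme prices):

    // exceedsDiffThreshold reports whether newPrice deviates from oldPrice
    // by at least defaultGasPriceDiff / gasPriceDiffPrecision (5%).
    func exceedsDiffThreshold(oldPrice, newPrice uint64) bool {
        diff := newPrice - oldPrice
        if oldPrice > newPrice {
            diff = oldPrice - newPrice
        }
        // integer form of: diff/oldPrice >= defaultGasPriceDiff/gasPriceDiffPrecision
        return diff*gasPriceDiffPrecision >= oldPrice*defaultGasPriceDiff
    }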
108
bridge/relayer/relayer_test.go
Normal file
@@ -0,0 +1,108 @@
package relayer_test

import (
    "encoding/json"
    "os"
    "testing"

    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/stretchr/testify/assert"

    "scroll-tech/common/docker"
    "scroll-tech/common/types"

    "scroll-tech/bridge/config"
)

var (
    // config
    cfg *config.Config

    base *docker.App

    // l2geth client
    l2Cli *ethclient.Client

    // block trace
    wrappedBlock1 *types.WrappedBlock
    wrappedBlock2 *types.WrappedBlock

    // batch data
    batchData1 *types.BatchData
    batchData2 *types.BatchData
)

func setupEnv(t *testing.T) (err error) {
    // Load config.
    cfg, err = config.NewConfig("../config.json")
    assert.NoError(t, err)

    base.RunImages(t)

    cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1GethEndpoint()
    cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
    cfg.DBConfig.DSN = base.DBEndpoint()

    // Create l2geth client.
    l2Cli, err = base.L2Client()
    assert.NoError(t, err)

    templateBlockTrace1, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
    if err != nil {
        return err
    }
    // unmarshal blockTrace
    wrappedBlock1 = &types.WrappedBlock{}
    if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
        return err
    }
    parentBatch1 := &types.BlockBatch{
        Index:     0,
        Hash:      "0x0cc6b102c2924402c14b2e3a19baccc316252bfdc44d9ec62e942d34e39ec729",
        StateRoot: "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
    }
    batchData1 = types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)

    templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json")
    if err != nil {
        return err
    }
    // unmarshal blockTrace
    wrappedBlock2 = &types.WrappedBlock{}
    if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
        return err
    }
    parentBatch2 := &types.BlockBatch{
        Index:     batchData1.Batch.BatchIndex,
        Hash:      batchData1.Hash().Hex(),
        StateRoot: batchData1.Batch.NewStateRoot.String(),
    }
    batchData2 = types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil)

    log.Info("batchHash", "batchhash1", batchData1.Hash().Hex(), "batchhash2", batchData2.Hash().Hex())

    return err
}

func TestMain(m *testing.M) {
    base = docker.NewDockerApp()

    m.Run()

    base.Free()
}

func TestFunctions(t *testing.T) {
    if err := setupEnv(t); err != nil {
        t.Fatal(err)
    }
    // Run l1 relayer test cases.
    t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer)
    // Run l2 relayer test cases.
    t.Run("TestCreateNewRelayer", testCreateNewRelayer)
    t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
    t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
    t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)

}
@@ -6,21 +6,19 @@ import (
    "errors"
    "fmt"
    "math/big"
    "reflect"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    cmapV2 "github.com/orcaman/concurrent-map/v2"
    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"

    "scroll-tech/bridge/utils"

    "scroll-tech/bridge/config"
    "scroll-tech/bridge/utils"
)

const (
@@ -37,6 +35,12 @@ const (
var (
    // ErrNoAvailableAccount indicates no available account error in the account pool.
    ErrNoAvailableAccount = errors.New("sender has no available account to send transaction")
    // ErrFullPending sender's pending pool is full.
    ErrFullPending = errors.New("sender's pending pool is full")
)

var (
    defaultPendingLimit = 10
)

// Confirmation struct used to indicate transaction confirmation details
@@ -74,9 +78,9 @@ type Sender struct {
    // account fields.
    auths *accountPool

    blockNumber   uint64   // Current block number on chain.
    baseFeePerGas uint64   // Current base fee per gas on chain
    pendingTxs    sync.Map // Mapping from nonce to pending transaction
    blockNumber   uint64                                            // Current block number on chain.
    baseFeePerGas uint64                                            // Current base fee per gas on chain
    pendingTxs    cmapV2.ConcurrentMap[string, *PendingTransaction] // Mapping from nonce to pending transaction
    confirmCh     chan *Confirmation

    stopCh chan struct{}
@@ -116,6 +120,11 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
        }
    }

    // initialize pending limit with a default value
    if config.PendingLimit == 0 {
        config.PendingLimit = defaultPendingLimit
    }

    sender := &Sender{
        ctx:    ctx,
        config: config,
@@ -125,7 +134,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
        confirmCh:     make(chan *Confirmation, 128),
        blockNumber:   header.Number.Uint64(),
        baseFeePerGas: baseFeePerGas,
        pendingTxs:    sync.Map{},
        pendingTxs:    cmapV2.New[*PendingTransaction](),
        stopCh:        make(chan struct{}),
    }

@@ -134,6 +143,21 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
    return sender, nil
}

// PendingCount returns the current number of pending txs.
func (s *Sender) PendingCount() int {
    return s.pendingTxs.Count()
}

// PendingLimit returns the maximum number of pending txs the sender can handle.
func (s *Sender) PendingLimit() int {
    return s.config.PendingLimit
}

// IsFull returns true if the sender's pending tx pool is full.
func (s *Sender) IsFull() bool {
    return s.pendingTxs.Count() >= s.config.PendingLimit
}

// Stop stop the sender module.
func (s *Sender) Stop() {
    close(s.stopCh)
@@ -159,21 +183,24 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val

// SendTransaction send a signed L2tL1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) {
    if s.IsFull() {
        return common.Hash{}, ErrFullPending
    }
    // We occupy the ID, in case some other threads call with the same ID in the same time
    if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
    if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
        return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
    }
    // get
    auth := s.auths.getAccount()
    if auth == nil {
        s.pendingTxs.Delete(ID) // release the ID on failure
        s.pendingTxs.Remove(ID) // release the ID on failure
        return common.Hash{}, ErrNoAvailableAccount
    }

    defer s.auths.releaseAccount(auth)
    defer func() {
        if err != nil {
            s.pendingTxs.Delete(ID) // release the ID on failure
            s.pendingTxs.Remove(ID) // release the ID on failure
        }
    }()

@@ -194,7 +221,7 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
        submitAt: atomic.LoadUint64(&s.blockNumber),
        feeData:  feeData,
    }
    s.pendingTxs.Store(ID, pending)
    s.pendingTxs.Set(ID, pending)
    return tx.Hash(), nil
}

@@ -335,17 +362,17 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
        }
    }

    s.pendingTxs.Range(func(key, value interface{}) bool {
    for item := range s.pendingTxs.IterBuffered() {
        key, pending := item.Key, item.Val
        // ignore empty id, since we use empty id to occupy pending task
        if value == nil || reflect.ValueOf(value).IsNil() {
            return true
        if pending == nil {
            continue
        }

        pending := value.(*PendingTransaction)
        receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
        if (err == nil) && (receipt != nil) {
            if receipt.BlockNumber.Uint64() <= confirmed {
                s.pendingTxs.Delete(key)
                s.pendingTxs.Remove(key)
                // send confirm message
                s.confirmCh <- &Confirmation{
                    ID: pending.id,
@@ -376,7 +403,7 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
            // We need to stop the program and manually handle the situation.
            if strings.Contains(err.Error(), "nonce") {
                // This key can be deleted
                s.pendingTxs.Delete(key)
                s.pendingTxs.Remove(key)
                // Try get receipt by the latest replaced tx hash
                receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
                if (err == nil) && (receipt != nil) {
@@ -398,8 +425,7 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
                pending.submitAt = number
            }
        }
        return true
    })
    }
}

// Loop is the main event loop

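The switch from sync.Map to orcaman/concurrent-map v2 above keeps the same occupy-then-fill idiom but with typed, countable entries; Count() is what makes PendingCount and IsFull cheap. A self-contained sketch of the idiom, using only the concurrent-map v2 calls that appear in this diff:

    package main

    import (
        "fmt"

        cmapV2 "github.com/orcaman/concurrent-map/v2"
    )

    func main() {
        pending := cmapV2.New[*string]()

        // SetIfAbsent is atomic: two goroutines racing on the same ID
        // cannot both claim it, replacing sync.Map's LoadOrStore check.
        if ok := pending.SetIfAbsent("tx-1", nil); !ok {
            fmt.Println("duplicate ID")
            return
        }
        v := "signed-tx"
        pending.Set("tx-1", &v)      // fill in the real value once built
        fmt.Println(pending.Count()) // 1; backs PendingCount and IsFull
        pending.Remove("tx-1")       // release on completion or failure
    }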
@@ -57,6 +57,8 @@ func TestSender(t *testing.T) {
    // Setup
    setupEnv(t)

    t.Run("test pending limit", func(t *testing.T) { testPendLimit(t) })

    t.Run("test min gas limit", func(t *testing.T) { testMinGasLimit(t) })

    t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
@@ -64,6 +66,21 @@ func TestSender(t *testing.T) {
    t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
}

func testPendLimit(t *testing.T) {
    senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
    senderCfg.Confirmations = rpc.LatestBlockNumber
    senderCfg.PendingLimit = 2
    newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
    assert.NoError(t, err)
    defer newSender.Stop()

    for i := 0; i < newSender.PendingLimit(); i++ {
        _, err = newSender.SendTransaction(strconv.Itoa(i), &common.Address{}, big.NewInt(1), nil, 0)
        assert.NoError(t, err)
    }
    assert.True(t, newSender.PendingCount() <= newSender.PendingLimit())
}

func testMinGasLimit(t *testing.T) {
    senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
    senderCfg.Confirmations = rpc.LatestBlockNumber
@@ -100,6 +117,7 @@ func testBatchSender(t *testing.T, batchSize int) {

    senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
    senderCfg.Confirmations = rpc.LatestBlockNumber
    senderCfg.PendingLimit = batchSize * TXBatch
    newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
    if err != nil {
        t.Fatal(err)
@@ -119,7 +137,7 @@ func testBatchSender(t *testing.T, batchSize int) {
    toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
    id := strconv.Itoa(i + index*1000)
    _, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
    if errors.Is(err, sender.ErrNoAvailableAccount) {
    if errors.Is(err, sender.ErrNoAvailableAccount) || errors.Is(err, sender.ErrFullPending) {
        <-time.After(time.Second)
        continue
    }

@@ -11,8 +8,8 @@ import (

    "scroll-tech/common/types"

    "scroll-tech/bridge/l1"
    "scroll-tech/bridge/l2"
    "scroll-tech/bridge/relayer"
    "scroll-tech/bridge/watcher"

    "scroll-tech/database"
    "scroll-tech/database/migrate"
@@ -30,14 +30,13 @@ func testImportL1GasPrice(t *testing.T) {
    l1Cfg := cfg.L1Config

    // Create L1Relayer
    l1Relayer, err := l1.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
    l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
    assert.NoError(t, err)
    defer l1Relayer.Stop()

    // Create L1Watcher
    startHeight, err := l1Client.BlockNumber(context.Background())
    assert.NoError(t, err)
    l1Watcher := l1.NewWatcher(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
    l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)

    // fetch new blocks
    number, err := l1Client.BlockNumber(context.Background())
@@ -81,12 +80,11 @@ func testImportL2GasPrice(t *testing.T) {
    l2Cfg := cfg.L2Config

    // Create L2Relayer
    l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
    l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)
    defer l2Relayer.Stop()

    // add fake blocks
    traces := []*geth_types.BlockTrace{
    traces := []*types.WrappedBlock{
        {
            Header: &geth_types.Header{
                Number: big.NewInt(1),
@@ -94,16 +92,17 @@ func testImportL2GasPrice(t *testing.T) {
                Difficulty: big.NewInt(0),
                BaseFee:    big.NewInt(0),
            },
            StorageTrace: &geth_types.StorageTrace{},
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
        },
    }
    assert.NoError(t, db.InsertL2BlockTraces(traces))
    assert.NoError(t, db.InsertWrappedBlocks(traces))

    parentBatch := &types.BlockBatch{
        Index: 0,
        Hash:  "0x0000000000000000000000000000000000000000",
    }
    batchData := types.NewBatchData(parentBatch, []*geth_types.BlockTrace{
    batchData := types.NewBatchData(parentBatch, []*types.WrappedBlock{
        traces[0],
    }, cfg.L2Config.BatchProposerConfig.PublicInputConfig)

@@ -13,8 +13,8 @@ import (

    "scroll-tech/common/types"

    "scroll-tech/bridge/l1"
    "scroll-tech/bridge/l2"
    "scroll-tech/bridge/relayer"
    "scroll-tech/bridge/watcher"

    "scroll-tech/database"
    "scroll-tech/database/migrate"
@@ -33,16 +33,14 @@ func testRelayL1MessageSucceed(t *testing.T) {
    l2Cfg := cfg.L2Config

    // Create L1Relayer
    l1Relayer, err := l1.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
    l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
    assert.NoError(t, err)
    defer l1Relayer.Stop()

    // Create L1Watcher
    confirmations := rpc.LatestBlockNumber
    l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
    l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)

    // Create L2Watcher
    l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, db)
    l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)

    // send message through l1 messenger contract
    nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})
@@ -56,7 +54,7 @@ func testRelayL1MessageSucceed(t *testing.T) {
    }

    // l1 watch process events
    l1Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
    l1Watcher.FetchContractEvent()

    // check db status
    msg, err := db.GetL1MessageByQueueIndex(nonce.Uint64())
@@ -79,7 +77,7 @@ func testRelayL1MessageSucceed(t *testing.T) {
    assert.Equal(t, len(relayTxReceipt.Logs), 1)

    // fetch message relayed events
    l2Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
    l2Watcher.FetchContractEvent()
    msg, err = db.GetL1MessageByQueueIndex(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, msg.Status, types.MsgConfirmed)

@@ -13,8 +13,8 @@ import (

    "scroll-tech/common/types"

    "scroll-tech/bridge/l1"
    "scroll-tech/bridge/l2"
    "scroll-tech/bridge/relayer"
    "scroll-tech/bridge/watcher"

    "scroll-tech/database"
    "scroll-tech/database/migrate"
@@ -33,15 +33,15 @@ func testRelayL2MessageSucceed(t *testing.T) {

    // Create L2Watcher
    confirmations := rpc.LatestBlockNumber
    l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, db)
    l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)

    // Create L2Relayer
    l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
    l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)

    // Create L1Watcher
    l1Cfg := cfg.L1Config
    l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
    l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)

    // send message through l2 messenger contract
    nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
@@ -55,7 +55,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
    }

    // l2 watch process events
    l2Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
    l2Watcher.FetchContractEvent()

    // check db status
    msg, err := db.GetL2MessageByNonce(nonce.Uint64())
@@ -65,7 +65,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
    assert.Equal(t, msg.Target, l1Auth.From.String())

    // add fake blocks
    traces := []*geth_types.BlockTrace{
    traces := []*types.WrappedBlock{
        {
            Header: &geth_types.Header{
                Number: sendReceipt.BlockNumber,
@@ -73,16 +73,17 @@ func testRelayL2MessageSucceed(t *testing.T) {
                Difficulty: big.NewInt(0),
                BaseFee:    big.NewInt(0),
            },
            StorageTrace: &geth_types.StorageTrace{},
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
        },
    }
    assert.NoError(t, db.InsertL2BlockTraces(traces))
    assert.NoError(t, db.InsertWrappedBlocks(traces))

    parentBatch := &types.BlockBatch{
        Index: 0,
        Hash:  "0x0000000000000000000000000000000000000000",
    }
    batchData := types.NewBatchData(parentBatch, []*geth_types.BlockTrace{
    batchData := types.NewBatchData(parentBatch, []*types.WrappedBlock{
        traces[0],
    }, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
    batchHash := batchData.Hash().String()
@@ -122,7 +123,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
    assert.Equal(t, len(commitTxReceipt.Logs), 1)

    // fetch CommitBatch rollup events
    err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
    err = l1Watcher.FetchContractEvent()
    assert.NoError(t, err)
    status, err = db.GetRollupStatus(batchHash)
    assert.NoError(t, err)
@@ -143,7 +144,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
    assert.Equal(t, len(finalizeTxReceipt.Logs), 1)

    // fetch FinalizeBatch events
    err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
    err = l1Watcher.FetchContractEvent()
    assert.NoError(t, err)
    status, err = db.GetRollupStatus(batchHash)
    assert.NoError(t, err)
@@ -164,7 +165,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
    assert.Equal(t, len(relayTxReceipt.Logs), 1)

    // fetch message relayed events
    err = l1Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
    err = l1Watcher.FetchContractEvent()
    assert.NoError(t, err)
    msg, err = db.GetL2MessageByNonce(nonce.Uint64())
    assert.NoError(t, err)

@@ -12,8 +12,8 @@ import (

    "scroll-tech/common/types"

    "scroll-tech/bridge/l1"
    "scroll-tech/bridge/l2"
    "scroll-tech/bridge/relayer"
    "scroll-tech/bridge/watcher"

    "scroll-tech/database"
    "scroll-tech/database/migrate"
@@ -30,16 +30,15 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {

    // Create L2Relayer
    l2Cfg := cfg.L2Config
    l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
    l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)
    defer l2Relayer.Stop()

    // Create L1Watcher
    l1Cfg := cfg.L1Config
    l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
    l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)

    // add some blocks to db
    var traces []*geth_types.BlockTrace
    var wrappedBlocks []*types.WrappedBlock
    var parentHash common.Hash
    for i := 1; i <= 10; i++ {
        header := geth_types.Header{
@@ -48,21 +47,22 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
            Difficulty: big.NewInt(0),
            BaseFee:    big.NewInt(0),
        }
        traces = append(traces, &geth_types.BlockTrace{
            Header:       &header,
            StorageTrace: &geth_types.StorageTrace{},
        wrappedBlocks = append(wrappedBlocks, &types.WrappedBlock{
            Header:           &header,
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
        })
        parentHash = header.Hash()
    }
    assert.NoError(t, db.InsertL2BlockTraces(traces))
    assert.NoError(t, db.InsertWrappedBlocks(wrappedBlocks))

    parentBatch := &types.BlockBatch{
        Index: 0,
        Hash:  "0x0000000000000000000000000000000000000000",
    }
    batchData := types.NewBatchData(parentBatch, []*geth_types.BlockTrace{
        traces[0],
        traces[1],
    batchData := types.NewBatchData(parentBatch, []*types.WrappedBlock{
        wrappedBlocks[0],
        wrappedBlocks[1],
    }, cfg.L2Config.BatchProposerConfig.PublicInputConfig)

    batchHash := batchData.Hash().String()
@@ -95,7 +95,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
    assert.Equal(t, len(commitTxReceipt.Logs), 1)

    // fetch rollup events
    err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
    err = l1Watcher.FetchContractEvent()
    assert.NoError(t, err)
    status, err = db.GetRollupStatus(batchHash)
    assert.NoError(t, err)
@@ -125,7 +125,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
    assert.Equal(t, len(finalizeTxReceipt.Logs), 1)

    // fetch rollup events
    err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
    err = l1Watcher.FetchContractEvent()
    assert.NoError(t, err)
    status, err = db.GetRollupStatus(batchHash)
    assert.NoError(t, err)

@@ -1,4 +1,4 @@
package l2
package utils

import (
    "fmt"
@@ -1,35 +1,33 @@
package l2
package watcher

import (
    "context"
    "fmt"
    "math"
    "reflect"
    "sync"
    "time"

    geth_types "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/log"
    geth_metrics "github.com/scroll-tech/go-ethereum/metrics"

    "scroll-tech/common/metrics"
    "scroll-tech/common/types"
    "scroll-tech/common/utils"

    "scroll-tech/database"

    bridgeabi "scroll-tech/bridge/abi"
    "scroll-tech/bridge/config"
    "scroll-tech/bridge/relayer"
)

var (
    bridgeL2BatchesGasOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/gas/over/threshold/total", metrics.ScrollRegistry)
    bridgeL2BatchesTxsOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/txs/over/threshold/total", metrics.ScrollRegistry)
    bridgeL2BatchesCommitTotalCounter           = geth_metrics.NewRegisteredCounter("bridge/l2/batches/commit/total", metrics.ScrollRegistry)
    bridgeL2BatchesBlocksCreatedTotalCounter    = geth_metrics.NewRegisteredCounter("bridge/l2/batches/blocks/created/total", metrics.ScrollRegistry)
    bridgeL2BatchesCommitsSentTotalCounter      = geth_metrics.NewRegisteredCounter("bridge/l2/batches/commits/sent/total", metrics.ScrollRegistry)

    bridgeL2BatchesCreatedRateMeter        = geth_metrics.NewRegisteredMeter("bridge/l2/batches/blocks/created/rate", metrics.ScrollRegistry)
    bridgeL2BatchesTxsCreatedRateMeter     = geth_metrics.NewRegisteredMeter("bridge/l2/batches/txs/created/rate", metrics.ScrollRegistry)
    bridgeL2BatchesGasCreatedRateMeter     = geth_metrics.NewRegisteredMeter("bridge/l2/batches/gas/created/rate", metrics.ScrollRegistry)
    bridgeL2BatchesTxsCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/txs/created/per/batch", metrics.ScrollRegistry)
    bridgeL2BatchesGasCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/gas/created/per/batch", metrics.ScrollRegistry)
)

// AddBatchInfoToDB inserts the batch information to the BlockBatch table and updates the batch_hash
@@ -84,15 +82,13 @@ type BatchProposer struct {

    proofGenerationFreq uint64
    batchDataBuffer     []*types.BatchData
    relayer             *Layer2Relayer
    relayer             *relayer.Layer2Relayer

    piCfg *types.PublicInputHashConfig

    stopCh chan struct{}
}

// NewBatchProposer will return a new instance of BatchProposer.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *Layer2Relayer, orm database.OrmFactory) *BatchProposer {
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *relayer.Layer2Relayer, orm database.OrmFactory) *BatchProposer {
    p := &BatchProposer{
        mutex: sync.Mutex{},
        ctx:   ctx,
@@ -108,42 +104,17 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, rela
        proofGenerationFreq: cfg.ProofGenerationFreq,
        piCfg:               cfg.PublicInputConfig,
        relayer:             relayer,
        stopCh:              make(chan struct{}),
    }

    // for graceful restart.
    p.recoverBatchDataBuffer()

    // try to commit the leftover pending batches
    p.tryCommitBatches()
    p.TryCommitBatches()

    return p
}

// Start the Listening process
func (p *BatchProposer) Start() {
    go func() {
        if reflect.ValueOf(p.orm).IsNil() {
            panic("must run BatchProposer with DB")
        }

        ctx, cancel := context.WithCancel(p.ctx)

        go utils.Loop(ctx, 2*time.Second, func() {
            p.tryProposeBatch()
            p.tryCommitBatches()
        })

        <-p.stopCh
        cancel()
    }()
}

// Stop the Watcher module, for a graceful shutdown.
func (p *BatchProposer) Stop() {
    p.stopCh <- struct{}{}
}

func (p *BatchProposer) recoverBatchDataBuffer() {
    // batches are sorted by batch index in increasing order
    batchHashes, err := p.orm.GetPendingBatches(math.MaxInt32)
@@ -215,7 +186,8 @@ func (p *BatchProposer) recoverBatchDataBuffer() {
    }
}

func (p *BatchProposer) tryProposeBatch() {
// TryProposeBatch will try to propose a batch.
func (p *BatchProposer) TryProposeBatch() {
    p.mutex.Lock()
    defer p.mutex.Unlock()

@@ -229,18 +201,23 @@
        return
    }

    p.proposeBatch(blocks)
    batchCreated := p.proposeBatch(blocks)

    // while size of batchDataBuffer < commitCalldataMinSize,
    // proposer keeps fetching and proposing batches.
    if p.getBatchDataBufferSize() >= p.commitCalldataMinSize {
        return
    }
    // wait for watcher to insert l2 traces.
    time.Sleep(time.Second)

    if !batchCreated {
        // wait for watcher to insert l2 traces.
        time.Sleep(time.Second)
    }
    }
}

func (p *BatchProposer) tryCommitBatches() {
// TryCommitBatches will try to commit the pending batches.
func (p *BatchProposer) TryCommitBatches() {
    p.mutex.Lock()
    defer p.mutex.Unlock()

@@ -280,14 +257,14 @@ func (p *BatchProposer) tryCommitBatches() {
        log.Error("SendCommitTx failed", "error", err)
    } else {
        // pop the processed batches from the buffer
        bridgeL2BatchesCommitTotalCounter.Inc(1)
        bridgeL2BatchesCommitsSentTotalCounter.Inc(1)
        p.batchDataBuffer = p.batchDataBuffer[index:]
    }
}

func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
    if len(blocks) == 0 {
        return
        return false
    }

    if blocks[0].GasUsed > p.batchGasThreshold {
@@ -296,11 +273,11 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
        if err := p.createBatchForBlocks(blocks[:1]); err != nil {
            log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
        } else {
            bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(blocks[0].TxNum))
            bridgeL2BatchesGasCreatedRateMeter.Mark(int64(blocks[0].GasUsed))
            bridgeL2BatchesCreatedRateMeter.Mark(1)
            bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
            bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
            bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
        }
        return
        return true
    }

    if blocks[0].TxNum > p.batchTxNumThreshold {
@@ -309,11 +286,11 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
        if err := p.createBatchForBlocks(blocks[:1]); err != nil {
            log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
        } else {
            bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(blocks[0].TxNum))
            bridgeL2BatchesGasCreatedRateMeter.Mark(int64(blocks[0].GasUsed))
            bridgeL2BatchesCreatedRateMeter.Mark(1)
            bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
            bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
            bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
        }
        return
        return true
    }

    var gasUsed, txNum uint64
@@ -333,16 +310,18 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
    // if it's not old enough we will skip proposing the batch,
    // otherwise we will still propose a batch
    if !reachThreshold && blocks[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) {
        return
        return false
    }

    if err := p.createBatchForBlocks(blocks); err != nil {
        log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
    } else {
        bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(txNum))
        bridgeL2BatchesGasCreatedRateMeter.Mark(int64(gasUsed))
        bridgeL2BatchesCreatedRateMeter.Mark(int64(len(blocks)))
        bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(txNum))
        bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(gasUsed))
        bridgeL2BatchesBlocksCreatedTotalCounter.Inc(int64(len(blocks)))
    }

    return true
}

func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error {
@@ -368,16 +347,16 @@ func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error {
}

func (p *BatchProposer) generateBatchData(parentBatch *types.BlockBatch, blocks []*types.BlockInfo) (*types.BatchData, error) {
    var traces []*geth_types.BlockTrace
    var wrappedBlocks []*types.WrappedBlock
    for _, block := range blocks {
        trs, err := p.orm.GetL2BlockTraces(map[string]interface{}{"hash": block.Hash})
        trs, err := p.orm.GetL2WrappedBlocks(map[string]interface{}{"hash": block.Hash})
        if err != nil || len(trs) != 1 {
            log.Error("Failed to GetBlockTraces", "hash", block.Hash, "err", err)
            return nil, err
        }
        traces = append(traces, trs[0])
        wrappedBlocks = append(wrappedBlocks, trs[0])
    }
    return types.NewBatchData(parentBatch, traces, p.piCfg), nil
    return types.NewBatchData(parentBatch, wrappedBlocks, p.piCfg), nil
}

func (p *BatchProposer) getBatchDataBufferSize() (size uint64) {
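With the proposer's own Start/Stop loop deleted above, driving the now-exported TryProposeBatch and TryCommitBatches becomes the caller's responsibility. A sketch of plausible caller-side wiring (this wiring is assumed rather than shown in the diff; cutil.Loop is the same helper the relayer's removed Start used):

    // assumed owner-side poll loop, not part of this change
    go cutil.Loop(ctx, 2*time.Second, func() {
        proposer.TryProposeBatch()
        proposer.TryCommitBatches()
    })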
@@ -1,4 +1,4 @@
package l2
package watcher_test

import (
    "context"
@@ -6,13 +6,15 @@ import (
    "math"
    "testing"

    geth_types "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/stretchr/testify/assert"

    "scroll-tech/database"
    "scroll-tech/database/migrate"

    "scroll-tech/bridge/config"
    "scroll-tech/bridge/relayer"
    "scroll-tech/bridge/watcher"

    "scroll-tech/common/types"
)
@@ -22,34 +24,49 @@ func testBatchProposerProposeBatch(t *testing.T) {
    db, err := database.NewOrmFactory(cfg.DBConfig)
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
    defer db.Close()
    ctx := context.Background()
    subCtx, cancel := context.WithCancel(ctx)

    defer func() {
        cancel()
        db.Close()
    }()

    // Insert traces into db.
    assert.NoError(t, db.InsertL2BlockTraces([]*geth_types.BlockTrace{blockTrace1}))
    assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock1}))

    l2cfg := cfg.L2Config
    wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, db)
    wc.Start()
    defer wc.Stop()
    wc := watcher.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
    loopToFetchEvent(subCtx, wc)

    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
    batch, err := db.GetLatestBatch()
    assert.NoError(t, err)

    proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
    // Create a new batch.
    batchData := types.NewBatchData(&types.BlockBatch{
        Index:     0,
        Hash:      batch.Hash,
        StateRoot: batch.StateRoot,
    }, []*types.WrappedBlock{wrappedBlock1}, nil)

    relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
    assert.NoError(t, err)

    proposer := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
        ProofGenerationFreq: 1,
        BatchGasThreshold:   3000000,
        BatchTxNumThreshold: 135,
        BatchTimeSec:        1,
        BatchBlocksLimit:    100,
    }, relayer, db)
    proposer.tryProposeBatch()
    proposer.TryProposeBatch()

    infos, err := db.GetUnbatchedL2Blocks(map[string]interface{}{},
        fmt.Sprintf("order by number ASC LIMIT %d", 100))
    assert.NoError(t, err)
    assert.Equal(t, 0, len(infos))

    exist, err := db.BatchRecordExist(batchData1.Hash().Hex())
    exist, err := db.BatchRecordExist(batchData.Hash().Hex())
    assert.NoError(t, err)
    assert.Equal(t, true, exist)
}
@@ -61,13 +78,26 @@ func testBatchProposerGracefulRestart(t *testing.T) {
    assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
    defer db.Close()

    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
    relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
    assert.NoError(t, err)

    // Insert traces into db.
    assert.NoError(t, db.InsertL2BlockTraces([]*geth_types.BlockTrace{blockTrace2}))
    assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock2}))

    // Insert block batch into db.
    batchData1 := types.NewBatchData(&types.BlockBatch{
        Index:     0,
        Hash:      common.Hash{}.String(),
        StateRoot: common.Hash{}.String(),
    }, []*types.WrappedBlock{wrappedBlock1}, nil)

    parentBatch2 := &types.BlockBatch{
        Index:     batchData1.Batch.BatchIndex,
        Hash:      batchData1.Hash().Hex(),
        StateRoot: batchData1.Batch.NewStateRoot.String(),
    }
    batchData2 := types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil)

    dbTx, err := db.Beginx()
    assert.NoError(t, err)
    assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
@@ -85,7 +115,7 @@ func testBatchProposerGracefulRestart(t *testing.T) {
    assert.Equal(t, 1, len(batchHashes))
    assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])
    // test p.recoverBatchDataBuffer().
    _ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
    _ = watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
        ProofGenerationFreq: 1,
        BatchGasThreshold:   3000000,
        BatchTxNumThreshold: 135,
11
bridge/watcher/common.go
Normal file
@@ -0,0 +1,11 @@
package watcher

import "github.com/scroll-tech/go-ethereum/common"

const contractEventsBlocksFetchLimit = int64(10)

type relayedMessage struct {
    msgHash      common.Hash
    txHash       common.Hash
    isSuccessful bool
}
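contractEventsBlocksFetchLimit caps how many blocks a single log query spans. A standalone sketch of that windowed-fetch idiom (illustrative only; the watcher's actual loop differs in detail, and messengerAddress here is an assumed variable):

    // fetch logs in windows of at most contractEventsBlocksFetchLimit blocks
    for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
        to := from + contractEventsBlocksFetchLimit - 1
        if to > toBlock {
            to = toBlock
        }
        query := geth.FilterQuery{
            FromBlock: big.NewInt(from),
            ToBlock:   big.NewInt(to),
            Addresses: []common.Address{messengerAddress},
        }
        logs, err := client.FilterLogs(ctx, query)
        if err != nil {
            return err
        }
        _ = logs // parse and persist events here
    }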
@@ -1,9 +1,8 @@
package l1
package watcher

import (
    "context"
    "math/big"
    "time"

    geth "github.com/scroll-tech/go-ethereum"
    "github.com/scroll-tech/go-ethereum/accounts/abi"
@@ -19,8 +18,6 @@ import (
    "scroll-tech/common/types"
    "scroll-tech/database"

    cutil "scroll-tech/common/utils"

    bridge_abi "scroll-tech/bridge/abi"
    "scroll-tech/bridge/utils"
)
@@ -33,20 +30,14 @@ var (
    bridgeL1MsgsRollupEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
)

type relayedMessage struct {
    msgHash      common.Hash
    txHash       common.Hash
    isSuccessful bool
}

type rollupEvent struct {
    batchHash common.Hash
    txHash    common.Hash
    status    types.RollupStatus
}

// Watcher will listen for smart contract events from Eth L1.
type Watcher struct {
// L1WatcherClient will listen for smart contract events from Eth L1.
type L1WatcherClient struct {
    ctx    context.Context
    client *ethclient.Client
    db     database.OrmFactory
@@ -67,13 +58,10 @@ type Watcher struct {
    processedMsgHeight uint64
    // The height of the block that the watcher has retrieved header rlp
    processedBlockHeight uint64

    stopCh chan bool
}

// NewWatcher returns a new instance of Watcher. The instance will be not fully prepared,
// and still needs to be finalized and ran by calling `watcher.Start`.
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *Watcher {
// NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *L1WatcherClient {
    savedHeight, err := db.GetLayer1LatestWatchedHeight()
    if err != nil {
        log.Warn("Failed to fetch height from db", "err", err)
@@ -92,9 +80,7 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
        savedL1BlockHeight = startHeight
    }

    stopCh := make(chan bool)

    return &Watcher{
    return &L1WatcherClient{
        ctx:    ctx,
        client: client,
        db:     db,
@@ -111,51 +97,11 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6

        processedMsgHeight:   uint64(savedHeight),
        processedBlockHeight: savedL1BlockHeight,
        stopCh:               stopCh,
    }
}

// Start the Watcher module.
func (w *Watcher) Start() {
    go func() {
        ctx, cancel := context.WithCancel(w.ctx)

        go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
            number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.client, w.confirmations)
            if err != nil {
                log.Error("failed to get block number", "err", err)
            } else {
                if err := w.FetchBlockHeader(number); err != nil {
                    log.Error("Failed to fetch L1 block header", "lastest", number, "err", err)
                }
            }
        })

        go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
            number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.client, w.confirmations)
            if err != nil {
                log.Error("failed to get block number", "err", err)
            } else {
                if err := w.FetchContractEvent(number); err != nil {
                    log.Error("Failed to fetch bridge contract", "err", err)
                }
            }
        })

        <-w.stopCh
        cancel()
    }()
}

// Stop the Watcher module, for a graceful shutdown.
func (w *Watcher) Stop() {
    w.stopCh <- true
}

const contractEventsBlocksFetchLimit = int64(10)

// FetchBlockHeader pull latest L1 blocks and save in DB
func (w *Watcher) FetchBlockHeader(blockHeight uint64) error {
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
    fromBlock := int64(w.processedBlockHeight) + 1
    toBlock := int64(blockHeight)
    if toBlock < fromBlock {
@@ -201,10 +147,15 @@ func (w *Watcher) FetchBlockHeader(blockHeight uint64) error {
}

// FetchContractEvent pull latest event logs from given contract address and save in DB
func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
func (w *L1WatcherClient) FetchContractEvent() error {
    defer func() {
        log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
    }()
    blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
    if err != nil {
        log.Error("failed to get block number", "err", err)
        return err
    }

    fromBlock := int64(w.processedMsgHeight) + 1
    toBlock := int64(blockHeight)
@@ -317,7 +268,7 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
    return nil
}

func (w *Watcher) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) {
func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) {
    // Need use contract abi to parse event Log
    // Can only be tested after we have our contracts set up

@@ -1,4 +1,4 @@
package l1
package watcher_test

import (
    "context"
@@ -9,6 +9,8 @@ import (

    "scroll-tech/database"
    "scroll-tech/database/migrate"

    "scroll-tech/bridge/watcher"
)

func testStartWatcher(t *testing.T) {
@@ -23,7 +25,6 @@ func testStartWatcher(t *testing.T) {

    l1Cfg := cfg.L1Config

    watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
    watcher.Start()
    defer watcher.Stop()
    watcher := watcher.NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
    assert.NoError(t, watcher.FetchContractEvent())
}
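The refactor above removes the watcher's own Start/Stop goroutines: FetchContractEvent now resolves the latest confirmed block by itself, and callers drive it with a polling helper. A minimal sketch of that calling pattern, assuming the Loop helper from scroll-tech/common/utils invokes its func() callback at the given interval until the context is cancelled (as the l2 test's loopToFetchEvent further down uses it); cfg, client, and db are assumed initialized as in the test above:

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

l1Cfg := cfg.L1Config
w := watcher.NewL1WatcherClient(ctx, client, l1Cfg.StartHeight, l1Cfg.Confirmations,
    l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)

// Poll every two seconds; cancelling ctx stops the loop.
go cutils.Loop(ctx, 2*time.Second, func() {
    if err := w.FetchContractEvent(); err != nil {
        log.Error("failed to fetch L1 contract events", "err", err)
    }
})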
@@ -1,16 +1,15 @@
package l2
package watcher

import (
    "context"
    "errors"
    "fmt"
    "math/big"
    "reflect"
    "time"

    geth "github.com/scroll-tech/go-ethereum"
    "github.com/scroll-tech/go-ethereum/accounts/abi"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/hexutil"
    geth_types "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/event"
@@ -20,7 +19,6 @@ import (

    "scroll-tech/common/metrics"
    "scroll-tech/common/types"
    cutil "scroll-tech/common/utils"
    "scroll-tech/database"

    bridge_abi "scroll-tech/bridge/abi"
@@ -30,22 +28,16 @@ import (
// Metrics
var (
    bridgeL2MsgsSyncHeightGauge      = geth_metrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
    bridgeL2TracesFetchedHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/traces/fetched/height", metrics.ScrollRegistry)
    bridgeL2TracesFetchedGapGauge    = geth_metrics.NewRegisteredGauge("bridge/l2/traces/fetched/gap", metrics.ScrollRegistry)
    bridgeL2BlocksFetchedHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
    bridgeL2BlocksFetchedGapGauge    = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)

    bridgeL2MsgsSentEventsTotalCounter    = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry)
    bridgeL2MsgsAppendEventsTotalCounter  = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry)
    bridgeL2MsgsRelayedEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)

type relayedMessage struct {
    msgHash      common.Hash
    txHash       common.Hash
    isSuccessful bool
}

// WatcherClient provides APIs for others to subscribe to various events from l2geth
type WatcherClient struct {
// L2WatcherClient provides APIs for others to subscribe to various events from l2geth
type L2WatcherClient struct {
    ctx context.Context
    event.Feed

@@ -58,25 +50,25 @@ type WatcherClient struct {
    messengerAddress common.Address
    messengerABI     *abi.ABI

    messageQueueAddress common.Address
    messageQueueABI     *abi.ABI
    messageQueueAddress  common.Address
    messageQueueABI      *abi.ABI
    withdrawTrieRootSlot common.Hash

    // The height of the block from which the watcher has retrieved event logs
    processedMsgHeight uint64

    stopped uint64
    stopCh  chan struct{}
}

// NewL2WatcherClient takes an l2geth instance and generates an L2WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, orm database.OrmFactory) *WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, orm database.OrmFactory) *L2WatcherClient {
    savedHeight, err := orm.GetLayer2LatestWatchedHeight()
    if err != nil {
        log.Warn("fetch height from db failed", "err", err)
        savedHeight = 0
    }

    w := WatcherClient{
    w := L2WatcherClient{
        ctx:    ctx,
        Client: client,
        orm:    orm,
@@ -86,10 +78,10 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
        messengerAddress: messengerAddress,
        messengerABI:     bridge_abi.L2ScrollMessengerABI,

        messageQueueAddress: messageQueueAddress,
        messageQueueABI:     bridge_abi.L2MessageQueueABI,
        messageQueueAddress:  messageQueueAddress,
        messageQueueABI:      bridge_abi.L2MessageQueueABI,
        withdrawTrieRootSlot: withdrawTrieRootSlot,

        stopCh:  make(chan struct{}),
        stopped: 0,
    }

@@ -101,7 +93,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
    return &w
}

func (w *WatcherClient) initializeGenesis() error {
func (w *L2WatcherClient) initializeGenesis() error {
    if count, err := w.orm.GetBatchCount(); err != nil {
        return fmt.Errorf("failed to get batch count: %v", err)
    } else if count > 0 {
@@ -116,15 +108,7 @@ func (w *WatcherClient) initializeGenesis() error {

    log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())

    blockTrace := &geth_types.BlockTrace{
        Coinbase:         nil,
        Header:           genesis,
        Transactions:     []*geth_types.TransactionData{},
        StorageTrace:     nil,
        ExecutionResults: []*geth_types.ExecutionResult{},
        MPTWitness:       nil,
    }

    blockTrace := &types.WrappedBlock{Header: genesis, Transactions: nil, WithdrawTrieRoot: common.Hash{}}
    batchData := types.NewGenesisBatchData(blockTrace)

    if err = AddBatchInfoToDB(w.orm, batchData); err != nil {
@@ -147,52 +131,16 @@ func (w *WatcherClient) initializeGenesis() error {
    return nil
}

// Start the Listening process
func (w *WatcherClient) Start() {
    go func() {
        if reflect.ValueOf(w.orm).IsNil() {
            panic("must run L2 watcher with DB")
        }

        ctx, cancel := context.WithCancel(w.ctx)
        go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
            number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.Client, w.confirmations)
            if err != nil {
                log.Error("failed to get block number", "err", err)
            } else {
                w.tryFetchRunningMissingBlocks(ctx, number)
            }
        })

        go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
            number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.Client, w.confirmations)
            if err != nil {
                log.Error("failed to get block number", "err", err)
            } else {
                w.FetchContractEvent(number)
            }
        })

        <-w.stopCh
        cancel()
    }()
}

// Stop the Watcher module, for a graceful shutdown.
func (w *WatcherClient) Stop() {
    w.stopCh <- struct{}{}
}

const blockTracesFetchLimit = uint64(10)

// try to fetch missing blocks if inconsistent
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
// TryFetchRunningMissingBlocks tries to fetch missing blocks if inconsistent
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
    // Get the newest block in DB; there must be blocks at that time.
    // Don't use the "block_trace" table "trace" column's BlockTrace.Number,
    // because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
    heightInDB, err := w.orm.GetL2BlockTracesLatestHeight()
    heightInDB, err := w.orm.GetL2BlocksLatestHeight()
    if err != nil {
        log.Error("failed to GetL2BlockTracesLatestHeight", "err", err)
        log.Error("failed to GetL2BlocksLatestHeight", "err", err)
        return
    }

@@ -214,24 +162,60 @@ func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockH
            log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
            return
        }
        bridgeL2TracesFetchedHeightGauge.Update(int64(to))
        bridgeL2TracesFetchedGapGauge.Update(int64(blockHeight - to))
        bridgeL2BlocksFetchedHeightGauge.Update(int64(to))
        bridgeL2BlocksFetchedGapGauge.Update(int64(blockHeight - to))
    }
}

func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
    var traces []*geth_types.BlockTrace
    for number := from; number <= to; number++ {
        log.Debug("retrieving block trace", "height", number)
        trace, err2 := w.GetBlockTraceByNumber(ctx, big.NewInt(int64(number)))
        if err2 != nil {
            return fmt.Errorf("failed to GetBlockResultByHash: %v. number: %v", err2, number)
func txsToTxsData(txs geth_types.Transactions) []*geth_types.TransactionData {
    txsData := make([]*geth_types.TransactionData, len(txs))
    for i, tx := range txs {
        v, r, s := tx.RawSignatureValues()
        txsData[i] = &geth_types.TransactionData{
            Type:     tx.Type(),
            TxHash:   tx.Hash().String(),
            Nonce:    tx.Nonce(),
            ChainId:  (*hexutil.Big)(tx.ChainId()),
            Gas:      tx.Gas(),
            GasPrice: (*hexutil.Big)(tx.GasPrice()),
            To:       tx.To(),
            Value:    (*hexutil.Big)(tx.Value()),
            Data:     hexutil.Encode(tx.Data()),
            IsCreate: tx.To() == nil,
            V:        (*hexutil.Big)(v),
            R:        (*hexutil.Big)(r),
            S:        (*hexutil.Big)(s),
        }
        log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash().String())
        traces = append(traces, trace)
    }
    if len(traces) > 0 {
        if err := w.orm.InsertL2BlockTraces(traces); err != nil {
    return txsData
}
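Since txsToTxsData is new in this diff, a brief usage sketch may help. This is illustrative only: it signs a throwaway legacy transaction so V, R, and S come back non-zero, using the standard go-ethereum types/crypto APIs the fork inherits.

key, _ := crypto.GenerateKey()
signer := geth_types.NewEIP155Signer(big.NewInt(1))
tx, _ := geth_types.SignNewTx(key, signer, &geth_types.LegacyTx{
    Nonce:    0,
    To:       &common.Address{}, // nil here would make IsCreate true
    Value:    big.NewInt(1),
    Gas:      21000,
    GasPrice: big.NewInt(1e9),
})

// The watcher converts every transaction of a fetched block this way
// before wrapping it into a types.WrappedBlock.
data := txsToTxsData(geth_types.Transactions{tx})
fmt.Println(data[0].TxHash, data[0].IsCreate) // tx hash, false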
func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
    var blocks []*types.WrappedBlock

    for number := from; number <= to; number++ {
        log.Debug("retrieving block", "height", number)
        block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number)))
        if err2 != nil {
            return fmt.Errorf("failed to GetBlockByNumber: %v. number: %v", err2, number)
        }

        log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String())

        withdrawTrieRoot, err3 := w.StorageAt(ctx, w.messageQueueAddress, w.withdrawTrieRootSlot, big.NewInt(int64(number)))
        if err3 != nil {
            return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number)
        }

        blocks = append(blocks, &types.WrappedBlock{
            Header:           block.Header(),
            Transactions:     txsToTxsData(block.Transactions()),
            WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot),
        })
    }

    if len(blocks) > 0 {
        if err := w.orm.InsertWrappedBlocks(blocks); err != nil {
            return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
        }
    }
@@ -239,14 +223,18 @@ func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uin
    return nil
}

const contractEventsBlocksFetchLimit = int64(10)

// FetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
func (w *L2WatcherClient) FetchContractEvent() {
    defer func() {
        log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
    }()

    blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.Client, w.confirmations)
    if err != nil {
        log.Error("failed to get block number", "err", err)
        return
    }

    fromBlock := int64(w.processedMsgHeight) + 1
    toBlock := int64(blockHeight)

@@ -322,7 +310,7 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
    }
}

func (w *WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L2Message, []relayedMessage, error) {
func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L2Message, []relayedMessage, error) {
    // Need to use the contract ABI to parse the event logs
    // Can only be tested after we have our contracts set up
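The withdraw trie root is read straight out of the message queue contract's storage at a configured slot, so it needs no tracing support from the node. A standalone sketch of the same lookup against a bare ethclient; the endpoint, contract address, slot, and block number are placeholders:

cli, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
if err != nil {
    log.Crit("failed to connect to l2geth", "err", err)
}
// StorageAt returns the raw 32-byte slot value at the given block height.
raw, err := cli.StorageAt(context.Background(),
    common.HexToAddress("0x0000000000000000000000000000000000000000"), // placeholder: L2MessageQueue address
    common.Hash{},    // placeholder: withdrawTrieRootSlot
    big.NewInt(100))  // placeholder: block number to query at
if err != nil {
    log.Crit("failed to read storage slot", "err", err)
}
fmt.Println("withdraw trie root:", common.BytesToHash(raw).Hex())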
@@ -1,4 +1,4 @@
package l2
package watcher_test

import (
    "context"
@@ -19,7 +19,9 @@ import (

    "scroll-tech/bridge/mock_bridge"
    "scroll-tech/bridge/sender"
    "scroll-tech/bridge/watcher"

    cutils "scroll-tech/common/utils"
    "scroll-tech/database"
    "scroll-tech/database/migrate"
)
@@ -29,12 +31,16 @@ func testCreateNewWatcherAndStop(t *testing.T) {
    l2db, err := database.NewOrmFactory(cfg.DBConfig)
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
    defer l2db.Close()
    ctx := context.Background()
    subCtx, cancel := context.WithCancel(ctx)
    defer func() {
        cancel()
        l2db.Close()
    }()

    l2cfg := cfg.L2Config
    rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2db)
    rc.Start()
    defer rc.Stop()
    rc := watcher.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, l2db)
    loopToFetchEvent(subCtx, rc)

    l1cfg := cfg.L1Config
    l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
@@ -60,12 +66,17 @@ func testMonitorBridgeContract(t *testing.T) {
    db, err := database.NewOrmFactory(cfg.DBConfig)
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
    defer db.Close()
    ctx := context.Background()
    subCtx, cancel := context.WithCancel(ctx)

    defer func() {
        cancel()
        db.Close()
    }()

    l2cfg := cfg.L2Config
    wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, db)
    wc.Start()
    defer wc.Stop()
    wc := watcher.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
    loopToFetchEvent(subCtx, wc)

    previousHeight, err := l2Cli.BlockNumber(context.Background())
    assert.NoError(t, err)
@@ -79,9 +90,7 @@ func testMonitorBridgeContract(t *testing.T) {
    assert.NoError(t, err)

    rc := prepareWatcherClient(l2Cli, db, address)
    rc.Start()
    defer rc.Stop()

    loopToFetchEvent(subCtx, rc)
    // Call mock_bridge instance sendMessage to trigger emitting events
    toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
    message := []byte("testbridgecontract")
@@ -128,7 +137,13 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
    db, err := database.NewOrmFactory(cfg.DBConfig)
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
    defer db.Close()
    ctx := context.Background()
    subCtx, cancel := context.WithCancel(ctx)

    defer func() {
        cancel()
        db.Close()
    }()

    previousHeight, err := l2Cli.BlockNumber(context.Background()) // shadow the global previousHeight
    assert.NoError(t, err)
@@ -141,8 +156,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
    assert.NoError(t, err)

    rc := prepareWatcherClient(l2Cli, db, address)
    rc.Start()
    defer rc.Stop()
    loopToFetchEvent(subCtx, rc)

    // Call mock_bridge instance sendMessage to trigger emitting events multiple times
    numTransactions := 4
@@ -195,9 +209,9 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
    assert.Equal(t, 5, len(msgs))
}

func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *watcher.L2WatcherClient {
    confirmations := rpc.LatestBlockNumber
    return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, db)
    return watcher.NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
}

func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
@@ -209,3 +223,7 @@ func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.Privat
    assert.NoError(t, err)
    return auth
}

func loopToFetchEvent(subCtx context.Context, watcher *watcher.L2WatcherClient) {
    go cutils.Loop(subCtx, 2*time.Second, watcher.FetchContractEvent)
}
@@ -1,12 +1,10 @@
package l2
package watcher_test

import (
    "encoding/json"
    "fmt"
    "os"
    "testing"

    geth_types "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/stretchr/testify/assert"

@@ -26,12 +24,8 @@ var (
    l2Cli *ethclient.Client

    // block trace
    blockTrace1 *geth_types.BlockTrace
    blockTrace2 *geth_types.BlockTrace

    // batch data
    batchData1 *types.BatchData
    batchData2 *types.BatchData
    wrappedBlock1 *types.WrappedBlock
    wrappedBlock2 *types.WrappedBlock
)

func setupEnv(t *testing.T) (err error) {
@@ -54,35 +48,20 @@ func setupEnv(t *testing.T) (err error) {
        return err
    }
    // unmarshal blockTrace
    blockTrace1 = &geth_types.BlockTrace{}
    if err = json.Unmarshal(templateBlockTrace1, blockTrace1); err != nil {
    wrappedBlock1 = &types.WrappedBlock{}
    if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
        return err
    }

    parentBatch1 := &types.BlockBatch{
        Index: 1,
        Hash:  "0x0000000000000000000000000000000000000000",
    }
    batchData1 = types.NewBatchData(parentBatch1, []*geth_types.BlockTrace{blockTrace1}, nil)

    templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json")
    if err != nil {
        return err
    }
    // unmarshal blockTrace
    blockTrace2 = &geth_types.BlockTrace{}
    if err = json.Unmarshal(templateBlockTrace2, blockTrace2); err != nil {
    wrappedBlock2 = &types.WrappedBlock{}
    if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
        return err
    }
    parentBatch2 := &types.BlockBatch{
        Index: batchData1.Batch.BatchIndex,
        Hash:  batchData1.Hash().Hex(),
    }
    batchData2 = types.NewBatchData(parentBatch2, []*geth_types.BlockTrace{blockTrace2}, nil)

    fmt.Printf("batchhash1 = %x\n", batchData1.Hash())
    fmt.Printf("batchhash2 = %x\n", batchData2.Hash())

    return err
}

@@ -98,18 +77,13 @@ func TestFunction(t *testing.T) {
    if err := setupEnv(t); err != nil {
        t.Fatal(err)
    }

    // Run l1 watcher test cases.
    t.Run("TestStartWatcher", testStartWatcher)
    // Run l2 watcher test cases.
    t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
    t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
    t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)

    // Run l2 relayer test cases.
    t.Run("TestCreateNewRelayer", testCreateNewRelayer)
    t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
    t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
    t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)

    // Run batch proposer test cases.
    t.Run("TestBatchProposerProposeBatch", testBatchProposerProposeBatch)
    t.Run("TestBatchProposerGracefulRestart", testBatchProposerGracefulRestart)
26
build/dockerfiles/event_watcher.Dockerfile
Normal file
@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base

WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x

# Build event_watcher
FROM base as builder

RUN --mount=target=. \
    --mount=type=cache,target=/root/.cache/go-build \
    cd /src/bridge/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher

# Pull event_watcher into a second stage deploy alpine container
FROM alpine:latest

COPY --from=builder /bin/event_watcher /bin/

ENTRYPOINT ["event_watcher"]

@@ -2,4 +2,4 @@ assets/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*

@@ -11,16 +11,16 @@ COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x

# Build bridge
# Build gas_oracle
FROM base as builder

RUN --mount=target=. \
    --mount=type=cache,target=/root/.cache/go-build \
    cd /src/bridge/cmd && go build -v -p 4 -o /bin/bridge
    cd /src/bridge/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle

# Pull bridge into a second stage deploy alpine container
# Pull gas_oracle into a second stage deploy alpine container
FROM alpine:latest

COPY --from=builder /bin/bridge /bin/
COPY --from=builder /bin/gas_oracle /bin/

ENTRYPOINT ["bridge"]
ENTRYPOINT ["gas_oracle"]

5
build/dockerfiles/gas_oracle.Dockerfile.dockerignore
Normal file
@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*

26
build/dockerfiles/msg_relayer.Dockerfile
Normal file
@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base

WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x

# Build msg_relayer
FROM base as builder

RUN --mount=target=. \
    --mount=type=cache,target=/root/.cache/go-build \
    cd /src/bridge/cmd/msg_relayer/ && go build -v -p 4 -o /bin/msg_relayer

# Pull msg_relayer into a second stage deploy alpine container
FROM alpine:latest

COPY --from=builder /bin/msg_relayer /bin/

ENTRYPOINT ["msg_relayer"]

5
build/dockerfiles/msg_relayer.Dockerfile.dockerignore
Normal file
@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*

26
build/dockerfiles/rollup_relayer.Dockerfile
Normal file
@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base

WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x

# Build rollup_relayer
FROM base as builder

RUN --mount=target=. \
    --mount=type=cache,target=/root/.cache/go-build \
    cd /src/bridge/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer

# Pull rollup_relayer into a second stage deploy alpine container
FROM alpine:latest

COPY --from=builder /bin/rollup_relayer /bin/

ENTRYPOINT ["rollup_relayer"]

5
build/dockerfiles/rollup_relayer.Dockerfile.dockerignore
Normal file
@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*
@@ -4,7 +4,7 @@ ${GOROOT}/bin/bin/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.db.txt > coverage.db.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.common.txt > coverage.common.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
# ${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml
#${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml

npx cobertura-merge -o cobertura.xml \
    package1=coverage.bridge.xml \
@@ -31,8 +31,10 @@ type Cmd struct {

    checkFuncs cmap.ConcurrentMap //map[string]checkFunc

    //stdout bytes.Buffer
    Err error
    // open log flag.
    openLog bool
    // error channel
    ErrChan chan error
}

// NewCmd creates a Cmd instance.
@@ -41,6 +43,7 @@ func NewCmd(name string, args ...string) *Cmd {
        checkFuncs: cmap.New(),
        name:       name,
        args:       args,
        ErrChan:    make(chan error, 10),
    }
}

@@ -58,12 +61,12 @@ func (c *Cmd) runCmd() {
    cmd := exec.Command(c.args[0], c.args[1:]...) //nolint:gosec
    cmd.Stdout = c
    cmd.Stderr = c
    _ = cmd.Run()
    c.ErrChan <- cmd.Run()
}

// RunCmd runs in parallel when parallel is true.
func (c *Cmd) RunCmd(parallel bool) {
    fmt.Println("cmd: ", c.args)
    fmt.Println("cmd:", c.args)
    if parallel {
        go c.runCmd()
    } else {
@@ -71,12 +74,17 @@ func (c *Cmd) RunCmd(parallel bool) {
    }
}

// OpenLog enables cmd logging through this API.
func (c *Cmd) OpenLog(open bool) {
    c.openLog = open
}

func (c *Cmd) Write(data []byte) (int, error) {
    out := string(data)
    if verbose {
        fmt.Printf("%s: %v", c.name, out)
    if verbose || c.openLog {
        fmt.Printf("%s:\n\t%v", c.name, out)
    } else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
        fmt.Printf("%s: %v", c.name, out)
        fmt.Printf("%s:\n\t%v", c.name, out)
    }
    go c.checkFuncs.IterCb(func(_ string, value interface{}) {
        check := value.(checkFunc)

@@ -38,9 +38,17 @@ func (c *Cmd) RunApp(waitResult func() bool) {

// WaitExit waits until the process exits.
func (c *Cmd) WaitExit() {
    // Wait until all the check funcs are finished or the test status is failed.
    for !(c.Err != nil || c.checkFuncs.IsEmpty()) {
        <-time.After(time.Millisecond * 500)
    // Wait until all the check functions are finished; interrupt the loop when an error appears.
    var err error
    for err == nil && !c.checkFuncs.IsEmpty() {
        select {
        case err = <-c.ErrChan:
            if err != nil {
                fmt.Printf("%s appear error during running, err: %v\n", c.name, err)
            }
        default:
            <-time.After(time.Millisecond * 500)
        }
    }

    // Send interrupt signal.
@@ -56,7 +64,7 @@ func (c *Cmd) WaitExit() {
// Interrupt sends an interrupt signal.
func (c *Cmd) Interrupt() {
    c.mu.Lock()
    c.Err = c.cmd.Process.Signal(os.Interrupt)
    c.ErrChan <- c.cmd.Process.Signal(os.Interrupt)
    c.mu.Unlock()
}

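A short sketch of how the reworked error channel might be consumed by a caller that runs a long-lived command in parallel; the command name and arguments here are placeholders, and only APIs shown in the diff above are used:

cmd := NewCmd("db_cli", "docker", "ps")
cmd.OpenLog(true) // echo all output, not just lines containing "error"/"warning"
cmd.RunCmd(true)  // parallel: runCmd pushes the eventual exit error onto ErrChan

select {
case err := <-cmd.ErrChan:
    if err != nil {
        fmt.Println("command exited with error:", err)
    }
case <-time.After(10 * time.Second):
    fmt.Println("command still running after 10s")
}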
@@ -80,7 +80,7 @@ func (b *App) RunDBApp(t *testing.T, option, keyword string) {
    app.RunApp(nil)
}

// Free clear all running images
// Free clear all running images, double check and recycle docker container.
func (b *App) Free() {
    if b.l1gethImg != nil {
        _ = b.l1gethImg.Stop()

@@ -82,7 +82,7 @@ func (i *ImgDB) Endpoint() string {
}

func (i *ImgDB) prepare() []string {
    cmd := []string{"docker", "run", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
    cmd := []string{"docker", "run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
    envs := []string{
        "-e", "POSTGRES_PASSWORD=" + i.password,
        "-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
@@ -114,8 +114,12 @@ func (i *ImgDB) isOk() bool {
        i.id = GetContainerID(i.name)
        return i.id != ""
    })
    return i.id != ""
    case err := <-i.cmd.ErrChan:
        if err != nil {
            fmt.Printf("failed to start %s, err: %v\n", i.name, err)
        }
    case <-time.After(time.Second * 20):
        return false
    }
    return i.id != ""
}

@@ -93,10 +93,14 @@ func (i *ImgGeth) isOk() bool {
        i.id = GetContainerID(i.name)
        return i.id != ""
    })
    return i.id != ""
    case err := <-i.cmd.ErrChan:
        if err != nil {
            fmt.Printf("failed to start %s, err: %v\n", i.name, err)
        }
    case <-time.After(time.Second * 10):
        return false
    }
    return i.id != ""
}

// Stop the docker container.
@@ -121,7 +125,7 @@ func (i *ImgGeth) Stop() error {
}

func (i *ImgGeth) prepare() []string {
    cmds := []string{"docker", "run", "--name", i.name}
    cmds := []string{"docker", "run", "--rm", "--name", i.name}
    var ports []string
    if i.httpPort != 0 {
        ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)

@@ -3,7 +3,7 @@ use libc::c_char;
use std::cell::OnceCell;
use types::eth::BlockTrace;
use zkevm::circuit::AGG_DEGREE;
use zkevm::utils::{load_or_create_params, load_or_create_seed};
use zkevm::utils::{load_params, load_seed};
use zkevm::{circuit::DEGREE, prover::Prover};

static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -15,9 +15,9 @@ pub unsafe extern "C" fn init_prover(params_path: *const c_char, seed_path: *con

    let params_path = c_char_to_str(params_path);
    let seed_path = c_char_to_str(seed_path);
    let params = load_or_create_params(params_path, *DEGREE).unwrap();
    let agg_params = load_or_create_params(params_path, *AGG_DEGREE).unwrap();
    let seed = load_or_create_seed(seed_path).unwrap();
    let params = load_params(params_path, *DEGREE).unwrap();
    let agg_params = load_params(params_path, *AGG_DEGREE).unwrap();
    let seed = load_seed(seed_path).unwrap();
    let p = Prover::from_params_and_seed(params, agg_params, seed);
    PROVER.set(p).unwrap();
}

@@ -4,7 +4,7 @@ use std::fs::File;
use std::io::Read;
use zkevm::circuit::{AGG_DEGREE, DEGREE};
use zkevm::prover::AggCircuitProof;
use zkevm::utils::load_or_create_params;
use zkevm::utils::load_params;
use zkevm::verifier::Verifier;

static mut VERIFIER: Option<&Verifier> = None;
@@ -20,8 +20,8 @@ pub unsafe extern "C" fn init_verifier(params_path: *const c_char, agg_vk_path:
    let mut agg_vk = vec![];
    f.read_to_end(&mut agg_vk).unwrap();

    let params = load_or_create_params(params_path, *DEGREE).unwrap();
    let agg_params = load_or_create_params(params_path, *AGG_DEGREE).unwrap();
    let params = load_params(params_path, *DEGREE).unwrap();
    let agg_params = load_params(params_path, *AGG_DEGREE).unwrap();

    let v = Box::new(Verifier::from_params(params, agg_params, Some(agg_vk)));
    VERIFIER = Some(Box::leak(v))

@@ -114,41 +114,41 @@ func (b *BatchData) Hash() *common.Hash {

// NewBatchData creates a BatchData given the parent batch information and the traces of the blocks
// included in this batch
func NewBatchData(parentBatch *BlockBatch, blockTraces []*types.BlockTrace, piCfg *PublicInputHashConfig) *BatchData {
func NewBatchData(parentBatch *BlockBatch, blocks []*WrappedBlock, piCfg *PublicInputHashConfig) *BatchData {
    batchData := new(BatchData)
    batch := &batchData.Batch

    // set BatchIndex, ParentBatchHash
    batch.BatchIndex = parentBatch.Index + 1
    batch.ParentBatchHash = common.HexToHash(parentBatch.Hash)
    batch.Blocks = make([]abi.IScrollChainBlockContext, len(blockTraces))
    batch.Blocks = make([]abi.IScrollChainBlockContext, len(blocks))

    var batchTxDataBuf bytes.Buffer
    batchTxDataWriter := bufio.NewWriter(&batchTxDataBuf)

    for i, trace := range blockTraces {
        batchData.TotalTxNum += uint64(len(trace.Transactions))
        batchData.TotalL2Gas += trace.Header.GasUsed
    for i, block := range blocks {
        batchData.TotalTxNum += uint64(len(block.Transactions))
        batchData.TotalL2Gas += block.Header.GasUsed

        // set baseFee to 0 when it's nil in the block header
        baseFee := trace.Header.BaseFee
        baseFee := block.Header.BaseFee
        if baseFee == nil {
            baseFee = big.NewInt(0)
        }

        batch.Blocks[i] = abi.IScrollChainBlockContext{
            BlockHash:   trace.Header.Hash(),
            ParentHash:  trace.Header.ParentHash,
            BlockNumber: trace.Header.Number.Uint64(),
            Timestamp:   trace.Header.Time,
            BlockHash:   block.Header.Hash(),
            ParentHash:  block.Header.ParentHash,
            BlockNumber: block.Header.Number.Uint64(),
            Timestamp:   block.Header.Time,
            BaseFee:     baseFee,
            GasLimit:        trace.Header.GasLimit,
            NumTransactions: uint16(len(trace.Transactions)),
            GasLimit:        block.Header.GasLimit,
            NumTransactions: uint16(len(block.Transactions)),
            NumL1Messages: 0, // TODO: currently use 0, will re-enable after we use l2geth to include L1 messages
        }

        // fill in RLP-encoded transactions
        for _, txData := range trace.Transactions {
        for _, txData := range block.Transactions {
            data, _ := hexutil.Decode(txData.Data)
            // right now we only support legacy tx
            tx := types.NewTx(&types.LegacyTx{
@@ -170,15 +170,14 @@ func NewBatchData(parentBatch *BlockBatch, blockTraces []*types.BlockTrace, piCf
            batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
        }

        // set PrevStateRoot from the first block
        if i == 0 {
            batch.PrevStateRoot = trace.StorageTrace.RootBefore
            batch.PrevStateRoot = common.HexToHash(parentBatch.StateRoot)
        }

        // set NewStateRoot & WithdrawTrieRoot from the last block
        if i == len(blockTraces)-1 {
            batch.NewStateRoot = trace.Header.Root
            batch.WithdrawTrieRoot = trace.WithdrawTrieRoot
        if i == len(blocks)-1 {
            batch.NewStateRoot = block.Header.Root
            batch.WithdrawTrieRoot = block.WithdrawTrieRoot
        }
    }

@@ -193,7 +192,7 @@ func NewBatchData(parentBatch *BlockBatch, blockTraces []*types.BlockTrace, piCf
}

// NewGenesisBatchData generates the batch that contains the genesis block.
func NewGenesisBatchData(genesisBlockTrace *types.BlockTrace) *BatchData {
func NewGenesisBatchData(genesisBlockTrace *WrappedBlock) *BatchData {
    header := genesisBlockTrace.Header
    if header.Number.Uint64() != 0 {
        panic("invalid genesis block trace: block number is not 0")

@@ -75,15 +75,7 @@ func TestNewGenesisBatch(t *testing.T) {
        "wrong genesis block header",
    )

    blockTrace := &geth_types.BlockTrace{
        Coinbase:         nil,
        Header:           genesisBlock,
        Transactions:     []*geth_types.TransactionData{},
        StorageTrace:     nil,
        ExecutionResults: []*geth_types.ExecutionResult{},
        MPTWitness:       nil,
    }

    blockTrace := &WrappedBlock{genesisBlock, nil, common.Hash{}}
    batchData := NewGenesisBatchData(blockTrace)
    t.Log(batchData.Batch.Blocks[0])
    batchData.piCfg = &PublicInputHashConfig{

14
common/types/block.go
Normal file
@@ -0,0 +1,14 @@
package types

import (
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
)

// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
    Header *types.Header `json:"header"`
    // Transactions is only used to recover types.Transactions, as the from field of types.TransactionData is missing otherwise.
    Transactions     []*types.TransactionData `json:"transactions"`
    WithdrawTrieRoot common.Hash              `json:"withdraw_trie_root,omitempty"`
}
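WrappedBlock is the new unit that the L2 watcher hands to the batch builder in place of full block traces, so a small sketch of the round trip may help. The JSON file path is a placeholder; NewBatchData, BlockBatch, and the StateRoot-based PrevStateRoot behavior are as shown in the batch diff above:

raw, err := os.ReadFile("testdata/wrapped_block.json") // placeholder path
if err != nil {
    panic(err)
}
wrapped := &types.WrappedBlock{}
if err := json.Unmarshal(raw, wrapped); err != nil {
    panic(err)
}

// PrevStateRoot now comes from the parent batch record rather than a storage trace.
parent := &types.BlockBatch{Index: 1, Hash: "0x0000000000000000000000000000000000000000", StateRoot: "0x0000000000000000000000000000000000000000"}
batch := types.NewBatchData(parent, []*types.WrappedBlock{wrapped}, nil)
fmt.Printf("batch hash = %s\n", batch.Hash().Hex())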
@@ -82,6 +82,9 @@ const (

    // MsgExpired represents the from_layer message status is expired
    MsgExpired

    // MsgRelayFailed represents the from_layer message status is relay failed
    MsgRelayFailed
)

// L1Message is structure of stored layer1 bridge message
@@ -159,6 +162,7 @@ type SessionInfo struct {
    ID             string                   `json:"id"`
    Rollers        map[string]*RollerStatus `json:"rollers"`
    StartTimestamp int64                    `json:"start_timestamp"`
    Attempts       uint8                    `json:"attempts,omitempty"`
}

// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
@@ -200,7 +204,7 @@ func (ps ProvingStatus) String() string {
    }
}

// RollupStatus block_batch rollup_status (pending, committing, committed, finalizing, finalized)
// RollupStatus block_batch rollup_status (pending, committing, committed, commit_failed, finalizing, finalized, finalize_skipped, finalize_failed)
type RollupStatus int

const (
@@ -218,6 +222,10 @@ const (
    RollupFinalized
    // RollupFinalizationSkipped : batch finalization is skipped
    RollupFinalizationSkipped
    // RollupCommitFailed : rollup commit transaction confirmed but failed
    RollupCommitFailed
    // RollupFinalizeFailed : rollup finalize transaction is confirmed but failed
    RollupFinalizeFailed
)

// BlockBatch is structure of stored block_batch

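The two new terminal failure states matter to anything polling rollup status: "confirmed but failed" is now distinguishable from "still pending". A hypothetical consumer might branch on them like this (retryCommit and alert are placeholder helpers, not part of the repo):

switch status {
case types.RollupCommitFailed:
    // The commit tx was mined but reverted; the batch needs to be re-committed.
    retryCommit(batchHash)
case types.RollupFinalizeFailed:
    // The finalize tx was mined but reverted; surface it for manual inspection.
    alert("finalize failed", batchHash)
case types.RollupFinalized:
    // Terminal success state; nothing to do.
}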
@@ -5,7 +5,7 @@ import (
    "runtime/debug"
)

var tag = "alpha-v2.4"
var tag = "v3.0.10"

var commit = func() string {
    if info, ok := debug.ReadBuildInfo(); ok {

@@ -43,7 +43,7 @@ interface IScrollMessenger {
   *****************************/

  /// @notice Send cross chain message from L1 to L2 or L2 to L1.
  /// @param target The address of account who recieve the message.
  /// @param target The address of account who receive the message.
  /// @param value The amount of ether passed when call target contract.
  /// @param message The content of the message.
  /// @param gasLimit Gas limit required to complete the message relay on corresponding chain.
@@ -55,7 +55,7 @@ interface IScrollMessenger {
  ) external payable;

  /// @notice Send cross chain message from L1 to L2 or L2 to L1.
  /// @param target The address of account who recieve the message.
  /// @param target The address of account who receive the message.
  /// @param value The amount of ether passed when call target contract.
  /// @param message The content of the message.
  /// @param gasLimit Gas limit required to complete the message relay on corresponding chain.

@@ -117,6 +117,8 @@ func (m *Manager) SubmitProof(proof *message.ProofMsg) (bool, error) {
        return false, fmt.Errorf("the roller or session id doesn't exist, pubkey: %s, ID: %s", pubkey, proof.ID)
    }

    m.updateMetricRollerProofsLastFinishedTimestampGauge(pubkey)

    err := m.handleZkProof(pubkey, proof.ProofDetail)
    if err != nil {
        return false, err

@@ -2,6 +2,7 @@
  "roller_manager_config": {
    "compression_level": 9,
    "rollers_per_session": 1,
    "session_attempts": 2,
    "collection_time": 180,
    "token_time_to_live": 60,
    "verifier": {

@@ -11,7 +11,8 @@ import (
)

const (
    defaultNumberOfVerifierWorkers = 10
    defaultNumberOfVerifierWorkers      = 10
    defaultNumberOfSessionRetryAttempts = 2
)

// RollerManagerConfig loads sequencer configuration items.
@@ -21,6 +22,9 @@ type RollerManagerConfig struct {
    OrderSession string `json:"order_session,omitempty"`
    // The amount of rollers to pick per proof generation session.
    RollersPerSession uint8 `json:"rollers_per_session"`
    // Number of attempts that a session can be retried if previous attempts failed.
    // Currently we only consider proving timeout as failure here.
    SessionAttempts uint8 `json:"session_attempts,omitempty"`
    // Zk verifier config.
    Verifier *VerifierConfig `json:"verifier,omitempty"`
    // Proof collection time (in minutes).
@@ -74,6 +78,9 @@ func NewConfig(file string) (*Config, error) {
    if cfg.RollerManagerConfig.MaxVerifierWorkers == 0 {
        cfg.RollerManagerConfig.MaxVerifierWorkers = defaultNumberOfVerifierWorkers
    }
    if cfg.RollerManagerConfig.SessionAttempts == 0 {
        cfg.RollerManagerConfig.SessionAttempts = defaultNumberOfSessionRetryAttempts
    }

    return cfg, nil
}

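A minimal sketch of what this defaulting buys a caller, assuming the coordinator config package is imported as "config" and the file path is a placeholder:

cfg, err := config.NewConfig("./config.json") // placeholder path
if err != nil {
    log.Crit("failed to load config", "err", err)
}
// If the file leaves "session_attempts" unset (zero), NewConfig fills in
// defaultNumberOfSessionRetryAttempts, so a timed-out session is retried
// once before being marked failed.
fmt.Println(cfg.RollerManagerConfig.SessionAttempts) // 2 when omitted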
@@ -30,10 +30,19 @@ import (
)

var (
    coordinatorSessionsTimeoutTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/timeout/total", metrics.ScrollRegistry)
    coordinatorProofsReceivedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/proofs/received/total", metrics.ScrollRegistry)
    coordinatorProofsVerifiedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/proofs/verified/total", metrics.ScrollRegistry)
    coordinatorProofsVerifiedFailedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/proofs/verified/failed/total", metrics.ScrollRegistry)
    // proofs
    coordinatorProofsReceivedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/proofs/received/total", metrics.ScrollRegistry)

    coordinatorProofsVerifiedSuccessTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/verified/success/time", metrics.ScrollRegistry)
    coordinatorProofsVerifiedFailedTimeTimer  = geth_metrics.NewRegisteredTimer("coordinator/proofs/verified/failed/time", metrics.ScrollRegistry)
    coordinatorProofsGeneratedFailedTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/generated/failed/time", metrics.ScrollRegistry)

    // sessions
    coordinatorSessionsSuccessTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/success/total", metrics.ScrollRegistry)
    coordinatorSessionsTimeoutTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/timeout/total", metrics.ScrollRegistry)
    coordinatorSessionsFailedTotalCounter  = geth_metrics.NewRegisteredCounter("coordinator/sessions/failed/total", metrics.ScrollRegistry)

    coordinatorSessionsActiveNumberGauge = geth_metrics.NewRegisteredCounter("coordinator/sessions/active/number", metrics.ScrollRegistry)
)

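Switching from plain counters to timers means each verification also records a latency distribution, not just a count. A hedged sketch of the pattern using the go-ethereum metrics API these registrations come from; verify() is a placeholder, and the diff itself actually measures from the session's StartTimestamp rather than the call site:

start := time.Now()
ok, err := verify() // placeholder for m.verifyProof(msg.Proof)
elapsed := time.Since(start)
if err == nil && ok {
    // Timer.Update feeds a histogram plus a meter, so dashboards get both
    // rate and latency percentiles from a single metric.
    coordinatorProofsVerifiedSuccessTimeTimer.Update(elapsed)
} else {
    coordinatorProofsVerifiedFailedTimeTimer.Update(elapsed)
}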
const (
|
||||
@@ -176,7 +185,7 @@ func (m *Manager) Loop() {
|
||||
}
|
||||
}
|
||||
// Select roller and send message
|
||||
for len(tasks) > 0 && m.StartProofGenerationSession(tasks[0]) {
|
||||
for len(tasks) > 0 && m.StartProofGenerationSession(tasks[0], nil) {
|
||||
tasks = tasks[1:]
|
||||
}
|
||||
case <-m.ctx.Done():
|
||||
@@ -242,7 +251,8 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
if !ok {
|
||||
return fmt.Errorf("proof generation session for id %v does not existID", msg.ID)
|
||||
}
|
||||
proofTimeSec := uint64(time.Since(time.Unix(sess.info.StartTimestamp, 0)).Seconds())
|
||||
proofTime := time.Since(time.Unix(sess.info.StartTimestamp, 0))
|
||||
proofTimeSec := uint64(proofTime.Seconds())
|
||||
|
||||
// Ensure this roller is eligible to participate in the session.
|
||||
roller, ok := sess.info.Rollers[pk]
|
||||
@@ -267,6 +277,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
"proof id", msg.ID,
|
||||
"roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey,
|
||||
"proof time", proofTimeSec,
|
||||
)
|
||||
|
||||
defer func() {
|
||||
@@ -287,11 +298,14 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
}()
|
||||
|
||||
if msg.Status != message.StatusOk {
|
||||
log.Error(
|
||||
"Roller failed to generate proof",
|
||||
"msg.ID", msg.ID,
|
||||
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsGeneratedFailedTimeTimer(roller.PublicKey, proofTime)
|
||||
log.Info(
|
||||
"proof generated by roller failed",
|
||||
"proof id", msg.ID,
|
||||
"roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey,
|
||||
"proof time", proofTimeSec,
|
||||
"error", msg.Error,
|
||||
)
|
||||
return nil
|
||||
@@ -309,15 +323,14 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
|
||||
coordinatorProofsReceivedTotalCounter.Inc(1)
|
||||
|
||||
var err error
|
||||
success, err = m.verifyProof(msg.Proof)
|
||||
if err != nil {
|
||||
var verifyErr error
|
||||
success, verifyErr = m.verifyProof(msg.Proof)
|
||||
if verifyErr != nil {
|
||||
// TODO: this is only a temp workaround for testnet, we should return err in real cases
|
||||
success = false
|
||||
log.Error("Failed to verify zk proof", "proof id", msg.ID, "error", err)
|
||||
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "proof time", proofTimeSec, "error", verifyErr)
|
||||
// TODO: Roller needs to be slashed if proof is invalid.
|
||||
} else {
|
||||
log.Info("Verify zk proof successfully", "verification result", success, "proof id", msg.ID)
|
||||
}
|
||||
|
||||
if success {
|
||||
@@ -329,29 +342,40 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
"error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
coordinatorProofsVerifiedTotalCounter.Inc(1)
|
||||
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(roller.PublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "proof time", proofTimeSec)
|
||||
} else {
|
||||
coordinatorProofsVerifiedFailedTotalCounter.Inc(1)
|
||||
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsVerifiedFailedTimeTimer(roller.PublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "proof time", proofTimeSec, "error", verifyErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CollectProofs collects proofs corresponding to a proof generation session.
|
||||
func (m *Manager) CollectProofs(sess *session) {
|
||||
//Cleanup roller sessions before return.
|
||||
defer func() {
|
||||
// TODO: remove the clean-up, rollers report healthy status.
|
||||
m.mu.Lock()
|
||||
for pk := range sess.info.Rollers {
|
||||
m.freeTaskIDForRoller(pk, sess.info.ID)
|
||||
}
|
||||
delete(m.sessions, sess.info.ID)
|
||||
m.mu.Unlock()
|
||||
}()
|
||||
coordinatorSessionsActiveNumberGauge.Inc(1)
|
||||
defer coordinatorSessionsActiveNumberGauge.Dec(1)
|
||||
|
||||
for {
|
||||
select {
|
||||
//Execute after timeout, set in config.json. Consider all rollers failed.
|
||||
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
|
||||
// Check if session can be replayed
|
||||
if sess.info.Attempts < m.cfg.SessionAttempts {
|
||||
if m.StartProofGenerationSession(nil, sess) {
|
||||
m.mu.Lock()
|
||||
for pk := range sess.info.Rollers {
|
||||
m.freeTaskIDForRoller(pk, sess.info.ID)
|
||||
}
|
||||
m.mu.Unlock()
|
||||
log.Info("Retrying session", "session id:", sess.info.ID)
|
||||
return
|
||||
}
|
||||
}
|
||||
// record failed session.
|
||||
errMsg := "proof generation session ended without receiving any valid proofs"
|
||||
m.addFailedSession(sess, errMsg)
|
||||
@@ -363,6 +387,13 @@ func (m *Manager) CollectProofs(sess *session) {
|
||||
if err := m.orm.UpdateProvingStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("fail to reset task_status as Unassigned", "id", sess.info.ID, "err", err)
|
||||
}
|
||||
m.mu.Lock()
|
||||
for pk := range sess.info.Rollers {
|
||||
m.freeTaskIDForRoller(pk, sess.info.ID)
|
||||
}
|
||||
delete(m.sessions, sess.info.ID)
|
||||
m.mu.Unlock()
|
||||
coordinatorSessionsTimeoutTotalCounter.Inc(1)
|
||||
return
|
||||
|
||||
//Execute after one of the roller finishes sending proof, return early if all rollers had sent results.
|
||||
@@ -373,6 +404,7 @@ func (m *Manager) CollectProofs(sess *session) {
|
||||
if err := m.orm.UpdateProvingStatus(ret.id, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("failed to update proving_status as failed", "msg.ID", ret.id, "error", err)
|
||||
}
|
||||
coordinatorSessionsFailedTotalCounter.Inc(1)
|
||||
}
|
||||
if err := m.orm.SetSessionInfo(sess.info); err != nil {
|
||||
log.Error("db set session info fail", "pk", ret.pk, "error", err)
|
||||
@@ -386,7 +418,14 @@ func (m *Manager) CollectProofs(sess *session) {
|
||||
randIndex := mathrand.Intn(len(validRollers))
|
||||
_ = validRollers[randIndex]
|
||||
// TODO: reward winner
|
||||
|
||||
for pk := range sess.info.Rollers {
|
||||
m.freeTaskIDForRoller(pk, sess.info.ID)
|
||||
}
|
||||
delete(m.sessions, sess.info.ID)
|
||||
m.mu.Unlock()
|
||||
|
||||
coordinatorSessionsSuccessTotalCounter.Inc(1)
|
||||
return
|
||||
}
|
||||
m.mu.Unlock()
|
||||
@@ -439,27 +478,39 @@ func (m *Manager) APIs() []rpc.API {
|
||||
}
|
||||
|
||||
// StartProofGenerationSession starts a proof generation session
|
||||
func (m *Manager) StartProofGenerationSession(task *types.BlockBatch) (success bool) {
|
||||
func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSession *session) (success bool) {
|
||||
var taskId string
|
||||
if task != nil {
|
||||
taskId = task.Hash
|
||||
} else {
|
||||
taskId = prevSession.info.ID
|
||||
}
|
||||
if m.GetNumberOfIdleRollers() == 0 {
|
||||
log.Warn("no idle roller when starting proof generation session", "id", task.Hash)
|
||||
log.Warn("no idle roller when starting proof generation session", "id", taskId)
|
||||
return false
|
||||
}
|
||||
|
||||
log.Info("start proof generation session", "id", task.Hash)
|
||||
log.Info("start proof generation session", "id", taskId)
|
||||
defer func() {
|
||||
if !success {
|
||||
if err := m.orm.UpdateProvingStatus(task.Hash, types.ProvingTaskUnassigned); err != nil {
|
||||
log.Error("fail to reset task_status as Unassigned", "id", task.Hash, "err", err)
|
||||
if task != nil {
|
||||
if err := m.orm.UpdateProvingStatus(taskId, types.ProvingTaskUnassigned); err != nil {
|
||||
log.Error("fail to reset task_status as Unassigned", "id", taskId, "err", err)
|
||||
}
|
||||
} else {
if err := m.orm.UpdateProvingStatus(taskId, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Failed", "id", taskId, "err", err)
}
}
}
}()

// Get block traces.
blockInfos, err := m.orm.GetL2BlockInfos(map[string]interface{}{"batch_hash": task.Hash})
blockInfos, err := m.orm.GetL2BlockInfos(map[string]interface{}{"batch_hash": taskId})
if err != nil {
log.Error(
"could not GetBlockInfos",
"batch_hash", task.Hash,
"batch_hash", taskId,
"error", err,
)
return false
@@ -486,52 +537,58 @@ func (m *Manager) StartProofGenerationSession(task *types.BlockBatch) (success b
log.Info("selectRoller returns nil")
break
}
log.Info("roller is picked", "session id", task.Hash, "name", roller.Name, "public key", roller.PublicKey)
log.Info("roller is picked", "session id", taskId, "name", roller.Name, "public key", roller.PublicKey)
// send trace to roller
if !roller.sendTask(task.Hash, traces) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", task.Hash)
if !roller.sendTask(taskId, traces) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskId)
continue
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
}
// No roller assigned.
if len(rollers) == 0 {
log.Error("no roller assigned", "id", task.Hash, "number of idle rollers", m.GetNumberOfIdleRollers())
log.Error("no roller assigned", "id", taskId, "number of idle rollers", m.GetNumberOfIdleRollers())
return false
}

// Update session proving status as assigned.
if err = m.orm.UpdateProvingStatus(task.Hash, types.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", task.Hash, "err", err)
if err = m.orm.UpdateProvingStatus(taskId, types.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", taskId, "err", err)
return false
}

// Create a proof generation session.
sess := &session{
info: &types.SessionInfo{
ID: task.Hash,
ID: taskId,
Rollers: rollers,
StartTimestamp: time.Now().Unix(),
Attempts: 1,
},
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
if prevSession != nil {
sess.info.Attempts += prevSession.info.Attempts
}

for _, roller := range sess.info.Rollers {
log.Info(
"assigned proof to roller",
"session id", sess.info.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof status", roller.Status)
}

// Store session info.
if err = m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "error", err)
for _, roller := range sess.info.Rollers {
log.Error(
"restore roller info for session",
"session id", sess.info.ID,
"roller name", roller.Name,
"public key", roller.PublicKey,
"proof status", roller.Status)
}
log.Error("db set session info fail", "session id", sess.info.ID, "error", err)
return false
}

m.mu.Lock()
m.sessions[task.Hash] = sess
m.sessions[taskId] = sess
m.mu.Unlock()
go m.CollectProofs(sess)

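The hunk above opens midway through a deferred cleanup: when StartProofGenerationSession ends up returning false, the task's proving status is rolled back to Failed. A distilled sketch of that defer-on-named-return idiom, with the orm call stubbed out — only the shape is taken from the diff; markFailed is an illustrative stand-in:

```go
package main

import "fmt"

// markFailed stands in for m.orm.UpdateProvingStatus(taskID, types.ProvingTaskFailed).
func markFailed(taskID string) error {
	fmt.Println("resetting task_status to Failed for", taskID)
	return nil
}

// assign mirrors the shape of StartProofGenerationSession: the deferred
// closure inspects the named return value and rolls the status back on
// any failure path, so every early `return false` is covered.
func assign(taskID string) (success bool) {
	defer func() {
		if !success {
			if err := markFailed(taskID); err != nil {
				fmt.Println("fail to reset task_status as Failed", "id", taskID, "err", err)
			}
		}
	}()
	// ... roller selection would happen here; pretend it failed.
	return false
}

func main() { assign("batch-0x01") }
```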
@@ -17,7 +17,6 @@ import (
"time"

"github.com/scroll-tech/go-ethereum"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
@@ -64,8 +63,8 @@ func setEnv(t *testing.T) (err error) {
return err
}
// unmarshal blockTrace
blockTrace := &geth_types.BlockTrace{}
if err = json.Unmarshal(templateBlockTrace, blockTrace); err != nil {
wrappedBlock := &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace, wrappedBlock); err != nil {
return err
}

@@ -73,7 +72,7 @@ func setEnv(t *testing.T) (err error) {
Index: 1,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData = types.NewBatchData(parentBatch, []*geth_types.BlockTrace{blockTrace}, nil)
batchData = types.NewBatchData(parentBatch, []*types.WrappedBlock{wrappedBlock}, nil)

return
}
@@ -88,6 +87,7 @@ func TestApis(t *testing.T) {
t.Run("TestSeveralConnections", testSeveralConnections)
t.Run("TestValidProof", testValidProof)
t.Run("TestInvalidProof", testInvalidProof)
t.Run("TestTimedoutProof", testTimedoutProof)
t.Run("TestIdleRollerSelection", testIdleRollerSelection)
// TODO: Restart roller alone when received task, can add this test case in integration-test.
//t.Run("TestRollerReconnect", testRollerReconnect)
@@ -357,6 +357,86 @@ func testInvalidProof(t *testing.T) {
}
}

func testTimedoutProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()

// create first mock roller, that will not send any proof.
roller1 := newMockRoller(t, "roller_test"+strconv.Itoa(0), wsURL)
defer func() {
// close connection
roller1.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers())

var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
assert.NoError(t, err)
for i := range hashes {
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
hashes[i] = batchData.Hash().Hex()

}
assert.NoError(t, dbTx.Commit())

// verify proof status, it should be assigned, because roller didn't send any proof
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(hashes) > 0 {
select {
case <-tick:
status, err := l2db.GetProvingStatusByHash(hashes[0])
assert.NoError(t, err)
if status == types.ProvingTaskAssigned {
hashes = hashes[1:]
}
case <-tickStop:
t.Error("failed to check proof status")
return
}
}

// create second mock roller, that will send valid proof.
roller2 := newMockRoller(t, "roller_test"+strconv.Itoa(1), wsURL)
roller2.waitTaskAndSendProof(t, time.Second, false, true)
defer func() {
// close connection
roller2.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers())

// wait manager to finish first CollectProofs
<-time.After(60 * time.Second)

// verify proof status, it should be verified now, because second roller sent valid proof
for len(hashes) > 0 {
select {
case <-tick:
status, err := l2db.GetProvingStatusByHash(hashes[0])
assert.NoError(t, err)
if status == types.ProvingTaskVerified {
hashes = hashes[1:]
}
case <-tickStop:
t.Error("failed to check proof status")
return
}
}
}

func testIdleRollerSelection(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
@@ -506,6 +586,7 @@ func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, rollersPerSession
CollectionTime: 1,
TokenTimeToLive: 5,
MaxVerifierWorkers: 10,
SessionAttempts: 2,
}, db, nil)
assert.NoError(t, err)
assert.NoError(t, rollerManager.Start())

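Both status checks in testTimedoutProof run the same two-channel polling loop: a progress tick plus a hard deadline. A generic sketch of that pattern — fetch and want stand in for l2db.GetProvingStatusByHash and the expected types.ProvingTask* value, and are not names from the diff. Using time.NewTicker instead of time.Tick lets the helper stop the ticker when it returns:

```go
package coordinator_test

import (
	"testing"
	"time"
)

// waitForStatus polls fetch every 500ms until it returns want, and fails
// the test once deadline elapses.
func waitForStatus(t *testing.T, fetch func() (int, error), want int, deadline time.Duration) {
	tick := time.NewTicker(500 * time.Millisecond)
	defer tick.Stop()
	timeout := time.After(deadline)
	for {
		select {
		case <-tick.C:
			got, err := fetch()
			if err != nil {
				t.Fatal(err)
			}
			if got == want {
				return
			}
		case <-timeout:
			t.Error("timed out waiting for status")
			return
		}
	}
}
```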
71
coordinator/roller_metrics.go
Normal file
@@ -0,0 +1,71 @@
package coordinator

import (
"time"

"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
)

type rollerMetrics struct {
rollerProofsVerifiedSuccessTimeTimer geth_metrics.Timer
rollerProofsVerifiedFailedTimeTimer geth_metrics.Timer
rollerProofsGeneratedFailedTimeTimer geth_metrics.Timer
rollerProofsLastAssignedTimestampGauge geth_metrics.Gauge
rollerProofsLastFinishedTimestampGauge geth_metrics.Gauge
}

func (m *Manager) updateMetricRollerProofsLastFinishedTimestampGauge(pk string) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
} else {
log.Error("rollerProofsLastFinishedTimestampGauge is nil", "roller pk", pk)
}
}
}

func (m *Manager) updateMetricRollerProofsLastAssignedTimestampGauge(pk string) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
} else {
log.Error("rollerProofsLastAssignedTimestampGauge is nil", "roller pk", pk)
}
}
}

func (m *Manager) updateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsVerifiedSuccessTimeTimer.Update(d)
} else {
log.Error("rollerProofsVerifiedSuccessTimeTimer is nil", "roller pk", pk)
}
}
}

func (m *Manager) updateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsVerifiedFailedTimeTimer.Update(d)
} else {
log.Error("rollerProofsVerifiedFailedTimeTimer is nil", "roller pk", pk)
}
}
}

func (m *Manager) updateMetricRollerProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsGeneratedFailedTimeTimer.Update(d)
} else {
log.Error("rollerProofsGeneratedFailedTimeTimer is nil", "roller pk", pk)
}
}
}
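roller_metrics.go leans entirely on go-ethereum's metrics package. A standalone sketch of those primitives; the registry and pubkey below are local stand-ins, whereas the coordinator registers against metrics.ScrollRegistry with the roller's real public key:

```go
package main

import (
	"fmt"
	"time"

	geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
)

func main() {
	// go-ethereum metrics are created as no-ops unless this flag is set.
	geth_metrics.Enabled = true
	registry := geth_metrics.NewRegistry()

	pubkey := "0xabc" // illustrative; the coordinator keys metric names by roller public key
	timer := geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/success/time/%s", pubkey), registry)
	gauge := geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/finished/timestamp/%s", pubkey), registry)

	// Record one verified proof that took 1.5s, finished "now".
	timer.Update(1500 * time.Millisecond)
	gauge.Update(time.Now().Unix())

	fmt.Println("count:", timer.Count(), "last finished:", gauge.Value())
}
```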
@@ -9,8 +9,10 @@ import (
cmap "github.com/orcaman/concurrent-map"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"

"scroll-tech/common/message"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
)

@@ -30,6 +32,8 @@ type rollerNode struct {

// Time of message creation
registerTime time.Time

*rollerMetrics
}

func (r *rollerNode) sendTask(id string, traces []*geth_types.BlockTrace) bool {
@@ -64,12 +68,20 @@ func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *m
node, ok := m.rollerPool.Get(pubkey)
if !ok {
taskIDs := m.reloadRollerAssignedTasks(pubkey)
rMs := &rollerMetrics{
rollerProofsVerifiedSuccessTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/success/time/%s", pubkey), metrics.ScrollRegistry),
rollerProofsVerifiedFailedTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/failed/time/%s", pubkey), metrics.ScrollRegistry),
rollerProofsGeneratedFailedTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/generated/failed/time/%s", pubkey), metrics.ScrollRegistry),
rollerProofsLastAssignedTimestampGauge: geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/assigned/timestamp/%s", pubkey), metrics.ScrollRegistry),
rollerProofsLastFinishedTimestampGauge: geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/finished/timestamp/%s", pubkey), metrics.ScrollRegistry),
}
node = &rollerNode{
Name: identity.Name,
Version: identity.Version,
PublicKey: pubkey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
Name: identity.Name,
Version: identity.Version,
PublicKey: pubkey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
rollerMetrics: rMs,
}
m.rollerPool.Set(pubkey, node)
}

@@ -13,18 +13,14 @@ require (
)

require (
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/ethereum/go-ethereum v1.11.4 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/iden3/go-iden3-crypto v0.0.14 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.5.2 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.7.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/tools v0.6.0 // indirect

@@ -1,77 +1,44 @@
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/ethereum/go-ethereum v1.11.4 h1:KG81SnUHXWk8LJB3mBcHg/E2yLvXoiPmRMCIRxgx3cE=
github.com/ethereum/go-ethereum v1.11.4/go.mod h1:it7x0DWnTDMfVFdXcU6Ti4KEFQynLHVRarcSlPr0HBo=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/iden3/go-iden3-crypto v0.0.14 h1:HQnFchY735JRNQxof6n/Vbyon4owj4+Ku+LNAamWV6c=
github.com/iden3/go-iden3-crypto v0.0.14/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf8=
github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04 h1:PpI31kaBVm6+7sZtyK03Ex0QIg3P821Ktae0FHFh7IM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04/go.mod h1:jH8c08L9K8Hieaf0r/ur2P/cpesn4dFhmLm2Mmoi8kI=
github.com/scroll-tech/zktrie v0.5.2 h1:U34jPXMLGOlRHfdvYp5VVgOcC0RuPeJmcS3bWotCWiY=
github.com/scroll-tech/zktrie v0.5.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -79,34 +46,21 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

@@ -18,7 +18,7 @@ create table l1_message
);

comment
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired';
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l1_message_hash_uindex
on l1_message (msg_hash);

@@ -18,7 +18,7 @@ create table l2_message
);

comment
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired';
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l2_message_hash_uindex
on l2_message (msg_hash);

@@ -33,7 +33,7 @@ create table block_batch
comment
on column block_batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';
comment
on column block_batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped';
on column block_batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped, commit_failed, finalize_failed';
comment
on column block_batch.oracle_status is 'undefined, pending, importing, imported, failed';

@@ -9,11 +9,9 @@ import (

"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"

"scroll-tech/common/types"
"scroll-tech/common/utils"
)

type blockTraceOrm struct {
@@ -39,7 +37,7 @@ func (o *blockTraceOrm) IsL2BlockExists(number uint64) (bool, error) {
return true, nil
}

func (o *blockTraceOrm) GetL2BlockTracesLatestHeight() (int64, error) {
func (o *blockTraceOrm) GetL2BlocksLatestHeight() (int64, error) {
row := o.db.QueryRow("SELECT COALESCE(MAX(number), -1) FROM block_trace;")

var height int64
@@ -49,7 +47,7 @@ func (o *blockTraceOrm) GetL2BlockTracesLatestHeight() (int64, error) {
return height, nil
}

func (o *blockTraceOrm) GetL2BlockTraces(fields map[string]interface{}, args ...string) ([]*geth_types.BlockTrace, error) {
func (o *blockTraceOrm) GetL2WrappedBlocks(fields map[string]interface{}, args ...string) ([]*types.WrappedBlock, error) {
type Result struct {
Trace string
}
@@ -66,24 +64,24 @@ func (o *blockTraceOrm) GetL2BlockTraces(fields map[string]interface{}, args ...
return nil, err
}

var traces []*geth_types.BlockTrace
var wrappedBlocks []*types.WrappedBlock
for rows.Next() {
result := &Result{}
if err = rows.StructScan(result); err != nil {
break
}
trace := geth_types.BlockTrace{}
err = json.Unmarshal([]byte(result.Trace), &trace)
wrappedBlock := types.WrappedBlock{}
err = json.Unmarshal([]byte(result.Trace), &wrappedBlock)
if err != nil {
break
}
traces = append(traces, &trace)
wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
}
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, err
}

return traces, rows.Close()
return wrappedBlocks, rows.Close()
}

func (o *blockTraceOrm) GetL2BlockInfos(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error) {
@@ -152,35 +150,31 @@ func (o *blockTraceOrm) GetL2BlockHashByNumber(number uint64) (*common.Hash, err
return &hash, nil
}

func (o *blockTraceOrm) InsertL2BlockTraces(blockTraces []*geth_types.BlockTrace) error {
traceMaps := make([]map[string]interface{}, len(blockTraces))
for i, trace := range blockTraces {
number, hash, txNum, mtime := trace.Header.Number.Int64(),
trace.Header.Hash().String(),
len(trace.Transactions),
trace.Header.Time
func (o *blockTraceOrm) InsertWrappedBlocks(blocks []*types.WrappedBlock) error {
blockMaps := make([]map[string]interface{}, len(blocks))
for i, block := range blocks {
number, hash, txNum, mtime := block.Header.Number.Int64(),
block.Header.Hash().String(),
len(block.Transactions),
block.Header.Time

gasCost := utils.ComputeTraceGasCost(trace)
// clear the `StructLogs` to reduce storage cost
for _, executionResult := range trace.ExecutionResults {
executionResult.StructLogs = nil
}
data, err := json.Marshal(trace)
gasCost := block.Header.GasUsed
data, err := json.Marshal(block)
if err != nil {
log.Error("failed to marshal blockTrace", "hash", hash, "err", err)
log.Error("failed to marshal block", "hash", hash, "err", err)
return err
}
traceMaps[i] = map[string]interface{}{
blockMaps[i] = map[string]interface{}{
"number": number,
"hash": hash,
"parent_hash": trace.Header.ParentHash.String(),
"parent_hash": block.Header.ParentHash.String(),
"trace": string(data),
"tx_num": txNum,
"gas_used": gasCost,
"block_timestamp": mtime,
}
}
_, err := o.db.NamedExec(`INSERT INTO public.block_trace (number, hash, parent_hash, trace, tx_num, gas_used, block_timestamp) VALUES (:number, :hash, :parent_hash, :trace, :tx_num, :gas_used, :block_timestamp);`, traceMaps)
_, err := o.db.NamedExec(`INSERT INTO public.block_trace (number, hash, parent_hash, trace, tx_num, gas_used, block_timestamp) VALUES (:number, :hash, :parent_hash, :trace, :tx_num, :gas_used, :block_timestamp);`, blockMaps)
if err != nil {
log.Error("failed to insert blockTraces", "err", err)
}

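types.WrappedBlock itself is defined elsewhere in the repo and never appears in this diff. From how it is used here — a Header with Number, Hash, ParentHash, Time, and GasUsed, a Transactions slice whose length becomes tx_num, and JSON round-tripping into the trace column — its shape is presumably close to the following sketch, which is an inference for orientation, not the actual definition:

```go
package types

import (
	geth_types "github.com/scroll-tech/go-ethereum/core/types"
)

// WrappedBlock (inferred shape): a block header plus its transactions,
// without the execution traces that geth_types.BlockTrace carried. The
// JSON tags matter because InsertWrappedBlocks marshals the whole struct
// into the block_trace.trace column.
type WrappedBlock struct {
	Header       *geth_types.Header            `json:"header"`
	Transactions []*geth_types.TransactionData `json:"transactions"`
}
```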
@@ -8,7 +8,6 @@ import (

"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/common"
eth_types "github.com/scroll-tech/go-ethereum/core/types"
)

// L1BlockOrm l1_block operation interface
@@ -29,14 +28,14 @@ type L1BlockOrm interface {
// BlockTraceOrm block_trace operation interface
type BlockTraceOrm interface {
IsL2BlockExists(number uint64) (bool, error)
GetL2BlockTracesLatestHeight() (int64, error)
GetL2BlockTraces(fields map[string]interface{}, args ...string) ([]*eth_types.BlockTrace, error)
GetL2BlocksLatestHeight() (int64, error)
GetL2WrappedBlocks(fields map[string]interface{}, args ...string) ([]*types.WrappedBlock, error)
GetL2BlockInfos(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error)
// GetUnbatchedBlocks add `GetUnbatchedBlocks` because `GetBlockInfos` cannot support query "batch_hash is NULL"
GetUnbatchedL2Blocks(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error)
GetL2BlockHashByNumber(number uint64) (*common.Hash, error)
DeleteTracesByBatchHash(batchHash string) error
InsertL2BlockTraces(blockTraces []*eth_types.BlockTrace) error
InsertWrappedBlocks(blockTraces []*types.WrappedBlock) error
SetBatchHashForL2BlocksInDBTx(dbTx *sqlx.Tx, numbers []uint64, batchHash string) error
}

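The comment on GetUnbatchedL2Blocks records a real SQL limitation: a fields map rendered as `k = :k` equality pairs can never select NULL columns, because `batch_hash = NULL` is not true for any row. A small self-contained illustration — the query builder below is hypothetical, not the repo's actual helper:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// whereFromFields renders a fields map the way a generic GetBlockInfos-style
// helper would: every entry becomes an equality test against a named
// parameter. There is no way to express "batch_hash IS NULL" through it.
func whereFromFields(fields map[string]interface{}) string {
	if len(fields) == 0 {
		return ""
	}
	keys := make([]string, 0, len(fields))
	for k := range fields {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic output for the demo
	conds := make([]string, len(keys))
	for i, k := range keys {
		conds[i] = fmt.Sprintf("%s = :%s", k, k)
	}
	return " WHERE " + strings.Join(conds, " AND ")
}

func main() {
	// What the generic helper can express:
	fmt.Println("SELECT number, hash FROM block_trace" +
		whereFromFields(map[string]interface{}{"batch_hash": "0xabc"}))
	// What GetUnbatchedL2Blocks has to hard-code instead:
	fmt.Println("SELECT number, hash FROM block_trace WHERE batch_hash IS NULL")
}
```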
@@ -12,7 +12,6 @@ import (
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"

"scroll-tech/common/docker"
@@ -72,9 +71,9 @@ var (
Layer2Hash: "hash1",
},
}
blockTrace *geth_types.BlockTrace
batchData1 *types.BatchData
batchData2 *types.BatchData
wrappedBlock *types.WrappedBlock
batchData1 *types.BatchData
batchData2 *types.BatchData

dbConfig *database.DBConfig
base *docker.App
@@ -109,8 +108,8 @@ func setupEnv(t *testing.T) error {
return err
}
// unmarshal blockTrace
blockTrace = &geth_types.BlockTrace{}
if err = json.Unmarshal(templateBlockTrace, blockTrace); err != nil {
wrappedBlock = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace, wrappedBlock); err != nil {
return err
}

@@ -118,22 +117,22 @@ func setupEnv(t *testing.T) error {
Index: 1,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData1 = types.NewBatchData(parentBatch, []*geth_types.BlockTrace{blockTrace}, nil)
batchData1 = types.NewBatchData(parentBatch, []*types.WrappedBlock{wrappedBlock}, nil)

templateBlockTrace, err = os.ReadFile("../common/testdata/blockTrace_03.json")
if err != nil {
return err
}
// unmarshal blockTrace
blockTrace2 := &geth_types.BlockTrace{}
if err = json.Unmarshal(templateBlockTrace, blockTrace2); err != nil {
wrappedBlock2 := &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace, wrappedBlock2); err != nil {
return err
}
parentBatch2 := &types.BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
}
batchData2 = types.NewBatchData(parentBatch2, []*geth_types.BlockTrace{blockTrace2}, nil)
batchData2 = types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil)

// insert a fake empty block to batchData2
fakeBlockContext := abi.IScrollChainBlockContext{
@@ -181,27 +180,27 @@ func testOrmBlockTraces(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(factory.GetDB().DB))

res, err := ormBlock.GetL2BlockTraces(map[string]interface{}{})
res, err := ormBlock.GetL2WrappedBlocks(map[string]interface{}{})
assert.NoError(t, err)
assert.Equal(t, true, len(res) == 0)

exist, err := ormBlock.IsL2BlockExists(blockTrace.Header.Number.Uint64())
exist, err := ormBlock.IsL2BlockExists(wrappedBlock.Header.Number.Uint64())
assert.NoError(t, err)
assert.Equal(t, false, exist)

// Insert into db
assert.NoError(t, ormBlock.InsertL2BlockTraces([]*geth_types.BlockTrace{blockTrace}))
assert.NoError(t, ormBlock.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock}))

res2, err := ormBlock.GetUnbatchedL2Blocks(map[string]interface{}{})
assert.NoError(t, err)
assert.Equal(t, true, len(res2) == 1)

exist, err = ormBlock.IsL2BlockExists(blockTrace.Header.Number.Uint64())
exist, err = ormBlock.IsL2BlockExists(wrappedBlock.Header.Number.Uint64())
assert.NoError(t, err)
assert.Equal(t, true, exist)

res, err = ormBlock.GetL2BlockTraces(map[string]interface{}{
"hash": blockTrace.Header.Hash().String(),
res, err = ormBlock.GetL2WrappedBlocks(map[string]interface{}{
"hash": wrappedBlock.Header.Hash().String(),
})
assert.NoError(t, err)
assert.Equal(t, true, len(res) == 1)
@@ -209,7 +208,7 @@ func testOrmBlockTraces(t *testing.T) {
// Compare trace
data1, err := json.Marshal(res[0])
assert.NoError(t, err)
data2, err := json.Marshal(blockTrace)
data2, err := json.Marshal(wrappedBlock)
assert.NoError(t, err)
// check trace
assert.Equal(t, true, string(data1) == string(data2))

34
docs/tests.md
Normal file
@@ -0,0 +1,34 @@
# Tests

## bridge & coordinator

### Prerequisite

+ rust
+ go1.18
+ docker

To run the tests for bridge & coordinator, you first need to build the required docker images.

```bash
make dev_docker # under repo root directory
```

### Run the tests

```bash
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
```

You can also run some related tests (they depend on bridge & coordinator) using
```bash
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```

## Contracts

You can find the unit tests in [`<REPO_DIR>/contracts/src/test/`](../contracts/src/test/), and integration tests in [`<REPO_DIR>/contracts/integration-test/`](../contracts/integration-test/).

@@ -219,6 +219,7 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=

@@ -22,7 +22,10 @@ import (
_ "scroll-tech/roller/cmd/app"
rollerConfig "scroll-tech/roller/config"

_ "scroll-tech/bridge/cmd/app"
_ "scroll-tech/bridge/cmd/event_watcher/app"
_ "scroll-tech/bridge/cmd/gas_oracle/app"
_ "scroll-tech/bridge/cmd/msg_relayer/app"
_ "scroll-tech/bridge/cmd/rollup_relayer/app"
bridgeConfig "scroll-tech/bridge/config"
"scroll-tech/bridge/sender"

@@ -75,26 +78,53 @@ func free(t *testing.T) {
}

type appAPI interface {
OpenLog(open bool)
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
RunApp(waitResult func() bool)
WaitExit()
ExpectWithTimeout(t *testing.T, parallel bool, timeout time.Duration, keyword string)
}

func runBridgeApp(t *testing.T, args ...string) appAPI {
func runMsgRelayerApp(t *testing.T, args ...string) appAPI {
args = append(args, "--log.debug", "--config", bridgeFile)
return cmd.NewCmd("bridge-test", args...)
app := cmd.NewCmd("message-relayer-test", args...)
app.OpenLog(true)
return app
}

func runGasOracleApp(t *testing.T, args ...string) appAPI {
args = append(args, "--log.debug", "--config", bridgeFile)
app := cmd.NewCmd("gas-oracle-test", args...)
app.OpenLog(true)
return app
}

func runRollupRelayerApp(t *testing.T, args ...string) appAPI {
args = append(args, "--log.debug", "--config", bridgeFile)
app := cmd.NewCmd("rollup-relayer-test", args...)
app.OpenLog(true)
return app
}

func runEventWatcherApp(t *testing.T, args ...string) appAPI {
args = append(args, "--log.debug", "--config", bridgeFile)
app := cmd.NewCmd("event-watcher-test", args...)
app.OpenLog(true)
return app
}

func runCoordinatorApp(t *testing.T, args ...string) appAPI {
args = append(args, "--log.debug", "--config", coordinatorFile, "--ws", "--ws.port", strconv.Itoa(int(wsPort)))
// start process
return cmd.NewCmd("coordinator-test", args...)
app := cmd.NewCmd("coordinator-test", args...)
app.OpenLog(true)
return app
}

func runDBCliApp(t *testing.T, option, keyword string) {
args := []string{option, "--config", dbFile}
app := cmd.NewCmd("db_cli-test", args...)
app.OpenLog(true)
defer app.WaitExit()

// Wait expect result.
@@ -104,7 +134,9 @@ func runDBCliApp(t *testing.T, option, keyword string) {

func runRollerApp(t *testing.T, args ...string) appAPI {
args = append(args, "--log.debug", "--config", rollerFile)
return cmd.NewCmd("roller-test", args...)
app := cmd.NewCmd("roller-test", args...)
app.OpenLog(true)
return app
}

func runSender(t *testing.T, endpoint string) *sender.Sender {

@@ -41,8 +41,17 @@ func testStartProcess(t *testing.T) {
runDBCliApp(t, "migrate", "current version:")

// Start bridge process.
bridgeCmd := runBridgeApp(t)
bridgeCmd.RunApp(func() bool { return bridgeCmd.WaitResult(t, time.Second*20, "Start bridge successfully") })
ewCmd := runEventWatcherApp(t)
ewCmd.RunApp(func() bool { return ewCmd.WaitResult(t, time.Second*20, "Start event-watcher successfully") })

goCmd := runGasOracleApp(t)
goCmd.RunApp(func() bool { return goCmd.WaitResult(t, time.Second*20, "Start gas-oracle successfully") })

mrCmd := runMsgRelayerApp(t)
mrCmd.RunApp(func() bool { return mrCmd.WaitResult(t, time.Second*20, "Start message-relayer successfully") })

rrCmd := runRollupRelayerApp(t)
rrCmd.RunApp(func() bool { return rrCmd.WaitResult(t, time.Second*20, "Start rollup-relayer successfully") })

// Start coordinator process.
coordinatorCmd := runCoordinatorApp(t, "--ws", "--ws.port", "8391")
@@ -53,8 +62,11 @@ func testStartProcess(t *testing.T) {
rollerCmd.ExpectWithTimeout(t, true, time.Second*60, "register to coordinator successfully!")
rollerCmd.RunApp(func() bool { return rollerCmd.WaitResult(t, time.Second*40, "roller start successfully") })

ewCmd.WaitExit()
goCmd.WaitExit()
mrCmd.WaitExit()
rrCmd.WaitExit()
rollerCmd.WaitExit()
bridgeCmd.WaitExit()
coordinatorCmd.WaitExit()
}

@@ -63,16 +75,30 @@ func testMonitorMetrics(t *testing.T) {
runDBCliApp(t, "reset", "successful to reset")
runDBCliApp(t, "migrate", "current version:")

// Start bridge process with metrics server.
port1, _ := rand.Int(rand.Reader, big.NewInt(2000))
svrPort1 := strconv.FormatInt(port1.Int64()+50000, 10)
bridgeCmd := runBridgeApp(t, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort1)
bridgeCmd.RunApp(func() bool { return bridgeCmd.WaitResult(t, time.Second*20, "Start bridge successfully") })
ewCmd := runEventWatcherApp(t, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort1)
ewCmd.RunApp(func() bool { return ewCmd.WaitResult(t, time.Second*20, "Start event-watcher successfully") })

port2, _ := rand.Int(rand.Reader, big.NewInt(2000))
svrPort2 := strconv.FormatInt(port2.Int64()+50000, 10)
goCmd := runGasOracleApp(t, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort2)
goCmd.RunApp(func() bool { return goCmd.WaitResult(t, time.Second*20, "Start gas-oracle successfully") })

port3, _ := rand.Int(rand.Reader, big.NewInt(2000))
svrPort3 := strconv.FormatInt(port3.Int64()+50000, 10)
mrCmd := runMsgRelayerApp(t, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort3)
mrCmd.RunApp(func() bool { return mrCmd.WaitResult(t, time.Second*20, "Start message-relayer successfully") })

port4, _ := rand.Int(rand.Reader, big.NewInt(2000))
svrPort4 := strconv.FormatInt(port4.Int64()+50000, 10)
rrCmd := runRollupRelayerApp(t, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort4)
rrCmd.RunApp(func() bool { return rrCmd.WaitResult(t, time.Second*20, "Start rollup-relayer successfully") })

// Start coordinator process with metrics server.
port, _ := rand.Int(rand.Reader, big.NewInt(2000))
svrPort2 := strconv.FormatInt(port.Int64()+52000, 10)
coordinatorCmd := runCoordinatorApp(t, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort2)
port5, _ := rand.Int(rand.Reader, big.NewInt(2000))
svrPort5 := strconv.FormatInt(port5.Int64()+52000, 10)
coordinatorCmd := runCoordinatorApp(t, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort5)
coordinatorCmd.RunApp(func() bool { return coordinatorCmd.WaitResult(t, time.Second*20, "Start coordinator successfully") })

// Get bridge monitor metrics.
@@ -87,7 +113,7 @@ func testMonitorMetrics(t *testing.T) {
assert.Equal(t, true, strings.Contains(bodyStr, "bridge_l2_msgs_sync_height"))

// Get coordinator monitor metrics.
resp, err = http.Get("http://localhost:" + svrPort2)
resp, err = http.Get("http://localhost:" + svrPort5)
assert.NoError(t, err)
defer resp.Body.Close()
body, err = ioutil.ReadAll(resp.Body)
@@ -98,6 +124,9 @@ func testMonitorMetrics(t *testing.T) {
assert.Equal(t, true, strings.Contains(bodyStr, "coordinator_rollers_disconnects_total"))

// Exit.
bridgeCmd.WaitExit()
ewCmd.WaitExit()
goCmd.WaitExit()
mrCmd.WaitExit()
rrCmd.WaitExit()
coordinatorCmd.WaitExit()
}
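testMonitorMetrics repeats the same random-port selection five times: crypto/rand over a 2000-port window above a base of 50000 or 52000. A small helper capturing that pattern — hypothetical, not part of the diff — which also surfaces the error that the test discards with `_`:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
	"strconv"
)

// randomPort returns a decimal port string in [base, base+2000), matching
// the selection used for each metrics server in testMonitorMetrics.
func randomPort(base int64) (string, error) {
	n, err := rand.Int(rand.Reader, big.NewInt(2000))
	if err != nil {
		return "", err
	}
	return strconv.FormatInt(base+n.Int64(), 10), nil
}

func main() {
	p, err := randomPort(50000)
	if err != nil {
		panic(err)
	}
	fmt.Println("metrics port:", p)
}
```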