Compare commits


38 Commits

Author SHA1 Message Date
Péter Garamvölgyi
168eaf0fc2 Respect size limit in batch proposer (#446) 2023-05-04 08:44:05 +02:00
Orest Tarasiuk
85a1d5967f fix xargs parameter deprecation for -i (portability to macOS) (#445) 2023-05-04 14:06:36 +08:00
HAOYUatHZ
46f8b0e36c doc: add prerequisites (#443) 2023-04-28 16:51:41 +08:00
HAOYUatHZ
b7c39f64a7 refactor: move message package (#442) 2023-04-28 16:20:54 +08:00
georgehao
7fea3f5c22 test(coordinator): add API unit-tests (#433)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-26 10:20:36 +08:00
maskpp
7afddae276 fix(common): fix bug in auth message (#440)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-04-25 19:24:36 +08:00
colin
10ac638a51 test(coordinator): add more unit tests (#430)
Co-authored-by: maskpp <maskpp266@gmail.com>
2023-04-23 22:28:03 +08:00
maskpp
2fafb64e0a test(integration): add predeployed erc20 & greeter contract tests (#428)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-23 19:55:22 +08:00
maskpp
ab1cda6568 test(coordinator): simplify coordinator tests (#434) 2023-04-23 17:15:28 +08:00
colin
905961d0ad fix(Jenkinsfile): specify -coverpkg in unit tests (#431) 2023-04-21 20:50:36 +08:00
Xi Lin
cf9f0b921f feat(contracts): forward data to receiver after deposit/withdraw (#429)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-21 19:27:49 +08:00
maskpp
0f26e0d67b test(integration): pre-deploy test contracts into genesis (#420)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-21 16:23:16 +08:00
maskpp
c774462d1d test(integration-test): refactor integration-test (#425) 2023-04-18 21:33:12 +08:00
colin
7eb6d9696a chore(bridge & coordinator): clear some unused code (#423) 2023-04-17 22:09:52 +08:00
Lawliet-Chan
401ea68332 feat(libzkp): recover rust zkp panic instead of crashing (#421) 2023-04-14 20:51:50 +08:00
maskpp
c13e8aafc4 feat(tests): update docker app (#402)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: ChuhanJin <60994121+ChuhanJin@users.noreply.github.com>
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colinlyguo <651734127@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-13 08:40:51 +08:00
colin
2b2cc62efe fix(coordinator): add metric roller_proofs_generated_failed_time (#419) 2023-04-12 18:27:55 +08:00
colin
807b7c7f33 refactor(coordinator): adjust logs for Loki query (#417)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-04-11 16:38:52 +08:00
HAOYUatHZ
454f032c9f doc(test): add testing doc (#412) 2023-04-11 11:05:33 +08:00
maskpp
d1c4fa716d fix(test): Clean the exited container by --rm after container stopped. (#416) 2023-04-10 19:18:59 +08:00
Lawliet-Chan
de1e40d19c fix(libzkp): load_params and seed in zk (#415) 2023-04-10 15:41:16 +08:00
Ahmed Castro
76b5a6c751 Small typo fix on documentation comments (#411)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-09 15:34:27 +08:00
colin
bad77eac2f feat(coordinator): prover monitoring (#392)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-07 09:06:58 +08:00
Péter Garamvölgyi
5d761ad812 Make sure attempts can be deserialized from db on startup (#410) 2023-04-05 19:00:54 +02:00
Nazarii Denha
4042bea6db retry proving timeout batch (#313)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-04-05 16:42:06 +02:00
maskpp
de7c38a903 feat(test): let integration-test log verbosity be configurable (#409) 2023-04-04 16:20:12 +08:00
Péter Garamvölgyi
41e2d960d8 Fix already executed revert message (#408) 2023-04-03 21:26:30 +08:00
HAOYUatHZ
170bc08207 build(docker): auto docker push when pushing git tags (#406) 2023-04-03 16:52:51 +08:00
maskpp
d3fc4e1606 feat(pending limit): Let sender's pending limit be configurable. (#398)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: ChuhanJin <60994121+ChuhanJin@users.noreply.github.com>
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colinlyguo <651734127@qq.com>
2023-04-03 14:24:47 +08:00
HAOYUatHZ
77749477db build(docker): only build docker images when push github tags (#404) 2023-04-01 11:54:56 +08:00
HAOYUatHZ
1a5df6f4d7 fix(build): move docker build from jenkins to github to avoid unknown errors (#403) 2023-03-31 15:55:55 +08:00
maskpp
826280253a fix(test): fix bug in testBatchProposerProposeBatch (#399)
Co-authored-by: colinlyguo <651734127@qq.com>
2023-03-31 13:58:46 +08:00
ChuhanJin
d376c903af feat(bridge): separate bridge into subcomponents (#397)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colinlyguo <651734127@qq.com>
2023-03-31 11:04:24 +08:00
Max Wolff
179c6ee978 add failed relay status to db (#384)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-03-27 11:52:07 +02:00
HAOYUatHZ
165c9092da feat(bridge): fetch block transaction data instead of trace (#393)
Co-authored-by: colinlyguo <651734127@qq.com>
2023-03-25 11:50:02 +08:00
HAOYUatHZ
54c28fa512 build: update version (#387) 2023-03-24 09:25:10 +08:00
maskpp
3c7c41e1bb fix(cmd test): add more log to handle error and remove serial execution test (#391) 2023-03-23 16:43:41 +08:00
Péter Garamvölgyi
2962fa4b0e batch proposer: only sleep if we failed to create batch (#388) 2023-03-22 22:16:31 +08:00
158 changed files with 3817 additions and 2052 deletions

View File

@@ -66,3 +66,11 @@ jobs:
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
# docker-build:
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - run: make docker

View File

@@ -62,3 +62,18 @@ jobs:
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
# docker-build:
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - name: Build and push
# uses: docker/build-push-action@v2
# with:
# context: .
# file: ./build/dockerfiles/coordinator.Dockerfile
# push: false
# # cache-from: type=gha,scope=${{ github.workflow }}
# # cache-to: type=gha,scope=${{ github.workflow }}

.github/workflows/docker.yaml
View File

@@ -0,0 +1,65 @@
name: Docker
on:
push:
tags:
- v**
jobs:
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/coordinator.Dockerfile
push: true
tags: scrolltech/coordinator:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push event_watcher docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
push: true
tags: scrolltech/event-watcher:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push gas_oracle docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
push: true
tags: scrolltech/gas-oracle:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push msg_relayer docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/msg_relayer.Dockerfile
push: true
tags: scrolltech/msg-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push rollup_relayer docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
push: true
tags: scrolltech/rollup-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}

Jenkinsfile
View File

@@ -24,11 +24,12 @@ pipeline {
steps {
sh 'make dev_docker'
sh 'make -C bridge mock_abi'
sh 'make -C common/bytecode all'
}
}
stage('Check Bridge Compilation') {
steps {
sh 'make -C bridge bridge'
sh 'make -C bridge bridge_bins'
}
}
stage('Check Coordinator Compilation') {
@@ -42,16 +43,6 @@ pipeline {
sh 'make -C database db_cli'
}
}
stage('Check Bridge Docker Build') {
steps {
sh 'make -C bridge docker'
}
}
stage('Check Coordinator Docker Build') {
steps {
sh 'make -C coordinator docker'
}
}
stage('Check Database Docker Build') {
steps {
sh 'make -C database docker'
@@ -61,44 +52,29 @@ pipeline {
}
stage('Parallel Test') {
parallel{
stage('Test bridge package') {
stage('Race test common package') {
steps {
sh 'go test -v -coverprofile=coverage.bridge.txt -covermode=atomic -p 1 scroll-tech/bridge/...'
}
}
stage('Test common package') {
steps {
sh 'go test -v -coverprofile=coverage.common.txt -covermode=atomic -p 1 scroll-tech/common/...'
}
}
stage('Test coordinator package') {
steps {
sh 'go test -v -coverprofile=coverage.coordinator.txt -covermode=atomic -p 1 scroll-tech/coordinator/...'
}
}
stage('Test database package') {
steps {
sh 'go test -v -coverprofile=coverage.db.txt -covermode=atomic -p 1 scroll-tech/database/...'
}
}
stage('Integration test') {
steps {
sh 'go test -v -tags="mock_prover mock_verifier" -coverprofile=coverage.integration.txt -covermode=atomic -p 1 scroll-tech/integration-test/...'
sh 'go test -v -race -coverprofile=coverage.common.txt -covermode=atomic scroll-tech/common/...'
}
}
stage('Race test bridge package') {
steps {
sh 'go test -v -race -coverprofile=coverage.txt -covermode=atomic scroll-tech/bridge/...'
sh "cd ./bridge && ../build/run_tests.sh bridge"
}
}
stage('Race test coordinator package') {
steps {
sh 'go test -v -race -coverprofile=coverage.txt -covermode=atomic scroll-tech/coordinator/...'
sh "cd ./coordinator && ../build/run_tests.sh coordinator"
}
}
stage('Race test database package') {
steps {
sh 'go test -v -race -coverprofile=coverage.txt -covermode=atomic scroll-tech/database/...'
sh 'go test -v -race -coverprofile=coverage.db.txt -covermode=atomic scroll-tech/database/...'
}
}
stage('Integration test') {
steps {
sh 'go test -v -tags="mock_prover mock_verifier" -p 1 scroll-tech/integration-test/...'
}
}
}

View File

@@ -1,3 +1,12 @@
# Scroll Monorepo
[![Contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml) [![Bridge](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml) [![Coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml) [![Database](https://github.com/scroll-tech/scroll/actions/workflows/database.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/database.yml) [![Common](https://github.com/scroll-tech/scroll/actions/workflows/common.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/common.yml) [![Roller](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml)
## Prerequisites
+ go1.18
+ rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ hardhat / foundry
---
For a more comprehensive doc, see [`docs/`](./docs).

View File

@@ -8,8 +8,23 @@ mock_abi:
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go
bridge: ## Builds the Bridge instance.
go build -o $(PWD)/build/bin/bridge ./cmd
bridge_bins: ## Builds the Bridge bins.
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
event_watcher: ## Builds the event_watcher bin
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
gas_oracle: ## Builds the gas_oracle bin
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
message_relayer: ## Builds the message_relayer bin
go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
rollup_relayer: ## Builds the rollup_relayer bin
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
@@ -20,8 +35,14 @@ lint: ## Lint the files - used for CI
clean: ## Empty out the bin folder
@rm -rf build/bin
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridge.Dockerfile
docker_push:
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}
docker push scrolltech/gas-oracle:${IMAGE_VERSION}
docker push scrolltech/event-watcher:${IMAGE_VERSION}
docker push scrolltech/rollup-relayer:${IMAGE_VERSION}
docker push scrolltech/msg-relayer:${IMAGE_VERSION}
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/gas-oracle:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/gas_oracle.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/event-watcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/event_watcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/rollup-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/rollup_relayer.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/msg-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/msg_relayer.Dockerfile

View File

@@ -1,130 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
)
var (
app *cli.App
)
func init() {
// Set up Bridge app info.
app = cli.NewApp()
app.Action = action
app.Name = "bridge"
app.Usage = "The Scroll Bridge"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `bridge-test` app for integration-test.
utils.RegisterSimulation(app, "bridge-test")
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// Start metrics server.
metrics.Serve(context.Background(), ctx)
// Init db connection.
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
var (
l1Backend *l1.Backend
l2Backend *l2.Backend
)
// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
if err != nil {
return err
}
l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
if err != nil {
return err
}
defer func() {
l1Backend.Stop()
l2Backend.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start all modules.
if err = l1Backend.Start(); err != nil {
log.Crit("couldn't start l1 backend", "error", err)
}
if err = l2Backend.Start(); err != nil {
log.Crit("couldn't start l2 backend", "error", err)
}
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
l2Backend.APIs())
if err != nil {
log.Crit("Could not start RPC api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
log.Info("Start bridge successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run run bridge cmd instance.
func Run() {
// Run the bridge.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,19 +0,0 @@
package app
import (
"fmt"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/version"
)
func TestRunBridge(t *testing.T) {
bridge := cmd.NewCmd("bridge-test", "--version")
defer bridge.WaitExit()
// wait result
bridge.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
bridge.RunApp(nil)
}

View File

@@ -1,31 +0,0 @@
package app
import (
"github.com/urfave/cli/v2"
)
var (
apiFlags = []cli.Flag{
&httpEnabledFlag,
&httpListenAddrFlag,
&httpPortFlag,
}
// httpEnabledFlag enable rpc server.
httpEnabledFlag = cli.BoolFlag{
Name: "http",
Usage: "Enable the HTTP-RPC server",
Value: false,
}
// httpListenAddrFlag set the http address.
httpListenAddrFlag = cli.StringFlag{
Name: "http.addr",
Usage: "HTTP-RPC server listening interface",
Value: "localhost",
}
// httpPortFlag set http.port.
httpPortFlag = cli.IntFlag{
Name: "http.port",
Usage: "HTTP-RPC server listening port",
Value: 8290,
}
)

View File

@@ -0,0 +1,114 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up event-watcher app info.
app = cli.NewApp()
app.Action = action
app.Name = "event-watcher"
app.Usage = "The Scroll Event Watcher"
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `event-watcher-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.EventWatcherApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
return err
}
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)
go cutils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
log.Error("Failed to fetch bridge contract", "err", loopErr)
}
})
// Start l2 watcher process
go cutils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finished starting all l2 functions
log.Info("Start event-watcher successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run event watcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

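The event-watcher above (and the other apps below) drives its periodic work through cutils.Loop and cutils.LoopWithContext. A minimal sketch of what a helper with the Loop signature used here could look like, assuming it is a plain ticker loop that exits on context cancellation; the actual implementation lives in scroll-tech/common/utils and may differ:

package utils

import (
	"context"
	"time"
)

// Loop runs f every period until ctx is cancelled.
// Sketch only; the real scroll-tech/common/utils helper may differ.
func Loop(ctx context.Context, period time.Duration, f func()) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			f()
		}
	}
}

LoopWithContext, as used in the gas-oracle and rollup-relayer apps, presumably does the same while passing ctx through to the callback.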
View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/event_watcher/app"
func main() {
app.Run()
}

View File

@@ -0,0 +1,136 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up gas-oracle app info.
app = cli.NewApp()
app.Action = action
app.Name = "gas-oracle"
app.Usage = "The Scroll Gas Oracle"
app.Description = "Scroll Gas Oracle."
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `gas-oracle-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.GasOracleApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
return err
}
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1 watcher process
go cutils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
if loopErr = l1watcher.FetchBlockHeader(number); loopErr != nil {
log.Error("Failed to fetch L1 block header", "lastest", number, "err", loopErr)
}
})
// Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finished starting all gas-oracle functions
log.Info("Start gas-oracle successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run gas-oracle cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

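GetLatestConfirmedBlockNumber is what ties the watcher loop above to the configured confirmation depth. A hedged sketch of the idea, assuming Confirmations is a plain numeric block count (the real bridge/utils helper may also support symbolic modes such as latest or finalized):

package utils

import (
	"context"

	"github.com/scroll-tech/go-ethereum/ethclient"
)

// GetLatestConfirmedBlockNumber returns the chain head minus the
// confirmation depth, floored at zero. Sketch only; assumes a plain
// uint64 confirmation count.
func GetLatestConfirmedBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
	head, err := client.BlockNumber(ctx)
	if err != nil {
		return 0, err
	}
	if head < confirmations {
		return 0, nil
	}
	return head - confirmations, nil
}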
View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/gas_oracle/app"
func main() {
app.Run()
}

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge/cmd/app"
func main() {
app.Run()
}

bridge/cmd/mock_app.go
View File

@@ -0,0 +1,106 @@
package app
import (
"encoding/json"
"fmt"
"os"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/utils"
"scroll-tech/bridge/config"
)
// MockApp mockApp-test client manager.
type MockApp struct {
Config *config.Config
base *docker.App
mockApps map[utils.MockAppName]docker.AppAPI
originFile string
bridgeFile string
args []string
}
// NewBridgeApp returns a new bridgeApp manager; name must be one of the supported mock apps.
func NewBridgeApp(base *docker.App, file string) *MockApp {
bridgeFile := fmt.Sprintf("/tmp/%d_bridge-config.json", base.Timestamp)
bridgeApp := &MockApp{
base: base,
mockApps: make(map[utils.MockAppName]docker.AppAPI),
originFile: file,
bridgeFile: bridgeFile,
args: []string{"--log.debug", "--config", bridgeFile},
}
if err := bridgeApp.MockConfig(true); err != nil {
panic(err)
}
return bridgeApp
}
// RunApp run bridge-test child process by multi parameters.
func (b *MockApp) RunApp(t *testing.T, name utils.MockAppName, args ...string) {
if !(name == utils.EventWatcherApp ||
name == utils.GasOracleApp ||
name == utils.MessageRelayerApp ||
name == utils.RollupRelayerApp) {
t.Errorf(fmt.Sprintf("Don't support the mock app, name: %s", name))
return
}
if app, ok := b.mockApps[name]; ok {
t.Logf(fmt.Sprintf("%s already exist, free the current and recreate again", string(name)))
app.WaitExit()
}
appAPI := cmd.NewCmd(string(name), append(b.args, args...)...)
keyword := fmt.Sprintf("Start %s successfully", string(name)[:len(string(name))-len("-test")])
appAPI.RunApp(func() bool { return appAPI.WaitResult(t, time.Second*20, keyword) })
b.mockApps[name] = appAPI
}
// WaitExit wait util all processes exit.
func (b *MockApp) WaitExit() {
for _, app := range b.mockApps {
app.WaitExit()
}
b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
}
// Free stop and release bridge mocked apps.
func (b *MockApp) Free() {
b.WaitExit()
_ = os.Remove(b.bridgeFile)
}
// MockConfig creates a new bridge config.
func (b *MockApp) MockConfig(store bool) error {
base := b.base
// Load origin bridge config file.
cfg, err := config.NewConfig(b.originFile)
if err != nil {
return err
}
cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig.DSN = base.DBImg.Endpoint()
b.Config = cfg
if !store {
return nil
}
// Store changed bridge config into a temp file.
data, err := json.Marshal(b.Config)
if err != nil {
return err
}
return os.WriteFile(b.bridgeFile, data, 0600)
}

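Based on the API above, a test would typically construct a MockApp from a shared docker.App and then launch the individual bridge binaries by name. A hypothetical usage sketch (the config path and import alias are illustrative):

package app_test

import (
	"testing"

	"scroll-tech/common/docker"
	"scroll-tech/common/utils"

	bridgeApp "scroll-tech/bridge/cmd"
)

// Hypothetical sketch of driving MockApp from a test.
func TestEventWatcher(t *testing.T) {
	base := docker.NewDockerApp()
	defer base.Free()
	base.RunImages(t)

	bridge := bridgeApp.NewBridgeApp(base, "../config.json")
	defer bridge.Free()

	// Starts event-watcher-test and waits for its "Start ... successfully" log line.
	bridge.RunApp(t, utils.EventWatcherApp)
	bridge.WaitExit()
}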
View File

@@ -0,0 +1,118 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up message-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "message-relayer"
app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `message-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.MessageRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)
// Start l2relayer process
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessSavedEvents)
// Finished starting all message relayer functions
log.Info("Start message-relayer successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run message_relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/msg_relayer/app"
func main() {
app.Run()
}

View File

@@ -0,0 +1,133 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up rollup-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "rollup-relayer"
app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `rollup-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.RollupRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, l2relayer, ormFactory)
if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)
// Watcher loop to fetch missing blocks
go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
l2watcher.TryFetchRunningMissingBlocks(ctx, number)
})
// Batch proposer loop
go cutils.Loop(subCtx, 2*time.Second, func() {
batchProposer.TryProposeBatch()
batchProposer.TryCommitBatches()
})
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
// Finished starting all rollup relayer functions.
log.Info("Start rollup-relayer successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run rollup relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/rollup_relayer/app"
func main() {
app.Run()
}

View File

@@ -19,7 +19,8 @@
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000
"min_balance": 100000000000000000000,
"pending_limit": 10
},
"gas_oracle_config": {
"min_gas_price": 0,
@@ -53,7 +54,8 @@
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000
"min_balance": 100000000000000000000,
"pending_limit": 10
},
"gas_oracle_config": {
"min_gas_price": 0,

View File

@@ -18,6 +18,8 @@ type L2Config struct {
L2MessengerAddress common.Address `json:"l2_messenger_address"`
// The L2MessageQueue contract address deployed on layer 2 chain.
L2MessageQueueAddress common.Address `json:"l2_message_queue_address"`
// The WithdrawTrieRootSlot in L2MessageQueue contract.
WithdrawTrieRootSlot common.Hash `json:"withdraw_trie_root_slot,omitempty"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
// The batch_proposer config

View File

@@ -33,6 +33,8 @@ type SenderConfig struct {
MinBalance *big.Int `json:"min_balance,omitempty"`
// The interval (in seconds) to check balance and top up sender's accounts
CheckBalanceTime uint64 `json:"check_balance_time"`
// The sender's pending count limit.
PendingLimit int `json:"pending_limit,omitempty"`
}
// RelayerConfig loads relayer configuration items.

View File

@@ -4,6 +4,7 @@ go 1.18
require (
github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04
github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
@@ -21,7 +22,6 @@ require (
github.com/go-stack/stack v1.8.1 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect

View File

@@ -39,7 +39,6 @@ github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpx
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/iden3/go-iden3-crypto v0.0.14 h1:HQnFchY735JRNQxof6n/Vbyon4owj4+Ku+LNAamWV6c=
@@ -66,6 +65,8 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=

View File

@@ -1,55 +0,0 @@
package l1
import (
"context"
"github.com/scroll-tech/go-ethereum/ethclient"
"scroll-tech/database"
"scroll-tech/bridge/config"
)
// Backend manage the resources and services of L1 backend.
// The backend should monitor events in layer 1 and relay transactions to layer 2
type Backend struct {
cfg *config.L1Config
watcher *Watcher
relayer *Layer1Relayer
orm database.OrmFactory
}
// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*Backend, error) {
client, err := ethclient.Dial(cfg.Endpoint)
if err != nil {
return nil, err
}
relayer, err := NewLayer1Relayer(ctx, orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.L1MessageQueueAddress, cfg.ScrollChainContractAddress, orm)
return &Backend{
cfg: cfg,
watcher: watcher,
relayer: relayer,
orm: orm,
}, nil
}
// Start Backend module.
func (l1 *Backend) Start() error {
l1.watcher.Start()
l1.relayer.Start()
return nil
}
// Stop Backend module.
func (l1 *Backend) Stop() {
l1.watcher.Stop()
l1.relayer.Stop()
}

View File

@@ -1,46 +0,0 @@
package l1
import (
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
// docker consider handler.
base *docker.App
)
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
m.Run()
base.Free()
}
func setupEnv(t *testing.T) {
// Load config.
var err error
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1GethEndpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
cfg.DBConfig.DSN = base.DBEndpoint()
}
func TestL1(t *testing.T) {
setupEnv(t)
t.Run("testCreateNewL1Relayer", testCreateNewL1Relayer)
t.Run("testStartWatcher", testStartWatcher)
}

View File

@@ -1,76 +0,0 @@
package l2
import (
"context"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/database"
"scroll-tech/bridge/config"
)
// Backend manage the resources and services of L2 backend.
// The backend should monitor events in layer 2 and relay transactions to layer 1
type Backend struct {
cfg *config.L2Config
watcher *WatcherClient
relayer *Layer2Relayer
batchProposer *BatchProposer
orm database.OrmFactory
}
// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*Backend, error) {
client, err := ethclient.Dial(cfg.Endpoint)
if err != nil {
return nil, err
}
// Note: initialize watcher before relayer to keep DB consistent.
// Otherwise, there will be a race condition between watcher.initializeGenesis and relayer.ProcessPendingBatches.
watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.L2MessengerAddress, cfg.L2MessageQueueAddress, orm)
relayer, err := NewLayer2Relayer(ctx, client, orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
batchProposer := NewBatchProposer(ctx, cfg.BatchProposerConfig, relayer, orm)
return &Backend{
cfg: cfg,
watcher: watcher,
relayer: relayer,
batchProposer: batchProposer,
orm: orm,
}, nil
}
// Start Backend module.
func (l2 *Backend) Start() error {
l2.watcher.Start()
l2.relayer.Start()
l2.batchProposer.Start()
return nil
}
// Stop Backend module.
func (l2 *Backend) Stop() {
l2.batchProposer.Stop()
l2.relayer.Stop()
l2.watcher.Stop()
}
// APIs collect API modules.
func (l2 *Backend) APIs() []rpc.API {
return []rpc.API{
{
Namespace: "l2",
Version: "1.0",
Service: WatcherAPI(l2.watcher),
Public: true,
},
}
}

View File

@@ -1,99 +0,0 @@
package l2
import (
"fmt"
"golang.org/x/sync/errgroup"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/core/vm"
"github.com/scroll-tech/go-ethereum/log"
)
//nolint:unused
func blockTraceIsValid(trace *types.BlockTrace) bool {
if trace == nil {
log.Warn("block trace is empty")
return false
}
flag := true
for _, tx := range trace.ExecutionResults {
flag = structLogResIsValid(tx.StructLogs) && flag
}
return flag
}
//nolint:unused
func structLogResIsValid(txLogs []*types.StructLogRes) bool {
res := true
for i := 0; i < len(txLogs); i++ {
txLog := txLogs[i]
flag := true
switch vm.StringToOp(txLog.Op) {
case vm.CALL, vm.CALLCODE:
flag = codeIsValid(txLog, 2) && flag
flag = stateIsValid(txLog, 2) && flag
case vm.DELEGATECALL, vm.STATICCALL:
flag = codeIsValid(txLog, 2) && flag
case vm.CREATE, vm.CREATE2:
flag = stateIsValid(txLog, 1) && flag
case vm.SLOAD, vm.SSTORE, vm.SELFBALANCE:
flag = stateIsValid(txLog, 1) && flag
case vm.SELFDESTRUCT:
flag = stateIsValid(txLog, 2) && flag
case vm.EXTCODEHASH, vm.BALANCE:
flag = stateIsValid(txLog, 1) && flag
}
res = res && flag
}
return res
}
//nolint:unused
func codeIsValid(txLog *types.StructLogRes, n int) bool {
extraData := txLog.ExtraData
if extraData == nil {
log.Warn("extraData is empty", "pc", txLog.Pc, "opcode", txLog.Op)
return false
} else if len(extraData.CodeList) < n {
log.Warn("code list is too short", "opcode", txLog.Op, "expect length", n, "actual length", len(extraData.CodeList))
return false
}
return true
}
//nolint:unused
func stateIsValid(txLog *types.StructLogRes, n int) bool {
extraData := txLog.ExtraData
if extraData == nil {
log.Warn("extraData is empty", "pc", txLog.Pc, "opcode", txLog.Op)
return false
} else if len(extraData.StateList) < n {
log.Warn("stateList list is too short", "opcode", txLog.Op, "expect length", n, "actual length", len(extraData.StateList))
return false
}
return true
}
// TraceHasUnsupportedOpcodes check if exist unsupported opcodes
func TraceHasUnsupportedOpcodes(opcodes map[string]struct{}, trace *types.BlockTrace) bool {
if trace == nil {
return false
}
eg := errgroup.Group{}
for _, res := range trace.ExecutionResults {
res := res
eg.Go(func() error {
for _, lg := range res.StructLogs {
if _, ok := opcodes[lg.Op]; ok {
return fmt.Errorf("unsupported opcde: %s", lg.Op)
}
}
return nil
})
}
err := eg.Wait()
return err != nil
}

View File

@@ -1,5 +0,0 @@
package l2
// WatcherAPI watcher api service
type WatcherAPI interface {
}

View File

@@ -1,10 +1,9 @@
package l1
package relayer
import (
"context"
"errors"
"math/big"
"time"
// not sure if this will cause problems when relaying with l1geth
@@ -15,7 +14,6 @@ import (
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/database"
@@ -31,14 +29,6 @@ var (
bridgeL1MsgsRelayedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
)
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultMessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
)
// Layer1Relayer is responsible for
// 1. fetch pending L1Message from db
// 2. relay pending message to layer 2 node
@@ -53,11 +43,9 @@ type Layer1Relayer struct {
// channel used to communicate with transaction sender
messageSender *sender.Sender
messageCh <-chan *sender.Confirmation
l2MessengerABI *abi.ABI
gasOracleSender *sender.Sender
gasOracleCh <-chan *sender.Confirmation
l1GasOracleABI *abi.ABI
minGasLimitForMessageRelay uint64
@@ -65,8 +53,6 @@ type Layer1Relayer struct {
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
stopCh chan struct{}
}
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
@@ -96,21 +82,19 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
gasPriceDiff = defaultGasPriceDiff
}
minGasLimitForMessageRelay := uint64(defaultMessageRelayMinGasLimit)
minGasLimitForMessageRelay := uint64(defaultL1MessageRelayMinGasLimit)
if cfg.MessageRelayMinGasLimit != 0 {
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
}
return &Layer1Relayer{
l1Relayer := &Layer1Relayer{
ctx: ctx,
db: db,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
l2MessengerABI: bridge_abi.L2ScrollMessengerABI,
gasOracleSender: gasOracleSender,
gasOracleCh: gasOracleSender.ConfirmChan(),
l1GasOracleABI: bridge_abi.L1GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
@@ -118,9 +102,11 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
stopCh: make(chan struct{}),
}, nil
cfg: cfg,
}
go l1Relayer.handleConfirmLoop(ctx)
return l1Relayer, nil
}
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
@@ -138,7 +124,7 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
for _, msg := range msgs {
if err = r.processSavedEvent(msg); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
}
return
@@ -153,7 +139,7 @@ func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error {
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
if err != nil && err.Error() == "execution reverted: Message was already successfully executed" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
}
if err != nil {
@@ -203,7 +189,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
}
return
@@ -220,57 +206,43 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
}
}
// Start the relayer process
func (r *Layer1Relayer) Start() {
go func() {
ctx, cancel := context.WithCancel(r.ctx)
go utils.Loop(ctx, 2*time.Second, r.ProcessSavedEvents)
go utils.Loop(ctx, 2*time.Second, r.ProcessGasPriceOracle)
go func(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case cfm := <-r.messageCh:
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
if !cfm.IsSuccessful {
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
case cfm := <-r.gasOracleCh:
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case cfm := <-r.messageSender.ConfirmChan():
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
if !cfm.IsSuccessful {
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
}(ctx)
<-r.stopCh
cancel()
}()
}
// Stop the relayer module, for a graceful shutdown.
func (r *Layer1Relayer) Stop() {
close(r.stopCh)
case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
}
}
}

View File

@@ -1,4 +1,4 @@
package l1
package relayer_test
import (
"context"
@@ -8,6 +8,8 @@ import (
"scroll-tech/database/migrate"
"scroll-tech/bridge/relayer"
"scroll-tech/database"
)
@@ -19,9 +21,7 @@ func testCreateNewL1Relayer(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
relayer, err := relayer.NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
relayer.Start()
assert.NotNil(t, relayer)
}

View File

@@ -1,4 +1,4 @@
package l2
package relayer
import (
"context"
@@ -7,7 +7,6 @@ import (
"math/big"
"runtime"
"sync"
"time"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
@@ -20,9 +19,8 @@ import (
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/database"
cutil "scroll-tech/common/utils"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
@@ -40,14 +38,6 @@ var (
bridgeL2BatchesSkippedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
)
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultMessageRelayMinGasLimit = 200000 // should be enough for both ERC20 and ETH relay
)
// Layer2Relayer is responsible for
// 1. Committing and finalizing L2 blocks on L1
// 2. Relaying messages from L2 to L1
@@ -63,15 +53,12 @@ type Layer2Relayer struct {
cfg *config.RelayerConfig
messageSender *sender.Sender
messageCh <-chan *sender.Confirmation
l1MessengerABI *abi.ABI
rollupSender *sender.Sender
rollupCh <-chan *sender.Confirmation
l1RollupABI *abi.ABI
gasOracleSender *sender.Sender
gasOracleCh <-chan *sender.Confirmation
l2GasOracleABI *abi.ABI
minGasLimitForMessageRelay uint64
@@ -91,8 +78,6 @@ type Layer2Relayer struct {
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch hash.
processingFinalization sync.Map
stopCh chan struct{}
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
@@ -126,27 +111,24 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db databa
gasPriceDiff = defaultGasPriceDiff
}
minGasLimitForMessageRelay := uint64(defaultMessageRelayMinGasLimit)
minGasLimitForMessageRelay := uint64(defaultL2MessageRelayMinGasLimit)
if cfg.MessageRelayMinGasLimit != 0 {
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
}
return &Layer2Relayer{
layer2Relayer := &Layer2Relayer{
ctx: ctx,
db: db,
l2Client: l2Client,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
l1MessengerABI: bridge_abi.L1ScrollMessengerABI,
rollupSender: rollupSender,
rollupCh: rollupSender.ConfirmChan(),
l1RollupABI: bridge_abi.ScrollChainABI,
gasOracleSender: gasOracleSender,
gasOracleCh: gasOracleSender.ConfirmChan(),
l2GasOracleABI: bridge_abi.L2GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
@@ -158,8 +140,9 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db databa
processingMessage: sync.Map{},
processingBatchesCommitment: sync.Map{},
processingFinalization: sync.Map{},
stopCh: make(chan struct{}),
}, nil
}
go layer2Relayer.handleConfirmLoop(ctx)
return layer2Relayer, nil
}
const processMsgLimit = 100
@@ -198,7 +181,7 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process l2 saved event", "err", err)
}
return
@@ -247,11 +230,11 @@ func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
if err != nil && err.Error() == "execution reverted: Message was already successfully executed" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
}
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
}
return err
@@ -297,7 +280,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
}
return
@@ -343,7 +326,7 @@ func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error {
txID := crypto.Keccak256Hash(bytes).String()
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send commitBatches tx to layer1 ", "err", err)
}
return err
@@ -493,7 +476,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
finalizeTxHash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("finalizeBatchWithProof in layer1 failed", "hash", hash, "err", err)
}
return
@@ -516,65 +499,20 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
}
// Start the relayer process
func (r *Layer2Relayer) Start() {
go func() {
ctx, cancel := context.WithCancel(r.ctx)
go cutil.Loop(ctx, time.Second, r.ProcessSavedEvents)
go cutil.Loop(ctx, time.Second, r.ProcessCommittedBatches)
go cutil.Loop(ctx, time.Second, r.ProcessGasPriceOracle)
go func(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case confirmation := <-r.messageCh:
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupCh:
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleCh:
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer1", "confirmation", cfm)
}
}
}
}(ctx)
<-r.stopCh
cancel()
}()
}
// Stop the relayer module, for a graceful shutdown.
func (r *Layer2Relayer) Stop() {
close(r.stopCh)
}
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
if !confirmation.IsSuccessful {
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
return
}
transactionType := "Unknown"
// check whether it is a message relay transaction
if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
transactionType = "MessageRelay"
var status types.MsgStatus
if confirmation.IsSuccessful {
status = types.MsgConfirmed
} else {
status = types.MsgRelayFailed
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
}
// @todo handle db error
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), types.MsgConfirmed, confirmation.TxHash.String())
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), status, confirmation.TxHash.String())
if err != nil {
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
}
@@ -586,9 +524,16 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
if batchBatches, ok := r.processingBatchesCommitment.Load(confirmation.ID); ok {
transactionType = "BatchesCommitment"
batchHashes := batchBatches.([]string)
var status types.RollupStatus
if confirmation.IsSuccessful {
status = types.RollupCommitted
} else {
status = types.RollupCommitFailed
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
}
for _, batchHash := range batchHashes {
// @todo handle db error
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), types.RollupCommitted)
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), status)
if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_hash", batchHash, "err", err)
}
@@ -600,8 +545,15 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
// check whether it is a proof finalization transaction
if batchHash, ok := r.processingFinalization.Load(confirmation.ID); ok {
transactionType = "ProofFinalization"
var status types.RollupStatus
if confirmation.IsSuccessful {
status = types.RollupFinalized
} else {
status = types.RollupFinalizeFailed
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
}
// @todo handle db error
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), types.RollupFinalized)
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", batchHash.(string), "err", err)
}
@@ -610,3 +562,32 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case confirmation := <-r.messageSender.ConfirmChan():
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupSender.ConfirmChan():
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer1", "confirmation", cfm)
}
}
}
}
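Editor's note: the hunks above replace the Start/Stop goroutine plumbing with a confirmation loop that the constructor launches and the caller's context cancels. A minimal sketch of that shape, with hypothetical names, assuming nothing beyond the standard library:

package relayer

import "context"

type confirmation struct{ id string }

// sketchRelayer models only the lifecycle change: the loop is started by
// the constructor and bound to its context, so shutdown is driven by
// cancelling that context instead of a stopCh plus Stop().
type sketchRelayer struct {
	msgCh    <-chan *confirmation
	rollupCh <-chan *confirmation
	handle   func(*confirmation)
}

func newSketchRelayer(ctx context.Context, msgCh, rollupCh <-chan *confirmation, handle func(*confirmation)) *sketchRelayer {
	r := &sketchRelayer{msgCh: msgCh, rollupCh: rollupCh, handle: handle}
	go r.confirmLoop(ctx) // started here, not in a separate Start()
	return r
}

func (r *sketchRelayer) confirmLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return // context cancellation replaces the old stopCh
		case c := <-r.msgCh:
			r.handle(c)
		case c := <-r.rollupCh:
			r.handle(c)
		}
	}
}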


@@ -1,4 +1,4 @@
package l2
package relayer_test
import (
"context"
@@ -14,6 +14,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/relayer"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
@@ -39,11 +41,9 @@ func testCreateNewRelayer(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
relayer.Start()
assert.NotNil(t, relayer)
}
func testL2RelayerProcessSaveEvents(t *testing.T) {
@@ -54,27 +54,36 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
err = db.SaveL2Messages(context.Background(), templateL2Message)
assert.NoError(t, err)
traces := []*geth_types.BlockTrace{
traces := []*types.WrappedBlock{
{
Header: &geth_types.Header{
Number: big.NewInt(int64(templateL2Message[0].Height)),
},
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
},
{
Header: &geth_types.Header{
Number: big.NewInt(int64(templateL2Message[0].Height + 1)),
},
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
},
}
assert.NoError(t, db.InsertL2BlockTraces(traces))
assert.NoError(t, db.InsertWrappedBlocks(traces))
parentBatch1 := &types.BlockBatch{
Index: 0,
Hash: common.Hash{}.String(),
StateRoot: common.Hash{}.String(),
}
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
@@ -100,10 +109,15 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
parentBatch1 := &types.BlockBatch{
Index: 0,
Hash: common.Hash{}.String(),
StateRoot: common.Hash{}.String(),
}
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
@@ -136,9 +150,8 @@ func testL2RelayerSkipBatches(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string {
dbTx, err := db.Beginx()
@@ -198,13 +211,13 @@ func genBatchData(t *testing.T, index uint64) *types.BatchData {
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
// unmarshal blockTrace
blockTrace := &geth_types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
wrappedBlock := &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock)
assert.NoError(t, err)
blockTrace.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16))
wrappedBlock.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16))
parentBatch := &types.BlockBatch{
Index: index,
Hash: "0x0000000000000000000000000000000000000000",
}
return types.NewBatchData(parentBatch, []*geth_types.BlockTrace{blockTrace}, nil)
return types.NewBatchData(parentBatch, []*types.WrappedBlock{wrappedBlock}, nil)
}

bridge/relayer/params.go Normal file

@@ -0,0 +1,11 @@
package relayer
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
defaultL2MessageRelayMinGasLimit = 200000
)
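Editor's note on the two gas-price constants: defaultGasPriceDiff/gasPriceDiffPrecision = 50000/1000000, i.e. a 5% relative move. A self-contained sketch of how such a threshold can be checked in integer arithmetic follows; whether the relayer uses exactly this comparison is an assumption, and needUpdate/lastPrice/newPrice are illustrative names.

package main

import "fmt"

const (
	gasPriceDiffPrecision = uint64(1000000)
	defaultGasPriceDiff   = uint64(50000) // 50000/1000000 = 5%
)

// needUpdate reports whether the relative price change reaches the
// configured diff, using only integer math (illustrative sketch).
func needUpdate(lastPrice, newPrice uint64) bool {
	if lastPrice == 0 {
		return true // nothing recorded yet
	}
	var delta uint64
	if newPrice > lastPrice {
		delta = newPrice - lastPrice
	} else {
		delta = lastPrice - newPrice
	}
	return delta*gasPriceDiffPrecision/lastPrice >= defaultGasPriceDiff
}

func main() {
	fmt.Println(needUpdate(100, 104)) // false: 4% < 5%
	fmt.Println(needUpdate(100, 106)) // true: 6% >= 5%
}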


@@ -0,0 +1,108 @@
package relayer_test
import (
"encoding/json"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
base *docker.App
// l2geth client
l2Cli *ethclient.Client
// block trace
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
// batch data
batchData1 *types.BatchData
batchData2 *types.BatchData
)
func setupEnv(t *testing.T) (err error) {
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = base.DBConfig
// Create l2geth client.
l2Cli, err = base.L2Client()
assert.NoError(t, err)
templateBlockTrace1, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
if err != nil {
return err
}
// unmarshal blockTrace
wrappedBlock1 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err
}
parentBatch1 := &types.BlockBatch{
Index: 0,
Hash: "0x0cc6b102c2924402c14b2e3a19baccc316252bfdc44d9ec62e942d34e39ec729",
StateRoot: "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
}
batchData1 = types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json")
if err != nil {
return err
}
// unmarshal blockTrace
wrappedBlock2 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err
}
parentBatch2 := &types.BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
StateRoot: batchData1.Batch.NewStateRoot.String(),
}
batchData2 = types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil)
log.Info("batchHash", "batchhash1", batchData1.Hash().Hex(), "batchhash2", batchData2.Hash().Hex())
return err
}
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
m.Run()
base.Free()
}
func TestFunctions(t *testing.T) {
if err := setupEnv(t); err != nil {
t.Fatal(err)
}
// Run l1 relayer test cases.
t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer)
// Run l2 relayer test cases.
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)
}


@@ -6,21 +6,19 @@ import (
"errors"
"fmt"
"math/big"
"reflect"
"strings"
"sync"
"sync/atomic"
"time"
cmapV2 "github.com/orcaman/concurrent-map/v2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/config"
"scroll-tech/bridge/utils"
)
const (
@@ -37,6 +35,12 @@ const (
var (
// ErrNoAvailableAccount indicates no available account error in the account pool.
ErrNoAvailableAccount = errors.New("sender has no available account to send transaction")
// ErrFullPending indicates the sender's pending pool is full.
ErrFullPending = errors.New("sender's pending pool is full")
)
var (
defaultPendingLimit = 10
)
// Confirmation struct used to indicate transaction confirmation details
@@ -74,9 +78,9 @@ type Sender struct {
// account fields.
auths *accountPool
blockNumber uint64 // Current block number on chain.
baseFeePerGas uint64 // Current base fee per gas on chain
pendingTxs sync.Map // Mapping from nonce to pending transaction
blockNumber uint64 // Current block number on chain.
baseFeePerGas uint64 // Current base fee per gas on chain
pendingTxs cmapV2.ConcurrentMap[string, *PendingTransaction] // Mapping from nonce to pending transaction
confirmCh chan *Confirmation
stopCh chan struct{}
@@ -116,6 +120,11 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
}
}
// initialize pending limit with a default value
if config.PendingLimit == 0 {
config.PendingLimit = defaultPendingLimit
}
sender := &Sender{
ctx: ctx,
config: config,
@@ -125,7 +134,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
confirmCh: make(chan *Confirmation, 128),
blockNumber: header.Number.Uint64(),
baseFeePerGas: baseFeePerGas,
pendingTxs: sync.Map{},
pendingTxs: cmapV2.New[*PendingTransaction](),
stopCh: make(chan struct{}),
}
@@ -134,6 +143,21 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
return sender, nil
}
// PendingCount returns the current number of pending txs.
func (s *Sender) PendingCount() int {
return s.pendingTxs.Count()
}
// PendingLimit returns the maximum number of pending txs the sender can handle.
func (s *Sender) PendingLimit() int {
return s.config.PendingLimit
}
// IsFull returns true if the sender's pending tx pool is full.
func (s *Sender) IsFull() bool {
return s.pendingTxs.Count() >= s.config.PendingLimit
}
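Editor's note: these three accessors let producers apply backpressure before calling SendTransaction. A hedged usage sketch follows; the cut-down interface and the one-second retry cadence are assumptions for illustration, not the bridge's actual API surface.

package sketch

import (
	"errors"
	"time"
)

var errFullPending = errors.New("sender's pending pool is full")

// pendingSender is a simplified stand-in for the real *sender.Sender.
type pendingSender interface {
	IsFull() bool
	SendTransaction(id string) error
}

// sendWithBackpressure waits for the pending pool to drain before
// submitting, and retries if it loses a race with another producer.
func sendWithBackpressure(s pendingSender, id string) error {
	for {
		if s.IsFull() {
			time.Sleep(time.Second) // confirmations will free slots
			continue
		}
		err := s.SendTransaction(id)
		if errors.Is(err, errFullPending) {
			time.Sleep(time.Second) // raced with another producer
			continue
		}
		return err
	}
}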
// Stop stops the sender module.
func (s *Sender) Stop() {
close(s.stopCh)
@@ -159,21 +183,24 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
// SendTransaction sends a signed L2-to-L1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) {
if s.IsFull() {
return common.Hash{}, ErrFullPending
}
// We occupy the ID in case some other thread calls with the same ID at the same time
if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
}
// get an available account from the pool
auth := s.auths.getAccount()
if auth == nil {
s.pendingTxs.Delete(ID) // release the ID on failure
s.pendingTxs.Remove(ID) // release the ID on failure
return common.Hash{}, ErrNoAvailableAccount
}
defer s.auths.releaseAccount(auth)
defer func() {
if err != nil {
s.pendingTxs.Delete(ID) // release the ID on failure
s.pendingTxs.Remove(ID) // release the ID on failure
}
}()
@@ -194,7 +221,7 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
submitAt: atomic.LoadUint64(&s.blockNumber),
feeData: feeData,
}
s.pendingTxs.Store(ID, pending)
s.pendingTxs.Set(ID, pending)
return tx.Hash(), nil
}
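Editor's note: the SetIfAbsent call above implements an atomic reserve-then-fill pattern on the new concurrent map: occupy the ID with a nil placeholder, fail fast on duplicates, then either overwrite with the real entry or release it. A minimal standalone sketch, assuming only the github.com/orcaman/concurrent-map/v2 API already imported by this file:

package main

import (
	"fmt"

	cmapV2 "github.com/orcaman/concurrent-map/v2"
)

type pendingTx struct{ raw string }

func main() {
	m := cmapV2.New[*pendingTx]()

	// Occupy the ID atomically; a concurrent caller reusing "tx-1" fails fast.
	if ok := m.SetIfAbsent("tx-1", nil); ok {
		fmt.Println("reserved tx-1")
	}
	if ok := m.SetIfAbsent("tx-1", nil); !ok {
		fmt.Println("duplicate tx ID rejected")
	}

	// On success, overwrite the placeholder with the real pending entry...
	m.Set("tx-1", &pendingTx{raw: "0xsigned"})

	// ...or release the reservation when sending failed.
	m.Remove("tx-1")
	fmt.Println("count:", m.Count())
}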
@@ -335,17 +362,17 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
}
}
s.pendingTxs.Range(func(key, value interface{}) bool {
for item := range s.pendingTxs.IterBuffered() {
key, pending := item.Key, item.Val
// ignore empty id, since we use empty id to occupy pending task
if value == nil || reflect.ValueOf(value).IsNil() {
return true
if pending == nil {
continue
}
pending := value.(*PendingTransaction)
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
if receipt.BlockNumber.Uint64() <= confirmed {
s.pendingTxs.Delete(key)
s.pendingTxs.Remove(key)
// send confirm message
s.confirmCh <- &Confirmation{
ID: pending.id,
@@ -376,7 +403,7 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
// We need to stop the program and manually handle the situation.
if strings.Contains(err.Error(), "nonce") {
// This key can be deleted
s.pendingTxs.Delete(key)
s.pendingTxs.Remove(key)
// Try get receipt by the latest replaced tx hash
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
@@ -398,8 +425,7 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
pending.submitAt = number
}
}
return true
})
}
}
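Editor's note: the hunk above swaps sync.Map.Range for the concurrent map's IterBuffered, so the callback's `return true` becomes a plain `continue`. A small sketch of the new iteration style, again assuming the concurrent-map/v2 API:

package main

import (
	"fmt"

	cmapV2 "github.com/orcaman/concurrent-map/v2"
)

func main() {
	m := cmapV2.New[*int]()
	one := 1
	m.Set("a", &one)
	m.Set("b", nil) // placeholder, like the sender's occupied-but-unsent IDs

	// IterBuffered snapshots entries into a buffered channel.
	for item := range m.IterBuffered() {
		if item.Val == nil {
			continue // replaces `return true` from sync.Map.Range
		}
		fmt.Println(item.Key, *item.Val)
	}
}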
// Loop is the main event loop


@@ -50,13 +50,15 @@ func setupEnv(t *testing.T) {
// Load default private key.
privateKeys = []*ecdsa.PrivateKey{priv}
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
}
func TestSender(t *testing.T) {
// Setup
setupEnv(t)
t.Run("test pending limit", func(t *testing.T) { testPendLimit(t) })
t.Run("test min gas limit", func(t *testing.T) { testMinGasLimit(t) })
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
@@ -64,6 +66,21 @@ func TestSender(t *testing.T) {
t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
}
func testPendLimit(t *testing.T) {
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
senderCfg.Confirmations = rpc.LatestBlockNumber
senderCfg.PendingLimit = 2
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
assert.NoError(t, err)
defer newSender.Stop()
for i := 0; i < newSender.PendingLimit(); i++ {
_, err = newSender.SendTransaction(strconv.Itoa(i), &common.Address{}, big.NewInt(1), nil, 0)
assert.NoError(t, err)
}
assert.True(t, newSender.PendingCount() <= newSender.PendingLimit())
}
func testMinGasLimit(t *testing.T) {
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
senderCfg.Confirmations = rpc.LatestBlockNumber
@@ -100,6 +117,7 @@ func testBatchSender(t *testing.T, batchSize int) {
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
senderCfg.Confirmations = rpc.LatestBlockNumber
senderCfg.PendingLimit = batchSize * TXBatch
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
if err != nil {
t.Fatal(err)
@@ -119,7 +137,7 @@ func testBatchSender(t *testing.T, batchSize int) {
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := strconv.Itoa(i + index*1000)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
if errors.Is(err, sender.ErrNoAvailableAccount) {
if errors.Is(err, sender.ErrNoAvailableAccount) || errors.Is(err, sender.ErrFullPending) {
<-time.After(time.Second)
continue
}


@@ -82,15 +82,15 @@ func setupEnv(t *testing.T) {
base.RunImages(t)
// Create l1geth container.
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1GethEndpoint()
cfg.L1Config.Endpoint = base.L1GethEndpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
// Create l2geth container.
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
cfg.L2Config.Endpoint = base.L2GethEndpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
// Create db container.
cfg.DBConfig.DSN = base.DBEndpoint()
cfg.DBConfig = base.DBConfig
// Create l1geth and l2geth client.
l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)


@@ -11,8 +11,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -30,14 +30,13 @@ func testImportL1GasPrice(t *testing.T) {
l1Cfg := cfg.L1Config
// Create L1Relayer
l1Relayer, err := l1.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
defer l1Relayer.Stop()
// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := l1.NewWatcher(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())
@@ -81,12 +80,11 @@ func testImportL2GasPrice(t *testing.T) {
l2Cfg := cfg.L2Config
// Create L2Relayer
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// add fake blocks
traces := []*geth_types.BlockTrace{
traces := []*types.WrappedBlock{
{
Header: &geth_types.Header{
Number: big.NewInt(1),
@@ -94,16 +92,17 @@ func testImportL2GasPrice(t *testing.T) {
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
StorageTrace: &geth_types.StorageTrace{},
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
},
}
assert.NoError(t, db.InsertL2BlockTraces(traces))
assert.NoError(t, db.InsertWrappedBlocks(traces))
parentBatch := &types.BlockBatch{
Index: 0,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData := types.NewBatchData(parentBatch, []*geth_types.BlockTrace{
batchData := types.NewBatchData(parentBatch, []*types.WrappedBlock{
traces[0],
}, cfg.L2Config.BatchProposerConfig.PublicInputConfig)


@@ -13,8 +13,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -33,16 +33,14 @@ func testRelayL1MessageSucceed(t *testing.T) {
l2Cfg := cfg.L2Config
// Create L1Relayer
l1Relayer, err := l1.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
defer l1Relayer.Stop()
// Create L1Watcher
confirmations := rpc.LatestBlockNumber
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// Create L2Watcher
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, db)
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
// send message through l1 messenger contract
nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})
@@ -56,7 +54,7 @@ func testRelayL1MessageSucceed(t *testing.T) {
}
// l1 watch process events
l1Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
l1Watcher.FetchContractEvent()
// check db status
msg, err := db.GetL1MessageByQueueIndex(nonce.Uint64())
@@ -79,7 +77,7 @@ func testRelayL1MessageSucceed(t *testing.T) {
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
l2Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
l2Watcher.FetchContractEvent()
msg, err = db.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgConfirmed)


@@ -13,8 +13,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -33,15 +33,15 @@ func testRelayL2MessageSucceed(t *testing.T) {
// Create L2Watcher
confirmations := rpc.LatestBlockNumber
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, db)
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
// Create L2Relayer
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// send message through l2 messenger contract
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
@@ -55,7 +55,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
}
// l2 watch process events
l2Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
l2Watcher.FetchContractEvent()
// check db status
msg, err := db.GetL2MessageByNonce(nonce.Uint64())
@@ -65,7 +65,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.Equal(t, msg.Target, l1Auth.From.String())
// add fake blocks
traces := []*geth_types.BlockTrace{
traces := []*types.WrappedBlock{
{
Header: &geth_types.Header{
Number: sendReceipt.BlockNumber,
@@ -73,16 +73,17 @@ func testRelayL2MessageSucceed(t *testing.T) {
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
StorageTrace: &geth_types.StorageTrace{},
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
},
}
assert.NoError(t, db.InsertL2BlockTraces(traces))
assert.NoError(t, db.InsertWrappedBlocks(traces))
parentBatch := &types.BlockBatch{
Index: 0,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData := types.NewBatchData(parentBatch, []*geth_types.BlockTrace{
batchData := types.NewBatchData(parentBatch, []*types.WrappedBlock{
traces[0],
}, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
batchHash := batchData.Hash().String()
@@ -122,7 +123,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch CommitBatch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
@@ -143,7 +144,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch FinalizeBatch events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
@@ -164,7 +165,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
err = l1Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)


@@ -12,8 +12,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -30,16 +30,15 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// add some blocks to db
var traces []*geth_types.BlockTrace
var wrappedBlocks []*types.WrappedBlock
var parentHash common.Hash
for i := 1; i <= 10; i++ {
header := geth_types.Header{
@@ -48,21 +47,22 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
}
traces = append(traces, &geth_types.BlockTrace{
Header: &header,
StorageTrace: &geth_types.StorageTrace{},
wrappedBlocks = append(wrappedBlocks, &types.WrappedBlock{
Header: &header,
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
})
parentHash = header.Hash()
}
assert.NoError(t, db.InsertL2BlockTraces(traces))
assert.NoError(t, db.InsertWrappedBlocks(wrappedBlocks))
parentBatch := &types.BlockBatch{
Index: 0,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData := types.NewBatchData(parentBatch, []*geth_types.BlockTrace{
traces[0],
traces[1],
batchData := types.NewBatchData(parentBatch, []*types.WrappedBlock{
wrappedBlocks[0],
wrappedBlocks[1],
}, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
batchHash := batchData.Hash().String()
@@ -95,7 +95,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
@@ -125,7 +125,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)


@@ -29,19 +29,6 @@ func ComputeMessageHash(
return common.BytesToHash(crypto.Keccak256(data))
}
// BufferToUint256Be convert bytes array to uint256 array assuming big-endian
func BufferToUint256Be(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
buffer256[i] = big.NewInt(0)
for j := 0; j < 32; j++ {
buffer256[i] = buffer256[i].Lsh(buffer256[i], 8)
buffer256[i] = buffer256[i].Add(buffer256[i], big.NewInt(int64(buffer[i*32+j])))
}
}
return buffer256
}
// BufferToUint256Le convert bytes array to uint256 array assuming little-endian
func BufferToUint256Le(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
@@ -76,23 +63,3 @@ func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
// @todo: add unit test.
func UnpackLogIntoMap(c *abi.ABI, out map[string]interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoMap(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:])
}


@@ -1,35 +1,33 @@
package l2
package watcher
import (
"context"
"fmt"
"math"
"reflect"
"sync"
"time"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/database"
bridgeabi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
)
var (
bridgeL2BatchesGasOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/gas/over/threshold/total", metrics.ScrollRegistry)
bridgeL2BatchesTxsOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/txs/over/threshold/total", metrics.ScrollRegistry)
bridgeL2BatchesCommitTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/commit/total", metrics.ScrollRegistry)
bridgeL2BatchesBlocksCreatedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/blocks/created/total", metrics.ScrollRegistry)
bridgeL2BatchesCommitsSentTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/commits/sent/total", metrics.ScrollRegistry)
bridgeL2BatchesCreatedRateMeter = geth_metrics.NewRegisteredMeter("bridge/l2/batches/blocks/created/rate", metrics.ScrollRegistry)
bridgeL2BatchesTxsCreatedRateMeter = geth_metrics.NewRegisteredMeter("bridge/l2/batches/txs/created/rate", metrics.ScrollRegistry)
bridgeL2BatchesGasCreatedRateMeter = geth_metrics.NewRegisteredMeter("bridge/l2/batches/gas/created/rate", metrics.ScrollRegistry)
bridgeL2BatchesTxsCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/txs/created/per/batch", metrics.ScrollRegistry)
bridgeL2BatchesGasCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/gas/created/per/batch", metrics.ScrollRegistry)
)
// AddBatchInfoToDB inserts the batch information into the BlockBatch table and updates the batch_hash
@@ -84,15 +82,13 @@ type BatchProposer struct {
proofGenerationFreq uint64
batchDataBuffer []*types.BatchData
relayer *Layer2Relayer
relayer *relayer.Layer2Relayer
piCfg *types.PublicInputHashConfig
stopCh chan struct{}
}
// NewBatchProposer will return a new instance of BatchProposer.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *Layer2Relayer, orm database.OrmFactory) *BatchProposer {
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *relayer.Layer2Relayer, orm database.OrmFactory) *BatchProposer {
p := &BatchProposer{
mutex: sync.Mutex{},
ctx: ctx,
@@ -108,42 +104,17 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, rela
proofGenerationFreq: cfg.ProofGenerationFreq,
piCfg: cfg.PublicInputConfig,
relayer: relayer,
stopCh: make(chan struct{}),
}
// for graceful restart.
p.recoverBatchDataBuffer()
// try to commit the leftover pending batches
p.tryCommitBatches()
p.TryCommitBatches()
return p
}
// Start the Listening process
func (p *BatchProposer) Start() {
go func() {
if reflect.ValueOf(p.orm).IsNil() {
panic("must run BatchProposer with DB")
}
ctx, cancel := context.WithCancel(p.ctx)
go utils.Loop(ctx, 2*time.Second, func() {
p.tryProposeBatch()
p.tryCommitBatches()
})
<-p.stopCh
cancel()
}()
}
// Stop the Watcher module, for a graceful shutdown.
func (p *BatchProposer) Stop() {
p.stopCh <- struct{}{}
}
func (p *BatchProposer) recoverBatchDataBuffer() {
// batches are sorted by batch index in increasing order
batchHashes, err := p.orm.GetPendingBatches(math.MaxInt32)
@@ -215,7 +186,8 @@ func (p *BatchProposer) recoverBatchDataBuffer() {
}
}
func (p *BatchProposer) tryProposeBatch() {
// TryProposeBatch will try to propose a batch.
func (p *BatchProposer) TryProposeBatch() {
p.mutex.Lock()
defer p.mutex.Unlock()
@@ -229,18 +201,23 @@ func (p *BatchProposer) tryProposeBatch() {
return
}
p.proposeBatch(blocks)
batchCreated := p.proposeBatch(blocks)
// while the size of batchDataBuffer < commitCalldataMinSize,
// the proposer keeps fetching and proposing batches.
if p.getBatchDataBufferSize() >= p.commitCalldataMinSize {
return
}
// wait for watcher to insert l2 traces.
time.Sleep(time.Second)
if !batchCreated {
// wait for watcher to insert l2 traces.
time.Sleep(time.Second)
}
}
}
func (p *BatchProposer) tryCommitBatches() {
// TryCommitBatches will try to commit the pending batches.
func (p *BatchProposer) TryCommitBatches() {
p.mutex.Lock()
defer p.mutex.Unlock()
@@ -280,14 +257,48 @@ func (p *BatchProposer) tryCommitBatches() {
log.Error("SendCommitTx failed", "error", err)
} else {
// pop the processed batches from the buffer
bridgeL2BatchesCommitTotalCounter.Inc(1)
bridgeL2BatchesCommitsSentTotalCounter.Inc(1)
p.batchDataBuffer = p.batchDataBuffer[index:]
}
}
func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
if len(blocks) == 0 {
return
return false
}
approximatePayloadSize := func(hash string) (uint64, error) {
traces, err := p.orm.GetL2WrappedBlocks(map[string]interface{}{"hash": hash})
if err != nil {
return 0, err
}
if len(traces) != 1 {
return 0, fmt.Errorf("unexpected traces length, expected = 1, actual = %d", len(traces))
}
size := 0
for _, tx := range traces[0].Transactions {
size += len(tx.Data)
}
return uint64(size), nil
}
firstSize, err := approximatePayloadSize(blocks[0].Hash)
if err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
return false
}
if firstSize > p.commitCalldataSizeLimit {
log.Warn("oversized payload even for only 1 block", "height", blocks[0].Number, "size", firstSize)
// note: we should probably fail here once we can ensure this will not happen
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
return false
}
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
return true
}
if blocks[0].GasUsed > p.batchGasThreshold {
@@ -296,11 +307,11 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
} else {
bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(blocks[0].GasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(1)
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
}
return
return true
}
if blocks[0].TxNum > p.batchTxNumThreshold {
@@ -309,40 +320,49 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
} else {
bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(blocks[0].GasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(1)
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
}
return
return true
}
var gasUsed, txNum uint64
var gasUsed, txNum, payloadSize uint64
reachThreshold := false
// add blocks into the batch until one of the thresholds (gas, tx count, payload size) is reached
for i, block := range blocks {
if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) {
size, err := approximatePayloadSize(block.Hash)
if err != nil {
log.Error("failed to create batch", "number", block.Number, "err", err)
return false
}
if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) || (payloadSize+size > p.commitCalldataSizeLimit) {
blocks = blocks[:i]
reachThreshold = true
break
}
gasUsed += block.GasUsed
txNum += block.TxNum
payloadSize += size
}
// if too little gas has been gathered, but we don't want to halt, we then check the first block in the batch:
// if it's not old enough we will skip proposing the batch,
// otherwise we will still propose a batch
if !reachThreshold && blocks[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) {
return
return false
}
if err := p.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
} else {
bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(txNum))
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(gasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(int64(len(blocks)))
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(txNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(gasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(int64(len(blocks)))
}
return true
}
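Editor's note: in isolation, the accumulation loop above selects the longest prefix of blocks whose running gas, transaction count, and calldata payload all stay within their limits. A simplified standalone sketch (field names are placeholders, not the real types.BlockInfo):

package main

import "fmt"

type blockInfo struct {
	GasUsed, TxNum, PayloadSize uint64
}

// takePrefix returns the longest prefix of blocks that fits all three
// limits; the caller may still defer the batch on the age check.
func takePrefix(blocks []blockInfo, gasLimit, txLimit, sizeLimit uint64) []blockInfo {
	var gas, txs, size uint64
	for i, b := range blocks {
		if gas+b.GasUsed > gasLimit || txs+b.TxNum > txLimit || size+b.PayloadSize > sizeLimit {
			return blocks[:i]
		}
		gas += b.GasUsed
		txs += b.TxNum
		size += b.PayloadSize
	}
	return blocks
}

func main() {
	blocks := []blockInfo{
		{GasUsed: 2000000, TxNum: 50, PayloadSize: 10000},
		{GasUsed: 2000000, TxNum: 100, PayloadSize: 10000},
	}
	// The second block would push gas past 3M (and txs past 135), so only
	// the first block is taken.
	fmt.Println(len(takePrefix(blocks, 3000000, 135, 120000))) // 1
}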
func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error {
@@ -368,16 +388,16 @@ func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error {
}
func (p *BatchProposer) generateBatchData(parentBatch *types.BlockBatch, blocks []*types.BlockInfo) (*types.BatchData, error) {
var traces []*geth_types.BlockTrace
var wrappedBlocks []*types.WrappedBlock
for _, block := range blocks {
trs, err := p.orm.GetL2BlockTraces(map[string]interface{}{"hash": block.Hash})
trs, err := p.orm.GetL2WrappedBlocks(map[string]interface{}{"hash": block.Hash})
if err != nil || len(trs) != 1 {
log.Error("Failed to GetBlockTraces", "hash", block.Hash, "err", err)
return nil, err
}
traces = append(traces, trs[0])
wrappedBlocks = append(wrappedBlocks, trs[0])
}
return types.NewBatchData(parentBatch, traces, p.piCfg), nil
return types.NewBatchData(parentBatch, wrappedBlocks, p.piCfg), nil
}
func (p *BatchProposer) getBatchDataBufferSize() (size uint64) {


@@ -1,4 +1,4 @@
package l2
package watcher_test
import (
"context"
@@ -6,13 +6,15 @@ import (
"math"
"testing"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/common/types"
)
@@ -22,34 +24,49 @@ func testBatchProposerProposeBatch(t *testing.T) {
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
db.Close()
}()
// Insert traces into db.
assert.NoError(t, db.InsertL2BlockTraces([]*geth_types.BlockTrace{blockTrace1}))
assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock1}))
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, db)
wc.Start()
defer wc.Stop()
wc := watcher.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
loopToFetchEvent(subCtx, wc)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
batch, err := db.GetLatestBatch()
assert.NoError(t, err)
proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
// Create a new batch.
batchData := types.NewBatchData(&types.BlockBatch{
Index: 0,
Hash: batch.Hash,
StateRoot: batch.StateRoot,
}, []*types.WrappedBlock{wrappedBlock1}, nil)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
proposer := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, relayer, db)
proposer.tryProposeBatch()
proposer.TryProposeBatch()
infos, err := db.GetUnbatchedL2Blocks(map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", 100))
assert.NoError(t, err)
assert.Equal(t, 0, len(infos))
exist, err := db.BatchRecordExist(batchData1.Hash().Hex())
exist, err := db.BatchRecordExist(batchData.Hash().Hex())
assert.NoError(t, err)
assert.Equal(t, true, exist)
}
@@ -61,13 +78,26 @@ func testBatchProposerGracefulRestart(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
// Insert traces into db.
assert.NoError(t, db.InsertL2BlockTraces([]*geth_types.BlockTrace{blockTrace2}))
assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock2}))
// Insert block batch into db.
batchData1 := types.NewBatchData(&types.BlockBatch{
Index: 0,
Hash: common.Hash{}.String(),
StateRoot: common.Hash{}.String(),
}, []*types.WrappedBlock{wrappedBlock1}, nil)
parentBatch2 := &types.BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
StateRoot: batchData1.Batch.NewStateRoot.String(),
}
batchData2 := types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
@@ -85,7 +115,7 @@ func testBatchProposerGracefulRestart(t *testing.T) {
assert.Equal(t, 1, len(batchHashes))
assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])
// test p.recoverBatchDataBuffer().
_ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
_ = watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,

bridge/watcher/common.go Normal file

@@ -0,0 +1,11 @@
package watcher
import "github.com/scroll-tech/go-ethereum/common"
const contractEventsBlocksFetchLimit = int64(10)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}


@@ -1,9 +1,8 @@
package l1
package watcher
import (
"context"
"math/big"
"time"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
@@ -17,9 +16,8 @@ import (
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/database"
cutil "scroll-tech/common/utils"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
@@ -33,20 +31,14 @@ var (
bridgeL1MsgsRollupEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}
type rollupEvent struct {
batchHash common.Hash
txHash common.Hash
status types.RollupStatus
}
// Watcher will listen for smart contract events from Eth L1.
type Watcher struct {
// L1WatcherClient will listen for smart contract events from Eth L1.
type L1WatcherClient struct {
ctx context.Context
client *ethclient.Client
db database.OrmFactory
@@ -67,13 +59,10 @@ type Watcher struct {
processedMsgHeight uint64
// The height of the block that the watcher has retrieved header rlp
processedBlockHeight uint64
stopCh chan bool
}
// NewWatcher returns a new instance of Watcher. The instance will be not fully prepared,
// and still needs to be finalized and ran by calling `watcher.Start`.
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *Watcher {
// NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *L1WatcherClient {
savedHeight, err := db.GetLayer1LatestWatchedHeight()
if err != nil {
log.Warn("Failed to fetch height from db", "err", err)
@@ -92,9 +81,7 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
savedL1BlockHeight = startHeight
}
stopCh := make(chan bool)
return &Watcher{
return &L1WatcherClient{
ctx: ctx,
client: client,
db: db,
@@ -111,51 +98,11 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
processedMsgHeight: uint64(savedHeight),
processedBlockHeight: savedL1BlockHeight,
stopCh: stopCh,
}
}
// Start the Watcher module.
func (w *Watcher) Start() {
go func() {
ctx, cancel := context.WithCancel(w.ctx)
go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
} else {
if err := w.FetchBlockHeader(number); err != nil {
log.Error("Failed to fetch L1 block header", "lastest", number, "err", err)
}
}
})
go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
} else {
if err := w.FetchContractEvent(number); err != nil {
log.Error("Failed to fetch bridge contract", "err", err)
}
}
})
<-w.stopCh
cancel()
}()
}
// Stop the Watcher module, for a graceful shutdown.
func (w *Watcher) Stop() {
w.stopCh <- true
}
const contractEventsBlocksFetchLimit = int64(10)
// FetchBlockHeader pulls latest L1 blocks and saves them in DB
func (w *Watcher) FetchBlockHeader(blockHeight uint64) error {
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
fromBlock := int64(w.processedBlockHeight) + 1
toBlock := int64(blockHeight)
if toBlock < fromBlock {
@@ -201,10 +148,15 @@ func (w *Watcher) FetchBlockHeader(blockHeight uint64) error {
}
// FetchContractEvent pulls latest event logs from the given contract address and saves them in DB
func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
func (w *L1WatcherClient) FetchContractEvent() error {
defer func() {
log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
return err
}
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight)
@@ -317,7 +269,7 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
return nil
}
func (w *Watcher) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) {
func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) {
// Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up


@@ -1,4 +1,4 @@
package l1
package watcher_test
import (
"context"
@@ -9,6 +9,8 @@ import (
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/bridge/watcher"
)
func testStartWatcher(t *testing.T) {
@@ -18,12 +20,11 @@ func testStartWatcher(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
client, err := ethclient.Dial(base.L1GethEndpoint())
client, err := ethclient.Dial(base.L1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
watcher.Start()
defer watcher.Stop()
watcher := watcher.NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
assert.NoError(t, watcher.FetchContractEvent())
}


@@ -1,16 +1,15 @@
package l2
package watcher
import (
"context"
"errors"
"fmt"
"math/big"
"reflect"
"time"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event"
@@ -20,7 +19,7 @@ import (
"scroll-tech/common/metrics"
"scroll-tech/common/types"
cutil "scroll-tech/common/utils"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
@@ -30,22 +29,16 @@ import (
// Metrics
var (
bridgeL2MsgsSyncHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2TracesFetchedHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/traces/fetched/height", metrics.ScrollRegistry)
bridgeL2TracesFetchedGapGauge = geth_metrics.NewRegisteredGauge("bridge/l2/traces/fetched/gap", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsSentEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL2MsgsAppendEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry)
bridgeL2MsgsRelayedEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}
// WatcherClient provide APIs which support others to subscribe to various event from l2geth
type WatcherClient struct {
// L2WatcherClient provides APIs for subscribing to various events from l2geth
type L2WatcherClient struct {
ctx context.Context
event.Feed
@@ -58,25 +51,25 @@ type WatcherClient struct {
messengerAddress common.Address
messengerABI *abi.ABI
messageQueueAddress common.Address
messageQueueABI *abi.ABI
messageQueueAddress common.Address
messageQueueABI *abi.ABI
withdrawTrieRootSlot common.Hash
// The height of the block that the watcher has retrieved event logs
processedMsgHeight uint64
stopped uint64
stopCh chan struct{}
}
// NewL2WatcherClient takes an l2geth instance and generates an L2WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, orm database.OrmFactory) *WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, orm database.OrmFactory) *L2WatcherClient {
savedHeight, err := orm.GetLayer2LatestWatchedHeight()
if err != nil {
log.Warn("fetch height from db failed", "err", err)
savedHeight = 0
}
w := WatcherClient{
w := L2WatcherClient{
ctx: ctx,
Client: client,
orm: orm,
@@ -86,10 +79,10 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
messengerAddress: messengerAddress,
messengerABI: bridge_abi.L2ScrollMessengerABI,
messageQueueAddress: messageQueueAddress,
messageQueueABI: bridge_abi.L2MessageQueueABI,
messageQueueAddress: messageQueueAddress,
messageQueueABI: bridge_abi.L2MessageQueueABI,
withdrawTrieRootSlot: withdrawTrieRootSlot,
stopCh: make(chan struct{}),
stopped: 0,
}
@@ -101,7 +94,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
return &w
}
func (w *WatcherClient) initializeGenesis() error {
func (w *L2WatcherClient) initializeGenesis() error {
if count, err := w.orm.GetBatchCount(); err != nil {
return fmt.Errorf("failed to get batch count: %v", err)
} else if count > 0 {
@@ -116,15 +109,7 @@ func (w *WatcherClient) initializeGenesis() error {
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
blockTrace := &geth_types.BlockTrace{
Coinbase: nil,
Header: genesis,
Transactions: []*geth_types.TransactionData{},
StorageTrace: nil,
ExecutionResults: []*geth_types.ExecutionResult{},
MPTWitness: nil,
}
blockTrace := &types.WrappedBlock{Header: genesis, Transactions: nil, WithdrawTrieRoot: common.Hash{}}
batchData := types.NewGenesisBatchData(blockTrace)
if err = AddBatchInfoToDB(w.orm, batchData); err != nil {
@@ -147,52 +132,16 @@ func (w *WatcherClient) initializeGenesis() error {
return nil
}
// Start the Listening process
func (w *WatcherClient) Start() {
go func() {
if reflect.ValueOf(w.orm).IsNil() {
panic("must run L2 watcher with DB")
}
ctx, cancel := context.WithCancel(w.ctx)
go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
} else {
w.tryFetchRunningMissingBlocks(ctx, number)
}
})
go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
} else {
w.FetchContractEvent(number)
}
})
<-w.stopCh
cancel()
}()
}
// Stop the Watcher module, for a graceful shutdown.
func (w *WatcherClient) Stop() {
w.stopCh <- struct{}{}
}
const blockTracesFetchLimit = uint64(10)
// try fetch missing blocks if inconsistent
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
// TryFetchRunningMissingBlocks tries to fetch missing blocks when the DB is behind the latest confirmed block
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
// Get the newest block in the DB; blocks must exist in the DB at this point.
// Don't use "block_trace" table "trace" column's BlockTrace.Number,
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
heightInDB, err := w.orm.GetL2BlockTracesLatestHeight()
heightInDB, err := w.orm.GetL2BlocksLatestHeight()
if err != nil {
log.Error("failed to GetL2BlockTracesLatestHeight", "err", err)
log.Error("failed to GetL2BlocksLatestHeight", "err", err)
return
}
@@ -214,24 +163,60 @@ func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockH
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
bridgeL2TracesFetchedHeightGauge.Update(int64(to))
bridgeL2TracesFetchedGapGauge.Update(int64(blockHeight - to))
bridgeL2BlocksFetchedHeightGauge.Update(int64(to))
bridgeL2BlocksFetchedGapGauge.Update(int64(blockHeight - to))
}
}
func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var traces []*geth_types.BlockTrace
for number := from; number <= to; number++ {
log.Debug("retrieving block trace", "height", number)
trace, err2 := w.GetBlockTraceByNumber(ctx, big.NewInt(int64(number)))
if err2 != nil {
return fmt.Errorf("failed to GetBlockResultByHash: %v. number: %v", err2, number)
func txsToTxsData(txs geth_types.Transactions) []*geth_types.TransactionData {
txsData := make([]*geth_types.TransactionData, len(txs))
for i, tx := range txs {
v, r, s := tx.RawSignatureValues()
txsData[i] = &geth_types.TransactionData{
Type: tx.Type(),
TxHash: tx.Hash().String(),
Nonce: tx.Nonce(),
ChainId: (*hexutil.Big)(tx.ChainId()),
Gas: tx.Gas(),
GasPrice: (*hexutil.Big)(tx.GasPrice()),
To: tx.To(),
Value: (*hexutil.Big)(tx.Value()),
Data: hexutil.Encode(tx.Data()),
IsCreate: tx.To() == nil,
V: (*hexutil.Big)(v),
R: (*hexutil.Big)(r),
S: (*hexutil.Big)(s),
}
log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash().String())
traces = append(traces, trace)
}
if len(traces) > 0 {
if err := w.orm.InsertL2BlockTraces(traces); err != nil {
return txsData
}
func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var blocks []*types.WrappedBlock
for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number)
block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number)))
if err2 != nil {
return fmt.Errorf("failed to GetBlockByNumber: %v. number: %v", err2, number)
}
log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String())
withdrawTrieRoot, err3 := w.StorageAt(ctx, w.messageQueueAddress, w.withdrawTrieRootSlot, big.NewInt(int64(number)))
if err3 != nil {
return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number)
}
blocks = append(blocks, &types.WrappedBlock{
Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()),
WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot),
})
}
if len(blocks) > 0 {
if err := w.orm.InsertWrappedBlocks(blocks); err != nil {
return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
}
}
@@ -239,14 +224,18 @@ func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uin
return nil
}
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
func (w *L2WatcherClient) FetchContractEvent() {
defer func() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
return
}
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight)
@@ -322,7 +311,7 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
}
}
func (w *WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L2Message, []relayedMessage, error) {
func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L2Message, []relayedMessage, error) {
// Need to use the contract ABI to parse the event logs
// Can only be tested after we have our contracts set up


@@ -1,4 +1,4 @@
package l2
package watcher_test
import (
"context"
@@ -19,6 +19,9 @@ import (
"scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -29,12 +32,16 @@ func testCreateNewWatcherAndStop(t *testing.T) {
l2db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
l2db.Close()
}()
l2cfg := cfg.L2Config
rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2db)
rc.Start()
defer rc.Stop()
rc := watcher.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, l2db)
loopToFetchEvent(subCtx, rc)
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
@@ -60,12 +67,17 @@ func testMonitorBridgeContract(t *testing.T) {
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
db.Close()
}()
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, db)
wc.Start()
defer wc.Stop()
wc := watcher.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
loopToFetchEvent(subCtx, wc)
previousHeight, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
@@ -79,9 +91,7 @@ func testMonitorBridgeContract(t *testing.T) {
assert.NoError(t, err)
rc := prepareWatcherClient(l2Cli, db, address)
rc.Start()
defer rc.Stop()
loopToFetchEvent(subCtx, rc)
// Call mock_bridge instance sendMessage to trigger emit events
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
@@ -128,7 +138,13 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
db.Close()
}()
previousHeight, err := l2Cli.BlockNumber(context.Background()) // shadow the global previousHeight
assert.NoError(t, err)
@@ -141,8 +157,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
assert.NoError(t, err)
rc := prepareWatcherClient(l2Cli, db, address)
rc.Start()
defer rc.Stop()
loopToFetchEvent(subCtx, rc)
// Call the mock_bridge instance's sendMessage to emit events multiple times
numTransactions := 4
@@ -195,9 +210,9 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
assert.Equal(t, 5, len(msgs))
}
func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *watcher.L2WatcherClient {
confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, db)
return watcher.NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
@@ -209,3 +224,7 @@ func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.Privat
assert.NoError(t, err)
return auth
}
func loopToFetchEvent(subCtx context.Context, watcher *watcher.L2WatcherClient) {
go cutils.Loop(subCtx, 2*time.Second, watcher.FetchContractEvent)
}
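
FetchContractEvent no longer takes a block height argument, so tests can drive it with the generic `cutils.Loop` helper as shown above. A minimal sketch of such a ticker-based helper, assuming only the `Loop(ctx, period, f)` signature visible in this diff (the real `scroll-tech/common/utils` implementation may differ):

```go
package utils

import (
	"context"
	"time"
)

// Loop calls f once per period until ctx is cancelled.
func Loop(ctx context.Context, period time.Duration, f func()) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			f()
		}
	}
}
```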


@@ -1,12 +1,10 @@
package l2
package watcher_test
import (
"encoding/json"
"fmt"
"os"
"testing"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
@@ -26,12 +24,8 @@ var (
l2Cli *ethclient.Client
// block trace
blockTrace1 *geth_types.BlockTrace
blockTrace2 *geth_types.BlockTrace
// batch data
batchData1 *types.BatchData
batchData2 *types.BatchData
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
)
func setupEnv(t *testing.T) (err error) {
@@ -41,9 +35,9 @@ func setupEnv(t *testing.T) (err error) {
base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1GethEndpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
cfg.DBConfig.DSN = base.DBEndpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = base.DBConfig
// Create l2geth client.
l2Cli, err = base.L2Client()
@@ -54,35 +48,20 @@ func setupEnv(t *testing.T) (err error) {
return err
}
// unmarshal the wrapped block
blockTrace1 = &geth_types.BlockTrace{}
if err = json.Unmarshal(templateBlockTrace1, blockTrace1); err != nil {
wrappedBlock1 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err
}
parentBatch1 := &types.BlockBatch{
Index: 1,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData1 = types.NewBatchData(parentBatch1, []*geth_types.BlockTrace{blockTrace1}, nil)
templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json")
if err != nil {
return err
}
// unmarshal the wrapped block
blockTrace2 = &geth_types.BlockTrace{}
if err = json.Unmarshal(templateBlockTrace2, blockTrace2); err != nil {
wrappedBlock2 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err
}
parentBatch2 := &types.BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
}
batchData2 = types.NewBatchData(parentBatch2, []*geth_types.BlockTrace{blockTrace2}, nil)
fmt.Printf("batchhash1 = %x\n", batchData1.Hash())
fmt.Printf("batchhash2 = %x\n", batchData2.Hash())
return err
}
@@ -98,18 +77,13 @@ func TestFunction(t *testing.T) {
if err := setupEnv(t); err != nil {
t.Fatal(err)
}
// Run l1 watcher test cases.
t.Run("TestStartWatcher", testStartWatcher)
// Run l2 watcher test cases.
t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
// Run l2 relayer test cases.
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)
// Run batch proposer test cases.
t.Run("TestBatchProposerProposeBatch", testBatchProposerProposeBatch)
t.Run("TestBatchProposerGracefulRestart", testBatchProposerGracefulRestart)


@@ -13,7 +13,7 @@ RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .
RUN cargo build --release
RUN find ./ | grep libzktrie.so | xargs -i cp {} /app/target/release/
RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
# Download Go dependencies


@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build event_watcher
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
# Copy event_watcher into a second-stage alpine deploy container
FROM alpine:latest
COPY --from=builder /bin/event_watcher /bin/
ENTRYPOINT ["event_watcher"]


@@ -2,4 +2,4 @@ assets/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*


@@ -11,16 +11,16 @@ COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build bridge
# Build gas_oracle
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd && go build -v -p 4 -o /bin/bridge
cd /src/bridge/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
# Copy bridge into a second-stage alpine deploy container
# Copy gas_oracle into a second-stage alpine deploy container
FROM alpine:latest
COPY --from=builder /bin/bridge /bin/
COPY --from=builder /bin/gas_oracle /bin/
ENTRYPOINT ["bridge"]
ENTRYPOINT ["gas_oracle"]


@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*


@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build msg_relayer
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/msg_relayer/ && go build -v -p 4 -o /bin/msg_relayer
# Copy msg_relayer into a second-stage alpine deploy container
FROM alpine:latest
COPY --from=builder /bin/msg_relayer /bin/
ENTRYPOINT ["msg_relayer"]


@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*


@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build rollup_relayer
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
# Copy rollup_relayer into a second-stage alpine deploy container
FROM alpine:latest
COPY --from=builder /bin/rollup_relayer /bin/
ENTRYPOINT ["rollup_relayer"]


@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*


@@ -4,7 +4,7 @@ ${GOROOT}/bin/bin/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.db.txt > coverage.db.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.common.txt > coverage.common.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
# ${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml
#${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml
npx cobertura-merge -o cobertura.xml \
package1=coverage.bridge.xml \

build/run_tests.sh Executable file

@@ -0,0 +1,26 @@
#!/bin/bash
set -uex
profile_name=$1
# Packages excluded from the coverage report (test harnesses and binaries).
exclude_dirs=("scroll-tech/bridge/cmd" "scroll-tech/bridge/tests" "scroll-tech/bridge/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/config")
# Every package in the workspace except the top-level profile package itself.
all_packages=$(go list ./... | grep -v "^scroll-tech/${profile_name}$")
coverpkg="scroll-tech/${profile_name}"
for pkg in $all_packages; do
exclude_pkg=false
for exclude_dir in "${exclude_dirs[@]}"; do
if [[ $pkg == $exclude_dir* ]]; then
exclude_pkg=true
break
fi
done
if [ "$exclude_pkg" = false ]; then
coverpkg="$coverpkg,$pkg/..."
fi
done
echo "coverage.${profile_name}.txt"
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="$coverpkg" -coverprofile=../coverage.${profile_name}.txt -covermode=atomic ./...

common/bytecode/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
*.go
*.sol

common/bytecode/Makefile Normal file

@@ -0,0 +1,9 @@
.PHONY: all erc20 greeter
all: erc20 greeter
erc20:
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./erc20/ERC20Mock.json --pkg erc20 --out ./erc20/ERC20Mock.go
greeter:
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./greeter/Greeter.json --pkg greeter --out ./greeter/Greeter.go

common/bytecode/README.md Normal file

@@ -0,0 +1,40 @@
## How to pre-deploy contracts
* See https://github.com/scroll-tech/genesis-creator for reference.
1. Set up the environment
```bash
git clone git@github.com:scroll-tech/genesis-creator.git
cd genesis-creator
go get -v github.com/scroll-tech/go-ethereum@develop && go mod tidy
make abi && make genesis-creator
make l2geth-docker
```
2. Start docker and write the pre-deployed contracts into the genesis file.
```bash
make start-docker
./bin/genesis-creator -genesis ${SCROLLPATH}/common/docker/l2geth/genesis.json -contract [erc20|greeter]
```
3. Rebuild l2geth docker.
```bash
cd ${SCROLLPATH}
make dev_docker
```
## How to get a contract ABI
* The steps for other contracts are the same as for erc20, e.g.:
1. Install solc.
*See https://docs.soliditylang.org/en/latest/installing-solidity.html*
2. Generate the ABI file.
```bash
cd genesis-creator
solc --combined-json "abi" --optimize ${SCROLLPATH}/common/bytecode/erc20/ERC20Mock.sol | jq > ${SCROLLPATH}/common/bytecode/erc20/ERC20Mock.json
```
3. Translate the ABI to Go bindings.
```bash
cd ${SCROLLPATH}
make -C common/bytecode all
```
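
Once the bindings are generated, they can be called from Go like any abigen output. A hypothetical sketch, assuming abigen produced the conventional `NewERC20Mock` constructor in the `erc20` package and that the token is already pre-deployed in the genesis (the address below is a placeholder, not a real deployment):

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"

	"scroll-tech/common/bytecode/erc20"
)

func main() {
	// Dial the local l2geth node started by dev_docker.
	client, err := ethclient.Dial("http://127.0.0.1:8545")
	if err != nil {
		panic(err)
	}
	// Hypothetical pre-deployed ERC20Mock address from the genesis file.
	addr := common.HexToAddress("0x0000000000000000000000000000000000001111")
	token, err := erc20.NewERC20Mock(addr, client)
	if err != nil {
		panic(err)
	}
	supply, err := token.TotalSupply(&bind.CallOpts{})
	if err != nil {
		panic(err)
	}
	fmt.Println("total supply:", supply)
}
```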


@@ -0,0 +1,387 @@
{
"contracts": {
"tests/contracts/erc20/erc20.sol:ERC20Mock": {
"abi": [
{
"inputs": [
{
"internalType": "string",
"name": "name",
"type": "string"
},
{
"internalType": "string",
"name": "symbol",
"type": "string"
},
{
"internalType": "address",
"name": "initialAccount",
"type": "address"
},
{
"internalType": "uint256",
"name": "initialBalance",
"type": "uint256"
}
],
"stateMutability": "payable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Approval",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "from",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "to",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Transfer",
"type": "event"
},
{
"inputs": [
{
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"internalType": "address",
"name": "spender",
"type": "address"
}
],
"name": "allowance",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "approve",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "approveInternal",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "account",
"type": "address"
}
],
"name": "balanceOf",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "account",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "burn",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "decimals",
"outputs": [
{
"internalType": "uint8",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "subtractedValue",
"type": "uint256"
}
],
"name": "decreaseAllowance",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "addedValue",
"type": "uint256"
}
],
"name": "increaseAllowance",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "account",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "mint",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "name",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "symbol",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "totalSupply",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "transfer",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "transferFrom",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "transferInternal",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]
}
},
"version": "0.8.16+commit.07a7930e.Darwin.appleclang"
}


@@ -0,0 +1,72 @@
{
"contracts": {
"greeter/Greeter.sol:Greeter": {
"abi": [
{
"inputs": [
{
"internalType": "uint256",
"name": "num",
"type": "uint256"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"inputs": [],
"name": "retrieve",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "retrieve_failing",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "num",
"type": "uint256"
}
],
"name": "set_value",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "num",
"type": "uint256"
}
],
"name": "set_value_failing",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]
}
},
"version": "0.8.16+commit.07a7930e.Darwin.appleclang"
}


@@ -31,8 +31,10 @@ type Cmd struct {
checkFuncs cmap.ConcurrentMap //map[string]checkFunc
//stdout bytes.Buffer
Err error
// open log flag.
openLog bool
// error channel
ErrChan chan error
}
// NewCmd creates a Cmd instance.
@@ -41,6 +43,7 @@ func NewCmd(name string, args ...string) *Cmd {
checkFuncs: cmap.New(),
name: name,
args: args,
ErrChan: make(chan error, 10),
}
}
@@ -58,12 +61,12 @@ func (c *Cmd) runCmd() {
cmd := exec.Command(c.args[0], c.args[1:]...) //nolint:gosec
cmd.Stdout = c
cmd.Stderr = c
_ = cmd.Run()
c.ErrChan <- cmd.Run()
}
// RunCmd runs the command, asynchronously when parallel is true.
func (c *Cmd) RunCmd(parallel bool) {
fmt.Println("cmd: ", c.args)
fmt.Println("cmd:", c.args)
if parallel {
go c.runCmd()
} else {
@@ -71,12 +74,17 @@ func (c *Cmd) RunCmd(parallel bool) {
}
}
// OpenLog enables or disables the command's log output.
func (c *Cmd) OpenLog(open bool) {
c.openLog = open
}
func (c *Cmd) Write(data []byte) (int, error) {
out := string(data)
if verbose {
fmt.Printf("%s: %v", c.name, out)
if verbose || c.openLog {
fmt.Printf("%s:\n\t%v", c.name, out)
} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
fmt.Printf("%s: %v", c.name, out)
fmt.Printf("%s:\n\t%v", c.name, out)
}
go c.checkFuncs.IterCb(func(_ string, value interface{}) {
check := value.(checkFunc)


@@ -38,9 +38,17 @@ func (c *Cmd) RunApp(waitResult func() bool) {
// WaitExit waits until the process exits.
func (c *Cmd) WaitExit() {
// Wait all the check funcs are finished or test status is failed.
for !(c.Err != nil || c.checkFuncs.IsEmpty()) {
<-time.After(time.Millisecond * 500)
// Wait until all the check functions have finished; interrupt the loop when an error appears.
var err error
for err == nil && !c.checkFuncs.IsEmpty() {
select {
case err = <-c.ErrChan:
if err != nil {
fmt.Printf("%s appear error durning running, err: %v\n", c.name, err)
}
default:
<-time.After(time.Millisecond * 500)
}
}
// Send interrupt signal.
@@ -56,7 +64,7 @@ func (c *Cmd) WaitExit() {
// Interrupt sends an interrupt signal.
func (c *Cmd) Interrupt() {
c.mu.Lock()
c.Err = c.cmd.Process.Signal(os.Interrupt)
c.ErrChan <- c.cmd.Process.Signal(os.Interrupt)
c.mu.Unlock()
}
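
With exit errors now flowing through `ErrChan`, a caller can run a command and still observe how the process terminated. A minimal usage sketch based only on the exported API shown above (`docker --version` is just an example invocation):

```go
package main

import (
	"fmt"

	"scroll-tech/common/cmd"
)

func main() {
	c := cmd.NewCmd("docker-version-test", "docker", "--version")
	c.OpenLog(true) // echo the command's output even without -verbose
	c.RunCmd(false) // run synchronously; runCmd pushes the exit error into ErrChan
	if err := <-c.ErrChan; err != nil {
		fmt.Println("command failed:", err)
	}
}
```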


@@ -1,8 +1,8 @@
package docker
import (
"context"
"crypto/rand"
"database/sql"
"encoding/json"
"fmt"
"math/big"
@@ -11,13 +11,11 @@ import (
"time"
"github.com/jmoiron/sqlx"
"github.com/modern-go/reflect2"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
@@ -27,208 +25,172 @@ var (
dbStartPort = 30000
)
// AppAPI app interface.
type AppAPI interface {
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
RunApp(waitResult func() bool)
WaitExit()
ExpectWithTimeout(t *testing.T, parallel bool, timeout time.Duration, keyword string)
}
// App is a collection of runtime docker images
type App struct {
l1gethImg ImgInstance
l2gethImg ImgInstance
L1gethImg GethImgInstance
L2gethImg GethImgInstance
DBImg ImgInstance
dbImg ImgInstance
dbConfig *database.DBConfig
dbFile string
dbClient *sql.DB
DBConfig *database.DBConfig
DBConfigFile string
// common timestamp.
timestamp int
Timestamp int
}
// NewDockerApp returns a new instance of the App struct
func NewDockerApp() *App {
timestamp := time.Now().Nanosecond()
return &App{
timestamp: timestamp,
dbFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
app := &App{
Timestamp: timestamp,
L1gethImg: newTestL1Docker(),
L2gethImg: newTestL2Docker(),
DBImg: newTestDBDocker("postgres"),
DBConfigFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
}
if err := app.mockDBConfig(); err != nil {
panic(err)
}
return app
}
// RunImages runs all images together
func (b *App) RunImages(t *testing.T) {
b.runDBImage(t)
b.runL1Geth(t)
b.runL2Geth(t)
b.RunDBImage(t)
b.RunL1Geth(t)
b.RunL2Geth(t)
}
func (b *App) runDBImage(t *testing.T) {
if b.dbImg != nil {
// RunDBImage starts the postgres docker container.
func (b *App) RunDBImage(t *testing.T) {
if b.DBImg.IsRunning() {
return
}
b.dbImg = newTestDBDocker(t, "postgres")
if err := b.mockDBConfig(); err != nil {
_ = b.dbImg.Stop()
b.dbImg = nil
_ = os.Remove(b.dbFile)
t.Fatal(err)
}
assert.NoError(t, b.DBImg.Start())
var isRun bool
// try up to 10 times until the db is ready.
utils.TryTimes(10, func() bool {
db, err := sqlx.Open("postgres", b.DBImg.Endpoint())
isRun = err == nil && db != nil && db.Ping() == nil
return isRun
})
assert.Equal(t, true, isRun)
}
// RunDBApp runs DB app with command
func (b *App) RunDBApp(t *testing.T, option, keyword string) {
args := []string{option, "--config", b.dbFile}
app := cmd.NewCmd("db_cli-test", args...)
defer app.WaitExit()
// Wait expect result.
app.ExpectWithTimeout(t, true, time.Second*3, keyword)
app.RunApp(nil)
}
// Free clear all running images
// Free clears all running images, double-checking and recycling the docker containers.
func (b *App) Free() {
if b.l1gethImg != nil {
_ = b.l1gethImg.Stop()
b.l1gethImg = nil
if b.L1gethImg.IsRunning() {
_ = b.L1gethImg.Stop()
}
if b.l2gethImg != nil {
_ = b.l2gethImg.Stop()
b.l2gethImg = nil
if b.L2gethImg.IsRunning() {
_ = b.L2gethImg.Stop()
}
if b.dbImg != nil {
_ = b.dbImg.Stop()
b.dbImg = nil
_ = os.Remove(b.dbFile)
if b.DBImg.IsRunning() {
_ = b.DBImg.Stop()
_ = os.Remove(b.DBConfigFile)
if !utils.IsNil(b.dbClient) {
_ = b.dbClient.Close()
b.dbClient = nil
}
}
}
// L1GethEndpoint returns l1gethimg endpoint
func (b *App) L1GethEndpoint() string {
if b.l1gethImg != nil {
return b.l1gethImg.Endpoint()
}
return ""
}
// L2GethEndpoint returns l2gethimg endpoint
func (b *App) L2GethEndpoint() string {
if b.l2gethImg != nil {
return b.l2gethImg.Endpoint()
}
return ""
}
// DBEndpoint returns the endpoint of the dbimg
func (b *App) DBEndpoint() string {
return b.dbImg.Endpoint()
}
func (b *App) runL1Geth(t *testing.T) {
if b.l1gethImg != nil {
// RunL1Geth starts the l1geth docker container.
func (b *App) RunL1Geth(t *testing.T) {
if b.L1gethImg.IsRunning() {
return
}
b.l1gethImg = newTestL1Docker(t)
assert.NoError(t, b.L1gethImg.Start())
}
// L1Client returns an ethclient by dialing the running l1geth
func (b *App) L1Client() (*ethclient.Client, error) {
if b.l1gethImg == nil || reflect2.IsNil(b.l1gethImg) {
if utils.IsNil(b.L1gethImg) {
return nil, fmt.Errorf("l1 geth is not running")
}
client, err := ethclient.Dial(b.l1gethImg.Endpoint())
client, err := ethclient.Dial(b.L1gethImg.Endpoint())
if err != nil {
return nil, err
}
return client, nil
}
func (b *App) runL2Geth(t *testing.T) {
if b.l2gethImg != nil {
// RunL2Geth starts the l2geth docker container.
func (b *App) RunL2Geth(t *testing.T) {
if b.L2gethImg.IsRunning() {
return
}
b.l2gethImg = newTestL2Docker(t)
assert.NoError(t, b.L2gethImg.Start())
}
// L2Client returns an ethclient by dialing the running l2geth
func (b *App) L2Client() (*ethclient.Client, error) {
if b.l2gethImg == nil || reflect2.IsNil(b.l2gethImg) {
if utils.IsNil(b.L2gethImg) {
return nil, fmt.Errorf("l2 geth is not running")
}
client, err := ethclient.Dial(b.l2gethImg.Endpoint())
client, err := ethclient.Dial(b.L2gethImg.Endpoint())
if err != nil {
return nil, err
}
return client, nil
}
// DBClient creates and returns an *sql.DB instance.
func (b *App) DBClient(t *testing.T) *sql.DB {
if !utils.IsNil(b.dbClient) {
return b.dbClient
}
var (
cfg = b.DBConfig
err error
)
b.dbClient, err = sql.Open(cfg.DriverName, cfg.DSN)
assert.NoError(t, err)
b.dbClient.SetMaxOpenConns(cfg.MaxOpenNum)
b.dbClient.SetMaxIdleConns(cfg.MaxIdleNum)
assert.NoError(t, b.dbClient.Ping())
return b.dbClient
}
func (b *App) mockDBConfig() error {
if b.dbConfig == nil {
b.dbConfig = &database.DBConfig{
DSN: "",
DriverName: "postgres",
MaxOpenNum: 200,
MaxIdleNum: 20,
}
b.DBConfig = &database.DBConfig{
DSN: "",
DriverName: "postgres",
MaxOpenNum: 200,
MaxIdleNum: 20,
}
if b.dbImg != nil {
b.dbConfig.DSN = b.dbImg.Endpoint()
if b.DBImg != nil {
b.DBConfig.DSN = b.DBImg.Endpoint()
}
data, err := json.Marshal(b.dbConfig)
data, err := json.Marshal(b.DBConfig)
if err != nil {
return err
}
return os.WriteFile(b.dbFile, data, 0644) //nolint:gosec
return os.WriteFile(b.DBConfigFile, data, 0644) //nolint:gosec
}
func newTestL1Docker(t *testing.T) ImgInstance {
func newTestL1Docker() GethImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgL1geth := NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
assert.NoError(t, imgL1geth.Start())
// try 3 times to get chainID until is ok.
utils.TryTimes(10, func() bool {
client, _ := ethclient.Dial(imgL1geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
return true
}
}
return false
})
return imgL1geth
return NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
}
func newTestL2Docker(t *testing.T) ImgInstance {
func newTestL2Docker() GethImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgL2geth := NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
assert.NoError(t, imgL2geth.Start())
// try 3 times to get chainID until is ok.
utils.TryTimes(10, func() bool {
client, _ := ethclient.Dial(imgL2geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
return true
}
}
return false
})
return imgL2geth
return NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
}
func newTestDBDocker(t *testing.T, driverName string) ImgInstance {
func newTestDBDocker(driverName string) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgDB := NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
assert.NoError(t, imgDB.Start())
// try 5 times until the db is ready.
utils.TryTimes(10, func() bool {
db, _ := sqlx.Open(driverName, imgDB.Endpoint())
if db != nil {
return db.Ping() == nil
}
return false
})
return imgDB
return NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
}
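
With container startup moved out of the constructors and into the `Run*` methods, a test package owns the whole lifecycle explicitly. A sketch of a typical `TestMain` against the refactored API, assuming only the exported names visible in this diff:

```go
package mypkg_test

import (
	"testing"

	"scroll-tech/common/docker"
)

var base *docker.App

func TestMain(m *testing.M) {
	base = docker.NewDockerApp() // also writes the mocked DB config file
	m.Run()
	base.Free() // stops whichever containers the tests started
}

func TestWithDB(t *testing.T) {
	base.RunDBImage(t) // idempotent: returns early if the container is already running
	db := base.DBClient(t)
	if err := db.Ping(); err != nil {
		t.Fatal(err)
	}
}
```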


@@ -75,14 +75,16 @@ func (i *ImgDB) Stop() error {
// Endpoint returns the DSN.
func (i *ImgDB) Endpoint() string {
if !i.running {
return ""
}
return fmt.Sprintf("postgres://postgres:%s@localhost:%d/%s?sslmode=disable", i.password, i.port, i.dbName)
}
// IsRunning returns docker container's running status.
func (i *ImgDB) IsRunning() bool {
return i.running
}
func (i *ImgDB) prepare() []string {
cmd := []string{"docker", "run", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
cmd := []string{"docker", "run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
envs := []string{
"-e", "POSTGRES_PASSWORD=" + i.password,
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
@@ -114,8 +116,12 @@ func (i *ImgDB) isOk() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
return i.id != ""
case err := <-i.cmd.ErrChan:
if err != nil {
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
}
case <-time.After(time.Second * 20):
return false
}
return i.id != ""
}


@@ -3,11 +3,13 @@ package docker
import (
"context"
"fmt"
"math/big"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
@@ -23,13 +25,14 @@ type ImgGeth struct {
ipcPath string
httpPort int
wsPort int
chainID *big.Int
running bool
cmd *cmd.Cmd
}
// NewImgGeth returns a geth image instance.
func NewImgGeth(image, volume, ipc string, hPort, wPort int) ImgInstance {
func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
img := &ImgGeth{
image: image,
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
@@ -53,14 +56,27 @@ func (i *ImgGeth) Start() error {
_ = i.Stop()
return fmt.Errorf("failed to start image: %s", i.image)
}
// try up to 10 times until fetching the chainID succeeds.
utils.TryTimes(10, func() bool {
client, err := ethclient.Dial(i.Endpoint())
if err == nil && client != nil {
i.chainID, err = client.ChainID(context.Background())
return err == nil && i.chainID != nil
}
return false
})
return nil
}
// IsRunning returns docker container's running status.
func (i *ImgGeth) IsRunning() bool {
return i.running
}
// Endpoint returns the connection endpoint.
func (i *ImgGeth) Endpoint() string {
if !i.running {
return ""
}
switch {
case i.httpPort != 0:
return fmt.Sprintf("http://127.0.0.1:%d", i.httpPort)
@@ -71,6 +87,11 @@ func (i *ImgGeth) Endpoint() string {
}
}
// ChainID returns the chain ID.
func (i *ImgGeth) ChainID() *big.Int {
return i.chainID
}
func (i *ImgGeth) isOk() bool {
keyword := "WebSocket enabled"
okCh := make(chan struct{}, 1)
@@ -93,10 +114,14 @@ func (i *ImgGeth) isOk() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
return i.id != ""
case err := <-i.cmd.ErrChan:
if err != nil {
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
}
case <-time.After(time.Second * 10):
return false
}
return i.id != ""
}
// Stop the docker container.
@@ -121,7 +146,7 @@ func (i *ImgGeth) Stop() error {
}
func (i *ImgGeth) prepare() []string {
cmds := []string{"docker", "run", "--name", i.name}
cmds := []string{"docker", "run", "--rm", "--name", i.name}
var ports []string
if i.httpPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)

View File

@@ -8,8 +8,6 @@ import (
_ "github.com/lib/pq" //nolint:golint
"github.com/stretchr/testify/assert"
_ "scroll-tech/database/cmd/app"
"scroll-tech/common/docker"
)
@@ -25,50 +23,32 @@ func TestMain(m *testing.M) {
base.Free()
}
func TestStartProcess(t *testing.T) {
base.RunImages(t)
func TestDB(t *testing.T) {
base.RunDBImage(t)
// migrate db.
base.RunDBApp(t, "reset", "successful to reset")
base.RunDBApp(t, "migrate", "current version:")
db, err := sqlx.Open("postgres", base.DBImg.Endpoint())
assert.NoError(t, err)
assert.NoError(t, db.Ping())
}
func TestDocker(t *testing.T) {
base.RunImages(t)
t.Parallel()
t.Run("testL1Geth", testL1Geth)
t.Run("testL2Geth", testL2Geth)
t.Run("testDB", testDB)
}
func testL1Geth(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
func TestL1Geth(t *testing.T) {
base.RunL1Geth(t)
client, err := base.L1Client()
assert.NoError(t, err)
chainID, err := client.ChainID(ctx)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
t.Logf("chainId: %s", chainID.String())
}
func testL2Geth(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
func TestL2Geth(t *testing.T) {
base.RunL2Geth(t)
client, err := base.L2Client()
assert.NoError(t, err)
chainID, err := client.ChainID(ctx)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
t.Logf("chainId: %s", chainID.String())
}
func testDB(t *testing.T) {
driverName := "postgres"
db, err := sqlx.Open(driverName, base.DBEndpoint())
assert.NoError(t, err)
assert.NoError(t, db.Ping())
}


@@ -2,6 +2,7 @@ package docker
import (
"context"
"math/big"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
@@ -26,6 +27,13 @@ type ImgInstance interface {
Start() error
Stop() error
Endpoint() string
IsRunning() bool
}
// GethImgInstance extends ImgInstance with a ChainID method.
type GethImgInstance interface {
ImgInstance
ChainID() *big.Int
}
// GetContainerID returns the ID of the container.

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -79,13 +79,13 @@ require (
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.7.0 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.6.0 // indirect
golang.org/x/tools v0.7.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect


@@ -444,8 +444,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -569,8 +569,8 @@ golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=


@@ -1,9 +1,11 @@
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char};
use libc::c_char;
use std::cell::OnceCell;
use std::panic;
use std::ptr::null;
use types::eth::BlockTrace;
use zkevm::circuit::AGG_DEGREE;
use zkevm::utils::{load_or_create_params, load_or_create_seed};
use zkevm::utils::{load_params, load_seed};
use zkevm::{circuit::DEGREE, prover::Prover};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -15,9 +17,9 @@ pub unsafe extern "C" fn init_prover(params_path: *const c_char, seed_path: *con
let params_path = c_char_to_str(params_path);
let seed_path = c_char_to_str(seed_path);
let params = load_or_create_params(params_path, *DEGREE).unwrap();
let agg_params = load_or_create_params(params_path, *AGG_DEGREE).unwrap();
let seed = load_or_create_seed(seed_path).unwrap();
let params = load_params(params_path, *DEGREE).unwrap();
let agg_params = load_params(params_path, *AGG_DEGREE).unwrap();
let seed = load_seed(seed_path).unwrap();
let p = Prover::from_params_and_seed(params, agg_params, seed);
PROVER.set(p).unwrap();
}
@@ -27,13 +29,15 @@ pub unsafe extern "C" fn init_prover(params_path: *const c_char, seed_path: *con
pub unsafe extern "C" fn create_agg_proof(trace_char: *const c_char) -> *const c_char {
let trace_vec = c_char_to_vec(trace_char);
let trace = serde_json::from_slice::<BlockTrace>(&trace_vec).unwrap();
let proof = PROVER
.get_mut()
.unwrap()
.create_agg_circuit_proof(&trace)
.unwrap();
let proof_bytes = serde_json::to_vec(&proof).unwrap();
vec_to_c_char(proof_bytes)
let proof_result = panic::catch_unwind(|| {
let proof = PROVER
.get_mut()
.unwrap()
.create_agg_circuit_proof(&trace)
.unwrap();
serde_json::to_vec(&proof).unwrap()
});
proof_result.map_or(null(), vec_to_c_char)
}
/// # Safety
@@ -41,11 +45,13 @@ pub unsafe extern "C" fn create_agg_proof(trace_char: *const c_char) -> *const c
pub unsafe extern "C" fn create_agg_proof_multi(trace_char: *const c_char) -> *const c_char {
let trace_vec = c_char_to_vec(trace_char);
let traces = serde_json::from_slice::<Vec<BlockTrace>>(&trace_vec).unwrap();
let proof = PROVER
.get_mut()
.unwrap()
.create_agg_circuit_proof_batch(traces.as_slice())
.unwrap();
let proof_bytes = serde_json::to_vec(&proof).unwrap();
vec_to_c_char(proof_bytes)
let proof_result = panic::catch_unwind(|| {
let proof = PROVER
.get_mut()
.unwrap()
.create_agg_circuit_proof_batch(traces.as_slice())
.unwrap();
serde_json::to_vec(&proof).unwrap()
});
proof_result.map_or(null(), vec_to_c_char)
}
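
The `catch_unwind` wrappers turn a prover panic into a NULL return instead of tearing down the host process, so the Go side must treat NULL as failure. For comparison, a Go sketch of the same convert-panic-to-nil pattern at an API boundary (an analogy only; this diff does not show the actual roller wrapper code):

```go
package main

import "fmt"

// proveOrNil converts a panic in prove into a nil result, mirroring
// libzkp's catch_unwind + null-pointer convention.
func proveOrNil(prove func() []byte) (out []byte) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("prover panicked:", r)
			out = nil
		}
	}()
	return prove()
}

func main() {
	res := proveOrNil(func() []byte { panic("bad witness") })
	fmt.Println("result:", res) // result: []
}
```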


@@ -2,9 +2,10 @@ use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use std::fs::File;
use std::io::Read;
use std::panic;
use zkevm::circuit::{AGG_DEGREE, DEGREE};
use zkevm::prover::AggCircuitProof;
use zkevm::utils::load_or_create_params;
use zkevm::utils::load_params;
use zkevm::verifier::Verifier;
static mut VERIFIER: Option<&Verifier> = None;
@@ -20,8 +21,8 @@ pub unsafe extern "C" fn init_verifier(params_path: *const c_char, agg_vk_path:
let mut agg_vk = vec![];
f.read_to_end(&mut agg_vk).unwrap();
let params = load_or_create_params(params_path, *DEGREE).unwrap();
let agg_params = load_or_create_params(params_path, *AGG_DEGREE).unwrap();
let params = load_params(params_path, *DEGREE).unwrap();
let agg_params = load_params(params_path, *AGG_DEGREE).unwrap();
let v = Box::new(Verifier::from_params(params, agg_params, Some(agg_vk)));
VERIFIER = Some(Box::leak(v))
@@ -32,9 +33,11 @@ pub unsafe extern "C" fn init_verifier(params_path: *const c_char, agg_vk_path:
pub unsafe extern "C" fn verify_agg_proof(proof: *const c_char) -> c_char {
let proof_vec = c_char_to_vec(proof);
let agg_proof = serde_json::from_slice::<AggCircuitProof>(proof_vec.as_slice()).unwrap();
let verified = VERIFIER
.unwrap()
.verify_agg_circuit_proof(agg_proof)
.is_ok();
verified as c_char
let verified = panic::catch_unwind(|| {
VERIFIER
.unwrap()
.verify_agg_circuit_proof(agg_proof)
.is_ok()
});
verified.unwrap_or(false) as c_char
}


@@ -114,41 +114,41 @@ func (b *BatchData) Hash() *common.Hash {
// NewBatchData creates a BatchData given the parent batch information and the wrapped blocks
// included in this batch
func NewBatchData(parentBatch *BlockBatch, blockTraces []*types.BlockTrace, piCfg *PublicInputHashConfig) *BatchData {
func NewBatchData(parentBatch *BlockBatch, blocks []*WrappedBlock, piCfg *PublicInputHashConfig) *BatchData {
batchData := new(BatchData)
batch := &batchData.Batch
// set BatchIndex, ParentBatchHash
batch.BatchIndex = parentBatch.Index + 1
batch.ParentBatchHash = common.HexToHash(parentBatch.Hash)
batch.Blocks = make([]abi.IScrollChainBlockContext, len(blockTraces))
batch.Blocks = make([]abi.IScrollChainBlockContext, len(blocks))
var batchTxDataBuf bytes.Buffer
batchTxDataWriter := bufio.NewWriter(&batchTxDataBuf)
for i, trace := range blockTraces {
batchData.TotalTxNum += uint64(len(trace.Transactions))
batchData.TotalL2Gas += trace.Header.GasUsed
for i, block := range blocks {
batchData.TotalTxNum += uint64(len(block.Transactions))
batchData.TotalL2Gas += block.Header.GasUsed
// set baseFee to 0 when it's nil in the block header
baseFee := trace.Header.BaseFee
baseFee := block.Header.BaseFee
if baseFee == nil {
baseFee = big.NewInt(0)
}
batch.Blocks[i] = abi.IScrollChainBlockContext{
BlockHash: trace.Header.Hash(),
ParentHash: trace.Header.ParentHash,
BlockNumber: trace.Header.Number.Uint64(),
Timestamp: trace.Header.Time,
BlockHash: block.Header.Hash(),
ParentHash: block.Header.ParentHash,
BlockNumber: block.Header.Number.Uint64(),
Timestamp: block.Header.Time,
BaseFee: baseFee,
GasLimit: trace.Header.GasLimit,
NumTransactions: uint16(len(trace.Transactions)),
GasLimit: block.Header.GasLimit,
NumTransactions: uint16(len(block.Transactions)),
NumL1Messages: 0, // TODO: currently use 0, will re-enable after we use l2geth to include L1 messages
}
// fill in RLP-encoded transactions
for _, txData := range trace.Transactions {
for _, txData := range block.Transactions {
data, _ := hexutil.Decode(txData.Data)
// right now we only support legacy tx
tx := types.NewTx(&types.LegacyTx{
@@ -170,15 +170,14 @@ func NewBatchData(parentBatch *BlockBatch, blockTraces []*types.BlockTrace, piCf
batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
}
// set PrevStateRoot from the first block
if i == 0 {
batch.PrevStateRoot = trace.StorageTrace.RootBefore
batch.PrevStateRoot = common.HexToHash(parentBatch.StateRoot)
}
// set NewStateRoot & WithdrawTrieRoot from the last block
if i == len(blockTraces)-1 {
batch.NewStateRoot = trace.Header.Root
batch.WithdrawTrieRoot = trace.WithdrawTrieRoot
if i == len(blocks)-1 {
batch.NewStateRoot = block.Header.Root
batch.WithdrawTrieRoot = block.WithdrawTrieRoot
}
}
@@ -193,7 +192,7 @@ func NewBatchData(parentBatch *BlockBatch, blockTraces []*types.BlockTrace, piCf
}
// NewGenesisBatchData generates the batch that contains the genesis block.
func NewGenesisBatchData(genesisBlockTrace *types.BlockTrace) *BatchData {
func NewGenesisBatchData(genesisBlockTrace *WrappedBlock) *BatchData {
header := genesisBlockTrace.Header
if header.Number.Uint64() != 0 {
panic("invalid genesis block trace: block number is not 0")


@@ -75,15 +75,7 @@ func TestNewGenesisBatch(t *testing.T) {
"wrong genesis block header",
)
blockTrace := &geth_types.BlockTrace{
Coinbase: nil,
Header: genesisBlock,
Transactions: []*geth_types.TransactionData{},
StorageTrace: nil,
ExecutionResults: []*geth_types.ExecutionResult{},
MPTWitness: nil,
}
blockTrace := &WrappedBlock{genesisBlock, nil, common.Hash{}}
batchData := NewGenesisBatchData(blockTrace)
t.Log(batchData.Batch.Blocks[0])
batchData.piCfg = &PublicInputHashConfig{

common/types/block.go Normal file

@@ -0,0 +1,14 @@
package types
import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
)
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used to recover types.Transactions; the From field of types.TransactionData is missing.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}
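
Because the struct is plain data with JSON tags, fixtures like the blockTrace testdata files above can be decoded straight into it; note the comment — recovered transactions carry no sender. A small round-trip sketch, assuming only the struct shown here (Number and Difficulty are set because, to my understanding, geth's header JSON codec requires them):

```go
package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	gethtypes "github.com/scroll-tech/go-ethereum/core/types"

	"scroll-tech/common/types"
)

func main() {
	wb := &types.WrappedBlock{
		Header: &gethtypes.Header{
			Number:     big.NewInt(42),
			Difficulty: big.NewInt(0), // assumed required by the header JSON codec
		},
		Transactions: nil, // TransactionData has no recoverable "from"
	}
	data, err := json.Marshal(wb)
	if err != nil {
		panic(err)
	}
	var decoded types.WrappedBlock
	if err := json.Unmarshal(data, &decoded); err != nil {
		panic(err)
	}
	fmt.Println("round-tripped block number:", decoded.Header.Number)
}
```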


@@ -82,6 +82,9 @@ const (
// MsgExpired represents the from_layer message status is expired
MsgExpired
// MsgRelayFailed represents the from_layer message status is relay failed
MsgRelayFailed
)
// L1Message is structure of stored layer1 bridge message
@@ -159,6 +162,7 @@ type SessionInfo struct {
ID string `json:"id"`
Rollers map[string]*RollerStatus `json:"rollers"`
StartTimestamp int64 `json:"start_timestamp"`
Attempts uint8 `json:"attempts,omitempty"`
}
// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
@@ -200,7 +204,7 @@ func (ps ProvingStatus) String() string {
}
}
// RollupStatus block_batch rollup_status (pending, committing, committed, finalizing, finalized)
// RollupStatus block_batch rollup_status (pending, committing, committed, commit_failed, finalizing, finalized, finalize_skipped, finalize_failed)
type RollupStatus int
const (
@@ -218,6 +222,10 @@ const (
RollupFinalized
// RollupFinalizationSkipped : batch finalization is skipped
RollupFinalizationSkipped
// RollupCommitFailed : rollup commit transaction confirmed but failed
RollupCommitFailed
// RollupFinalizeFailed : rollup finalize transaction is confirmed but failed
RollupFinalizeFailed
)
// BlockBatch is structure of stored block_batch


@@ -38,8 +38,6 @@ type Identity struct {
Name string `json:"name"`
// Unverified Unix timestamp of message creation
Timestamp uint32 `json:"timestamp"`
// Roller public key
PublicKey string `json:"publicKey"`
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion.
// curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha"
Version string `json:"version"`
@@ -56,13 +54,14 @@ func GenerateToken() (string, error) {
return hex.EncodeToString(b), nil
}
// Sign auth message
func (a *AuthMsg) Sign(priv *ecdsa.PrivateKey) error {
// SignWithKey signs the auth message with the given private key
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
@@ -80,36 +79,27 @@ func (a *AuthMsg) Verify() (bool, error) {
return false, err
}
sig := common.FromHex(a.Signature)
// recover public key
if a.Identity.PublicKey == "" {
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
a.Identity.PublicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
}
return crypto.VerifySignature(common.FromHex(a.Identity.PublicKey), hash, sig[:len(sig)-1]), nil
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey recovers the public key from the signature
func (a *AuthMsg) PublicKey() (string, error) {
if a.Identity.PublicKey == "" {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
a.Identity.PublicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
return a.Identity.PublicKey, nil
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
return a.Identity.PublicKey, nil
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used


@@ -19,7 +19,12 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
Timestamp: uint32(time.Now().Unix()),
},
}
assert.NoError(t, authMsg.Sign(privkey))
assert.NoError(t, authMsg.SignWithKey(privkey))
// check public key.
pk, err := authMsg.PublicKey()
assert.NoError(t, err)
assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk)
ok, err := authMsg.Verify()
assert.NoError(t, err)


@@ -8,10 +8,30 @@ import (
"github.com/urfave/cli/v2"
)
// MockAppName is the name type for mock apps.
type MockAppName string
var (
// EventWatcherApp the name of mock event-watcher app.
EventWatcherApp MockAppName = "event-watcher-test"
// GasOracleApp the name of mock gas-oracle app.
GasOracleApp MockAppName = "gas-oracle-test"
// MessageRelayerApp the name of mock message-relayer app.
MessageRelayerApp MockAppName = "message-relayer-test"
// RollupRelayerApp the name of mock rollup-relayer app.
RollupRelayerApp MockAppName = "rollup-relayer-test"
// CoordinatorApp the name of mock coordinator app.
CoordinatorApp MockAppName = "coordinator-test"
// DBCliApp the name of mock database app.
DBCliApp MockAppName = "db_cli-test"
// RollerApp the name of mock roller app.
RollerApp MockAppName = "roller-test"
)
// RegisterSimulation registers an initializer function for the integration test.
func RegisterSimulation(app *cli.App, name string) {
func RegisterSimulation(app *cli.App, name MockAppName) {
// Run the app for integration-test
reexec.Register(name, func() {
reexec.Register(string(name), func() {
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)


@@ -3,6 +3,8 @@ package utils
import (
"context"
"time"
"github.com/modern-go/reflect2"
)
// TryTimes tries running the function several times until it returns true.
@@ -42,3 +44,8 @@ func Loop(ctx context.Context, period time.Duration, f func()) {
}
}
}
+// IsNil Check if the interface is empty.
+func IsNil(i interface{}) bool {
+return i == nil || reflect2.IsNil(i)
+}

View File
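The new IsNil helper guards against Go's typed-nil pitfall: an interface holding a nil pointer still compares unequal to nil, because the interface carries a concrete type. A small self-contained illustration (the job type is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/modern-go/reflect2"
)

type job struct{}

func main() {
	var p *job            // nil pointer
	var i interface{} = p // interface now carries (type=*job, value=nil)

	fmt.Println(i == nil)          // false: the interface itself is non-nil
	fmt.Println(reflect2.IsNil(i)) // true: the value inside it is nil
}
```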

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "alpha-v2.4"
var tag = "v3.0.12"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File
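The commit initializer is truncated in this view; it presumably pulls the VCS revision out of the embedded build info. A sketch of that pattern (the exact key lookup is an assumption, not the repository's code):

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	commit := "unknown"
	// ReadBuildInfo exposes VCS metadata stamped in at build time (Go 1.18+).
	if info, ok := debug.ReadBuildInfo(); ok {
		for _, setting := range info.Settings {
			if setting.Key == "vcs.revision" {
				commit = setting.Value
			}
		}
	}
	fmt.Println("commit:", commit)
}
```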

@@ -13,7 +13,7 @@ The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs in layer 2 an
### batchWithdrawERC1155
```solidity
-function batchWithdrawERC1155(address _token, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external nonpayable
+function batchWithdrawERC1155(address _token, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external payable
```
Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
@@ -32,7 +32,7 @@ Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
### batchWithdrawERC1155
```solidity
-function batchWithdrawERC1155(address _token, address _to, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external nonpayable
+function batchWithdrawERC1155(address _token, address _to, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external payable
```
Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
@@ -319,7 +319,7 @@ Update layer 2 to layer 1 token mapping.
### withdrawERC1155
```solidity
-function withdrawERC1155(address _token, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external nonpayable
+function withdrawERC1155(address _token, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external payable
```
Withdraw some ERC1155 NFT to caller's account on layer 1.
@@ -338,7 +338,7 @@ Withdraw some ERC1155 NFT to caller's account on layer 1.
### withdrawERC1155
```solidity
-function withdrawERC1155(address _token, address _to, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external nonpayable
+function withdrawERC1155(address _token, address _to, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external payable
```
Withdraw some ERC1155 NFT to caller's account on layer 1.

View File

@@ -13,7 +13,7 @@ The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs in layer 2 and
### batchWithdrawERC721
```solidity
-function batchWithdrawERC721(address _token, uint256[] _tokenIds, uint256 _gasLimit) external nonpayable
+function batchWithdrawERC721(address _token, uint256[] _tokenIds, uint256 _gasLimit) external payable
```
Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
@@ -31,7 +31,7 @@ Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
### batchWithdrawERC721
```solidity
-function batchWithdrawERC721(address _token, address _to, uint256[] _tokenIds, uint256 _gasLimit) external nonpayable
+function batchWithdrawERC721(address _token, address _to, uint256[] _tokenIds, uint256 _gasLimit) external payable
```
Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
@@ -266,7 +266,7 @@ Update layer 2 to layer 1 token mapping.
### withdrawERC721
```solidity
-function withdrawERC721(address _token, uint256 _tokenId, uint256 _gasLimit) external nonpayable
+function withdrawERC721(address _token, uint256 _tokenId, uint256 _gasLimit) external payable
```
Withdraw some ERC721 NFT to caller's account on layer 1.
@@ -284,7 +284,7 @@ Withdraw some ERC721 NFT to caller's account on layer 1.
### withdrawERC721
```solidity
-function withdrawERC721(address _token, address _to, uint256 _tokenId, uint256 _gasLimit) external nonpayable
+function withdrawERC721(address _token, address _to, uint256 _tokenId, uint256 _gasLimit) external payable
```
Withdraw some ERC721 NFT to caller's account on layer 1.

View File
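Across both the L2ERC1155Gateway and L2ERC721Gateway docs above, every withdraw entry point changes from nonpayable to payable, so a withdrawal transaction can now carry ETH, for example to fund the cross-domain relay. A hedged caller-side sketch; the ABI fragment, token address, and amounts are illustrative placeholders:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// Placeholder single-function ABI matching the signature documented above.
const gatewayABI = `[{"name":"withdrawERC721","type":"function","stateMutability":"payable",
 "inputs":[{"name":"_token","type":"address"},{"name":"_tokenId","type":"uint256"},
 {"name":"_gasLimit","type":"uint256"}],"outputs":[]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(gatewayABI))
	if err != nil {
		panic(err)
	}
	token := common.HexToAddress("0x1111111111111111111111111111111111111111") // placeholder L2 token
	calldata, err := parsed.Pack("withdrawERC721", token, big.NewInt(42), big.NewInt(200_000))
	if err != nil {
		panic(err)
	}
	// The calldata is unchanged by the nonpayable -> payable switch; what
	// changes is that the transaction carrying it may now set Value > 0.
	fmt.Printf("calldata: 0x%x\n", calldata)
}
```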

@@ -85,7 +85,7 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
// but it seems not a big problem.
IERC20Upgradeable(_l1Token).safeTransfer(_to, _amount);
-// @todo forward `_data` to `_to` in the near future
+_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}

View File

@@ -74,7 +74,7 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
(bool _success, ) = _to.call{value: _amount}("");
require(_success, "ETH transfer failed");
-// @todo farward _data to `_to` in near future.
+_doCallback(_to, _data);
emit FinalizeWithdrawETH(_from, _to, _amount, _data);
}

View File

@@ -99,7 +99,7 @@ contract L1StandardERC20Gateway is Initializable, ScrollGatewayBase, L1ERC20Gate
// but it seems not a big problem.
IERC20(_l1Token).safeTransfer(_to, _amount);
-// @todo forward `_data` to `_to` in the near future
+_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}

View File

@@ -90,7 +90,7 @@ contract L1WETHGateway is Initializable, ScrollGatewayBase, L1ERC20Gateway {
IWETH(_l1Token).deposit{value: _amount}();
IERC20(_l1Token).safeTransfer(_to, _amount);
-// @todo forward `_data` to `_to`.
+_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}

View File

@@ -79,10 +79,10 @@ contract L2CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L2ERC20G
require(msg.value == 0, "nonzero msg.value");
require(_l1Token == tokenMapping[_l2Token], "l1 token mismatch");
-// @todo forward `_callData` to `_to` using transferAndCall in the near future
IScrollStandardERC20(_l2Token).mint(_to, _amount);
+_doCallback(_to, _data);
emit FinalizeDepositERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}

View File

@@ -72,7 +72,7 @@ contract L2ETHGateway is Initializable, ScrollGatewayBase, IL2ETHGateway {
(bool _success, ) = _to.call{value: _amount}("");
require(_success, "ETH transfer failed");
-// @todo farward _data to `_to` in near future.
+_doCallback(_to, _data);
emit FinalizeDepositETH(_from, _to, _amount, _data);
}

View File

@@ -105,10 +105,10 @@ contract L2StandardERC20Gateway is Initializable, ScrollGatewayBase, L2ERC20Gate
_deployL2Token(_deployData, _l1Token);
}
-// @todo forward `_callData` to `_to` using transferAndCall in the near future
IScrollStandardERC20(_l2Token).mint(_to, _amount);
+_doCallback(_to, _callData);
emit FinalizeDepositERC20(_l1Token, _l2Token, _from, _to, _amount, _callData);
}

View File

@@ -90,7 +90,7 @@ contract L2WETHGateway is Initializable, ScrollGatewayBase, L2ERC20Gateway {
IWETH(_l2Token).deposit{value: _amount}();
IERC20(_l2Token).safeTransfer(_to, _amount);
-// @todo forward `_data` to `_to` in near future
+_doCallback(_to, _data);
emit FinalizeDepositERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}

View File

@@ -43,7 +43,7 @@ interface IScrollMessenger {
*****************************/
/// @notice Send cross chain message from L1 to L2 or L2 to L1.
-/// @param target The address of account who recieve the message.
+/// @param target The address of account who receive the message.
/// @param value The amount of ether passed when call target contract.
/// @param message The content of the message.
/// @param gasLimit Gas limit required to complete the message relay on corresponding chain.
@@ -55,7 +55,7 @@ interface IScrollMessenger {
) external payable;
/// @notice Send cross chain message from L1 to L2 or L2 to L1.
-/// @param target The address of account who recieve the message.
+/// @param target The address of account who receive the message.
/// @param value The amount of ether passed when call target contract.
/// @param message The content of the message.
/// @param gasLimit Gas limit required to complete the message relay on corresponding chain.
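A hedged sketch of packing a sendMessage call from Go, assuming the parameter order given in the natspec above (target, value, message, gasLimit); the ABI fragment and target address are placeholders:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// Placeholder ABI fragment for the documented sendMessage overload.
const messengerABI = `[{"name":"sendMessage","type":"function","stateMutability":"payable",
 "inputs":[{"name":"target","type":"address"},{"name":"value","type":"uint256"},
 {"name":"message","type":"bytes"},{"name":"gasLimit","type":"uint256"}],"outputs":[]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(messengerABI))
	if err != nil {
		panic(err)
	}
	target := common.HexToAddress("0x2222222222222222222222222222222222222222") // placeholder receiver
	data, err := parsed.Pack("sendMessage", target, big.NewInt(0), []byte(nil), big.NewInt(1_000_000))
	if err != nil {
		panic(err)
	}
	// sendMessage is payable: the surrounding transaction's value must
	// cover `value` plus any relay fee the messenger charges.
	fmt.Printf("calldata: 0x%x\n", data)
}
```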

Some files were not shown because too many files have changed in this diff.