Compare commits


43 Commits

Author SHA1 Message Date
maskpp
7efb5a803b Update makefile. 2023-05-04 16:34:52 +08:00
maskpp
f0d6590406 Update makefile. 2023-05-04 16:24:58 +08:00
maskpp
357fe3cf2c Merge branch 'develop' into feat/integration-test_scroll_contracts 2023-05-04 16:08:48 +08:00
maskpp
d62f9e4f5a Add a test account in genesis file. 2023-05-04 16:07:02 +08:00
Péter Garamvölgyi
168eaf0fc2 Respect size limit in batch proposer (#446) 2023-05-04 08:44:05 +02:00
maskpp
59a47b3d2f fix ci 2023-05-04 14:40:42 +08:00
maskpp
a2a8f85107 fix ci 2023-05-04 14:35:30 +08:00
maskpp
84c76050c2 Merge branch 'develop' into feat/integration-test_scroll_contracts 2023-05-04 14:29:00 +08:00
maskpp
100713c7c0 Deposit test case. 2023-05-04 14:28:15 +08:00
Orest Tarasiuk
85a1d5967f fix xargs parameter deprecation for -i (portability to macOS) (#445) 2023-05-04 14:06:36 +08:00
maskpp
40654cc35c revert genesis.json 2023-04-28 17:22:17 +08:00
HAOYUatHZ
46f8b0e36c doc: add prerequisites (#443) 2023-04-28 16:51:41 +08:00
HAOYUatHZ
b7c39f64a7 refactor: move message package (#442) 2023-04-28 16:20:54 +08:00
georgehao
7fea3f5c22 test(coordinator): add API unit-tests (#433)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-26 10:20:36 +08:00
maskpp
7afddae276 fix(common): fix bug in auth message (#440)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-04-25 19:24:36 +08:00
maskpp
d3c5946ddf Add scroll abi and contracts code. 2023-04-25 11:05:11 +08:00
colin
10ac638a51 test(coordinator): add more unit tests (#430)
Co-authored-by: maskpp <maskpp266@gmail.com>
2023-04-23 22:28:03 +08:00
maskpp
2fafb64e0a test(integration): add predeployed erc20 & greeter contract tests (#428)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-23 19:55:22 +08:00
maskpp
ab1cda6568 test(coordinator): simplify coordinator tests (#434) 2023-04-23 17:15:28 +08:00
colin
905961d0ad fix(Jenkinsfile): specify -coverpkg in unit tests (#431) 2023-04-21 20:50:36 +08:00
Xi Lin
cf9f0b921f feat(contracts): forward data to receiver after deposit/withdraw (#429)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-21 19:27:49 +08:00
maskpp
0f26e0d67b test(integration): pre-deploy test contracts into genesis (#420)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-21 16:23:16 +08:00
maskpp
c774462d1d test(integration-test): refactor integration-test (#425) 2023-04-18 21:33:12 +08:00
colin
7eb6d9696a chore(bridge & coordinator): clear some unused code (#423) 2023-04-17 22:09:52 +08:00
Lawliet-Chan
401ea68332 feat(libzkp): recover rust zkp panic instead of crashing (#421) 2023-04-14 20:51:50 +08:00
maskpp
c13e8aafc4 feat(tests): update docker app (#402)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: ChuhanJin <60994121+ChuhanJin@users.noreply.github.com>
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colinlyguo <651734127@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-13 08:40:51 +08:00
colin
2b2cc62efe fix(coordinator): add metric roller_proofs_generated_failed_time (#419) 2023-04-12 18:27:55 +08:00
colin
807b7c7f33 refactor(coordinator): adjust logs for Loki query (#417)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-04-11 16:38:52 +08:00
HAOYUatHZ
454f032c9f doc(test): add testing doc (#412) 2023-04-11 11:05:33 +08:00
maskpp
d1c4fa716d fix(test): Clean the exited container by --rm after container stopped. (#416) 2023-04-10 19:18:59 +08:00
Lawliet-Chan
de1e40d19c fix(libzkp): load_params and seed in zk (#415) 2023-04-10 15:41:16 +08:00
Ahmed Castro
76b5a6c751 Small typo fix on documentation comments (#411)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-09 15:34:27 +08:00
colin
bad77eac2f feat(coordinator): prover monitoring (#392)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-07 09:06:58 +08:00
Péter Garamvölgyi
5d761ad812 Make sure attempts can be deserialized from db on startup (#410) 2023-04-05 19:00:54 +02:00
Nazarii Denha
4042bea6db retry proving timeout batch (#313)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-04-05 16:42:06 +02:00
maskpp
de7c38a903 feat(test): let integration-test log verbosity be configurable (#409) 2023-04-04 16:20:12 +08:00
Péter Garamvölgyi
41e2d960d8 Fix already executed revert message (#408) 2023-04-03 21:26:30 +08:00
HAOYUatHZ
170bc08207 build(docker): auto docker push when pushing git tags (#406) 2023-04-03 16:52:51 +08:00
maskpp
d3fc4e1606 feat(pending limit): Let sender's pending limit be configurable. (#398)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: ChuhanJin <60994121+ChuhanJin@users.noreply.github.com>
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colinlyguo <651734127@qq.com>
2023-04-03 14:24:47 +08:00
HAOYUatHZ
77749477db build(docker): only build docker images when push github tags (#404) 2023-04-01 11:54:56 +08:00
HAOYUatHZ
1a5df6f4d7 fix(build): move docker build from jenkins to github to avoid unknown errors (#403) 2023-03-31 15:55:55 +08:00
maskpp
826280253a fix(test): fix bug in testBatchProposerProposeBatch (#399)
Co-authored-by: colinlyguo <651734127@qq.com>
2023-03-31 13:58:46 +08:00
ChuhanJin
d376c903af feat(bridge): separate bridge into subcomponents (#397)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colinlyguo <651734127@qq.com>
2023-03-31 11:04:24 +08:00
180 changed files with 16827 additions and 1789 deletions


@@ -66,3 +66,11 @@ jobs:
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
# docker-build:
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - run: make docker


@@ -62,3 +62,18 @@ jobs:
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
# docker-build:
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - name: Build and push
# uses: docker/build-push-action@v2
# with:
# context: .
# file: ./build/dockerfiles/coordinator.Dockerfile
# push: false
# # cache-from: type=gha,scope=${{ github.workflow }}
# # cache-to: type=gha,scope=${{ github.workflow }}

.github/workflows/docker.yaml

@@ -0,0 +1,65 @@
name: Docker
on:
push:
tags:
- v**
jobs:
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/coordinator.Dockerfile
push: true
tags: scrolltech/coordinator:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push event_watcher docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
push: true
tags: scrolltech/event-watcher:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push gas_oracle docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
push: true
tags: scrolltech/gas-oracle:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push msg_relayer docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/msg_relayer.Dockerfile
push: true
tags: scrolltech/msg-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push rollup_relayer docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
push: true
tags: scrolltech/rollup-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}

Jenkinsfile

@@ -24,11 +24,12 @@ pipeline {
steps {
sh 'make dev_docker'
sh 'make -C bridge mock_abi'
sh 'make -C common/bytecode all'
}
}
stage('Check Bridge Compilation') {
steps {
sh 'make -C bridge bridge'
sh 'make -C bridge bridge_bins'
}
}
stage('Check Coordinator Compilation') {
@@ -42,16 +43,6 @@ pipeline {
sh 'make -C database db_cli'
}
}
stage('Check Bridge Docker Build') {
steps {
sh 'make -C bridge docker'
}
}
stage('Check Coordinator Docker Build') {
steps {
sh 'make -C coordinator docker'
}
}
stage('Check Database Docker Build') {
steps {
sh 'make -C database docker'
@@ -68,12 +59,12 @@ pipeline {
}
stage('Race test bridge package') {
steps {
sh 'go test -v -race -coverprofile=coverage.bridge.txt -covermode=atomic scroll-tech/bridge/...'
sh "cd ./bridge && ../build/run_tests.sh bridge"
}
}
stage('Race test coordinator package') {
steps {
sh 'go test -v -race -coverprofile=coverage.coordinator.txt -covermode=atomic scroll-tech/coordinator/...'
sh "cd ./coordinator && ../build/run_tests.sh coordinator"
}
}
stage('Race test database package') {


@@ -1,3 +1,12 @@
# Scroll Monorepo
[![Contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml) [![Bridge](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml) [![Coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml) [![Database](https://github.com/scroll-tech/scroll/actions/workflows/database.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/database.yml) [![Common](https://github.com/scroll-tech/scroll/actions/workflows/common.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/common.yml) [![Roller](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml)
## Prerequisites
+ go1.18
+ rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ hardhat / foundry
---
For a more comprehensive doc, see [`docs/`](./docs).


@@ -8,8 +8,23 @@ mock_abi:
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go
bridge: ## Builds the Bridge instance.
go build -o $(PWD)/build/bin/bridge ./cmd
bridge_bins: ## Builds the Bridge bins.
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
event_watcher: ## Builds the event_watcher bin
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
gas_oracle: ## Builds the gas_oracle bin
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
message_relayer: ## Builds the message_relayer bin
go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
rollup_relayer: ## Builds the rollup_relayer bin
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
@@ -20,8 +35,14 @@ lint: ## Lint the files - used for CI
clean: ## Empty out the bin folder
@rm -rf build/bin
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridge.Dockerfile
docker_push:
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}
docker_push:
docker push scrolltech/gas-oracle:${IMAGE_VERSION}
docker push scrolltech/event-watcher:${IMAGE_VERSION}
docker push scrolltech/rollup-relayer:${IMAGE_VERSION}
docker push scrolltech/msg-relayer:${IMAGE_VERSION}
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/gas-oracle:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/gas_oracle.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/event-watcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/event_watcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/rollup-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/rollup_relayer.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/msg-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/msg_relayer.Dockerfile


@@ -1,130 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
)
var (
app *cli.App
)
func init() {
// Set up Bridge app info.
app = cli.NewApp()
app.Action = action
app.Name = "bridge"
app.Usage = "The Scroll Bridge"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `bridge-test` app for integration-test.
utils.RegisterSimulation(app, "bridge-test")
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// Start metrics server.
metrics.Serve(context.Background(), ctx)
// Init db connection.
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
var (
l1Backend *l1.Backend
l2Backend *l2.Backend
)
// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
if err != nil {
return err
}
l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
if err != nil {
return err
}
defer func() {
l1Backend.Stop()
l2Backend.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start all modules.
if err = l1Backend.Start(); err != nil {
log.Crit("couldn't start l1 backend", "error", err)
}
if err = l2Backend.Start(); err != nil {
log.Crit("couldn't start l2 backend", "error", err)
}
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
l2Backend.APIs())
if err != nil {
log.Crit("Could not start RPC api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
log.Info("Start bridge successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run run bridge cmd instance.
func Run() {
// Run the bridge.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -1,19 +0,0 @@
package app
import (
"fmt"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/version"
)
func TestRunBridge(t *testing.T) {
bridge := cmd.NewCmd("bridge-test", "--version")
defer bridge.WaitExit()
// wait result
bridge.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
bridge.RunApp(nil)
}


@@ -1,31 +0,0 @@
package app
import (
"github.com/urfave/cli/v2"
)
var (
apiFlags = []cli.Flag{
&httpEnabledFlag,
&httpListenAddrFlag,
&httpPortFlag,
}
// httpEnabledFlag enable rpc server.
httpEnabledFlag = cli.BoolFlag{
Name: "http",
Usage: "Enable the HTTP-RPC server",
Value: false,
}
// httpListenAddrFlag set the http address.
httpListenAddrFlag = cli.StringFlag{
Name: "http.addr",
Usage: "HTTP-RPC server listening interface",
Value: "localhost",
}
// httpPortFlag set http.port.
httpPortFlag = cli.IntFlag{
Name: "http.port",
Usage: "HTTP-RPC server listening port",
Value: 8290,
}
)


@@ -0,0 +1,114 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up event-watcher app info.
app = cli.NewApp()
app.Action = action
app.Name = "event-watcher"
app.Usage = "The Scroll Event Watcher"
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `event-watcher-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.EventWatcherApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
return err
}
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)
go cutils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
log.Error("Failed to fetch bridge contract", "err", loopErr)
}
})
// Start l2 watcher process
go cutils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finish start all l2 functions
log.Info("Start event-watcher successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run event watcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/event_watcher/app"
func main() {
app.Run()
}


@@ -0,0 +1,136 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up gas-oracle app info.
app = cli.NewApp()
app.Action = action
app.Name = "gas-oracle"
app.Usage = "The Scroll Gas Oracle"
app.Description = "Scroll Gas Oracle."
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `gas-oracle-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.GasOracleApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
return err
}
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1 watcher process
go cutils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
if loopErr = l1watcher.FetchBlockHeader(number); loopErr != nil {
log.Error("Failed to fetch L1 block header", "lastest", number, "err", loopErr)
}
})
// Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finish start all message relayer functions
log.Info("Start gas-oracle successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run runs the gas-oracle cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/gas_oracle/app"
func main() {
app.Run()
}


@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge/cmd/app"
func main() {
app.Run()
}

bridge/cmd/mock_app.go

@@ -0,0 +1,123 @@
package app
import (
"encoding/json"
"fmt"
"os"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/utils"
"scroll-tech/bridge/config"
)
// MockApp mockApp-test client manager.
type MockApp struct {
Config *config.Config
base *docker.App
mockApps map[utils.MockAppName]docker.AppAPI
originConfigFile string
BridgeConfigFile string
args []string
}
// NewBridgeApp returns a new bridgeApp manager; name must be one of the supported mock app names.
func NewBridgeApp(base *docker.App, file string) *MockApp {
bridgeFile := fmt.Sprintf("/tmp/%d_bridge-config.json", base.Timestamp)
bridgeApp := &MockApp{
base: base,
mockApps: make(map[utils.MockAppName]docker.AppAPI),
originConfigFile: file,
BridgeConfigFile: bridgeFile,
args: []string{"--log.debug", "--config", bridgeFile},
}
if err := bridgeApp.MockConfig(true); err != nil {
panic(err)
}
return bridgeApp
}
// RunApp runs a bridge-test child process with the given parameters.
func (b *MockApp) RunApp(t *testing.T, name utils.MockAppName, args ...string) {
if !(name == utils.EventWatcherApp ||
name == utils.GasOracleApp ||
name == utils.MessageRelayerApp ||
name == utils.RollupRelayerApp) {
t.Errorf(fmt.Sprintf("Don't support the mock app, name: %s", name))
return
}
if app, ok := b.mockApps[name]; ok {
t.Logf(fmt.Sprintf("%s already exist, free the current and recreate again", string(name)))
app.WaitExit()
}
appAPI := cmd.NewCmd(string(name), append(b.args, args...)...)
keyword := fmt.Sprintf("Start %s successfully", string(name)[:len(string(name))-len("-test")])
appAPI.RunApp(func() bool { return appAPI.WaitResult(t, time.Second*20, keyword) })
b.mockApps[name] = appAPI
}
// WaitExit waits until all processes exit.
func (b *MockApp) WaitExit() {
for _, app := range b.mockApps {
app.WaitExit()
}
b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
}
// Free stops and releases the mocked bridge apps.
func (b *MockApp) Free() {
b.WaitExit()
_ = os.Remove(b.BridgeConfigFile)
}
// MockConfig creates a new bridge config.
func (b *MockApp) MockConfig(store bool) error {
base := b.base
// Load origin bridge config file.
cfg, err := config.NewConfig(b.originConfigFile)
if err != nil {
return err
}
var (
l1Cfg, l2Cfg = cfg.L1Config, cfg.L2Config
l1Contracts, l2Contracts = base.L1Contracts, base.L2Contracts
)
l1Cfg.Confirmations = 0
// set l1 and l2 chain endpoint.
l1Cfg.Endpoint = base.L1gethImg.Endpoint()
l2Cfg.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
l2Cfg.Endpoint = base.L2gethImg.Endpoint()
l1Cfg.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig.DSN = base.DBImg.Endpoint()
// set l1 scroll contracts addresses.
l1Cfg.L1MessageQueueAddress = l1Contracts.L1MessageQueue
l1Cfg.ScrollChainContractAddress = l1Contracts.L1WETH
l1Cfg.L1MessengerAddress = l1Contracts.L1ScrollMessenger
// set l2 scroll contracts addresses.
l2Cfg.L2MessageQueueAddress = l2Contracts.L2MessageQueue
l2Cfg.L2MessengerAddress = l2Contracts.L2ScrollMessenger
b.Config = cfg
if !store {
return nil
}
// Store changed bridge config into a temp file.
data, err := json.Marshal(b.Config)
if err != nil {
return err
}
return os.WriteFile(b.BridgeConfigFile, data, 0600)
}
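A hypothetical usage sketch for the new MockApp above, written as a test in the same package. The helpers it calls (docker.NewDockerApp, RunImages, NewBridgeApp, RunApp, utils.EventWatcherApp, WaitExit, Free) all appear elsewhere in this compare, but the test name and the "../config.json" path are illustrative assumptions, not part of the change:

package app

import (
	"testing"

	"scroll-tech/common/docker"
	"scroll-tech/common/utils"
)

// TestRunEventWatcherMockApp drives the mocked event-watcher end to end.
func TestRunEventWatcherMockApp(t *testing.T) {
	base := docker.NewDockerApp() // assumed to provide the l1geth/l2geth/db images used by other tests
	defer base.Free()
	base.RunImages(t)

	bridgeApp := NewBridgeApp(base, "../config.json")
	defer bridgeApp.Free()

	// RunApp starts the child process and waits for the
	// "Start event-watcher successfully" keyword before returning.
	bridgeApp.RunApp(t, utils.EventWatcherApp)
	bridgeApp.WaitExit()
}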


@@ -0,0 +1,118 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up message-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "message-relayer"
app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `message-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.MessageRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)
// Start l2relayer process
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessSavedEvents)
// Finish start all message relayer functions
log.Info("Start message-relayer successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run message_relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/msg_relayer/app"
func main() {
app.Run()
}


@@ -0,0 +1,133 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up rollup-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "rollup-relayer"
app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `rollup-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.RollupRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, l2relayer, ormFactory)
if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)
// Watcher loop to fetch missing blocks
go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
l2watcher.TryFetchRunningMissingBlocks(ctx, number)
})
// Batch proposer loop
go cutils.Loop(subCtx, 2*time.Second, func() {
batchProposer.TryProposeBatch()
batchProposer.TryCommitBatches()
})
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run rollup relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/rollup_relayer/app"
func main() {
app.Run()
}


@@ -19,7 +19,8 @@
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000
"min_balance": 100000000000000000000,
"pending_limit": 10
},
"gas_oracle_config": {
"min_gas_price": 0,
@@ -30,7 +31,7 @@
"1212121212121212121212121212121212121212121212121212121212121212"
],
"gas_oracle_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
"1313131313131313131313131313131313131313131313131313131313131313"
]
}
},
@@ -53,7 +54,8 @@
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000
"min_balance": 100000000000000000000,
"pending_limit": 10
},
"gas_oracle_config": {
"min_gas_price": 0,
@@ -64,10 +66,10 @@
"1212121212121212121212121212121212121212121212121212121212121212"
],
"gas_oracle_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
"1313131313131313131313131313131313131313131313131313131313131313"
],
"rollup_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
"1414141414141414141414141414141414141414141414141414141414141414"
]
},
"batch_proposer_config": {


@@ -33,6 +33,8 @@ type SenderConfig struct {
MinBalance *big.Int `json:"min_balance,omitempty"`
// The interval (in seconds) to check balance and top up sender's accounts
CheckBalanceTime uint64 `json:"check_balance_time"`
// The sender's pending count limit.
PendingLimit int `json:"pending_limit,omitempty"`
}
// RelayerConfig loads relayer configuration items.
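The new pending_limit option caps how many transactions a sender keeps in flight; the relayer hunks later in this compare treat sender.ErrFullPending as a retry-later condition rather than a hard error. A minimal, self-contained sketch of that idea, not the repo's actual sender (only PendingLimit and the ErrFullPending name come from the diff; everything else is illustrative):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// ErrFullPending mirrors the sentinel error checked by the relayers in this compare.
var ErrFullPending = errors.New("sender pending pool is full")

// Sender is a toy stand-in; pendingLimit corresponds to SenderConfig.PendingLimit
// ("pending_limit" in config.json).
type Sender struct {
	mu           sync.Mutex
	pending      map[string]struct{}
	pendingLimit int
}

func NewSender(pendingLimit int) *Sender {
	return &Sender{pending: make(map[string]struct{}), pendingLimit: pendingLimit}
}

// SendTransaction registers txID as in-flight unless the pending limit is reached.
func (s *Sender) SendTransaction(txID string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.pendingLimit > 0 && len(s.pending) >= s.pendingLimit {
		return ErrFullPending
	}
	s.pending[txID] = struct{}{}
	return nil
}

// Confirm frees a pending slot once the transaction is confirmed on chain.
func (s *Sender) Confirm(txID string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.pending, txID)
}

func main() {
	s := NewSender(2)
	for _, id := range []string{"tx1", "tx2", "tx3"} {
		if err := s.SendTransaction(id); errors.Is(err, ErrFullPending) {
			fmt.Println(id, "deferred: pending limit reached") // caller retries on its next loop tick
			continue
		}
		fmt.Println(id, "queued")
	}
}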


@@ -4,6 +4,7 @@ go 1.18
require (
github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04
github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
@@ -21,7 +22,6 @@ require (
github.com/go-stack/stack v1.8.1 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect


@@ -39,7 +39,6 @@ github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpx
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/iden3/go-iden3-crypto v0.0.14 h1:HQnFchY735JRNQxof6n/Vbyon4owj4+Ku+LNAamWV6c=
@@ -66,6 +65,8 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=


@@ -1,55 +0,0 @@
package l1
import (
"context"
"github.com/scroll-tech/go-ethereum/ethclient"
"scroll-tech/database"
"scroll-tech/bridge/config"
)
// Backend manage the resources and services of L1 backend.
// The backend should monitor events in layer 1 and relay transactions to layer 2
type Backend struct {
cfg *config.L1Config
watcher *Watcher
relayer *Layer1Relayer
orm database.OrmFactory
}
// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*Backend, error) {
client, err := ethclient.Dial(cfg.Endpoint)
if err != nil {
return nil, err
}
relayer, err := NewLayer1Relayer(ctx, orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.L1MessageQueueAddress, cfg.ScrollChainContractAddress, orm)
return &Backend{
cfg: cfg,
watcher: watcher,
relayer: relayer,
orm: orm,
}, nil
}
// Start Backend module.
func (l1 *Backend) Start() error {
l1.watcher.Start()
l1.relayer.Start()
return nil
}
// Stop Backend module.
func (l1 *Backend) Stop() {
l1.watcher.Stop()
l1.relayer.Stop()
}


@@ -1,46 +0,0 @@
package l1
import (
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
// docker consider handler.
base *docker.App
)
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
m.Run()
base.Free()
}
func setupEnv(t *testing.T) {
// Load config.
var err error
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1GethEndpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
cfg.DBConfig.DSN = base.DBEndpoint()
}
func TestL1(t *testing.T) {
setupEnv(t)
t.Run("testCreateNewL1Relayer", testCreateNewL1Relayer)
t.Run("testStartWatcher", testStartWatcher)
}


@@ -1,76 +0,0 @@
package l2
import (
"context"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/database"
"scroll-tech/bridge/config"
)
// Backend manage the resources and services of L2 backend.
// The backend should monitor events in layer 2 and relay transactions to layer 1
type Backend struct {
cfg *config.L2Config
watcher *WatcherClient
relayer *Layer2Relayer
batchProposer *BatchProposer
orm database.OrmFactory
}
// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*Backend, error) {
client, err := ethclient.Dial(cfg.Endpoint)
if err != nil {
return nil, err
}
// Note: initialize watcher before relayer to keep DB consistent.
// Otherwise, there will be a race condition between watcher.initializeGenesis and relayer.ProcessPendingBatches.
watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.L2MessengerAddress, cfg.L2MessageQueueAddress, cfg.WithdrawTrieRootSlot, orm)
relayer, err := NewLayer2Relayer(ctx, client, orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
batchProposer := NewBatchProposer(ctx, cfg.BatchProposerConfig, relayer, orm)
return &Backend{
cfg: cfg,
watcher: watcher,
relayer: relayer,
batchProposer: batchProposer,
orm: orm,
}, nil
}
// Start Backend module.
func (l2 *Backend) Start() error {
l2.watcher.Start()
l2.relayer.Start()
l2.batchProposer.Start()
return nil
}
// Stop Backend module.
func (l2 *Backend) Stop() {
l2.batchProposer.Stop()
l2.relayer.Stop()
l2.watcher.Stop()
}
// APIs collect API modules.
func (l2 *Backend) APIs() []rpc.API {
return []rpc.API{
{
Namespace: "l2",
Version: "1.0",
Service: WatcherAPI(l2.watcher),
Public: true,
},
}
}


@@ -1,99 +0,0 @@
package l2
import (
"fmt"
"golang.org/x/sync/errgroup"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/core/vm"
"github.com/scroll-tech/go-ethereum/log"
)
//nolint:unused
func blockTraceIsValid(trace *types.BlockTrace) bool {
if trace == nil {
log.Warn("block trace is empty")
return false
}
flag := true
for _, tx := range trace.ExecutionResults {
flag = structLogResIsValid(tx.StructLogs) && flag
}
return flag
}
//nolint:unused
func structLogResIsValid(txLogs []*types.StructLogRes) bool {
res := true
for i := 0; i < len(txLogs); i++ {
txLog := txLogs[i]
flag := true
switch vm.StringToOp(txLog.Op) {
case vm.CALL, vm.CALLCODE:
flag = codeIsValid(txLog, 2) && flag
flag = stateIsValid(txLog, 2) && flag
case vm.DELEGATECALL, vm.STATICCALL:
flag = codeIsValid(txLog, 2) && flag
case vm.CREATE, vm.CREATE2:
flag = stateIsValid(txLog, 1) && flag
case vm.SLOAD, vm.SSTORE, vm.SELFBALANCE:
flag = stateIsValid(txLog, 1) && flag
case vm.SELFDESTRUCT:
flag = stateIsValid(txLog, 2) && flag
case vm.EXTCODEHASH, vm.BALANCE:
flag = stateIsValid(txLog, 1) && flag
}
res = res && flag
}
return res
}
//nolint:unused
func codeIsValid(txLog *types.StructLogRes, n int) bool {
extraData := txLog.ExtraData
if extraData == nil {
log.Warn("extraData is empty", "pc", txLog.Pc, "opcode", txLog.Op)
return false
} else if len(extraData.CodeList) < n {
log.Warn("code list is too short", "opcode", txLog.Op, "expect length", n, "actual length", len(extraData.CodeList))
return false
}
return true
}
//nolint:unused
func stateIsValid(txLog *types.StructLogRes, n int) bool {
extraData := txLog.ExtraData
if extraData == nil {
log.Warn("extraData is empty", "pc", txLog.Pc, "opcode", txLog.Op)
return false
} else if len(extraData.StateList) < n {
log.Warn("stateList list is too short", "opcode", txLog.Op, "expect length", n, "actual length", len(extraData.StateList))
return false
}
return true
}
// TraceHasUnsupportedOpcodes check if exist unsupported opcodes
func TraceHasUnsupportedOpcodes(opcodes map[string]struct{}, trace *types.BlockTrace) bool {
if trace == nil {
return false
}
eg := errgroup.Group{}
for _, res := range trace.ExecutionResults {
res := res
eg.Go(func() error {
for _, lg := range res.StructLogs {
if _, ok := opcodes[lg.Op]; ok {
return fmt.Errorf("unsupported opcde: %s", lg.Op)
}
}
return nil
})
}
err := eg.Wait()
return err != nil
}


@@ -1,5 +0,0 @@
package l2
// WatcherAPI watcher api service
type WatcherAPI interface {
}


@@ -1,10 +1,9 @@
package l1
package relayer
import (
"context"
"errors"
"math/big"
"time"
// not sure if this will make problems when relay with l1geth
@@ -15,7 +14,6 @@ import (
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/database"
@@ -31,14 +29,6 @@ var (
bridgeL1MsgsRelayedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
)
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultMessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
)
// Layer1Relayer is responsible for
// 1. fetch pending L1Message from db
// 2. relay pending message to layer 2 node
@@ -53,11 +43,9 @@ type Layer1Relayer struct {
// channel used to communicate with transaction sender
messageSender *sender.Sender
messageCh <-chan *sender.Confirmation
l2MessengerABI *abi.ABI
gasOracleSender *sender.Sender
gasOracleCh <-chan *sender.Confirmation
l1GasOracleABI *abi.ABI
minGasLimitForMessageRelay uint64
@@ -65,8 +53,6 @@ type Layer1Relayer struct {
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
stopCh chan struct{}
}
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
@@ -96,21 +82,19 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
gasPriceDiff = defaultGasPriceDiff
}
minGasLimitForMessageRelay := uint64(defaultMessageRelayMinGasLimit)
minGasLimitForMessageRelay := uint64(defaultL1MessageRelayMinGasLimit)
if cfg.MessageRelayMinGasLimit != 0 {
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
}
return &Layer1Relayer{
l1Relayer := &Layer1Relayer{
ctx: ctx,
db: db,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
l2MessengerABI: bridge_abi.L2ScrollMessengerABI,
gasOracleSender: gasOracleSender,
gasOracleCh: gasOracleSender.ConfirmChan(),
l1GasOracleABI: bridge_abi.L1GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
@@ -118,9 +102,11 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
stopCh: make(chan struct{}),
}, nil
cfg: cfg,
}
go l1Relayer.handleConfirmLoop(ctx)
return l1Relayer, nil
}
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
@@ -138,7 +124,7 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
for _, msg := range msgs {
if err = r.processSavedEvent(msg); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
}
return
@@ -153,7 +139,7 @@ func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error {
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
if err != nil && err.Error() == "execution reverted: Message was already successfully executed" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
}
if err != nil {
@@ -203,7 +189,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
}
return
@@ -220,61 +206,43 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
}
}
// Start the relayer process
func (r *Layer1Relayer) Start() {
go func() {
ctx, cancel := context.WithCancel(r.ctx)
go utils.Loop(ctx, 2*time.Second, r.ProcessSavedEvents)
go utils.Loop(ctx, 2*time.Second, r.ProcessGasPriceOracle)
go func(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case cfm := <-r.messageCh:
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
if !cfm.IsSuccessful {
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
case cfm := <-r.gasOracleCh:
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case cfm := <-r.messageSender.ConfirmChan():
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
if !cfm.IsSuccessful {
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
}(ctx)
<-r.stopCh
cancel()
}()
}
// Stop the relayer module, for a graceful shutdown.
func (r *Layer1Relayer) Stop() {
close(r.stopCh)
case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
}
}
}
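The Layer1Relayer hunk above (and the Layer2Relayer one below) replaces the exported Start/Stop pair and its internal stopCh with a handleConfirmLoop goroutine that the constructor launches and the caller's context cancels. A small, self-contained sketch of that pattern; Relayer and Confirmation here are illustrative stand-ins, not the repo's types:

package main

import (
	"context"
	"fmt"
	"time"
)

// Confirmation is a stand-in for sender.Confirmation.
type Confirmation struct{ ID string }

type Relayer struct {
	confirmCh chan Confirmation
}

// NewRelayer launches the confirmation loop itself; callers no longer call Start().
func NewRelayer(ctx context.Context) *Relayer {
	r := &Relayer{confirmCh: make(chan Confirmation, 8)}
	go r.handleConfirmLoop(ctx)
	return r
}

// handleConfirmLoop drains confirmations until the caller's context is cancelled,
// which replaces the old close(stopCh)-based Stop().
func (r *Relayer) handleConfirmLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case cfm := <-r.confirmCh:
			fmt.Println("transaction confirmed:", cfm.ID)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	r := NewRelayer(ctx)
	r.confirmCh <- Confirmation{ID: "tx1"}
	time.Sleep(50 * time.Millisecond) // give the loop a moment to handle the confirmation
	cancel()                          // graceful shutdown; no Stop() method needed
}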


@@ -1,4 +1,4 @@
package l1
package relayer_test
import (
"context"
@@ -8,6 +8,8 @@ import (
"scroll-tech/database/migrate"
"scroll-tech/bridge/relayer"
"scroll-tech/database"
)
@@ -19,9 +21,7 @@ func testCreateNewL1Relayer(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
relayer, err := relayer.NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
relayer.Start()
assert.NotNil(t, relayer)
}


@@ -1,4 +1,4 @@
package l2
package relayer
import (
"context"
@@ -7,7 +7,6 @@ import (
"math/big"
"runtime"
"sync"
"time"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
@@ -20,9 +19,8 @@ import (
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/database"
cutil "scroll-tech/common/utils"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
@@ -40,14 +38,6 @@ var (
bridgeL2BatchesSkippedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
)
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultMessageRelayMinGasLimit = 200000 // should be enough for both ERC20 and ETH relay
)
// Layer2Relayer is responsible for
// 1. Committing and finalizing L2 blocks on L1
// 2. Relaying messages from L2 to L1
@@ -63,15 +53,12 @@ type Layer2Relayer struct {
cfg *config.RelayerConfig
messageSender *sender.Sender
messageCh <-chan *sender.Confirmation
l1MessengerABI *abi.ABI
rollupSender *sender.Sender
rollupCh <-chan *sender.Confirmation
l1RollupABI *abi.ABI
gasOracleSender *sender.Sender
gasOracleCh <-chan *sender.Confirmation
l2GasOracleABI *abi.ABI
minGasLimitForMessageRelay uint64
@@ -91,8 +78,6 @@ type Layer2Relayer struct {
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch hash.
processingFinalization sync.Map
stopCh chan struct{}
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
@@ -126,27 +111,24 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db databa
gasPriceDiff = defaultGasPriceDiff
}
minGasLimitForMessageRelay := uint64(defaultMessageRelayMinGasLimit)
minGasLimitForMessageRelay := uint64(defaultL2MessageRelayMinGasLimit)
if cfg.MessageRelayMinGasLimit != 0 {
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
}
return &Layer2Relayer{
layer2Relayer := &Layer2Relayer{
ctx: ctx,
db: db,
l2Client: l2Client,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
l1MessengerABI: bridge_abi.L1ScrollMessengerABI,
rollupSender: rollupSender,
rollupCh: rollupSender.ConfirmChan(),
l1RollupABI: bridge_abi.ScrollChainABI,
gasOracleSender: gasOracleSender,
gasOracleCh: gasOracleSender.ConfirmChan(),
l2GasOracleABI: bridge_abi.L2GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
@@ -158,8 +140,9 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db databa
processingMessage: sync.Map{},
processingBatchesCommitment: sync.Map{},
processingFinalization: sync.Map{},
stopCh: make(chan struct{}),
}, nil
}
go layer2Relayer.handleConfirmLoop(ctx)
return layer2Relayer, nil
}
const processMsgLimit = 100
@@ -198,7 +181,7 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process l2 saved event", "err", err)
}
return
@@ -247,11 +230,11 @@ func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
if err != nil && err.Error() == "execution reverted: Message was already successfully executed" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
}
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
}
return err
@@ -297,7 +280,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
}
return
@@ -343,7 +326,7 @@ func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error {
txID := crypto.Keccak256Hash(bytes).String()
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send commitBatches tx to layer1 ", "err", err)
}
return err
@@ -493,7 +476,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
finalizeTxHash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("finalizeBatchWithProof in layer1 failed", "hash", hash, "err", err)
}
return
@@ -516,53 +499,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
}
// Start the relayer process
func (r *Layer2Relayer) Start() {
go func() {
ctx, cancel := context.WithCancel(r.ctx)
go cutil.Loop(ctx, time.Second, r.ProcessSavedEvents)
go cutil.Loop(ctx, time.Second, r.ProcessCommittedBatches)
go cutil.Loop(ctx, time.Second, r.ProcessGasPriceOracle)
go func(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case confirmation := <-r.messageCh:
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupCh:
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleCh:
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer1", "confirmation", cfm)
}
}
}
}(ctx)
<-r.stopCh
cancel()
}()
}
// Stop the relayer module, for a graceful shutdown.
func (r *Layer2Relayer) Stop() {
close(r.stopCh)
}
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is message relay transaction
@@ -626,3 +562,32 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case confirmation := <-r.messageSender.ConfirmChan():
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupSender.ConfirmChan():
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer1", "confirmation", cfm)
}
}
}
}
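With Start and Stop removed from Layer2Relayer, the confirmation loop above is spawned inside NewLayer2Relayer, and the periodic processing loops are left to the caller. A minimal sketch of that wiring, assuming the caller reuses the cutil.Loop helper the removed Start method relied on (the actual entrypoint is not shown in this diff):
// Illustrative only: drive the relayer's processing loops from the caller.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go cutil.Loop(ctx, time.Second, l2Relayer.ProcessSavedEvents)
go cutil.Loop(ctx, time.Second, l2Relayer.ProcessCommittedBatches)
go cutil.Loop(ctx, time.Second, l2Relayer.ProcessGasPriceOracle)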

View File

@@ -1,4 +1,4 @@
package l2
package relayer_test
import (
"context"
@@ -14,6 +14,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/relayer"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
@@ -39,11 +41,9 @@ func testCreateNewRelayer(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
relayer.Start()
assert.NotNil(t, relayer)
}
func testL2RelayerProcessSaveEvents(t *testing.T) {
@@ -54,9 +54,8 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
err = db.SaveL2Messages(context.Background(), templateL2Message)
assert.NoError(t, err)
@@ -79,6 +78,12 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
}
assert.NoError(t, db.InsertWrappedBlocks(traces))
parentBatch1 := &types.BlockBatch{
Index: 0,
Hash: common.Hash{}.String(),
StateRoot: common.Hash{}.String(),
}
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
@@ -104,10 +109,15 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
parentBatch1 := &types.BlockBatch{
Index: 0,
Hash: common.Hash{}.String(),
StateRoot: common.Hash{}.String(),
}
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
@@ -140,9 +150,8 @@ func testL2RelayerSkipBatches(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string {
dbTx, err := db.Beginx()

bridge/relayer/params.go Normal file
View File

@@ -0,0 +1,11 @@
package relayer
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
defaultL2MessageRelayMinGasLimit = 200000
)
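For reference, defaultGasPriceDiff is expressed in units of gasPriceDiffPrecision, so 50000 / 1000000 is the 5% noted in the comment. A minimal sketch of how such a precision-scaled threshold is typically evaluated; the helper below is an illustrative assumption, not code from this change:
// exceedsGasPriceDiff is a hypothetical helper shown only to illustrate the
// precision convention; it assumes oldPrice > 0.
func exceedsGasPriceDiff(oldPrice, newPrice uint64) bool {
	diff := newPrice - oldPrice
	if oldPrice > newPrice {
		diff = oldPrice - newPrice
	}
	// scale before dividing to stay in integer math: 50000/1000000 == 5%
	return diff*gasPriceDiffPrecision/oldPrice >= defaultGasPriceDiff
}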

View File

@@ -1,4 +1,4 @@
package l2
package relayer_test
import (
"encoding/json"
@@ -40,9 +40,9 @@ func setupEnv(t *testing.T) (err error) {
base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1GethEndpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
cfg.DBConfig.DSN = base.DBEndpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = base.DBConfig
// Create l2geth client.
l2Cli, err = base.L2Client()
@@ -93,24 +93,16 @@ func TestMain(m *testing.M) {
base.Free()
}
func TestFunction(t *testing.T) {
func TestFunctions(t *testing.T) {
if err := setupEnv(t); err != nil {
t.Fatal(err)
}
// Run l2 watcher test cases.
t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
// Run l1 relayer test cases.
t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer)
// Run l2 relayer test cases.
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)
// Run batch proposer test cases.
t.Run("TestBatchProposerProposeBatch", testBatchProposerProposeBatch)
t.Run("TestBatchProposerGracefulRestart", testBatchProposerGracefulRestart)
}

View File

@@ -6,21 +6,19 @@ import (
"errors"
"fmt"
"math/big"
"reflect"
"strings"
"sync"
"sync/atomic"
"time"
cmapV2 "github.com/orcaman/concurrent-map/v2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/config"
"scroll-tech/bridge/utils"
)
const (
@@ -37,6 +35,12 @@ const (
var (
// ErrNoAvailableAccount indicates no available account error in the account pool.
ErrNoAvailableAccount = errors.New("sender has no available account to send transaction")
// ErrFullPending indicates that the sender's pending pool is full.
ErrFullPending = errors.New("sender's pending pool is full")
)
var (
defaultPendingLimit = 10
)
// Confirmation struct used to indicate transaction confirmation details
@@ -74,9 +78,9 @@ type Sender struct {
// account fields.
auths *accountPool
blockNumber uint64 // Current block number on chain.
baseFeePerGas uint64 // Current base fee per gas on chain
pendingTxs sync.Map // Mapping from nonce to pending transaction
blockNumber uint64 // Current block number on chain.
baseFeePerGas uint64 // Current base fee per gas on chain
pendingTxs cmapV2.ConcurrentMap[string, *PendingTransaction] // Mapping from nonce to pending transaction
confirmCh chan *Confirmation
stopCh chan struct{}
@@ -116,6 +120,11 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
}
}
// initialize pending limit with a default value
if config.PendingLimit == 0 {
config.PendingLimit = defaultPendingLimit
}
sender := &Sender{
ctx: ctx,
config: config,
@@ -125,7 +134,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
confirmCh: make(chan *Confirmation, 128),
blockNumber: header.Number.Uint64(),
baseFeePerGas: baseFeePerGas,
pendingTxs: sync.Map{},
pendingTxs: cmapV2.New[*PendingTransaction](),
stopCh: make(chan struct{}),
}
@@ -134,6 +143,21 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
return sender, nil
}
// PendingCount returns the current number of pending txs.
func (s *Sender) PendingCount() int {
return s.pendingTxs.Count()
}
// PendingLimit returns the maximum number of pending txs the sender can handle.
func (s *Sender) PendingLimit() int {
return s.config.PendingLimit
}
// IsFull returns true if the sender's pending tx pool is full.
func (s *Sender) IsFull() bool {
return s.pendingTxs.Count() >= s.config.PendingLimit
}
// Stop stops the sender module.
func (s *Sender) Stop() {
close(s.stopCh)
@@ -159,21 +183,24 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
// SendTransaction sends a signed L2-to-L1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) {
if s.IsFull() {
return common.Hash{}, ErrFullPending
}
// We occupy the ID in case other threads call with the same ID at the same time
if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
}
// get
auth := s.auths.getAccount()
if auth == nil {
s.pendingTxs.Delete(ID) // release the ID on failure
s.pendingTxs.Remove(ID) // release the ID on failure
return common.Hash{}, ErrNoAvailableAccount
}
defer s.auths.releaseAccount(auth)
defer func() {
if err != nil {
s.pendingTxs.Delete(ID) // release the ID on failure
s.pendingTxs.Remove(ID) // release the ID on failure
}
}()
@@ -194,7 +221,7 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
submitAt: atomic.LoadUint64(&s.blockNumber),
feeData: feeData,
}
s.pendingTxs.Store(ID, pending)
s.pendingTxs.Set(ID, pending)
return tx.Hash(), nil
}
@@ -335,17 +362,17 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
}
}
s.pendingTxs.Range(func(key, value interface{}) bool {
for item := range s.pendingTxs.IterBuffered() {
key, pending := item.Key, item.Val
// ignore placeholder entries, since a nil value is stored to occupy a pending task's ID
if value == nil || reflect.ValueOf(value).IsNil() {
return true
if pending == nil {
continue
}
pending := value.(*PendingTransaction)
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
if receipt.BlockNumber.Uint64() <= confirmed {
s.pendingTxs.Delete(key)
s.pendingTxs.Remove(key)
// send confirm message
s.confirmCh <- &Confirmation{
ID: pending.id,
@@ -376,7 +403,7 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
// We need to stop the program and manually handle the situation.
if strings.Contains(err.Error(), "nonce") {
// This key can be deleted
s.pendingTxs.Delete(key)
s.pendingTxs.Remove(key)
// Try get receipt by the latest replaced tx hash
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
@@ -398,8 +425,7 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
pending.submitAt = number
}
}
return true
})
}
}
// Loop is the main event loop
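Since SendTransaction can now fail fast with ErrFullPending once the pool reaches PendingLimit, callers are expected to treat it like ErrNoAvailableAccount and retry later, as the relayer changes above and testBatchSender below do. A minimal caller-side sketch, assuming a fixed one-second backoff:
// Illustrative retry loop around Sender.SendTransaction; the backoff policy is an assumption.
for {
	_, err := s.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
	if errors.Is(err, sender.ErrNoAvailableAccount) || errors.Is(err, sender.ErrFullPending) {
		time.Sleep(time.Second) // pool busy or full: wait and retry
		continue
	}
	if err != nil {
		log.Error("SendTransaction failed", "err", err)
	}
	break
}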

View File

@@ -50,13 +50,15 @@ func setupEnv(t *testing.T) {
// Load default private key.
privateKeys = []*ecdsa.PrivateKey{priv}
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
}
func TestSender(t *testing.T) {
// Setup
setupEnv(t)
t.Run("test pending limit", func(t *testing.T) { testPendLimit(t) })
t.Run("test min gas limit", func(t *testing.T) { testMinGasLimit(t) })
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
@@ -64,6 +66,21 @@ func TestSender(t *testing.T) {
t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
}
func testPendLimit(t *testing.T) {
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
senderCfg.Confirmations = rpc.LatestBlockNumber
senderCfg.PendingLimit = 2
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
assert.NoError(t, err)
defer newSender.Stop()
for i := 0; i < newSender.PendingLimit(); i++ {
_, err = newSender.SendTransaction(strconv.Itoa(i), &common.Address{}, big.NewInt(1), nil, 0)
assert.NoError(t, err)
}
assert.True(t, newSender.PendingCount() <= newSender.PendingLimit())
}
func testMinGasLimit(t *testing.T) {
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
senderCfg.Confirmations = rpc.LatestBlockNumber
@@ -100,6 +117,7 @@ func testBatchSender(t *testing.T, batchSize int) {
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
senderCfg.Confirmations = rpc.LatestBlockNumber
senderCfg.PendingLimit = batchSize * TXBatch
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
if err != nil {
t.Fatal(err)
@@ -119,7 +137,7 @@ func testBatchSender(t *testing.T, batchSize int) {
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := strconv.Itoa(i + index*1000)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
if errors.Is(err, sender.ErrNoAvailableAccount) {
if errors.Is(err, sender.ErrNoAvailableAccount) || errors.Is(err, sender.ErrFullPending) {
<-time.After(time.Second)
continue
}

View File

@@ -82,15 +82,15 @@ func setupEnv(t *testing.T) {
base.RunImages(t)
// Create l1geth container.
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1GethEndpoint()
cfg.L1Config.Endpoint = base.L1GethEndpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
// Create l2geth container.
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
cfg.L2Config.Endpoint = base.L2GethEndpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
// Create db container.
cfg.DBConfig.DSN = base.DBEndpoint()
cfg.DBConfig = base.DBConfig
// Create l1geth and l2geth client.
l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)

View File

@@ -11,8 +11,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -30,14 +30,13 @@ func testImportL1GasPrice(t *testing.T) {
l1Cfg := cfg.L1Config
// Create L1Relayer
l1Relayer, err := l1.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
defer l1Relayer.Stop()
// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := l1.NewWatcher(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())
@@ -81,9 +80,8 @@ func testImportL2GasPrice(t *testing.T) {
l2Cfg := cfg.L2Config
// Create L2Relayer
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// add fake blocks
traces := []*types.WrappedBlock{

View File

@@ -13,8 +13,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -33,16 +33,14 @@ func testRelayL1MessageSucceed(t *testing.T) {
l2Cfg := cfg.L2Config
// Create L1Relayer
l1Relayer, err := l1.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
defer l1Relayer.Stop()
// Create L1Watcher
confirmations := rpc.LatestBlockNumber
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// Create L2Watcher
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
// send message through l1 messenger contract
nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})
@@ -56,7 +54,7 @@ func testRelayL1MessageSucceed(t *testing.T) {
}
// l1 watch process events
l1Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
l1Watcher.FetchContractEvent()
// check db status
msg, err := db.GetL1MessageByQueueIndex(nonce.Uint64())
@@ -79,7 +77,7 @@ func testRelayL1MessageSucceed(t *testing.T) {
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
l2Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
l2Watcher.FetchContractEvent()
msg, err = db.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgConfirmed)

View File

@@ -13,8 +13,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -33,15 +33,15 @@ func testRelayL2MessageSucceed(t *testing.T) {
// Create L2Watcher
confirmations := rpc.LatestBlockNumber
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
// Create L2Relayer
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// send message through l2 messenger contract
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
@@ -55,7 +55,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
}
// l2 watch process events
l2Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
l2Watcher.FetchContractEvent()
// check db status
msg, err := db.GetL2MessageByNonce(nonce.Uint64())
@@ -123,7 +123,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch CommitBatch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
@@ -144,7 +144,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch FinalizeBatch events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
@@ -165,7 +165,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
err = l1Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)

View File

@@ -12,8 +12,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -30,13 +30,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// add some blocks to db
var wrappedBlocks []*types.WrappedBlock
@@ -96,7 +95,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
@@ -126,7 +125,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)

View File

@@ -29,19 +29,6 @@ func ComputeMessageHash(
return common.BytesToHash(crypto.Keccak256(data))
}
// BufferToUint256Be convert bytes array to uint256 array assuming big-endian
func BufferToUint256Be(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
buffer256[i] = big.NewInt(0)
for j := 0; j < 32; j++ {
buffer256[i] = buffer256[i].Lsh(buffer256[i], 8)
buffer256[i] = buffer256[i].Add(buffer256[i], big.NewInt(int64(buffer[i*32+j])))
}
}
return buffer256
}
// BufferToUint256Le convert bytes array to uint256 array assuming little-endian
func BufferToUint256Le(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
@@ -76,23 +63,3 @@ func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
// @todo: add unit test.
func UnpackLogIntoMap(c *abi.ABI, out map[string]interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoMap(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:])
}

View File

@@ -1,10 +1,9 @@
package l2
package watcher
import (
"context"
"fmt"
"math"
"reflect"
"sync"
"time"
@@ -13,22 +12,22 @@ import (
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/database"
bridgeabi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
)
var (
bridgeL2BatchesGasOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/gas/over/threshold/total", metrics.ScrollRegistry)
bridgeL2BatchesTxsOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/txs/over/threshold/total", metrics.ScrollRegistry)
bridgeL2BatchesCommitTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/commit/total", metrics.ScrollRegistry)
bridgeL2BatchesBlocksCreatedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/blocks/created/total", metrics.ScrollRegistry)
bridgeL2BatchesCommitsSentTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/commits/sent/total", metrics.ScrollRegistry)
bridgeL2BatchesCreatedRateMeter = geth_metrics.NewRegisteredMeter("bridge/l2/batches/blocks/created/rate", metrics.ScrollRegistry)
bridgeL2BatchesTxsCreatedRateMeter = geth_metrics.NewRegisteredMeter("bridge/l2/batches/txs/created/rate", metrics.ScrollRegistry)
bridgeL2BatchesGasCreatedRateMeter = geth_metrics.NewRegisteredMeter("bridge/l2/batches/gas/created/rate", metrics.ScrollRegistry)
bridgeL2BatchesTxsCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/txs/created/per/batch", metrics.ScrollRegistry)
bridgeL2BatchesGasCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/gas/created/per/batch", metrics.ScrollRegistry)
)
// AddBatchInfoToDB inserts the batch information to the BlockBatch table and updates the batch_hash
@@ -83,15 +82,13 @@ type BatchProposer struct {
proofGenerationFreq uint64
batchDataBuffer []*types.BatchData
relayer *Layer2Relayer
relayer *relayer.Layer2Relayer
piCfg *types.PublicInputHashConfig
stopCh chan struct{}
}
// NewBatchProposer will return a new instance of BatchProposer.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *Layer2Relayer, orm database.OrmFactory) *BatchProposer {
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *relayer.Layer2Relayer, orm database.OrmFactory) *BatchProposer {
p := &BatchProposer{
mutex: sync.Mutex{},
ctx: ctx,
@@ -107,42 +104,17 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, rela
proofGenerationFreq: cfg.ProofGenerationFreq,
piCfg: cfg.PublicInputConfig,
relayer: relayer,
stopCh: make(chan struct{}),
}
// for graceful restart.
p.recoverBatchDataBuffer()
// try to commit the leftover pending batches
p.tryCommitBatches()
p.TryCommitBatches()
return p
}
// Start the Listening process
func (p *BatchProposer) Start() {
go func() {
if reflect.ValueOf(p.orm).IsNil() {
panic("must run BatchProposer with DB")
}
ctx, cancel := context.WithCancel(p.ctx)
go utils.Loop(ctx, 2*time.Second, func() {
p.tryProposeBatch()
p.tryCommitBatches()
})
<-p.stopCh
cancel()
}()
}
// Stop the Watcher module, for a graceful shutdown.
func (p *BatchProposer) Stop() {
p.stopCh <- struct{}{}
}
func (p *BatchProposer) recoverBatchDataBuffer() {
// batches are sorted by batch index in increasing order
batchHashes, err := p.orm.GetPendingBatches(math.MaxInt32)
@@ -214,7 +186,8 @@ func (p *BatchProposer) recoverBatchDataBuffer() {
}
}
func (p *BatchProposer) tryProposeBatch() {
// TryProposeBatch will try to propose a batch.
func (p *BatchProposer) TryProposeBatch() {
p.mutex.Lock()
defer p.mutex.Unlock()
@@ -243,7 +216,8 @@ func (p *BatchProposer) tryProposeBatch() {
}
}
func (p *BatchProposer) tryCommitBatches() {
// TryCommitBatches will try to commit the pending batches.
func (p *BatchProposer) TryCommitBatches() {
p.mutex.Lock()
defer p.mutex.Unlock()
@@ -283,7 +257,7 @@ func (p *BatchProposer) tryCommitBatches() {
log.Error("SendCommitTx failed", "error", err)
} else {
// pop the processed batches from the buffer
bridgeL2BatchesCommitTotalCounter.Inc(1)
bridgeL2BatchesCommitsSentTotalCounter.Inc(1)
p.batchDataBuffer = p.batchDataBuffer[index:]
}
}
@@ -293,15 +267,49 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
return false
}
approximatePayloadSize := func(hash string) (uint64, error) {
traces, err := p.orm.GetL2WrappedBlocks(map[string]interface{}{"hash": hash})
if err != nil {
return 0, err
}
if len(traces) != 1 {
return 0, fmt.Errorf("unexpected traces length, expected = 1, actual = %d", len(traces))
}
size := 0
for _, tx := range traces[0].Transactions {
size += len(tx.Data)
}
return uint64(size), nil
}
firstSize, err := approximatePayloadSize(blocks[0].Hash)
if err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
return false
}
if firstSize > p.commitCalldataSizeLimit {
log.Warn("oversized payload even for only 1 block", "height", blocks[0].Number, "size", firstSize)
// note: we should probably fail here once we can ensure this will not happen
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
return false
}
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
return true
}
if blocks[0].GasUsed > p.batchGasThreshold {
bridgeL2BatchesGasOverThresholdTotalCounter.Inc(1)
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
} else {
bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(blocks[0].GasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(1)
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
}
return true
}
@@ -312,24 +320,31 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
} else {
bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(blocks[0].GasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(1)
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
}
return true
}
var gasUsed, txNum uint64
var gasUsed, txNum, payloadSize uint64
reachThreshold := false
// add blocks into batch until reach batchGasThreshold
for i, block := range blocks {
if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) {
size, err := approximatePayloadSize(block.Hash)
if err != nil {
log.Error("failed to create batch", "number", block.Number, "err", err)
return false
}
if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) || (payloadSize+size > p.commitCalldataSizeLimit) {
blocks = blocks[:i]
reachThreshold = true
break
}
gasUsed += block.GasUsed
txNum += block.TxNum
payloadSize += size
}
// if too few gas gathered, but we don't want to halt, we then check the first block in the batch:
@@ -342,9 +357,9 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
if err := p.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
} else {
bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(txNum))
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(gasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(int64(len(blocks)))
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(txNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(gasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(int64(len(blocks)))
}
return true

View File

@@ -1,4 +1,4 @@
package l2
package watcher_test
import (
"context"
@@ -6,12 +6,15 @@ import (
"math"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/common/types"
)
@@ -21,34 +24,49 @@ func testBatchProposerProposeBatch(t *testing.T) {
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
db.Close()
}()
// Insert traces into db.
assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock1}))
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
wc.Start()
defer wc.Stop()
wc := watcher.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
loopToFetchEvent(subCtx, wc)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
batch, err := db.GetLatestBatch()
assert.NoError(t, err)
proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
// Create a new batch.
batchData := types.NewBatchData(&types.BlockBatch{
Index: 0,
Hash: batch.Hash,
StateRoot: batch.StateRoot,
}, []*types.WrappedBlock{wrappedBlock1}, nil)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
proposer := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, relayer, db)
proposer.tryProposeBatch()
proposer.TryProposeBatch()
infos, err := db.GetUnbatchedL2Blocks(map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", 100))
assert.NoError(t, err)
assert.Equal(t, 0, len(infos))
exist, err := db.BatchRecordExist(batchData1.Hash().Hex())
exist, err := db.BatchRecordExist(batchData.Hash().Hex())
assert.NoError(t, err)
assert.Equal(t, true, exist)
}
@@ -60,13 +78,26 @@ func testBatchProposerGracefulRestart(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
// Insert traces into db.
assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock2}))
// Insert block batch into db.
batchData1 := types.NewBatchData(&types.BlockBatch{
Index: 0,
Hash: common.Hash{}.String(),
StateRoot: common.Hash{}.String(),
}, []*types.WrappedBlock{wrappedBlock1}, nil)
parentBatch2 := &types.BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
StateRoot: batchData1.Batch.NewStateRoot.String(),
}
batchData2 := types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
@@ -84,7 +115,7 @@ func testBatchProposerGracefulRestart(t *testing.T) {
assert.Equal(t, 1, len(batchHashes))
assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])
// test p.recoverBatchDataBuffer().
_ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
_ = watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,

bridge/watcher/common.go Normal file
View File

@@ -0,0 +1,11 @@
package watcher
import "github.com/scroll-tech/go-ethereum/common"
const contractEventsBlocksFetchLimit = int64(10)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}

View File

@@ -1,9 +1,8 @@
package l1
package watcher
import (
"context"
"math/big"
"time"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
@@ -17,9 +16,8 @@ import (
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/database"
cutil "scroll-tech/common/utils"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
@@ -33,20 +31,14 @@ var (
bridgeL1MsgsRollupEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}
type rollupEvent struct {
batchHash common.Hash
txHash common.Hash
status types.RollupStatus
}
// Watcher will listen for smart contract events from Eth L1.
type Watcher struct {
// L1WatcherClient will listen for smart contract events from Eth L1.
type L1WatcherClient struct {
ctx context.Context
client *ethclient.Client
db database.OrmFactory
@@ -67,13 +59,10 @@ type Watcher struct {
processedMsgHeight uint64
// The height of the block that the watcher has retrieved header rlp
processedBlockHeight uint64
stopCh chan bool
}
// NewWatcher returns a new instance of Watcher. The instance will be not fully prepared,
// and still needs to be finalized and ran by calling `watcher.Start`.
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *Watcher {
// NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *L1WatcherClient {
savedHeight, err := db.GetLayer1LatestWatchedHeight()
if err != nil {
log.Warn("Failed to fetch height from db", "err", err)
@@ -92,9 +81,7 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
savedL1BlockHeight = startHeight
}
stopCh := make(chan bool)
return &Watcher{
return &L1WatcherClient{
ctx: ctx,
client: client,
db: db,
@@ -111,51 +98,11 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
processedMsgHeight: uint64(savedHeight),
processedBlockHeight: savedL1BlockHeight,
stopCh: stopCh,
}
}
// Start the Watcher module.
func (w *Watcher) Start() {
go func() {
ctx, cancel := context.WithCancel(w.ctx)
go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
} else {
if err := w.FetchBlockHeader(number); err != nil {
log.Error("Failed to fetch L1 block header", "lastest", number, "err", err)
}
}
})
go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
} else {
if err := w.FetchContractEvent(number); err != nil {
log.Error("Failed to fetch bridge contract", "err", err)
}
}
})
<-w.stopCh
cancel()
}()
}
// Stop the Watcher module, for a graceful shutdown.
func (w *Watcher) Stop() {
w.stopCh <- true
}
const contractEventsBlocksFetchLimit = int64(10)
// FetchBlockHeader pulls the latest L1 blocks and saves them in the DB
func (w *Watcher) FetchBlockHeader(blockHeight uint64) error {
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
fromBlock := int64(w.processedBlockHeight) + 1
toBlock := int64(blockHeight)
if toBlock < fromBlock {
@@ -201,10 +148,15 @@ func (w *Watcher) FetchBlockHeader(blockHeight uint64) error {
}
// FetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
func (w *L1WatcherClient) FetchContractEvent() error {
defer func() {
log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
return err
}
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight)
@@ -317,7 +269,7 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
return nil
}
func (w *Watcher) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) {
func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) {
// Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up
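FetchContractEvent no longer takes a block height; it resolves the latest confirmed block itself via GetLatestConfirmedBlockNumber, so polling reduces to calling it on a timer. A minimal sketch of such a loop, assuming the two-second interval the removed Start method used:
// Illustrative only: poll for new L1 contract events.
go cutil.Loop(ctx, 2*time.Second, func() {
	if err := l1Watcher.FetchContractEvent(); err != nil {
		log.Error("Failed to fetch bridge contract events", "err", err)
	}
})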

View File

@@ -1,4 +1,4 @@
package l1
package watcher_test
import (
"context"
@@ -9,6 +9,8 @@ import (
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/bridge/watcher"
)
func testStartWatcher(t *testing.T) {
@@ -18,12 +20,11 @@ func testStartWatcher(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
client, err := ethclient.Dial(base.L1GethEndpoint())
client, err := ethclient.Dial(base.L1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
watcher.Start()
defer watcher.Stop()
watcher := watcher.NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
assert.NoError(t, watcher.FetchContractEvent())
}

View File

@@ -1,12 +1,10 @@
package l2
package watcher
import (
"context"
"errors"
"fmt"
"math/big"
"reflect"
"time"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
@@ -21,7 +19,7 @@ import (
"scroll-tech/common/metrics"
"scroll-tech/common/types"
cutil "scroll-tech/common/utils"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
@@ -31,22 +29,16 @@ import (
// Metrics
var (
bridgeL2MsgsSyncHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2TracesFetchedHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/traces/fetched/height", metrics.ScrollRegistry)
bridgeL2TracesFetchedGapGauge = geth_metrics.NewRegisteredGauge("bridge/l2/traces/fetched/gap", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsSentEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL2MsgsAppendEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry)
bridgeL2MsgsRelayedEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}
// WatcherClient provide APIs which support others to subscribe to various event from l2geth
type WatcherClient struct {
// L2WatcherClient provides APIs that allow others to subscribe to various events from l2geth
type L2WatcherClient struct {
ctx context.Context
event.Feed
@@ -67,18 +59,17 @@ type WatcherClient struct {
processedMsgHeight uint64
stopped uint64
stopCh chan struct{}
}
// NewL2WatcherClient takes an l2geth instance and returns an L2WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, orm database.OrmFactory) *WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, orm database.OrmFactory) *L2WatcherClient {
savedHeight, err := orm.GetLayer2LatestWatchedHeight()
if err != nil {
log.Warn("fetch height from db failed", "err", err)
savedHeight = 0
}
w := WatcherClient{
w := L2WatcherClient{
ctx: ctx,
Client: client,
orm: orm,
@@ -92,7 +83,6 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
messageQueueABI: bridge_abi.L2MessageQueueABI,
withdrawTrieRootSlot: withdrawTrieRootSlot,
stopCh: make(chan struct{}),
stopped: 0,
}
@@ -104,7 +94,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
return &w
}
func (w *WatcherClient) initializeGenesis() error {
func (w *L2WatcherClient) initializeGenesis() error {
if count, err := w.orm.GetBatchCount(); err != nil {
return fmt.Errorf("failed to get batch count: %v", err)
} else if count > 0 {
@@ -142,46 +132,10 @@ func (w *WatcherClient) initializeGenesis() error {
return nil
}
// Start the Listening process
func (w *WatcherClient) Start() {
go func() {
if reflect.ValueOf(w.orm).IsNil() {
panic("must run L2 watcher with DB")
}
ctx, cancel := context.WithCancel(w.ctx)
go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
} else {
w.tryFetchRunningMissingBlocks(ctx, number)
}
})
go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
} else {
w.FetchContractEvent(number)
}
})
<-w.stopCh
cancel()
}()
}
// Stop the Watcher module, for a graceful shutdown.
func (w *WatcherClient) Stop() {
w.stopCh <- struct{}{}
}
const blockTracesFetchLimit = uint64(10)
// try fetch missing blocks if inconsistent
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
// TryFetchRunningMissingBlocks tries to fetch missing blocks if the local state is inconsistent
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
// Get newest block in DB. must have blocks at that time.
// Don't use "block_trace" table "trace" column's BlockTrace.Number,
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
@@ -209,8 +163,8 @@ func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockH
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
bridgeL2TracesFetchedHeightGauge.Update(int64(to))
bridgeL2TracesFetchedGapGauge.Update(int64(blockHeight - to))
bridgeL2BlocksFetchedHeightGauge.Update(int64(to))
bridgeL2BlocksFetchedGapGauge.Update(int64(blockHeight - to))
}
}
@@ -237,7 +191,7 @@ func txsToTxsData(txs geth_types.Transactions) []*geth_types.TransactionData {
return txsData
}
func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var blocks []*types.WrappedBlock
for number := from; number <= to; number++ {
@@ -270,14 +224,18 @@ func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uin
return nil
}
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
func (w *L2WatcherClient) FetchContractEvent() {
defer func() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
return
}
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight)
@@ -353,7 +311,7 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
}
}
func (w *WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L2Message, []relayedMessage, error) {
func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L2Message, []relayedMessage, error) {
// Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up

View File

@@ -1,4 +1,4 @@
package l2
package watcher_test
import (
"context"
@@ -19,6 +19,9 @@ import (
"scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -29,12 +32,16 @@ func testCreateNewWatcherAndStop(t *testing.T) {
l2db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
l2db.Close()
}()
l2cfg := cfg.L2Config
rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, l2db)
rc.Start()
defer rc.Stop()
rc := watcher.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, l2db)
loopToFetchEvent(subCtx, rc)
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
@@ -60,12 +67,17 @@ func testMonitorBridgeContract(t *testing.T) {
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
db.Close()
}()
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
wc.Start()
defer wc.Stop()
wc := watcher.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
loopToFetchEvent(subCtx, wc)
previousHeight, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
@@ -79,9 +91,7 @@ func testMonitorBridgeContract(t *testing.T) {
assert.NoError(t, err)
rc := prepareWatcherClient(l2Cli, db, address)
rc.Start()
defer rc.Stop()
loopToFetchEvent(subCtx, rc)
// Call mock_bridge instance sendMessage to trigger emit events
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
@@ -128,7 +138,13 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
db.Close()
}()
previousHeight, err := l2Cli.BlockNumber(context.Background()) // shallow the global previousHeight
assert.NoError(t, err)
@@ -141,8 +157,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
assert.NoError(t, err)
rc := prepareWatcherClient(l2Cli, db, address)
rc.Start()
defer rc.Stop()
loopToFetchEvent(subCtx, rc)
// Call mock_bridge instance sendMessage to trigger emit events multiple times
numTransactions := 4
@@ -195,9 +210,9 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
assert.Equal(t, 5, len(msgs))
}
func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *watcher.L2WatcherClient {
confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
return watcher.NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
@@ -209,3 +224,7 @@ func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.Privat
assert.NoError(t, err)
return auth
}
func loopToFetchEvent(subCtx context.Context, watcher *watcher.L2WatcherClient) {
go cutils.Loop(subCtx, 2*time.Second, watcher.FetchContractEvent)
}

View File

@@ -0,0 +1,91 @@
package watcher_test
import (
"encoding/json"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
base *docker.App
// l2geth client
l2Cli *ethclient.Client
// block trace
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
)
func setupEnv(t *testing.T) (err error) {
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = base.DBConfig
// Create l2geth client.
l2Cli, err = base.L2Client()
assert.NoError(t, err)
templateBlockTrace1, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
if err != nil {
return err
}
// unmarshal blockTrace
wrappedBlock1 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err
}
templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json")
if err != nil {
return err
}
// unmarshal blockTrace
wrappedBlock2 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err
}
return err
}
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
m.Run()
base.Free()
}
func TestFunction(t *testing.T) {
if err := setupEnv(t); err != nil {
t.Fatal(err)
}
// Run l1 watcher test cases.
t.Run("TestStartWatcher", testStartWatcher)
// Run l2 watcher test cases.
t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
// Run batch proposer test cases.
t.Run("TestBatchProposerProposeBatch", testBatchProposerProposeBatch)
t.Run("TestBatchProposerGracefulRestart", testBatchProposerGracefulRestart)
}

View File

@@ -13,7 +13,7 @@ RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .
RUN cargo build --release
RUN find ./ | grep libzktrie.so | xargs -i cp {} /app/target/release/
RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
# Download Go dependencies

View File

@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build event_watcher
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
# Pull event_watcher into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/event_watcher /bin/
ENTRYPOINT ["event_watcher"]

View File

@@ -2,4 +2,4 @@ assets/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*

View File

@@ -11,16 +11,16 @@ COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build bridge
# Build gas_oracle
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd && go build -v -p 4 -o /bin/bridge
cd /src/bridge/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
# Pull bridge into a second stage deploy alpine container
# Pull gas_oracle into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/bridge /bin/
COPY --from=builder /bin/gas_oracle /bin/
ENTRYPOINT ["bridge"]
ENTRYPOINT ["gas_oracle"]

View File

@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*

View File

@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build msg_relayer
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/msg_relayer/ && go build -v -p 4 -o /bin/msg_relayer
# Pull msg_relayer into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/msg_relayer /bin/
ENTRYPOINT ["msg_relayer"]

View File

@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*

View File

@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build rollup_relayer
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/rollup_relayer /bin/
ENTRYPOINT ["rollup_relayer"]

View File

@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*

26
build/run_tests.sh Executable file
View File

@@ -0,0 +1,26 @@
#!/bin/bash
set -uex
profile_name=$1
exclude_dirs=("scroll-tech/bridge/cmd" "scroll-tech/bridge/tests" "scroll-tech/bridge/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/config")
all_packages=$(go list ./... | grep -v "^scroll-tech/${profile_name}$")
coverpkg="scroll-tech/${profile_name}"
for pkg in $all_packages; do
exclude_pkg=false
for exclude_dir in "${exclude_dirs[@]}"; do
if [[ $pkg == $exclude_dir* ]]; then
exclude_pkg=true
break
fi
done
if [ "$exclude_pkg" = false ]; then
coverpkg="$coverpkg,$pkg/..."
fi
done
echo "coverage.${profile_name}.txt"
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="$coverpkg" -coverprofile=../coverage.${profile_name}.txt -covermode=atomic ./...
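
The script assembles a `-coverpkg` list spanning every workspace package except the excluded directories, then runs the current module's tests with race detection and atomic coverage. A minimal usage sketch, assuming it is invoked from inside one of the Go modules; the profile name `bridge` and the relative script path are only examples:
```bash
# Hypothetical invocation from the bridge module; the coverage profile is
# written one directory up, as ../coverage.bridge.txt.
cd bridge
bash ../build/run_tests.sh bridge
```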

2
common/bytecode/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*.go
*.sol

49
common/bytecode/Makefile Normal file
View File

@@ -0,0 +1,49 @@
.PHONY: all erc20 greeter scroll
all: erc20 greeter scroll
erc20:
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./erc20/ERC20Mock.json --pkg erc20 --out ./erc20/ERC20Mock.go
greeter:
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./greeter/Greeter.json --pkg greeter --out ./greeter/Greeter.go
scroll:
#L1/gateways
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L1/gateways/L1ERC20Gateway.json --pkg gateways --out ./scroll/L1/gateways/L1ERC20Gateway.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L1/gateways/L1ERC721Gateway.json --pkg gateways --out ./scroll/L1/gateways/L1ERC721Gateway.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L1/gateways/L1ERC1155Gateway.json --pkg gateways --out ./scroll/L1/gateways/L1ERC1155Gateway.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L1/gateways/L1GatewayRouter.json --pkg gateways --out ./scroll/L1/gateways/L1GatewayRouter.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L1/gateways/L1StandardERC20Gateway.json --pkg gateways --out ./scroll/L1/gateways/L1StandardERC20Gateway.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L1/gateways/L1CustomERC20Gateway.json --pkg gateways --out ./scroll/L1/gateways/L1CustomERC20Gateway.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L1/gateways/L1WETHGateway.json --pkg gateways --out ./scroll/L1/gateways/L1WETHGateway.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L1/gateways/L1ETHGateway.json --pkg gateways --out ./scroll/L1/gateways/L1ETHGateway.go
#L1/rollup
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L1/rollup/L1MessageQueue.json --pkg rollup --out ./scroll/L1/rollup/L1MessageQueue.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L1/rollup/L2GasPriceOracle.json --pkg rollup --out ./scroll/L1/rollup/L2GasPriceOracle.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L1/rollup/ScrollChain.json --pkg rollup --out ./scroll/L1/rollup/ScrollChain.go
#L1
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L1/L1ScrollMessenger.json --pkg l1 --out ./scroll/L1/L1ScrollMessenger.go
#L2/gateways
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/gateways/L2CustomERC20Gateway.json --pkg gateways --out ./scroll/L2/gateways/L2CustomERC20Gateway.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/gateways/L2ERC20Gateway.json --pkg gateways --out ./scroll/L2/gateways/L2ERC20Gateway.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/gateways/L2ERC721Gateway.json --pkg gateways --out ./scroll/L2/gateways/L2ERC721Gateway.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/gateways/L2ERC1155Gateway.json --pkg gateways --out ./scroll/L2/gateways/L2ERC1155Gateway.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/gateways/L2GatewayRouter.json --pkg gateways --out ./scroll/L2/gateways/L2GatewayRouter.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/gateways/L2StandardERC20Gateway.json --pkg gateways --out ./scroll/L2/gateways/L2StandardERC20Gateway.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/gateways/L2WETHGateway.json --pkg gateways --out ./scroll/L2/gateways/L2WETHGateway.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/gateways/L2ETHGateway.json --pkg gateways --out ./scroll/L2/gateways/L2ETHGateway.go
#L2/predeploys
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/predeploys/L1BlockContainer.json --pkg predeploys --out ./scroll/L2/predeploys/L1BlockContainer.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/predeploys/L1GasPriceOracle.json --pkg predeploys --out ./scroll/L2/predeploys/L1GasPriceOracle.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/predeploys/L2MessageQueue.json --pkg predeploys --out ./scroll/L2/predeploys/L2MessageQueue.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/predeploys/L2TxFeeVault.json --pkg predeploys --out ./scroll/L2/predeploys/L2TxFeeVault.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/predeploys/WETH9.json --pkg predeploys --out ./scroll/L2/predeploys/WETH9.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/predeploys/Whitelist.json --pkg predeploys --out ./scroll/L2/predeploys/Whitelist.go
#L2
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/L2/L2ScrollMessenger.json --pkg l2 --out ./scroll/L2/L2ScrollMessenger.go
#External
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/external/External.json --pkg external --out ./scroll/external/External.go
#Libraries
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/libraries/ScrollStandardERC20.json --pkg libraries --out ./scroll/libraries/ScrollStandardERC20.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./scroll/libraries/ScrollStandardERC20Factory.json --pkg libraries --out ./scroll/libraries/ScrollStandardERC20Factory.go

40
common/bytecode/README.md Normal file
View File

@@ -0,0 +1,40 @@
## How to pre-deploy contracts?
* See https://github.com/scroll-tech/genesis-creator for reference.
1. Set up the environment
```bash
git clone git@github.com:scroll-tech/genesis-creator.git
cd genesis-creator
go get -v github.com/scroll-tech/go-ethereum@develop && go mod tidy
make abi && make genesis-creator
make l2geth-docker
```
2. Start docker and write the pre-deployed contracts into the genesis file.
```bash
make start-docker
./bin/genesis-creator -genesis ${SCROLLPATH}/common/docker/l2geth/genesis.json -contract [erc20|greeter]
```
3. Rebuild the l2geth docker image.
```bash
cd ${SCROLLPATH}
make dev_docker
```
## How to get a contract ABI?
* The steps for other contracts are the same as for erc20, e.g.:
1. Install solc.
*Refer to https://docs.soliditylang.org/en/latest/installing-solidity.html*
2. Generate the ABI file.
```bash
cd genesis-creator
solc --combined-json "abi" --optimize ${SCROLLPATH}/common/bytecode/erc20/ERC20Mock.sol | jq > ${SCROLLPATH}/common/bytecode/erc20/ERC20Mock.json
```
3. Generate Go bindings from the ABI.
```bash
cd ${SCROLLPATH}
make -C common/bytecode all
```
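
After regenerating bindings, a quick compile of the generated packages catches abigen or import problems early. A minimal check, assuming the package layout shown in the Makefile above:
```bash
# Sanity-check that the regenerated bindings still build; the package path
# is assumed from the Makefile targets above.
cd ${SCROLLPATH}
go build ./common/bytecode/...
```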

View File

@@ -0,0 +1,387 @@
{
"contracts": {
"tests/contracts/erc20/erc20.sol:ERC20Mock": {
"abi": [
{
"inputs": [
{
"internalType": "string",
"name": "name",
"type": "string"
},
{
"internalType": "string",
"name": "symbol",
"type": "string"
},
{
"internalType": "address",
"name": "initialAccount",
"type": "address"
},
{
"internalType": "uint256",
"name": "initialBalance",
"type": "uint256"
}
],
"stateMutability": "payable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Approval",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "from",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "to",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Transfer",
"type": "event"
},
{
"inputs": [
{
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"internalType": "address",
"name": "spender",
"type": "address"
}
],
"name": "allowance",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "approve",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "approveInternal",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "account",
"type": "address"
}
],
"name": "balanceOf",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "account",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "burn",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "decimals",
"outputs": [
{
"internalType": "uint8",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "subtractedValue",
"type": "uint256"
}
],
"name": "decreaseAllowance",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "addedValue",
"type": "uint256"
}
],
"name": "increaseAllowance",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "account",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "mint",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "name",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "symbol",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "totalSupply",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "transfer",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "transferFrom",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "transferInternal",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]
}
},
"version": "0.8.16+commit.07a7930e.Darwin.appleclang"
}

View File

@@ -0,0 +1,72 @@
{
"contracts": {
"greeter/Greeter.sol:Greeter": {
"abi": [
{
"inputs": [
{
"internalType": "uint256",
"name": "num",
"type": "uint256"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"inputs": [],
"name": "retrieve",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "retrieve_failing",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "num",
"type": "uint256"
}
],
"name": "set_value",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "num",
"type": "uint256"
}
],
"name": "set_value_failing",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]
}
},
"version": "0.8.16+commit.07a7930e.Darwin.appleclang"
}

1
common/bytecode/scroll/L1/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
*.go

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,237 @@
{
"contracts": {
"scroll/L1/gateways/L1ERC20Gateway.sol:L1ERC20Gateway": {
"abi": [
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "l1Token",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "l2Token",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "from",
"type": "address"
},
{
"indexed": false,
"internalType": "address",
"name": "to",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "amount",
"type": "uint256"
},
{
"indexed": false,
"internalType": "bytes",
"name": "data",
"type": "bytes"
}
],
"name": "DepositERC20",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "l1Token",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "l2Token",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "from",
"type": "address"
},
{
"indexed": false,
"internalType": "address",
"name": "to",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "amount",
"type": "uint256"
},
{
"indexed": false,
"internalType": "bytes",
"name": "data",
"type": "bytes"
}
],
"name": "FinalizeWithdrawERC20",
"type": "event"
},
{
"inputs": [
{
"internalType": "address",
"name": "_token",
"type": "address"
},
{
"internalType": "uint256",
"name": "_amount",
"type": "uint256"
},
{
"internalType": "uint256",
"name": "_gasLimit",
"type": "uint256"
}
],
"name": "depositERC20",
"outputs": [],
"stateMutability": "payable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_token",
"type": "address"
},
{
"internalType": "address",
"name": "_to",
"type": "address"
},
{
"internalType": "uint256",
"name": "_amount",
"type": "uint256"
},
{
"internalType": "uint256",
"name": "_gasLimit",
"type": "uint256"
}
],
"name": "depositERC20",
"outputs": [],
"stateMutability": "payable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_token",
"type": "address"
},
{
"internalType": "address",
"name": "_to",
"type": "address"
},
{
"internalType": "uint256",
"name": "_amount",
"type": "uint256"
},
{
"internalType": "bytes",
"name": "_data",
"type": "bytes"
},
{
"internalType": "uint256",
"name": "_gasLimit",
"type": "uint256"
}
],
"name": "depositERC20AndCall",
"outputs": [],
"stateMutability": "payable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_l1Token",
"type": "address"
},
{
"internalType": "address",
"name": "_l2Token",
"type": "address"
},
{
"internalType": "address",
"name": "_from",
"type": "address"
},
{
"internalType": "address",
"name": "_to",
"type": "address"
},
{
"internalType": "uint256",
"name": "_amount",
"type": "uint256"
},
{
"internalType": "bytes",
"name": "_data",
"type": "bytes"
}
],
"name": "finalizeWithdrawERC20",
"outputs": [],
"stateMutability": "payable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_l1Token",
"type": "address"
}
],
"name": "getL2ERC20Address",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
}
],
"bin": ""
}
},
"version": "0.8.17+commit.8df45f5f.Darwin.appleclang"
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,256 @@
{
"contracts": {
"scroll/L2/gateways/L2ERC20Gateway.sol:L2ERC20Gateway": {
"abi": [
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "l1Token",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "l2Token",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "from",
"type": "address"
},
{
"indexed": false,
"internalType": "address",
"name": "to",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "amount",
"type": "uint256"
},
{
"indexed": false,
"internalType": "bytes",
"name": "data",
"type": "bytes"
}
],
"name": "FinalizeDepositERC20",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "l1Token",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "l2Token",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "from",
"type": "address"
},
{
"indexed": false,
"internalType": "address",
"name": "to",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "amount",
"type": "uint256"
},
{
"indexed": false,
"internalType": "bytes",
"name": "data",
"type": "bytes"
}
],
"name": "WithdrawERC20",
"type": "event"
},
{
"inputs": [
{
"internalType": "address",
"name": "l1Token",
"type": "address"
},
{
"internalType": "address",
"name": "l2Token",
"type": "address"
},
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
},
{
"internalType": "bytes",
"name": "data",
"type": "bytes"
}
],
"name": "finalizeDepositERC20",
"outputs": [],
"stateMutability": "payable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "l2Token",
"type": "address"
}
],
"name": "getL1ERC20Address",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "l1Token",
"type": "address"
}
],
"name": "getL2ERC20Address",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_token",
"type": "address"
},
{
"internalType": "uint256",
"name": "_amount",
"type": "uint256"
},
{
"internalType": "uint256",
"name": "_gasLimit",
"type": "uint256"
}
],
"name": "withdrawERC20",
"outputs": [],
"stateMutability": "payable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_token",
"type": "address"
},
{
"internalType": "address",
"name": "_to",
"type": "address"
},
{
"internalType": "uint256",
"name": "_amount",
"type": "uint256"
},
{
"internalType": "uint256",
"name": "_gasLimit",
"type": "uint256"
}
],
"name": "withdrawERC20",
"outputs": [],
"stateMutability": "payable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_token",
"type": "address"
},
{
"internalType": "address",
"name": "_to",
"type": "address"
},
{
"internalType": "uint256",
"name": "_amount",
"type": "uint256"
},
{
"internalType": "bytes",
"name": "_data",
"type": "bytes"
},
{
"internalType": "uint256",
"name": "_gasLimit",
"type": "uint256"
}
],
"name": "withdrawERC20AndCall",
"outputs": [],
"stateMutability": "payable",
"type": "function"
}
],
"bin": ""
}
},
"version": "0.8.17+commit.8df45f5f.Darwin.appleclang"
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,273 @@
{
"contracts": {
"scroll/L2/predeploys/L1GasPriceOracle.sol:L1GasPriceOracle": {
"abi": [
{
"inputs": [
{
"internalType": "address",
"name": "_owner",
"type": "address"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "uint256",
"name": "l1BaseFee",
"type": "uint256"
}
],
"name": "L1BaseFeeUpdated",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "uint256",
"name": "overhead",
"type": "uint256"
}
],
"name": "OverheadUpdated",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "_oldOwner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "_newOwner",
"type": "address"
}
],
"name": "OwnershipTransferred",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "uint256",
"name": "scalar",
"type": "uint256"
}
],
"name": "ScalarUpdated",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "address",
"name": "_oldWhitelist",
"type": "address"
},
{
"indexed": false,
"internalType": "address",
"name": "_newWhitelist",
"type": "address"
}
],
"name": "UpdateWhitelist",
"type": "event"
},
{
"inputs": [
{
"internalType": "bytes",
"name": "_data",
"type": "bytes"
}
],
"name": "getL1Fee",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes",
"name": "_data",
"type": "bytes"
}
],
"name": "getL1GasUsed",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "l1BaseFee",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "overhead",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "renounceOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "scalar",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "_l1BaseFee",
"type": "uint256"
}
],
"name": "setL1BaseFee",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "_overhead",
"type": "uint256"
}
],
"name": "setOverhead",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "_scalar",
"type": "uint256"
}
],
"name": "setScalar",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_newOwner",
"type": "address"
}
],
"name": "transferOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_newWhitelist",
"type": "address"
}
],
"name": "updateWhitelist",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "whitelist",
"outputs": [
{
"internalType": "contract IWhitelist",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
}
],
"bin": "608060405234801561001057600080fd5b5060405161094138038061094183398101604081905261002f9161008e565b6100388161003e565b506100be565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b6000602082840312156100a057600080fd5b81516001600160a01b03811681146100b757600080fd5b9392505050565b610874806100cd6000396000f3fe608060405234801561001057600080fd5b50600436106100cf5760003560e01c8063715018a61161008c578063bede39b511610066578063bede39b51461018d578063de26c4a1146101a0578063f2fde38b146101b3578063f45e65d8146101c657600080fd5b8063715018a6146101475780638da5cb5b1461014f57806393e59dc11461017a57600080fd5b80630c18c162146100d45780633577afc5146100f05780633d0f963e1461010557806349948e0e14610118578063519b4bd31461012b5780637046559714610134575b600080fd5b6100dd60025481565b6040519081526020015b60405180910390f35b6101036100fe366004610672565b6101cf565b005b61010361011336600461068b565b610291565b6100dd6101263660046106d1565b61031c565b6100dd60015481565b610103610142366004610672565b610361565b610103610416565b600054610162906001600160a01b031681565b6040516001600160a01b0390911681526020016100e7565b600454610162906001600160a01b031681565b61010361019b366004610672565b61044c565b6100dd6101ae3660046106d1565b610533565b6101036101c136600461068b565b610596565b6100dd60035481565b6000546001600160a01b031633146102025760405162461bcd60e51b81526004016101f990610782565b60405180910390fd5b621c9c388111156102555760405162461bcd60e51b815260206004820152601760248201527f657863656564206d6178696d756d206f7665726865616400000000000000000060448201526064016101f9565b60028190556040518181527f32740b35c0ea213650f60d44366b4fb211c9033b50714e4a1d34e65d5beb9bb4906020015b60405180910390a150565b6000546001600160a01b031633146102bb5760405162461bcd60e51b81526004016101f990610782565b600480546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f22d1c35fe072d2e42c3c8f9bd4a0d34aa84a0101d020a62517b33fdb3174e5f7910160405180910390a15050565b60008061032883610533565b905060006001548261033a91906107b9565b9050633b9aca006003548261034f91906107b9565b61035991906107e4565b949350505050565b6000546001600160a01b0316331461038b5760405162461bcd60e51b81526004016101f990610782565b61039b633b9aca006103e86107b9565b8111156103e15760405162461bcd60e51b8152602060048201526014602482015273657863656564206d6178696d756d207363616c6560601b60448201526064016101f9565b60038190556040518181527f3336cd9708eaf2769a0f0dc0679f30e80f15dcd88d1921b5a16858e8b85c591a90602001610286565b6000546001600160a01b031633146104405760405162461bcd60e51b81526004016101f990610782565b61044a6000610622565b565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa158015610495573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104b99190610806565b6104fe5760405162461bcd60e51b81526020600482015260166024820152752737ba103bb434ba32b634b9ba32b21039b2b73232b960511b60448201526064016101f9565b60018190556040518181527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c4490602001610286565b80516000908190815b818110156105865784818151811061055657610556610828565b01602001516001600160f81b0319166000036105775760048301925061057e565b6010830192505b60010161053c565b5050600254016104400192915050565b6000546001600160a01b031633146105c05760405162461bcd60e51b81526004016101f990610782565b6001600160a01b0381166106165760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f206164647265737300000060448201526064016101f9565b61061f81610622565b5
0565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b60006020828403121561068457600080fd5b5035919050565b60006020828403121561069d57600080fd5b81356001600160a01b03811681146106b457600080fd5b9392505050565b634e487b7160e01b600052604160045260246000fd5b6000602082840312156106e357600080fd5b813567ffffffffffffffff808211156106fb57600080fd5b818401915084601f83011261070f57600080fd5b813581811115610721576107216106bb565b604051601f8201601f19908116603f01168101908382118183101715610749576107496106bb565b8160405282815287602084870101111561076257600080fd5b826020860160208301376000928101602001929092525095945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b80820281158282048414176107de57634e487b7160e01b600052601160045260246000fd5b92915050565b60008261080157634e487b7160e01b600052601260045260246000fd5b500490565b60006020828403121561081857600080fd5b815180151581146106b457600080fd5b634e487b7160e01b600052603260045260246000fdfea26469706673582212201c98d19f8bb4842db4083fd4395f8be55b5537cd9cc84c94de56dd07f20f2a9a64736f6c63430008110033"
}
},
"version": "0.8.17+commit.8df45f5f.Darwin.appleclang"
}

View File

@@ -0,0 +1,189 @@
{
"contracts": {
"scroll/L2/predeploys/L2MessageQueue.sol:L2MessageQueue": {
"abi": [
{
"inputs": [
{
"internalType": "address",
"name": "_owner",
"type": "address"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "uint256",
"name": "index",
"type": "uint256"
},
{
"indexed": false,
"internalType": "bytes32",
"name": "messageHash",
"type": "bytes32"
}
],
"name": "AppendMessage",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "_oldOwner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "_newOwner",
"type": "address"
}
],
"name": "OwnershipTransferred",
"type": "event"
},
{
"inputs": [
{
"internalType": "bytes32",
"name": "_messageHash",
"type": "bytes32"
}
],
"name": "appendMessage",
"outputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"name": "branches",
"outputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "initialize",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "messageRoot",
"outputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "messenger",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "nextMessageIndex",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "renounceOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_newOwner",
"type": "address"
}
],
"name": "transferOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_messenger",
"type": "address"
}
],
"name": "updateMessenger",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
],
"bin": "608060405234801561001057600080fd5b5060405161071e38038061071e83398101604081905261002f91610090565b6100388161003e565b506100c0565b605280546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a35050565b6000602082840312156100a257600080fd5b81516001600160a01b03811681146100b957600080fd5b9392505050565b61064f806100cf6000396000f3fe608060405234801561001057600080fd5b506004361061009e5760003560e01c806383cc76601161006657806383cc76601461010f5780638da5cb5b146101225780639e7adc7914610135578063d4b9f4fa14610148578063f2fde38b1461015157600080fd5b806326aad7b7146100a35780633cb747bf146100bf578063600a2e77146100ea578063715018a6146100fd5780638129fc1c14610107575b600080fd5b6100ac60015481565b6040519081526020015b60405180910390f35b6053546100d2906001600160a01b031681565b6040516001600160a01b0390911681526020016100b6565b6100ac6100f8366004610519565b610164565b610105610209565b005b61010561023f565b6100ac61011d366004610519565b610247565b6052546100d2906001600160a01b031681565b610105610143366004610532565b61025e565b6100ac60005481565b61010561015f366004610532565b6102fa565b6053546000906001600160a01b031633146101b75760405162461bcd60e51b815260206004820152600e60248201526d37b7363c9036b2b9b9b2b733b2b960911b60448201526064015b60405180910390fd5b6000806101c384610386565b60408051838152602081018890529294509092507ffaa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693910160405180910390a19392505050565b6052546001600160a01b031633146102335760405162461bcd60e51b81526004016101ae90610562565b61023d6000610455565b565b61023d6104a7565b602a816028811061025757600080fd5b0154905081565b6052546001600160a01b031633146102885760405162461bcd60e51b81526004016101ae90610562565b600154156102d85760405162461bcd60e51b815260206004820152601760248201527f63616e6e6f7420757064617465206d657373656e67657200000000000000000060448201526064016101ae565b605380546001600160a01b0319166001600160a01b0392909216919091179055565b6052546001600160a01b031633146103245760405162461bcd60e51b81526004016101ae90610562565b6001600160a01b03811661037a5760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f206164647265737300000060448201526064016101ae565b61038381610455565b50565b600154600090819083825b8215610425576103a2600284610599565b6000036103ee5781602a82602881106103bd576103bd6105bb565b01556103e782600283602881106103d6576103d66105bb565b015460009182526020526040902090565b9150610419565b610416602a8260288110610404576104046105bb565b01548360009182526020526040902090565b91505b600192831c9201610391565b81602a8260288110610439576104396105bb565b0155506000819055600180548082019091559590945092505050565b605280546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a35050565b60005b60286104b78260016105e7565b1015610383576104e8600282602881106104d3576104d36105bb565b0154600283602881106103d6576103d66105bb565b60026104f58360016105e7565b60288110610505576105056105bb565b01558061051181610600565b9150506104aa565b60006020828403121561052b57600080fd5b5035919050565b60006020828403121561054457600080fd5b81356001600160a01b038116811461055b57600080fd5b9392505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b6000826105b657634e487b7160e01b600052601260045260246000fd5b500690565b634e487b7160e01b600052603260045260246000fd5b634e487b7160e01b600052601160045260246000fd5b808201808211156105fa576105fa6105d1565b92915050565b600060018201610612576106126105d1565b506001019056fea26469706
673582212207432771484e2f96fab1320fa6db8132bafc7a1070b9661d72ca0c2b097ccc93764736f6c63430008110033"
}
},
"version": "0.8.17+commit.8df45f5f.Darwin.appleclang"
}

View File

@@ -0,0 +1,205 @@
{
"contracts": {
"scroll/L2/predeploys/L2TxFeeVault.sol:L2TxFeeVault": {
"abi": [
{
"inputs": [
{
"internalType": "address",
"name": "_owner",
"type": "address"
},
{
"internalType": "address",
"name": "_recipient",
"type": "address"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "_oldOwner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "_newOwner",
"type": "address"
}
],
"name": "OwnershipTransferred",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
},
{
"indexed": false,
"internalType": "address",
"name": "to",
"type": "address"
},
{
"indexed": false,
"internalType": "address",
"name": "from",
"type": "address"
}
],
"name": "Withdrawal",
"type": "event"
},
{
"inputs": [],
"name": "messenger",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "minWithdrawAmount",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "recipient",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "renounceOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "totalProcessed",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_newOwner",
"type": "address"
}
],
"name": "transferOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_messenger",
"type": "address"
}
],
"name": "updateMessenger",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "_minWithdrawAmount",
"type": "uint256"
}
],
"name": "updateMinWithdrawAmount",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_recipient",
"type": "address"
}
],
"name": "updateRecipient",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "withdraw",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"stateMutability": "payable",
"type": "receive"
}
],
"bin": "608060405234801561001057600080fd5b5060405161078938038061078983398101604081905261002f916100db565b8181678ac7230489e800006100438361006f565b600155600380546001600160a01b0319166001600160a01b03929092169190911790555061010e915050565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b80516001600160a01b03811681146100d657600080fd5b919050565b600080604083850312156100ee57600080fd5b6100f7836100bf565b9150610105602084016100bf565b90509250929050565b61066c8061011d6000396000f3fe6080604052600436106100a05760003560e01c806384411d651161006457806384411d65146101595780638da5cb5b1461016f5780639e7adc791461018f578063f2fde38b146101af578063feec756c146101cf578063ff4f3546146101ef57600080fd5b80633cb747bf146100ac5780633ccfd60b146100e9578063457e1a491461010057806366d003ac14610124578063715018a61461014457600080fd5b366100a757005b600080fd5b3480156100b857600080fd5b506002546100cc906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b3480156100f557600080fd5b506100fe61020f565b005b34801561010c57600080fd5b5061011660015481565b6040519081526020016100e0565b34801561013057600080fd5b506003546100cc906001600160a01b031681565b34801561015057600080fd5b506100fe610371565b34801561016557600080fd5b5061011660045481565b34801561017b57600080fd5b506000546100cc906001600160a01b031681565b34801561019b57600080fd5b506100fe6101aa36600461054a565b6103a7565b3480156101bb57600080fd5b506100fe6101ca36600461054a565b6103f3565b3480156101db57600080fd5b506100fe6101ea36600461054a565b61047f565b3480156101fb57600080fd5b506100fe61020a36600461057a565b6104cb565b60015447908110156102a15760405162461bcd60e51b815260206004820152604a60248201527f4665655661756c743a207769746864726177616c20616d6f756e74206d75737460448201527f2062652067726561746572207468616e206d696e696d756d20776974686472616064820152691dd85b08185b5bdd5b9d60b21b608482015260a4015b60405180910390fd5b6004805482019055600354604080518381526001600160a01b0390921660208301523382820152517fc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba9181900360600190a1600254600354604080516020810182526000808252915163b2267a7b60e01b81526001600160a01b039485169463b2267a7b94879461033c949190921692859290600401610593565b6000604051808303818588803b15801561035557600080fd5b505af1158015610369573d6000803e3d6000fd5b505050505050565b6000546001600160a01b0316331461039b5760405162461bcd60e51b8152600401610298906105ff565b6103a560006104fa565b565b6000546001600160a01b031633146103d15760405162461bcd60e51b8152600401610298906105ff565b600280546001600160a01b0319166001600160a01b0392909216919091179055565b6000546001600160a01b0316331461041d5760405162461bcd60e51b8152600401610298906105ff565b6001600160a01b0381166104735760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f20616464726573730000006044820152606401610298565b61047c816104fa565b50565b6000546001600160a01b031633146104a95760405162461bcd60e51b8152600401610298906105ff565b600380546001600160a01b0319166001600160a01b0392909216919091179055565b6000546001600160a01b031633146104f55760405162461bcd60e51b8152600401610298906105ff565b600155565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b60006020828403121561055c57600080fd5b81356001600160a01b038116811461057357600080fd5b9392505050565b60006020828403121561058c57600080fd5b5035919050565b60018060a01b038516815260006020858184015260806040840152845180608085015260005b818110156105d55786810183015185820160a00152820
16105b9565b50600060a0828601015260a0601f19601f8301168501019250505082606083015295945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e657200000000000000000060408201526060019056fea2646970667358221220dcfa71b82537a035f973551a5691e8d33e9d8768387a62ce2e7de75750c9e3c464736f6c63430008110033"
}
},
"version": "0.8.17+commit.8df45f5f.Darwin.appleclang"
}

View File

@@ -0,0 +1,294 @@
{
"contracts": {
"WETH9.sol:WETH9": {
"abi": [
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "src",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "guy",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "wad",
"type": "uint256"
}
],
"name": "Approval",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "dst",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "wad",
"type": "uint256"
}
],
"name": "Deposit",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "src",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "dst",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "wad",
"type": "uint256"
}
],
"name": "Transfer",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "src",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "wad",
"type": "uint256"
}
],
"name": "Withdrawal",
"type": "event"
},
{
"inputs": [
{
"internalType": "address",
"name": "",
"type": "address"
},
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"name": "allowance",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "guy",
"type": "address"
},
{
"internalType": "uint256",
"name": "wad",
"type": "uint256"
}
],
"name": "approve",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"name": "balanceOf",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "decimals",
"outputs": [
{
"internalType": "uint8",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "deposit",
"outputs": [],
"stateMutability": "payable",
"type": "function"
},
{
"inputs": [],
"name": "name",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "symbol",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "totalSupply",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "dst",
"type": "address"
},
{
"internalType": "uint256",
"name": "wad",
"type": "uint256"
}
],
"name": "transfer",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "src",
"type": "address"
},
{
"internalType": "address",
"name": "dst",
"type": "address"
},
{
"internalType": "uint256",
"name": "wad",
"type": "uint256"
}
],
"name": "transferFrom",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "wad",
"type": "uint256"
}
],
"name": "withdraw",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"stateMutability": "payable",
"type": "receive"
}
],
"bin": "60c0604052600d60809081526c2bb930b83832b21022ba3432b960991b60a05260009061002c9082610114565b506040805180820190915260048152630ae8aa8960e31b60208201526001906100559082610114565b506002805460ff1916601217905534801561006f57600080fd5b506101d3565b634e487b7160e01b600052604160045260246000fd5b600181811c9082168061009f57607f821691505b6020821081036100bf57634e487b7160e01b600052602260045260246000fd5b50919050565b601f82111561010f57600081815260208120601f850160051c810160208610156100ec5750805b601f850160051c820191505b8181101561010b578281556001016100f8565b5050505b505050565b81516001600160401b0381111561012d5761012d610075565b6101418161013b845461008b565b846100c5565b602080601f831160018114610176576000841561015e5750858301515b600019600386901b1c1916600185901b17855561010b565b600085815260208120601f198616915b828110156101a557888601518255948401946001909101908401610186565b50858210156101c35787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b610715806101e26000396000f3fe6080604052600436106100a05760003560e01c8063313ce56711610064578063313ce5671461016c57806370a082311461019857806395d89b41146101c5578063a9059cbb146101da578063d0e30db0146101fa578063dd62ed3e1461020257600080fd5b806306fdde03146100b4578063095ea7b3146100df57806318160ddd1461010f57806323b872dd1461012c5780632e1a7d4d1461014c57600080fd5b366100af576100ad61023a565b005b600080fd5b3480156100c057600080fd5b506100c9610288565b6040516100d6919061056e565b60405180910390f35b3480156100eb57600080fd5b506100ff6100fa3660046105d8565b610316565b60405190151581526020016100d6565b34801561011b57600080fd5b50475b6040519081526020016100d6565b34801561013857600080fd5b506100ff610147366004610602565b610382565b34801561015857600080fd5b506100ad61016736600461063e565b6104b9565b34801561017857600080fd5b506002546101869060ff1681565b60405160ff90911681526020016100d6565b3480156101a457600080fd5b5061011e6101b3366004610657565b60036020526000908152604090205481565b3480156101d157600080fd5b506100c961054d565b3480156101e657600080fd5b506100ff6101f53660046105d8565b61055a565b6100ad61023a565b34801561020e57600080fd5b5061011e61021d366004610672565b600460209081526000928352604080842090915290825290205481565b3360008181526003602090815260409182902080543490810190915591519182527fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c910160405180910390a2565b60008054610295906106a5565b80601f01602080910402602001604051908101604052809291908181526020018280546102c1906106a5565b801561030e5780601f106102e35761010080835404028352916020019161030e565b820191906000526020600020905b8154815290600101906020018083116102f157829003601f168201915b505050505081565b3360008181526004602090815260408083206001600160a01b038716808552925280832085905551919290917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925906103719086815260200190565b60405180910390a350600192915050565b6001600160a01b0383166000908152600360205260408120548211156103a757600080fd5b6001600160a01b03841633148015906103e557506001600160a01b038416600090815260046020908152604080832033845290915290205460001914155b15610445576001600160a01b038416600090815260046020908152604080832033845290915290205482111561041a57600080fd5b6001600160a01b03841660009081526004602090815260408083203384529091529020805483900390555b6001600160a01b03808516600081815260036020526040808220805487900390559286168082529083902080548601905591517fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef906104a79086815260200190565b60405180910390a35060019392505050565b336000908152600360205260409020548111156104d557600080fd5b33600081815260036020526040808220805485900390555183156108fc0291849190818181858888f19350505050158015610514573
d6000803e3d6000fd5b5060405181815233907f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b659060200160405180910390a250565b60018054610295906106a5565b6000610567338484610382565b9392505050565b600060208083528351808285015260005b8181101561059b5785810183015185820160400152820161057f565b506000604082860101526040601f19601f8301168501019250505092915050565b80356001600160a01b03811681146105d357600080fd5b919050565b600080604083850312156105eb57600080fd5b6105f4836105bc565b946020939093013593505050565b60008060006060848603121561061757600080fd5b610620846105bc565b925061062e602085016105bc565b9150604084013590509250925092565b60006020828403121561065057600080fd5b5035919050565b60006020828403121561066957600080fd5b610567826105bc565b6000806040838503121561068557600080fd5b61068e836105bc565b915061069c602084016105bc565b90509250929050565b600181811c908216806106b957607f821691505b6020821081036106d957634e487b7160e01b600052602260045260246000fd5b5091905056fea264697066735822122058a8215b91082f79e569166dbb8e4b203b24e565519a4e44e503f096c918ac1b64736f6c63430008110033"
}
},
"version": "0.8.17+commit.8df45f5f.Darwin.appleclang"
}

View File

@@ -0,0 +1,129 @@
{
"contracts": {
"Whitelist.sol:Whitelist": {
"abi": [
{
"inputs": [
{
"internalType": "address",
"name": "_owner",
"type": "address"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "_oldOwner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "_newOwner",
"type": "address"
}
],
"name": "OwnershipTransferred",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "_account",
"type": "address"
},
{
"indexed": false,
"internalType": "bool",
"name": "_status",
"type": "bool"
}
],
"name": "WhitelistStatusChanged",
"type": "event"
},
{
"inputs": [
{
"internalType": "address",
"name": "_sender",
"type": "address"
}
],
"name": "isSenderAllowed",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "renounceOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_newOwner",
"type": "address"
}
],
"name": "transferOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address[]",
"name": "_accounts",
"type": "address[]"
},
{
"internalType": "bool",
"name": "_status",
"type": "bool"
}
],
"name": "updateWhitelistStatus",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
],
"bin": "608060405234801561001057600080fd5b5060405161058c38038061058c83398101604081905261002f91610054565b600080546001600160a01b0319166001600160a01b0392909216919091179055610084565b60006020828403121561006657600080fd5b81516001600160a01b038116811461007d57600080fd5b9392505050565b6104f9806100936000396000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c8063715018a61461005c57806379586dd7146100665780638da5cb5b14610079578063efc78401146100a9578063f2fde38b146100e5575b600080fd5b6100646100f8565b005b610064610074366004610356565b610137565b60005461008c906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b6100d56100b736600461042d565b6001600160a01b031660009081526001602052604090205460ff1690565b60405190151581526020016100a0565b6100646100f336600461042d565b610238565b6000546001600160a01b0316331461012b5760405162461bcd60e51b81526004016101229061044f565b60405180910390fd5b61013560006102c4565b565b6000546001600160a01b031633146101615760405162461bcd60e51b81526004016101229061044f565b60005b825181101561023357816001600085848151811061018457610184610486565b60200260200101516001600160a01b03166001600160a01b0316815260200190815260200160002060006101000a81548160ff0219169083151502179055508281815181106101d5576101d5610486565b60200260200101516001600160a01b03167f8daaf060c3306c38e068a75c054bf96ecd85a3db1252712c4d93632744c42e0d83604051610219911515815260200190565b60405180910390a28061022b8161049c565b915050610164565b505050565b6000546001600160a01b031633146102625760405162461bcd60e51b81526004016101229061044f565b6001600160a01b0381166102b85760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f20616464726573730000006044820152606401610122565b6102c1816102c4565b50565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b634e487b7160e01b600052604160045260246000fd5b80356001600160a01b038116811461034157600080fd5b919050565b8035801515811461034157600080fd5b6000806040838503121561036957600080fd5b823567ffffffffffffffff8082111561038157600080fd5b818501915085601f83011261039557600080fd5b81356020828211156103a9576103a9610314565b8160051b604051601f19603f830116810181811086821117156103ce576103ce610314565b6040529283528183019350848101820192898411156103ec57600080fd5b948201945b83861015610411576104028661032a565b855294820194938201936103f1565b96506104209050878201610346565b9450505050509250929050565b60006020828403121561043f57600080fd5b6104488261032a565b9392505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b634e487b7160e01b600052603260045260246000fd5b6000600182016104bc57634e487b7160e01b600052601160045260246000fd5b506001019056fea2646970667358221220a63efbab90c35477222a31d2cbe502ea57d2bf3e43c07431c0797047a6bfaa7564736f6c63430008110033"
}
},
"version": "0.8.17+commit.8df45f5f.Darwin.appleclang"
}
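The ABI above belongs to a whitelist-style contract whose only state-changing entry points are the Ownable functions and updateWhitelistStatus(address[],bool). As an illustrative sketch only (not part of this diff, and with a placeholder account address), calldata for that method could be packed with go-ethereum's abi package:

package main

import (
	"fmt"
	"strings"

	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
)

// Only the fragment of the ABI needed for this call, copied from the JSON above.
const whitelistABI = `[{"inputs":[{"internalType":"address[]","name":"_accounts","type":"address[]"},{"internalType":"bool","name":"_status","type":"bool"}],"name":"updateWhitelistStatus","outputs":[],"stateMutability":"nonpayable","type":"function"}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(whitelistABI))
	if err != nil {
		panic(err)
	}
	// Pack calldata that would whitelist a single (placeholder) account.
	accounts := []common.Address{common.HexToAddress("0x0000000000000000000000000000000000000001")}
	calldata, err := parsed.Pack("updateWhitelistStatus", accounts, true)
	if err != nil {
		panic(err)
	}
	fmt.Printf("calldata: 0x%x\n", calldata)
}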

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -0,0 +1,153 @@
{
"contracts": {
"scroll/libraries/ScrollStandardERC20Factory.sol:ScrollStandardERC20Factory": {
"abi": [
{
"inputs": [
{
"internalType": "address",
"name": "_implementation",
"type": "address"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "_l1Token",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "_l2Token",
"type": "address"
}
],
"name": "DeployToken",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "previousOwner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "OwnershipTransferred",
"type": "event"
},
{
"inputs": [
{
"internalType": "address",
"name": "_gateway",
"type": "address"
},
{
"internalType": "address",
"name": "_l1Token",
"type": "address"
}
],
"name": "computeL2TokenAddress",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_gateway",
"type": "address"
},
{
"internalType": "address",
"name": "_l1Token",
"type": "address"
}
],
"name": "deployL2Token",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "implementation",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "renounceOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "transferOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
],
"bin": "608060405234801561001057600080fd5b5060405161064238038061064283398101604081905261002f91610107565b610038336100b7565b6001600160a01b0381166100925760405162461bcd60e51b815260206004820152601b60248201527f7a65726f20696d706c656d656e746174696f6e20616464726573730000000000604482015260640160405180910390fd5b600180546001600160a01b0319166001600160a01b0392909216919091179055610137565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b60006020828403121561011957600080fd5b81516001600160a01b038116811461013057600080fd5b9392505050565b6104fc806101466000396000f3fe608060405234801561001057600080fd5b50600436106100625760003560e01c80635c60da1b1461006757806361e98ca114610096578063715018a6146100a95780637bdbcbbf146100b35780638da5cb5b146100c6578063f2fde38b146100d7575b600080fd5b60015461007a906001600160a01b031681565b6040516001600160a01b03909116815260200160405180910390f35b61007a6100a4366004610443565b6100ea565b6100b161011a565b005b61007a6100c1366004610443565b610159565b6000546001600160a01b031661007a565b6100b16100e5366004610476565b6101a9565b6000806100f78484610244565b600154909150610110906001600160a01b0316826102ca565b9150505b92915050565b6000546001600160a01b0316331461014d5760405162461bcd60e51b815260040161014490610491565b60405180910390fd5b6101576000610337565b565b600080546001600160a01b031633146101845760405162461bcd60e51b815260040161014490610491565b60006101908484610244565b600154909150610110906001600160a01b031682610387565b6000546001600160a01b031633146101d35760405162461bcd60e51b815260040161014490610491565b6001600160a01b0381166102385760405162461bcd60e51b815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201526564647265737360d01b6064820152608401610144565b61024181610337565b50565b6040516bffffffffffffffffffffffff19606083901b1660208201526000908390603401604051602081830303815290604052805190602001206040516020016102ac92919060609290921b6bffffffffffffffffffffffff19168252601482015260340190565b60405160208183030381529060405280519060200120905092915050565b6000610330838330604051733d602d80600a3d3981f3363d3d373d3d3d363d7360601b8152606093841b60148201526f5af43d82803e903d91602b57fd5bf3ff60801b6028820152921b6038830152604c8201526037808220606c830152605591012090565b9392505050565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b6000604051733d602d80600a3d3981f3363d3d373d3d3d363d7360601b81528360601b60148201526e5af43d82803e903d91602b57fd5bf360881b6028820152826037826000f59150506001600160a01b0381166101145760405162461bcd60e51b815260206004820152601760248201527f455243313136373a2063726561746532206661696c65640000000000000000006044820152606401610144565b80356001600160a01b038116811461043e57600080fd5b919050565b6000806040838503121561045657600080fd5b61045f83610427565b915061046d60208401610427565b90509250929050565b60006020828403121561048857600080fd5b61033082610427565b6020808252818101527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e657260408201526060019056fea2646970667358221220a22c8dbd9f00add51d52c56373782b410ac3198d44de2cc04ba9d1ef6a8ddbd364736f6c63430008110033"
}
},
"version": "0.8.17+commit.8df45f5f.Darwin.appleclang"
}
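computeL2TokenAddress is a view function, so the deterministic L2 token address can be fetched with a plain eth_call. The following is a rough sketch, not code from this diff; the factory and gateway addresses are copied from the contracts_list.json introduced later in this change set, and the L1 token address is a placeholder:

package main

import (
	"context"
	"fmt"
	"strings"

	ethereum "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

// Fragment of the factory ABI above, limited to the view function we call.
const factoryABI = `[{"inputs":[{"internalType":"address","name":"_gateway","type":"address"},{"internalType":"address","name":"_l1Token","type":"address"}],"name":"computeL2TokenAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"}]`

func main() {
	// Placeholder endpoint for a running scroll_l2geth node.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	parsed, err := abi.JSON(strings.NewReader(factoryABI))
	if err != nil {
		panic(err)
	}

	// Factory and gateway addresses taken from the contracts_list.json added later
	// in this diff; the L1 token address is a placeholder.
	factory := common.HexToAddress("0x6edca115a58590c2b236284a6c62dec3103d8fc4")
	gateway := common.HexToAddress("0x24dd98a7f678bfaa5453130ddba5f7aa1b594df0")
	l1Token := common.HexToAddress("0x0000000000000000000000000000000000000001")

	calldata, err := parsed.Pack("computeL2TokenAddress", gateway, l1Token)
	if err != nil {
		panic(err)
	}
	out, err := client.CallContract(context.Background(), ethereum.CallMsg{To: &factory, Data: calldata}, nil)
	if err != nil {
		panic(err)
	}
	res, err := parsed.Unpack("computeL2TokenAddress", out)
	if err != nil {
		panic(err)
	}
	fmt.Println("deterministic L2 token address:", res[0].(common.Address).Hex())
}

Judging by the ERC1167/create2 revert string in the bytecode, both functions derive the address from a create2 salt over the gateway and L1 token, which is why it can be computed before deployL2Token is ever called.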


@@ -31,6 +31,8 @@ type Cmd struct {
checkFuncs cmap.ConcurrentMap //map[string]checkFunc
// open log flag.
openLog bool
// error channel
ErrChan chan error
}
@@ -64,7 +66,7 @@ func (c *Cmd) runCmd() {
// RunCmd parallel running when parallel is true.
func (c *Cmd) RunCmd(parallel bool) {
fmt.Println("cmd: ", c.args)
fmt.Println("cmd:", c.args)
if parallel {
go c.runCmd()
} else {
@@ -72,12 +74,17 @@ func (c *Cmd) RunCmd(parallel bool) {
}
}
// OpenLog opens the cmd log via this API.
func (c *Cmd) OpenLog(open bool) {
c.openLog = open
}
func (c *Cmd) Write(data []byte) (int, error) {
out := string(data)
if verbose {
fmt.Printf("%s: %v", c.name, out)
if verbose || c.openLog {
fmt.Printf("%s:\n\t%v", c.name, out)
} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
fmt.Printf("%s: %v", c.name, out)
fmt.Printf("%s:\n\t%v", c.name, out)
}
go c.checkFuncs.IterCb(func(_ string, value interface{}) {
check := value.(checkFunc)
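The Cmd test helper shown above exposes OpenLog to force per-command output forwarding even when the global verbose flag is off. A hypothetical test using only the methods visible in this diff (binary name, arguments and keyword are placeholders) might look like:

package cmd_test

import (
	"testing"
	"time"

	"scroll-tech/common/cmd"
)

// TestDBCliVersion is an illustrative sketch; the binary name, arguments and
// keyword are placeholders, only the Cmd methods shown in this diff are used.
func TestDBCliVersion(t *testing.T) {
	app := cmd.NewCmd("db_cli-test", "version", "--config", "/tmp/db-config.json")
	defer app.WaitExit()

	// Forward the child process output even when the global verbose flag is off.
	app.OpenLog(true)

	// Fail the test unless the keyword shows up within the timeout.
	app.ExpectWithTimeout(t, true, 3*time.Second, "version")
	app.RunApp(nil)
}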

common/docker/config.go Normal file

@@ -0,0 +1,37 @@
package docker
import (
"github.com/scroll-tech/go-ethereum/common"
)
// L1Contracts stores the pre-deployed contract addresses of scroll_l1geth
type L1Contracts struct {
L1WETH common.Address `json:"L1WETH"`
L2GasPriceOracle common.Address `json:"L2GasPriceOracle"`
L1ScrollChain common.Address `json:"L1ScrollChain"`
L1MessageQueue common.Address `json:"L1MessageQueue"`
L1ScrollMessenger common.Address `json:"L1ScrollMessenger"`
L1GatewayRouter common.Address `json:"L1GatewayRouter"`
L1StandardERC20Gateway common.Address `json:"L1StandardERC20Gateway"`
}
// L2Contracts stores the pre-deployed contract addresses of scroll_l2geth
type L2Contracts struct {
L1BlockContainer common.Address `json:"L1BlockContainer"`
L1GasPriceOracle common.Address `json:"L1GasPriceOracle"`
L2ProxyAdmin common.Address `json:"L2ProxyAdmin"`
L2ScrollStandardERC20Factory common.Address `json:"L2ScrollStandardERC20Factory"`
L2ScrollMessenger common.Address `json:"L2ScrollMessenger"`
L2MessageQueue common.Address `json:"L2MessageQueue"`
L2TxFeeVault common.Address `json:"L2TxFeeVault"`
L2GatewayRouter common.Address `json:"L2GatewayRouter"`
L2StandardERC20Gateway common.Address `json:"L2StandardERC20Gateway"`
}
// ContractsList collects all contract addresses that need to be tested.
type ContractsList struct {
L1Contracts *L1Contracts `json:"l1_contracts,omitempty"`
L2Contracts *L2Contracts `json:"l2_contracts,omitempty"`
ERC20 common.Address `json:"erc20,omitempty"`
Greeter common.Address `json:"greeter,omitempty"`
}
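The structs above mirror the JSON layout of contracts_list.json (shown next). Purely as a sketch, and assuming the import path scroll-tech/common/docker for this package, the file could be decoded like this; the refactored docker App further below does the same thing with a go:embed'ed copy of the file:

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"scroll-tech/common/docker" // assumed import path for the package defined above
)

func main() {
	// Read the same JSON shape as contracts_list.json (path is a placeholder);
	// the docker App embeds the file instead via go:embed.
	data, err := os.ReadFile("common/docker/contracts_list.json")
	if err != nil {
		panic(err)
	}
	var list docker.ContractsList
	if err := json.Unmarshal(data, &list); err != nil {
		panic(err)
	}
	fmt.Println("L2 gateway router:", list.L2Contracts.L2GatewayRouter.Hex())
	fmt.Println("pre-deployed greeter:", list.Greeter.Hex())
}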


@@ -0,0 +1,24 @@
{
"l1_contracts": {
"L1WETH": "0x7ecd3166e45b8fc8ff5b6f88af405f05c84fe28b",
"L2GasPriceOracle": "0xf0fb0109ec4c0ffe1c3708010ac39df342ad13c6",
"L1ScrollChain": "0x2dfda18f6e0c753f37323a5465a4a2999901f772",
"L1MessageQueue": "0x4221e6c50c004a0a6d5b83e17077c2dbaf3be239",
"L1ScrollMessenger": "0x5edd6a65e63d4bf5955bf890e1889f5d00d12f4e",
"L1GatewayRouter": "0xba3e049c06254d4d02d4895e82a29d56e788a9f0",
"L1StandardERC20Gateway": "0x81bfe9f57ef1a68f09c298fc5d756f657a69d809"
},
"l2_contracts": {
"L1BlockContainer": "0xf0fb0109ec4c0ffe1c3708010ac39df342ad13c6",
"L1GasPriceOracle": "0x5edd6a65e63d4bf5955bf890e1889f5d00d12f4e",
"L2ProxyAdmin": "0x52128e5d5812f699f9ed2f56f744e1eefe78db92",
"L2ScrollStandardERC20Factory": "0x6edca115a58590c2b236284a6c62dec3103d8fc4",
"L2ScrollMessenger": "0x4221e6c50c004a0a6d5b83e17077c2dbaf3be239",
"L2MessageQueue": "0xf86a9a06e2464539d139ec6d3ad3c63e9bb4bd3c",
"L2TxFeeVault": "0x81bfe9f57ef1a68f09c298fc5d756f657a69d809",
"L2GatewayRouter": "0x7363726f6c6c6c02000000000000000000000007",
"L2StandardERC20Gateway": "0x24dd98a7f678bfaa5453130ddba5f7aa1b594df0"
},
"erc20": "0xd1c8d1a7bdef9cf9e9d05f01bd36d5f8a7501d3f",
"greeter": "0xba3e049c06254d4d02d4895e82a29d56e788a9f0"
}
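These addresses are baked into the geth genesis files, so a test can sanity-check them before running any transfers. A minimal, hypothetical check (the endpoint is a placeholder for whichever geth container carries the contract in genesis) that the pre-deployed greeter actually has bytecode:

package main

import (
	"context"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	// Placeholder endpoint for the geth container with the greeter in genesis.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	greeter := common.HexToAddress("0xba3e049c06254d4d02d4895e82a29d56e788a9f0")
	code, err := client.CodeAt(context.Background(), greeter, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("greeter code size: %d bytes (non-zero means it is pre-deployed)\n", len(code))
}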


@@ -1,8 +1,9 @@
package docker
import (
"context"
"crypto/rand"
"database/sql"
_ "embed" //nolint:golint
"encoding/json"
"fmt"
"math/big"
@@ -11,13 +12,11 @@ import (
"time"
"github.com/jmoiron/sqlx"
"github.com/modern-go/reflect2"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
@@ -27,208 +26,183 @@ var (
dbStartPort = 30000
)
//go:embed contracts_list.json
var contractsList []byte
// AppAPI app interface.
type AppAPI interface {
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
RunApp(waitResult func() bool)
WaitExit()
ExpectWithTimeout(t *testing.T, parallel bool, timeout time.Duration, keyword string)
}
// App is a collection of runtime docker images
type App struct {
l1gethImg ImgInstance
l2gethImg ImgInstance
L1gethImg GethImgInstance
L2gethImg GethImgInstance
DBImg ImgInstance
dbImg ImgInstance
dbConfig *database.DBConfig
dbFile string
dbClient *sql.DB
DBConfig *database.DBConfig
DBConfigFile string
// pre-deployed contracts' addresses.
ContractsList
// common timestamp.
timestamp int
Timestamp int
}
// NewDockerApp returns a new instance of the App struct
func NewDockerApp() *App {
timestamp := time.Now().Nanosecond()
return &App{
timestamp: timestamp,
dbFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
app := &App{
Timestamp: timestamp,
L1gethImg: newTestL1Docker(),
L2gethImg: newTestL2Docker(),
DBImg: newTestDBDocker("postgres"),
DBConfigFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
}
// Unmarshal contracts addresses.
if err := json.Unmarshal(contractsList, &app.ContractsList); err != nil {
panic(err)
}
if err := app.mockDBConfig(); err != nil {
panic(err)
}
return app
}
// RunImages runs all images together
func (b *App) RunImages(t *testing.T) {
b.runDBImage(t)
b.runL1Geth(t)
b.runL2Geth(t)
b.RunDBImage(t)
b.RunL1Geth(t)
b.RunL2Geth(t)
}
func (b *App) runDBImage(t *testing.T) {
if b.dbImg != nil {
// RunDBImage starts postgres docker container.
func (b *App) RunDBImage(t *testing.T) {
if b.DBImg.IsRunning() {
return
}
b.dbImg = newTestDBDocker(t, "postgres")
if err := b.mockDBConfig(); err != nil {
_ = b.dbImg.Stop()
b.dbImg = nil
_ = os.Remove(b.dbFile)
t.Fatal(err)
}
assert.NoError(t, b.DBImg.Start())
var isRun bool
// try up to 10 times until the db is ready.
utils.TryTimes(10, func() bool {
db, err := sqlx.Open("postgres", b.DBImg.Endpoint())
isRun = err == nil && db != nil && db.Ping() == nil
return isRun
})
assert.Equal(t, true, isRun)
}
// RunDBApp runs the DB app with the given command
func (b *App) RunDBApp(t *testing.T, option, keyword string) {
args := []string{option, "--config", b.dbFile}
app := cmd.NewCmd("db_cli-test", args...)
defer app.WaitExit()
// Wait expect result.
app.ExpectWithTimeout(t, true, time.Second*3, keyword)
app.RunApp(nil)
}
// Free clear all running images
// Free clears all running images, double checks and recycles the docker containers.
func (b *App) Free() {
if b.l1gethImg != nil {
_ = b.l1gethImg.Stop()
b.l1gethImg = nil
if b.L1gethImg.IsRunning() {
_ = b.L1gethImg.Stop()
}
if b.l2gethImg != nil {
_ = b.l2gethImg.Stop()
b.l2gethImg = nil
if b.L2gethImg.IsRunning() {
_ = b.L2gethImg.Stop()
}
if b.dbImg != nil {
_ = b.dbImg.Stop()
b.dbImg = nil
_ = os.Remove(b.dbFile)
if b.DBImg.IsRunning() {
_ = b.DBImg.Stop()
_ = os.Remove(b.DBConfigFile)
if !utils.IsNil(b.dbClient) {
_ = b.dbClient.Close()
b.dbClient = nil
}
}
}
// L1GethEndpoint returns l1gethimg endpoint
func (b *App) L1GethEndpoint() string {
if b.l1gethImg != nil {
return b.l1gethImg.Endpoint()
}
return ""
}
// L2GethEndpoint returns l2gethimg endpoint
func (b *App) L2GethEndpoint() string {
if b.l2gethImg != nil {
return b.l2gethImg.Endpoint()
}
return ""
}
// DBEndpoint returns the endpoint of the dbimg
func (b *App) DBEndpoint() string {
return b.dbImg.Endpoint()
}
func (b *App) runL1Geth(t *testing.T) {
if b.l1gethImg != nil {
// RunL1Geth starts l1geth docker container.
func (b *App) RunL1Geth(t *testing.T) {
if b.L1gethImg.IsRunning() {
return
}
b.l1gethImg = newTestL1Docker(t)
assert.NoError(t, b.L1gethImg.Start())
}
// L1Client returns an ethclient by dialing the running l1geth
func (b *App) L1Client() (*ethclient.Client, error) {
if b.l1gethImg == nil || reflect2.IsNil(b.l1gethImg) {
if utils.IsNil(b.L1gethImg) {
return nil, fmt.Errorf("l1 geth is not running")
}
client, err := ethclient.Dial(b.l1gethImg.Endpoint())
client, err := ethclient.Dial(b.L1gethImg.Endpoint())
if err != nil {
return nil, err
}
return client, nil
}
func (b *App) runL2Geth(t *testing.T) {
if b.l2gethImg != nil {
// RunL2Geth starts l2geth docker container.
func (b *App) RunL2Geth(t *testing.T) {
if b.L2gethImg.IsRunning() {
return
}
b.l2gethImg = newTestL2Docker(t)
assert.NoError(t, b.L2gethImg.Start())
}
// L2Client returns an ethclient by dialing the running l2geth
func (b *App) L2Client() (*ethclient.Client, error) {
if b.l2gethImg == nil || reflect2.IsNil(b.l2gethImg) {
if utils.IsNil(b.L2gethImg) {
return nil, fmt.Errorf("l2 geth is not running")
}
client, err := ethclient.Dial(b.l2gethImg.Endpoint())
client, err := ethclient.Dial(b.L2gethImg.Endpoint())
if err != nil {
return nil, err
}
return client, nil
}
// DBClient creates and returns a *sql.DB instance.
func (b *App) DBClient(t *testing.T) *sql.DB {
if !utils.IsNil(b.dbClient) {
return b.dbClient
}
var (
cfg = b.DBConfig
err error
)
b.dbClient, err = sql.Open(cfg.DriverName, cfg.DSN)
assert.NoError(t, err)
b.dbClient.SetMaxOpenConns(cfg.MaxOpenNum)
b.dbClient.SetMaxIdleConns(cfg.MaxIdleNum)
assert.NoError(t, b.dbClient.Ping())
return b.dbClient
}
func (b *App) mockDBConfig() error {
if b.dbConfig == nil {
b.dbConfig = &database.DBConfig{
DSN: "",
DriverName: "postgres",
MaxOpenNum: 200,
MaxIdleNum: 20,
}
b.DBConfig = &database.DBConfig{
DSN: "",
DriverName: "postgres",
MaxOpenNum: 200,
MaxIdleNum: 20,
}
if b.dbImg != nil {
b.dbConfig.DSN = b.dbImg.Endpoint()
if b.DBImg != nil {
b.DBConfig.DSN = b.DBImg.Endpoint()
}
data, err := json.Marshal(b.dbConfig)
data, err := json.Marshal(b.DBConfig)
if err != nil {
return err
}
return os.WriteFile(b.dbFile, data, 0644) //nolint:gosec
return os.WriteFile(b.DBConfigFile, data, 0644) //nolint:gosec
}
func newTestL1Docker(t *testing.T) ImgInstance {
func newTestL1Docker() GethImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgL1geth := NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
assert.NoError(t, imgL1geth.Start())
// try 3 times to get chainID until is ok.
utils.TryTimes(10, func() bool {
client, _ := ethclient.Dial(imgL1geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
return true
}
}
return false
})
return imgL1geth
return NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
}
func newTestL2Docker(t *testing.T) ImgInstance {
func newTestL2Docker() GethImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgL2geth := NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
assert.NoError(t, imgL2geth.Start())
// try 3 times to get chainID until is ok.
utils.TryTimes(10, func() bool {
client, _ := ethclient.Dial(imgL2geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
return true
}
}
return false
})
return imgL2geth
return NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
}
func newTestDBDocker(t *testing.T, driverName string) ImgInstance {
func newTestDBDocker(driverName string) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgDB := NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
assert.NoError(t, imgDB.Start())
// try 5 times until the db is ready.
utils.TryTimes(10, func() bool {
db, _ := sqlx.Open(driverName, imgDB.Endpoint())
if db != nil {
return db.Ping() == nil
}
return false
})
return imgDB
return NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
}
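With the fields exported and the Run*/DBClient helpers added above, an integration test can drive the whole docker stack through the App struct. The sketch below is hypothetical and only strings together identifiers that appear in this diff:

package docker_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"scroll-tech/common/docker" // assumed import path for the refactored package
)

// Hypothetical test body; only identifiers visible in this diff are used.
func TestDockerAppLifecycle(t *testing.T) {
	// NewDockerApp also unmarshals the embedded contracts_list.json into app.ContractsList.
	app := docker.NewDockerApp()
	defer app.Free()

	// Start the postgres, l1geth and l2geth containers.
	app.RunImages(t)

	// Dial the running l2geth.
	client, err := app.L2Client()
	assert.NoError(t, err)
	assert.NotNil(t, client)

	// Shared *sql.DB handle, closed again by Free.
	db := app.DBClient(t)
	assert.NoError(t, db.Ping())
}

Free is safe to defer unconditionally because each stop in it is guarded by an IsRunning check.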

Some files were not shown because too many files have changed in this diff