Compare commits


49 Commits

Author SHA1 Message Date
Péter Garamvölgyi
ff30622314 feat: auto-finalize batches on Alpha testnet (#791) 2023-08-15 15:41:54 +08:00
HAOYUatHZ
4bca06a4c1 Merge remote-tracking branch 'origin/develop' into alpha 2023-05-09 15:52:48 +08:00
georgehao
e086c2739f test(bridge): add watcher tests (#451)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: maskpp <maskpp266@gmail.com>
2023-05-09 15:46:24 +08:00
Max Wolff
4dfa81d890 feat(contracts): remove L1 Fee in Oracle (#454) 2023-05-09 15:09:42 +08:00
HAOYUatHZ
4549439dab chore(coordinator): add more logs to VerifyToken (#456) 2023-05-08 22:30:52 +08:00
maskpp
b4d8e5ffd7 test:bump l2geth docker version (#455) 2023-05-08 19:46:21 +08:00
Lawliet-Chan
5b49c81692 feat(roller&coordinator): add aggregation roller communication protocol (#432)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-05-06 20:19:43 +08:00
colin
de735694fb test(bridge): add unit tests (#439)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-05-06 11:43:02 +08:00
HAOYUatHZ
166fd1c33d refactor: fix instance_commitments naming (#449) 2023-05-05 09:03:30 +08:00
Jerry Ho
c4a67c7444 docs(coordinator): add basic doc for coordinator (#435)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: Orest Tarasiuk <830847+OrestTa@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-05-04 22:02:02 +08:00
Péter Garamvölgyi
168eaf0fc2 Respect size limit in batch proposer (#446) 2023-05-04 08:44:05 +02:00
Orest Tarasiuk
85a1d5967f fix xargs parameter deprecation for -i (portability to macOS) (#445) 2023-05-04 14:06:36 +08:00
HAOYUatHZ
46f8b0e36c doc: add prerequisites (#443) 2023-04-28 16:51:41 +08:00
HAOYUatHZ
b7c39f64a7 refactor: move message package (#442) 2023-04-28 16:20:54 +08:00
georgehao
7fea3f5c22 test(coordinator): add API unit-tests (#433)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-26 10:20:36 +08:00
maskpp
7afddae276 fix(common): fix bug in auth message (#440)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-04-25 19:24:36 +08:00
colin
10ac638a51 test(coordinator): add more unit tests (#430)
Co-authored-by: maskpp <maskpp266@gmail.com>
2023-04-23 22:28:03 +08:00
maskpp
2fafb64e0a test(integration): add predeployed erc20 & greeter contract tests (#428)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-23 19:55:22 +08:00
maskpp
ab1cda6568 test(coordinator): simplify coordinator tests (#434) 2023-04-23 17:15:28 +08:00
colin
905961d0ad fix(Jenkinsfile): specify -coverpkg in unit tests (#431) 2023-04-21 20:50:36 +08:00
Xi Lin
cf9f0b921f feat(contracts): forward data to receiver after deposit/withdraw (#429)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-21 19:27:49 +08:00
maskpp
0f26e0d67b test(integration): pre-deploy test contracts into genesis (#420)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-21 16:23:16 +08:00
maskpp
c774462d1d test(integration-test): refactor integration-test (#425) 2023-04-18 21:33:12 +08:00
colin
7eb6d9696a chore(bridge & coordinator): clear some unused code (#423) 2023-04-17 22:09:52 +08:00
Lawliet-Chan
401ea68332 feat(libzkp): recover rust zkp panic instead of crashing (#421) 2023-04-14 20:51:50 +08:00
maskpp
c13e8aafc4 feat(tests): update docker app (#402)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: ChuhanJin <60994121+ChuhanJin@users.noreply.github.com>
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colinlyguo <651734127@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-13 08:40:51 +08:00
colin
2b2cc62efe fix(coordinator): add metric roller_proofs_generated_failed_time (#419) 2023-04-12 18:27:55 +08:00
colin
807b7c7f33 refactor(coordinator): adjust logs for Loki query (#417)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-04-11 16:38:52 +08:00
HAOYUatHZ
454f032c9f doc(test): add testing doc (#412) 2023-04-11 11:05:33 +08:00
maskpp
d1c4fa716d fix(test): Clean the exited container by --rm after container stopped. (#416) 2023-04-10 19:18:59 +08:00
Lawliet-Chan
de1e40d19c fix(libzkp): load_params and seed in zk (#415) 2023-04-10 15:41:16 +08:00
Ahmed Castro
76b5a6c751 Small typo fix on documentation comments (#411)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-09 15:34:27 +08:00
colin
bad77eac2f feat(coordinator): prover monitoring (#392)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-07 09:06:58 +08:00
Péter Garamvölgyi
5d761ad812 Make sure attempts can be deserialized from db on startup (#410) 2023-04-05 19:00:54 +02:00
Nazarii Denha
4042bea6db retry proving timeout batch (#313)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-04-05 16:42:06 +02:00
maskpp
de7c38a903 feat(test): let integration-test log verbosity be configurable (#409) 2023-04-04 16:20:12 +08:00
Péter Garamvölgyi
071a7772dd fix already executed revert message (#407)
Co-authored-by: maskpp <maskpp266@gmail.com>
2023-04-03 16:19:40 +02:00
Péter Garamvölgyi
9d61149311 Merge tag 'v3.0.1' into alpha
fetch block transaction data instead of trace
2023-04-03 15:32:10 +02:00
Péter Garamvölgyi
41e2d960d8 Fix already executed revert message (#408) 2023-04-03 21:26:30 +08:00
HAOYUatHZ
170bc08207 build(docker): auto docker push when pushing git tags (#406) 2023-04-03 16:52:51 +08:00
maskpp
d3fc4e1606 feat(pending limit): Let sender's pending limit be configurable. (#398)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: ChuhanJin <60994121+ChuhanJin@users.noreply.github.com>
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colinlyguo <651734127@qq.com>
2023-04-03 14:24:47 +08:00
HAOYUatHZ
77749477db build(docker): only build docker images when push github tags (#404) 2023-04-01 11:54:56 +08:00
HAOYUatHZ
1a5df6f4d7 fix(build): move docker build from jenkins to github to avoid unknown errors (#403) 2023-03-31 15:55:55 +08:00
maskpp
826280253a fix(test): fix bug in testBatchProposerProposeBatch (#399)
Co-authored-by: colinlyguo <651734127@qq.com>
2023-03-31 13:58:46 +08:00
ChuhanJin
d376c903af feat(bridge): separate bridge into subcomponents (#397)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colinlyguo <651734127@qq.com>
2023-03-31 11:04:24 +08:00
HAOYUatHZ
0b607507b7 Merge pull request #381 from scroll-tech/develop
merge `develop` into `alpha`
2023-03-21 21:36:04 +08:00
HAOYUatHZ
462e85e591 Merge pull request #372 from scroll-tech/develop
merge develop into alpha
2023-03-18 16:24:42 +08:00
HAOYUatHZ
f46391c92b Merge pull request #370 from scroll-tech/develop
merge `develop` into `alpha`
2023-03-16 10:29:48 +08:00
HAOYUatHZ
10cececb60 Merge pull request #367 from scroll-tech/develop
merge develop into alpha
2023-03-15 09:07:27 +08:00
169 changed files with 5865 additions and 2502 deletions


@@ -66,3 +66,11 @@ jobs:
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
# docker-build:
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - run: make docker


@@ -62,3 +62,18 @@ jobs:
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
# docker-build:
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - name: Build and push
# uses: docker/build-push-action@v2
# with:
# context: .
# file: ./build/dockerfiles/coordinator.Dockerfile
# push: false
# # cache-from: type=gha,scope=${{ github.workflow }}
# # cache-to: type=gha,scope=${{ github.workflow }}

.github/workflows/docker.yaml (new file)

@@ -0,0 +1,65 @@
name: Docker
on:
push:
tags:
- v**
jobs:
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/coordinator.Dockerfile
push: true
tags: scrolltech/coordinator:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push event_watcher docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
push: true
tags: scrolltech/event-watcher:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push gas_oracle docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
push: true
tags: scrolltech/gas-oracle:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push msg_relayer docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/msg_relayer.Dockerfile
push: true
tags: scrolltech/msg-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push rollup_relayer docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
push: true
tags: scrolltech/rollup-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}

Jenkinsfile

@@ -24,11 +24,12 @@ pipeline {
steps {
sh 'make dev_docker'
sh 'make -C bridge mock_abi'
sh 'make -C common/bytecode all'
}
}
stage('Check Bridge Compilation') {
steps {
sh 'make -C bridge bridge'
sh 'make -C bridge bridge_bins'
}
}
stage('Check Coordinator Compilation') {
@@ -42,16 +43,6 @@ pipeline {
sh 'make -C database db_cli'
}
}
stage('Check Bridge Docker Build') {
steps {
sh 'make -C bridge docker'
}
}
stage('Check Coordinator Docker Build') {
steps {
sh 'make -C coordinator docker'
}
}
stage('Check Database Docker Build') {
steps {
sh 'make -C database docker'
@@ -68,12 +59,12 @@ pipeline {
}
stage('Race test bridge package') {
steps {
sh 'go test -v -race -coverprofile=coverage.bridge.txt -covermode=atomic scroll-tech/bridge/...'
sh "cd ./bridge && ../build/run_tests.sh bridge"
}
}
stage('Race test coordinator package') {
steps {
sh 'go test -v -race -coverprofile=coverage.coordinator.txt -covermode=atomic scroll-tech/coordinator/...'
sh "cd ./coordinator && ../build/run_tests.sh coordinator"
}
}
stage('Race test database package') {


@@ -1,3 +1,12 @@
# Scroll Monorepo
[![Contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml) [![Bridge](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml) [![Coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml) [![Database](https://github.com/scroll-tech/scroll/actions/workflows/database.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/database.yml) [![Common](https://github.com/scroll-tech/scroll/actions/workflows/common.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/common.yml) [![Roller](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml)
## Prerequisites
+ go1.18
+ rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ hardhat / foundry
---
For a more comprehensive doc, see [`docs/`](./docs).


@@ -8,8 +8,23 @@ mock_abi:
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go
bridge: ## Builds the Bridge instance.
go build -o $(PWD)/build/bin/bridge ./cmd
bridge_bins: ## Builds the Bridge bins.
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
event_watcher: ## Builds the event_watcher bin
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
gas_oracle: ## Builds the gas_oracle bin
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
message_relayer: ## Builds the message_relayer bin
go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
rollup_relayer: ## Builds the rollup_relayer bin
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
@@ -20,8 +35,14 @@ lint: ## Lint the files - used for CI
clean: ## Empty out the bin folder
@rm -rf build/bin
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridge.Dockerfile
docker_push:
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}
docker push scrolltech/gas-oracle:${IMAGE_VERSION}
docker push scrolltech/event-watcher:${IMAGE_VERSION}
docker push scrolltech/rollup-relayer:${IMAGE_VERSION}
docker push scrolltech/msg-relayer:${IMAGE_VERSION}
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/gas-oracle:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/gas_oracle.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/event-watcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/event_watcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/rollup-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/rollup_relayer.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/msg-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/msg_relayer.Dockerfile


@@ -104,7 +104,7 @@ var L1MessageQueueMetaData = &bind.MetaData{
// L2GasPriceOracleMetaData contains all meta data concerning the L2GasPriceOracle contract.
var L2GasPriceOracleMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"L2BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l2BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL2BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_overhead\",\"type\":\"uint256\"}],\"name\":\"setOverhead\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_scalar\",\"type\":\"uint256\"}],\"name\":\"setScalar\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"L2BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldWhitelist\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"UpdateWhitelist\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l2BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL2BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"updateWhitelist\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"whitelist\",\"outputs\":[{\"internalType\":\"contract IWhitelist\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}
// L2ScrollMessengerMetaData contains all meta data concerning the L2ScrollMessenger contract.


@@ -1,130 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
)
var (
app *cli.App
)
func init() {
// Set up Bridge app info.
app = cli.NewApp()
app.Action = action
app.Name = "bridge"
app.Usage = "The Scroll Bridge"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `bridge-test` app for integration-test.
utils.RegisterSimulation(app, "bridge-test")
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// Start metrics server.
metrics.Serve(context.Background(), ctx)
// Init db connection.
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
var (
l1Backend *l1.Backend
l2Backend *l2.Backend
)
// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
if err != nil {
return err
}
l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
if err != nil {
return err
}
defer func() {
l1Backend.Stop()
l2Backend.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start all modules.
if err = l1Backend.Start(); err != nil {
log.Crit("couldn't start l1 backend", "error", err)
}
if err = l2Backend.Start(); err != nil {
log.Crit("couldn't start l2 backend", "error", err)
}
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
l2Backend.APIs())
if err != nil {
log.Crit("Could not start RPC api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
log.Info("Start bridge successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run run bridge cmd instance.
func Run() {
// Run the bridge.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -1,19 +0,0 @@
package app
import (
"fmt"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/version"
)
func TestRunBridge(t *testing.T) {
bridge := cmd.NewCmd("bridge-test", "--version")
defer bridge.WaitExit()
// wait result
bridge.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
bridge.RunApp(nil)
}


@@ -1,86 +0,0 @@
package app
import (
"encoding/json"
"fmt"
"os"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/utils"
bridgeConfig "scroll-tech/bridge/config"
)
// BridgeApp bridge-test client manager.
type BridgeApp struct {
Config *bridgeConfig.Config
base *docker.App
originFile string
bridgeFile string
name string
args []string
docker.AppAPI
}
// NewBridgeApp return a new bridgeApp manager.
func NewBridgeApp(base *docker.App, file string) *BridgeApp {
bridgeFile := fmt.Sprintf("/tmp/%d_bridge-config.json", base.Timestamp)
bridgeApp := &BridgeApp{
base: base,
name: "bridge-test",
originFile: file,
bridgeFile: bridgeFile,
args: []string{"--log.debug", "--config", bridgeFile},
}
if err := bridgeApp.MockConfig(true); err != nil {
panic(err)
}
return bridgeApp
}
// RunApp run bridge-test child process by multi parameters.
func (b *BridgeApp) RunApp(t *testing.T, args ...string) {
b.AppAPI = cmd.NewCmd("bridge-test", append(b.args, args...)...)
b.AppAPI.RunApp(func() bool { return b.AppAPI.WaitResult(t, time.Second*20, "Start bridge successfully") })
}
// Free stop and release bridge-test.
func (b *BridgeApp) Free() {
if !utils.IsNil(b.AppAPI) {
b.AppAPI.WaitExit()
}
_ = os.Remove(b.bridgeFile)
}
// MockConfig creates a new bridge config.
func (b *BridgeApp) MockConfig(store bool) error {
base := b.base
// Load origin bridge config file.
cfg, err := bridgeConfig.NewConfig(b.originFile)
if err != nil {
return err
}
cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig.DSN = base.DBImg.Endpoint()
b.Config = cfg
if !store {
return nil
}
// Store changed bridge config into a temp file.
data, err := json.Marshal(b.Config)
if err != nil {
return err
}
return os.WriteFile(b.bridgeFile, data, 0600)
}


@@ -1,31 +0,0 @@
package app
import (
"github.com/urfave/cli/v2"
)
var (
apiFlags = []cli.Flag{
&httpEnabledFlag,
&httpListenAddrFlag,
&httpPortFlag,
}
// httpEnabledFlag enable rpc server.
httpEnabledFlag = cli.BoolFlag{
Name: "http",
Usage: "Enable the HTTP-RPC server",
Value: false,
}
// httpListenAddrFlag set the http address.
httpListenAddrFlag = cli.StringFlag{
Name: "http.addr",
Usage: "HTTP-RPC server listening interface",
Value: "localhost",
}
// httpPortFlag set http.port.
httpPortFlag = cli.IntFlag{
Name: "http.port",
Usage: "HTTP-RPC server listening port",
Value: 8290,
}
)


@@ -0,0 +1,114 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up event-watcher app info.
app = cli.NewApp()
app.Action = action
app.Name = "event-watcher"
app.Usage = "The Scroll Event Watcher"
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `event-watcher-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.EventWatcherApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
return err
}
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)
go cutils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
log.Error("Failed to fetch bridge contract", "err", loopErr)
}
})
// Start l2 watcher process
go cutils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finish start all l2 functions
log.Info("Start event-watcher successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run event watcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/event_watcher/app"
func main() {
app.Run()
}


@@ -0,0 +1,136 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up gas-oracle app info.
app = cli.NewApp()
app.Action = action
app.Name = "gas-oracle"
app.Usage = "The Scroll Gas Oracle"
app.Description = "Scroll Gas Oracle."
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `gas-oracle-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.GasOracleApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
return err
}
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1 watcher process
go cutils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
if loopErr = l1watcher.FetchBlockHeader(number); loopErr != nil {
log.Error("Failed to fetch L1 block header", "lastest", number, "err", loopErr)
}
})
// Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finish start all message relayer functions
log.Info("Start gas-oracle successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run message_relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/gas_oracle/app"
func main() {
app.Run()
}


@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge/cmd/app"
func main() {
app.Run()
}

bridge/cmd/mock_app.go (new file)

@@ -0,0 +1,106 @@
package app
import (
"encoding/json"
"fmt"
"os"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/utils"
"scroll-tech/bridge/config"
)
// MockApp mockApp-test client manager.
type MockApp struct {
Config *config.Config
base *docker.App
mockApps map[utils.MockAppName]docker.AppAPI
originFile string
bridgeFile string
args []string
}
// NewBridgeApp return a new bridgeApp manager, name must be one of them.
func NewBridgeApp(base *docker.App, file string) *MockApp {
bridgeFile := fmt.Sprintf("/tmp/%d_bridge-config.json", base.Timestamp)
bridgeApp := &MockApp{
base: base,
mockApps: make(map[utils.MockAppName]docker.AppAPI),
originFile: file,
bridgeFile: bridgeFile,
args: []string{"--log.debug", "--config", bridgeFile},
}
if err := bridgeApp.MockConfig(true); err != nil {
panic(err)
}
return bridgeApp
}
// RunApp run bridge-test child process by multi parameters.
func (b *MockApp) RunApp(t *testing.T, name utils.MockAppName, args ...string) {
if !(name == utils.EventWatcherApp ||
name == utils.GasOracleApp ||
name == utils.MessageRelayerApp ||
name == utils.RollupRelayerApp) {
t.Errorf(fmt.Sprintf("Don't support the mock app, name: %s", name))
return
}
if app, ok := b.mockApps[name]; ok {
t.Logf(fmt.Sprintf("%s already exist, free the current and recreate again", string(name)))
app.WaitExit()
}
appAPI := cmd.NewCmd(string(name), append(b.args, args...)...)
keyword := fmt.Sprintf("Start %s successfully", string(name)[:len(string(name))-len("-test")])
appAPI.RunApp(func() bool { return appAPI.WaitResult(t, time.Second*20, keyword) })
b.mockApps[name] = appAPI
}
// WaitExit wait util all processes exit.
func (b *MockApp) WaitExit() {
for _, app := range b.mockApps {
app.WaitExit()
}
b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
}
// Free stop and release bridge mocked apps.
func (b *MockApp) Free() {
b.WaitExit()
_ = os.Remove(b.bridgeFile)
}
// MockConfig creates a new bridge config.
func (b *MockApp) MockConfig(store bool) error {
base := b.base
// Load origin bridge config file.
cfg, err := config.NewConfig(b.originFile)
if err != nil {
return err
}
cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig.DSN = base.DBImg.Endpoint()
b.Config = cfg
if !store {
return nil
}
// Store changed bridge config into a temp file.
data, err := json.Marshal(b.Config)
if err != nil {
return err
}
return os.WriteFile(b.bridgeFile, data, 0600)
}


@@ -0,0 +1,118 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up message-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "message-relayer"
app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `message-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.MessageRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)
// Start l2relayer process
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessSavedEvents)
// Finish start all message relayer functions
log.Info("Start message-relayer successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run message_relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/msg_relayer/app"
func main() {
app.Run()
}


@@ -0,0 +1,133 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up rollup-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "rollup-relayer"
app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `rollup-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.RollupRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, l2relayer, ormFactory)
if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)
// Watcher loop to fetch missing blocks
go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
l2watcher.TryFetchRunningMissingBlocks(ctx, number)
})
// Batch proposer loop
go cutils.Loop(subCtx, 2*time.Second, func() {
batchProposer.TryProposeBatch()
batchProposer.TryCommitBatches()
})
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run rollup relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/rollup_relayer/app"
func main() {
app.Run()
}


@@ -19,7 +19,8 @@
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000
"min_balance": 100000000000000000000,
"pending_limit": 10
},
"gas_oracle_config": {
"min_gas_price": 0,
@@ -27,13 +28,10 @@
},
"finalize_batch_interval_sec": 0,
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121213"
"1212121212121212121212121212121212121212121212121212121212121212"
],
"gas_oracle_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121215"
],
"rollup_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121214"
"1212121212121212121212121212121212121212121212121212121212121212"
]
}
},
@@ -56,7 +54,8 @@
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000
"min_balance": 100000000000000000000,
"pending_limit": 10
},
"gas_oracle_config": {
"min_gas_price": 0,
@@ -64,13 +63,13 @@
},
"finalize_batch_interval_sec": 0,
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121213"
"1212121212121212121212121212121212121212121212121212121212121212"
],
"gas_oracle_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121215"
"1212121212121212121212121212121212121212121212121212121212121212"
],
"rollup_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121214"
"1212121212121212121212121212121212121212121212121212121212121212"
]
},
"batch_proposer_config": {


@@ -1,4 +1,4 @@
package config_test
package config
import (
"encoding/json"
@@ -8,30 +8,48 @@ import (
"time"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/config"
)
func TestConfig(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.True(t, assert.NoError(t, err), "failed to load config")
t.Run("Success Case", func(t *testing.T) {
cfg, err := NewConfig("../config.json")
assert.NoError(t, err, "failed to load config")
assert.Equal(t, 1, len(cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys))
assert.Equal(t, 1, len(cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys))
assert.Equal(t, 1, len(cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys))
assert.Len(t, cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys, 1)
assert.Len(t, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys, 1)
assert.Len(t, cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys, 1)
data, err := json.Marshal(cfg)
assert.NoError(t, err)
data, err := json.Marshal(cfg)
assert.NoError(t, err)
tmpJosn := fmt.Sprintf("/tmp/%d_bridge_config.json", time.Now().Nanosecond())
defer func() { _ = os.Remove(tmpJosn) }()
tmpJSON := fmt.Sprintf("/tmp/%d_bridge_config.json", time.Now().Nanosecond())
defer func() { _ = os.Remove(tmpJSON) }()
assert.NoError(t, os.WriteFile(tmpJosn, data, 0644))
assert.NoError(t, os.WriteFile(tmpJSON, data, 0644))
cfg2, err := config.NewConfig(tmpJosn)
assert.NoError(t, err)
cfg2, err := NewConfig(tmpJSON)
assert.NoError(t, err)
assert.Equal(t, cfg.L1Config, cfg2.L1Config)
assert.Equal(t, cfg.L2Config, cfg2.L2Config)
assert.Equal(t, cfg.DBConfig, cfg2.DBConfig)
assert.Equal(t, cfg.L1Config, cfg2.L1Config)
assert.Equal(t, cfg.L2Config, cfg2.L2Config)
assert.Equal(t, cfg.DBConfig, cfg2.DBConfig)
})
t.Run("File Not Found", func(t *testing.T) {
_, err := NewConfig("non_existent_file.json")
assert.ErrorIs(t, err, os.ErrNotExist)
})
t.Run("Invalid JSON Content", func(t *testing.T) {
// Create a temporary file with invalid JSON content
tempFile, err := os.CreateTemp("", "invalid_json_config.json")
assert.NoError(t, err)
defer os.Remove(tempFile.Name())
_, err = tempFile.WriteString("{ invalid_json: ")
assert.NoError(t, err)
_, err = NewConfig(tempFile.Name())
assert.Error(t, err)
})
}


@@ -30,9 +30,11 @@ type SenderConfig struct {
// The transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
TxType string `json:"tx_type"`
// The min balance set for check and set balance for sender's accounts.
MinBalance *big.Int `json:"min_balance,omitempty"`
MinBalance *big.Int `json:"min_balance"`
// The interval (in seconds) to check balance and top up sender's accounts
CheckBalanceTime uint64 `json:"check_balance_time"`
// The sender's pending count limit.
PendingLimit int `json:"pending_limit,omitempty"`
}
// RelayerConfig loads relayer configuration items.
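The pending_limit field added above (and wired through config.json) caps how many transactions a sender keeps in flight at once. The following is a minimal, hypothetical Go sketch of that idea only; Sender, SubmitTx, and ConfirmTx are illustrative names, not the repository's actual implementation.

package main

import (
	"errors"
	"fmt"
)

// SenderConfig mirrors only the field relevant to this sketch.
type SenderConfig struct {
	PendingLimit int `json:"pending_limit,omitempty"`
}

// Sender (hypothetical) tracks in-flight transactions and refuses new ones past the limit.
type Sender struct {
	cfg     SenderConfig
	pending map[string]struct{} // tx hash -> in-flight marker
}

var errPendingFull = errors.New("sender: pending transaction limit reached")

// SubmitTx registers a transaction if the pending pool still has room.
func (s *Sender) SubmitTx(txHash string) error {
	if s.cfg.PendingLimit > 0 && len(s.pending) >= s.cfg.PendingLimit {
		return errPendingFull
	}
	s.pending[txHash] = struct{}{}
	return nil
}

// ConfirmTx removes a confirmed transaction, freeing a slot.
func (s *Sender) ConfirmTx(txHash string) {
	delete(s.pending, txHash)
}

func main() {
	s := &Sender{cfg: SenderConfig{PendingLimit: 2}, pending: map[string]struct{}{}}
	for _, h := range []string{"0xaa", "0xbb", "0xcc"} {
		if err := s.SubmitTx(h); err != nil {
			fmt.Println(h, err) // the third submission hits the limit
			continue
		}
		fmt.Println(h, "queued")
	}
}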


@@ -3,8 +3,11 @@ module scroll-tech/bridge
go 1.18
require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
golang.org/x/sync v0.1.0
@@ -20,22 +23,30 @@ require (
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.14 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.5.2 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/smartystreets/assertions v1.13.1 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.7.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.1.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect


@@ -1,5 +1,7 @@
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.9.0 h1:PDiKKybR596O6FHW+RVSG0Z7uGCBNbmbUXh3uCNQ7Hc=
github.com/agiledragon/gomonkey/v2 v2.9.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -33,13 +35,15 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/iden3/go-iden3-crypto v0.0.14 h1:HQnFchY735JRNQxof6n/Vbyon4owj4+Ku+LNAamWV6c=
@@ -47,6 +51,8 @@ github.com/iden3/go-iden3-crypto v0.0.14/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBe
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -55,8 +61,10 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
@@ -66,15 +74,21 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -84,6 +98,12 @@ github.com/scroll-tech/zktrie v0.5.2 h1:U34jPXMLGOlRHfdvYp5VVgOcC0RuPeJmcS3bWotC
github.com/scroll-tech/zktrie v0.5.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU=
github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -93,10 +113,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
@@ -105,20 +125,27 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@@ -1,55 +0,0 @@
package l1
import (
"context"
"github.com/scroll-tech/go-ethereum/ethclient"
"scroll-tech/database"
"scroll-tech/bridge/config"
)
// Backend manages the resources and services of the L1 backend.
// The backend monitors events on layer 1 and relays transactions to layer 2.
type Backend struct {
cfg *config.L1Config
watcher *Watcher
relayer *Layer1Relayer
orm database.OrmFactory
}
// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*Backend, error) {
client, err := ethclient.Dial(cfg.Endpoint)
if err != nil {
return nil, err
}
relayer, err := NewLayer1Relayer(ctx, orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.L1MessageQueueAddress, cfg.ScrollChainContractAddress, orm)
return &Backend{
cfg: cfg,
watcher: watcher,
relayer: relayer,
orm: orm,
}, nil
}
// Start Backend module.
func (l1 *Backend) Start() error {
l1.watcher.Start()
l1.relayer.Start()
return nil
}
// Stop Backend module.
func (l1 *Backend) Stop() {
l1.watcher.Stop()
l1.relayer.Stop()
}
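The comment above describes the (now-removed) L1 Backend as the single entry point that ties the watcher and relayer to one lifecycle. A minimal sketch of how a host process would have driven it, assuming cfg comes from config.NewConfig and orm from database.NewOrmFactory (both placeholders here):
// Sketch only: cfg and orm are assumed to be loaded elsewhere; error handling is abbreviated.
ctx := context.Background()
l1Backend, err := l1.New(ctx, cfg.L1Config, orm)
if err != nil {
	log.Crit("failed to create L1 backend", "err", err)
}
if err := l1Backend.Start(); err != nil {
	log.Crit("failed to start L1 backend", "err", err)
}
defer l1Backend.Stop()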

View File

@@ -1,33 +0,0 @@
package l1
import (
"testing"
"scroll-tech/common/docker"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
// docker container handler.
base *docker.App
)
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
var err error
cfg, err = config.NewConfig("../config.json")
if err != nil {
panic(err)
}
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = base.DBConfig
m.Run()
base.Free()
}

View File

@@ -1,29 +0,0 @@
package l1
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/database/migrate"
"scroll-tech/database"
)
// TestCreateNewL1Relayer tests creating a new relayer instance and stopping it.
func TestCreateNewL1Relayer(t *testing.T) {
// Start docker containers.
base.RunImages(t)
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
relayer.Start()
}

View File

@@ -1,32 +0,0 @@
package l1
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func TestStartWatcher(t *testing.T) {
// Start docker containers.
base.RunImages(t)
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
client, err := base.L1Client()
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
watcher.Start()
time.Sleep(time.Millisecond * 500)
defer watcher.Stop()
}

View File

@@ -1,76 +0,0 @@
package l2
import (
"context"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/database"
"scroll-tech/bridge/config"
)
// Backend manages the resources and services of the L2 backend.
// The backend monitors events on layer 2 and relays transactions to layer 1.
type Backend struct {
cfg *config.L2Config
watcher *WatcherClient
relayer *Layer2Relayer
batchProposer *BatchProposer
orm database.OrmFactory
}
// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*Backend, error) {
client, err := ethclient.Dial(cfg.Endpoint)
if err != nil {
return nil, err
}
// Note: initialize watcher before relayer to keep DB consistent.
// Otherwise, there will be a race condition between watcher.initializeGenesis and relayer.ProcessPendingBatches.
watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.L2MessengerAddress, cfg.L2MessageQueueAddress, cfg.WithdrawTrieRootSlot, orm)
relayer, err := NewLayer2Relayer(ctx, client, orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
batchProposer := NewBatchProposer(ctx, cfg.BatchProposerConfig, relayer, orm)
return &Backend{
cfg: cfg,
watcher: watcher,
relayer: relayer,
batchProposer: batchProposer,
orm: orm,
}, nil
}
// Start Backend module.
func (l2 *Backend) Start() error {
l2.watcher.Start()
l2.relayer.Start()
l2.batchProposer.Start()
return nil
}
// Stop Backend module.
func (l2 *Backend) Stop() {
l2.batchProposer.Stop()
l2.relayer.Stop()
l2.watcher.Stop()
}
// APIs collect API modules.
func (l2 *Backend) APIs() []rpc.API {
return []rpc.API{
{
Namespace: "l2",
Version: "1.0",
Service: WatcherAPI(l2.watcher),
Public: true,
},
}
}
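APIs returns a standard go-ethereum []rpc.API, so the (now-removed) L2 Backend could be plugged into any RPC host. A hedged illustration of registering those modules on a bare rpc.Server; the hosting setup here is an assumption, not something taken from this repository:
// Illustrative only: expose the backend's API modules over an RPC server.
srv := rpc.NewServer()
for _, api := range l2Backend.APIs() {
	if err := srv.RegisterName(api.Namespace, api.Service); err != nil {
		log.Crit("failed to register API", "namespace", api.Namespace, "err", err)
	}
}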

View File

@@ -1,122 +0,0 @@
package l2
import (
"context"
"fmt"
"math"
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/bridge/config"
"scroll-tech/common/types"
)
func TestBatchProposerProposeBatch(t *testing.T) {
// Start docker containers.
base.RunImages(t)
l2Cli, err := base.L2Client()
assert.NoError(t, err)
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert traces into db.
assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock1}))
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
wc.Start()
defer wc.Stop()
batch, err := db.GetLatestBatch()
assert.NoError(t, err)
// Create a new batch.
batchData := types.NewBatchData(&types.BlockBatch{
Index: 0,
Hash: batch.Hash,
StateRoot: batch.StateRoot,
}, []*types.WrappedBlock{wrappedBlock1}, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, relayer, db)
proposer.tryProposeBatch()
infos, err := db.GetUnbatchedL2Blocks(map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", 100))
assert.NoError(t, err)
assert.Equal(t, 0, len(infos))
exist, err := db.BatchRecordExist(batchData.Hash().Hex())
assert.NoError(t, err)
assert.Equal(t, true, exist)
}
func TestBatchProposerGracefulRestart(t *testing.T) {
// Start docker containers.
base.RunImages(t)
l2Cli, err := base.L2Client()
assert.NoError(t, err)
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
// Insert traces into db.
assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock2}))
// Insert block batch into db.
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData2))
assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData1.Batch.Blocks[0].BlockNumber}, batchData1.Hash().Hex()))
assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData2.Batch.Blocks[0].BlockNumber}, batchData2.Hash().Hex()))
assert.NoError(t, dbTx.Commit())
assert.NoError(t, db.UpdateRollupStatus(context.Background(), batchData1.Hash().Hex(), types.RollupFinalized))
batchHashes, err := db.GetPendingBatches(math.MaxInt32)
assert.NoError(t, err)
assert.Equal(t, 1, len(batchHashes))
assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])
// test p.recoverBatchDataBuffer().
_ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, relayer, db)
batchHashes, err = db.GetPendingBatches(math.MaxInt32)
assert.NoError(t, err)
assert.Equal(t, 0, len(batchHashes))
exist, err := db.BatchRecordExist(batchData2.Hash().Hex())
assert.NoError(t, err)
assert.Equal(t, true, exist)
}

View File

@@ -1,99 +0,0 @@
package l2
import (
"fmt"
"golang.org/x/sync/errgroup"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/core/vm"
"github.com/scroll-tech/go-ethereum/log"
)
//nolint:unused
func blockTraceIsValid(trace *types.BlockTrace) bool {
if trace == nil {
log.Warn("block trace is empty")
return false
}
flag := true
for _, tx := range trace.ExecutionResults {
flag = structLogResIsValid(tx.StructLogs) && flag
}
return flag
}
//nolint:unused
func structLogResIsValid(txLogs []*types.StructLogRes) bool {
res := true
for i := 0; i < len(txLogs); i++ {
txLog := txLogs[i]
flag := true
switch vm.StringToOp(txLog.Op) {
case vm.CALL, vm.CALLCODE:
flag = codeIsValid(txLog, 2) && flag
flag = stateIsValid(txLog, 2) && flag
case vm.DELEGATECALL, vm.STATICCALL:
flag = codeIsValid(txLog, 2) && flag
case vm.CREATE, vm.CREATE2:
flag = stateIsValid(txLog, 1) && flag
case vm.SLOAD, vm.SSTORE, vm.SELFBALANCE:
flag = stateIsValid(txLog, 1) && flag
case vm.SELFDESTRUCT:
flag = stateIsValid(txLog, 2) && flag
case vm.EXTCODEHASH, vm.BALANCE:
flag = stateIsValid(txLog, 1) && flag
}
res = res && flag
}
return res
}
//nolint:unused
func codeIsValid(txLog *types.StructLogRes, n int) bool {
extraData := txLog.ExtraData
if extraData == nil {
log.Warn("extraData is empty", "pc", txLog.Pc, "opcode", txLog.Op)
return false
} else if len(extraData.CodeList) < n {
log.Warn("code list is too short", "opcode", txLog.Op, "expect length", n, "actual length", len(extraData.CodeList))
return false
}
return true
}
//nolint:unused
func stateIsValid(txLog *types.StructLogRes, n int) bool {
extraData := txLog.ExtraData
if extraData == nil {
log.Warn("extraData is empty", "pc", txLog.Pc, "opcode", txLog.Op)
return false
} else if len(extraData.StateList) < n {
log.Warn("stateList list is too short", "opcode", txLog.Op, "expect length", n, "actual length", len(extraData.StateList))
return false
}
return true
}
// TraceHasUnsupportedOpcodes checks whether the trace contains any unsupported opcodes.
func TraceHasUnsupportedOpcodes(opcodes map[string]struct{}, trace *types.BlockTrace) bool {
if trace == nil {
return false
}
eg := errgroup.Group{}
for _, res := range trace.ExecutionResults {
res := res
eg.Go(func() error {
for _, lg := range res.StructLogs {
if _, ok := opcodes[lg.Op]; ok {
return fmt.Errorf("unsupported opcde: %s", lg.Op)
}
}
return nil
})
}
err := eg.Wait()
return err != nil
}
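TraceHasUnsupportedOpcodes takes the deny-list as map keys so membership checks stay cheap across all execution results. A small usage sketch; the opcode names below are purely illustrative and not the project's actual list:
// Example only: build the set once and reuse it for every block trace.
unsupported := map[string]struct{}{
	"SELFDESTRUCT": {},
	"CREATE2":      {},
}
if TraceHasUnsupportedOpcodes(unsupported, trace) {
	log.Warn("block trace contains unsupported opcodes", "block", trace.Header.Number)
}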

View File

@@ -1,72 +0,0 @@
package l2
import (
"testing"
"scroll-tech/common/testdata"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
base *docker.App
// block trace
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
// batch data
batchData1 *types.BatchData
batchData2 *types.BatchData
)
func init() {
trace02 := testdata.GetTrace("../../common/testdata/blockTrace_02.json")
wrappedBlock1 = &types.WrappedBlock{
Header: trace02.Header,
Transactions: trace02.Transactions,
WithdrawTrieRoot: trace02.WithdrawTrieRoot,
}
batchData1 = types.NewBatchData(&types.BlockBatch{
Index: 0,
Hash: "0x0cc6b102c2924402c14b2e3a19baccc316252bfdc44d9ec62e942d34e39ec729",
StateRoot: "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
}, []*types.WrappedBlock{wrappedBlock1}, nil)
trace03 := testdata.GetTrace("../../common/testdata/blockTrace_03.json")
wrappedBlock2 = &types.WrappedBlock{
Header: trace03.Header,
Transactions: trace03.Transactions,
WithdrawTrieRoot: trace03.WithdrawTrieRoot,
}
batchData2 = types.NewBatchData(&types.BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
StateRoot: batchData1.Batch.NewStateRoot.String(),
}, []*types.WrappedBlock{wrappedBlock2}, nil)
}
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
// Load config.
var err error
cfg, err = config.NewConfig("../config.json")
if err != nil {
panic(err)
}
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = base.DBConfig
m.Run()
base.Free()
}

View File

@@ -1,5 +0,0 @@
package l2
// WatcherAPI is the watcher API service.
type WatcherAPI interface {
}

View File

@@ -1,218 +0,0 @@
package l2
import (
"context"
"crypto/ecdsa"
"math/big"
"strconv"
"testing"
"time"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func TestCreateNewWatcherAndStop(t *testing.T) {
// Start docker containers.
base.RunImages(t)
l2Cli, err := base.L2Client()
assert.NoError(t, err)
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
l2cfg := cfg.L2Config
rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, l2db)
rc.Start()
defer rc.Stop()
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKeys)
assert.NoError(t, err)
// Create several transactions and commit to block
numTransactions := 3
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
for i := 0; i < numTransactions; i++ {
_, err = newSender.SendTransaction(strconv.Itoa(1000+i), &toAddress, big.NewInt(1000000000), nil, 0)
assert.NoError(t, err)
<-newSender.ConfirmChan()
}
blockNum, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
}
func TestMonitorBridgeContract(t *testing.T) {
// Start docker containers.
base.RunImages(t)
l2Cli, err := base.L2Client()
assert.NoError(t, err)
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
wc.Start()
defer wc.Stop()
previousHeight, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
// deploy mock bridge
_, tx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)
rc := prepareWatcherClient(l2Cli, db, address)
rc.Start()
defer rc.Stop()
// Call the mock_bridge instance's sendMessage to emit events
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// extra block mined
toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message = []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// wait for processing to finish
<-time.After(6 * time.Second)
var latestHeight uint64
latestHeight, err = l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
t.Log("Latest height is", latestHeight)
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("Height in DB is", height)
assert.Greater(t, height, int64(previousHeight))
msgs, err := db.GetL2Messages(map[string]interface{}{"status": types.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
}
func TestFetchMultipleSentMessageInOneBlock(t *testing.T) {
// Start docker containers.
base.RunImages(t)
l2Cli, err := base.L2Client()
assert.NoError(t, err)
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
previousHeight, err := l2Cli.BlockNumber(context.Background()) // shadow the global previousHeight
assert.NoError(t, err)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
_, trx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
assert.NoError(t, err)
rc := prepareWatcherClient(l2Cli, db, address)
rc.Start()
defer rc.Stop()
// Call the mock_bridge instance's sendMessage to emit events multiple times
numTransactions := 4
var tx *geth_types.Transaction
for i := 0; i < numTransactions; i++ {
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
}
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// extra block mined
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// wait for processing to finish
<-time.After(6 * time.Second)
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
msgs, err := db.GetL2Messages(map[string]interface{}{"status": types.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
}
func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(53077))
assert.NoError(t, err)
auth.Value = big.NewInt(0) // in wei
assert.NoError(t, err)
auth.GasPrice, err = l2Cli.SuggestGasPrice(context.Background())
assert.NoError(t, err)
return auth
}

View File

@@ -1,10 +1,9 @@
package l1
package relayer
import (
"context"
"errors"
"math/big"
"time"
// not sure if this will cause problems when relaying with l1geth
@@ -15,7 +14,6 @@ import (
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/database"
@@ -31,14 +29,6 @@ var (
bridgeL1MsgsRelayedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
)
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultMessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
)
// Layer1Relayer is responsible for
// 1. fetch pending L1Message from db
// 2. relay pending message to layer 2 node
@@ -53,11 +43,9 @@ type Layer1Relayer struct {
// channel used to communicate with transaction sender
messageSender *sender.Sender
messageCh <-chan *sender.Confirmation
l2MessengerABI *abi.ABI
gasOracleSender *sender.Sender
gasOracleCh <-chan *sender.Confirmation
l1GasOracleABI *abi.ABI
minGasLimitForMessageRelay uint64
@@ -65,8 +53,6 @@ type Layer1Relayer struct {
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
stopCh chan struct{}
}
// NewLayer1Relayer will return a new instance of Layer1Relayer
@@ -96,21 +82,19 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
gasPriceDiff = defaultGasPriceDiff
}
minGasLimitForMessageRelay := uint64(defaultMessageRelayMinGasLimit)
minGasLimitForMessageRelay := uint64(defaultL1MessageRelayMinGasLimit)
if cfg.MessageRelayMinGasLimit != 0 {
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
}
return &Layer1Relayer{
l1Relayer := &Layer1Relayer{
ctx: ctx,
db: db,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
l2MessengerABI: bridge_abi.L2ScrollMessengerABI,
gasOracleSender: gasOracleSender,
gasOracleCh: gasOracleSender.ConfirmChan(),
l1GasOracleABI: bridge_abi.L1GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
@@ -118,9 +102,11 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
stopCh: make(chan struct{}),
}, nil
cfg: cfg,
}
go l1Relayer.handleConfirmLoop(ctx)
return l1Relayer, nil
}
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
@@ -138,7 +124,7 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
for _, msg := range msgs {
if err = r.processSavedEvent(msg); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
}
return
@@ -153,7 +139,7 @@ func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error {
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
if err != nil && err.Error() == "execution reverted: Message was already successfully executed" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
}
if err != nil {
@@ -203,7 +189,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
}
return
@@ -220,61 +206,43 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
}
}
// Start the relayer process
func (r *Layer1Relayer) Start() {
go func() {
ctx, cancel := context.WithCancel(r.ctx)
go utils.Loop(ctx, 2*time.Second, r.ProcessSavedEvents)
go utils.Loop(ctx, 2*time.Second, r.ProcessGasPriceOracle)
go func(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case cfm := <-r.messageCh:
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
if !cfm.IsSuccessful {
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
case cfm := <-r.gasOracleCh:
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case cfm := <-r.messageSender.ConfirmChan():
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
if !cfm.IsSuccessful {
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
}(ctx)
<-r.stopCh
cancel()
}()
}
// Stop the relayer module, for a graceful shutdown.
func (r *Layer1Relayer) Stop() {
close(r.stopCh)
case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
}
}
}
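The repeated ErrFullPending checks added in this file treat a full pending pool the same way as an exhausted account pool: back off and retry on the next loop tick instead of logging an error. A hedged sketch of that calling pattern, mirroring the SendTransaction usages shown in this diff (target and calldata are placeholders):
// Sketch: ErrNoAvailableAccount and ErrFullPending are expected backpressure, not failures.
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &target, big.NewInt(0), calldata, r.minGasLimitForMessageRelay)
if err != nil {
	if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
		log.Error("failed to relay message", "msgHash", msg.MsgHash, "err", err)
	}
	return err
}
log.Info("relay transaction sent", "msgHash", msg.MsgHash, "txHash", hash)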

View File

@@ -0,0 +1,155 @@
package relayer
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/bridge/sender"
"scroll-tech/database"
)
var (
templateL1Message = []*types.L1Message{
{
QueueIndex: 1,
MsgHash: "msg_hash1",
Height: 1,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "0x19ece",
GasLimit: 11529940,
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer1Hash: "hash0",
},
{
QueueIndex: 2,
MsgHash: "msg_hash2",
Height: 2,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "0x19ece",
GasLimit: 11529940,
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer1Hash: "hash1",
},
}
)
// testCreateNewL1Relayer tests creating a new L1 relayer instance.
func testCreateNewL1Relayer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, relayer)
}
func testL1RelayerProcessSaveEvents(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l1Cfg := cfg.L1Config
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
assert.NoError(t, db.SaveL1Messages(context.Background(), templateL1Message))
relayer.ProcessSavedEvents()
msg1, err := db.GetL1MessageByQueueIndex(1)
assert.NoError(t, err)
assert.Equal(t, msg1.Status, types.MsgSubmitted)
msg2, err := db.GetL1MessageByQueueIndex(2)
assert.NoError(t, err)
assert.Equal(t, msg2.Status, types.MsgSubmitted)
}
func testL1RelayerMsgConfirm(t *testing.T) {
// Set up the database and defer closing it.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
assert.NoError(t, db.SaveL1Messages(context.Background(),
[]*types.L1Message{
{MsgHash: "msg-1", QueueIndex: 0},
{MsgHash: "msg-2", QueueIndex: 1},
}))
// Create and set up the Layer1 Relayer.
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
ID: "msg-1",
IsSuccessful: true,
})
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
ID: "msg-2",
IsSuccessful: false,
})
// Check the database for the updated status using TryTimes.
utils.TryTimes(5, func() bool {
msg1, err1 := db.GetL1MessageByMsgHash("msg-1")
msg2, err2 := db.GetL1MessageByMsgHash("msg-2")
return err1 == nil && msg1.Status == types.MsgConfirmed &&
err2 == nil && msg2.Status == types.MsgRelayFailed
})
}
func testL1RelayerGasOracleConfirm(t *testing.T) {
// Set up the database and defer closing it.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
assert.NoError(t, db.InsertL1Blocks(context.Background(),
[]*types.L1BlockInfo{
{Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1},
}))
// Create and set up the Layer1 Relayer.
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ID: "gas-oracle-1",
IsSuccessful: true,
})
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ID: "gas-oracle-2",
IsSuccessful: false,
})
// Check the database for the updated status using TryTimes.
utils.TryTimes(5, func() bool {
msg1, err1 := db.GetL1BlockInfos(map[string]interface{}{"hash": "gas-oracle-1"})
msg2, err2 := db.GetL1BlockInfos(map[string]interface{}{"hash": "gas-oracle-2"})
return err1 == nil && len(msg1) == 1 && msg1[0].GasOracleStatus == types.GasOracleImported &&
err2 == nil && len(msg2) == 1 && msg2[0].GasOracleStatus == types.GasOracleFailed
})
}

View File

@@ -1,4 +1,4 @@
package l2
package relayer
import (
"context"
@@ -7,7 +7,6 @@ import (
"math/big"
"runtime"
"sync"
"time"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
@@ -18,12 +17,10 @@ import (
"golang.org/x/sync/errgroup"
"modernc.org/mathutil"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
cutil "scroll-tech/common/utils"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
@@ -41,14 +38,6 @@ var (
bridgeL2BatchesSkippedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
)
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultMessageRelayMinGasLimit = 200000 // should be enough for both ERC20 and ETH relay
)
// Layer2Relayer is responsible for
// 1. Committing and finalizing L2 blocks on L1
// 2. Relaying messages from L2 to L1
@@ -64,15 +53,12 @@ type Layer2Relayer struct {
cfg *config.RelayerConfig
messageSender *sender.Sender
messageCh <-chan *sender.Confirmation
l1MessengerABI *abi.ABI
rollupSender *sender.Sender
rollupCh <-chan *sender.Confirmation
l1RollupABI *abi.ABI
gasOracleSender *sender.Sender
gasOracleCh <-chan *sender.Confirmation
l2GasOracleABI *abi.ABI
minGasLimitForMessageRelay uint64
@@ -92,8 +78,6 @@ type Layer2Relayer struct {
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch hash.
processingFinalization sync.Map
stopCh chan struct{}
}
// NewLayer2Relayer will return a new instance of Layer2Relayer
@@ -127,27 +111,24 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db databa
gasPriceDiff = defaultGasPriceDiff
}
minGasLimitForMessageRelay := uint64(defaultMessageRelayMinGasLimit)
minGasLimitForMessageRelay := uint64(defaultL2MessageRelayMinGasLimit)
if cfg.MessageRelayMinGasLimit != 0 {
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
}
return &Layer2Relayer{
layer2Relayer := &Layer2Relayer{
ctx: ctx,
db: db,
l2Client: l2Client,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
l1MessengerABI: bridge_abi.L1ScrollMessengerABI,
rollupSender: rollupSender,
rollupCh: rollupSender.ConfirmChan(),
l1RollupABI: bridge_abi.ScrollChainABI,
gasOracleSender: gasOracleSender,
gasOracleCh: gasOracleSender.ConfirmChan(),
l2GasOracleABI: bridge_abi.L2GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
@@ -159,8 +140,9 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db databa
processingMessage: sync.Map{},
processingBatchesCommitment: sync.Map{},
processingFinalization: sync.Map{},
stopCh: make(chan struct{}),
}, nil
}
go layer2Relayer.handleConfirmLoop(ctx)
return layer2Relayer, nil
}
const processMsgLimit = 100
@@ -199,7 +181,7 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process l2 saved event", "err", err)
}
return
@@ -248,11 +230,11 @@ func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
if err != nil && err.Error() == "execution reverted: Message was already successfully executed" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
}
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
}
return err
@@ -298,7 +280,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
}
return
@@ -344,7 +326,7 @@ func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error {
txID := crypto.Keccak256Hash(bytes).String()
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send commitBatches tx to layer1 ", "err", err)
}
return err
@@ -463,12 +445,12 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
}()
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByHash(hash)
proofBuffer, icBuffer, err := r.db.GetVerifiedProofAndInstanceCommitmentsByHash(hash)
if err != nil {
log.Warn("fetch get proof by hash failed", "hash", hash, "err", err)
return
}
if proofBuffer == nil || instanceBuffer == nil {
if proofBuffer == nil || icBuffer == nil {
log.Warn("proof or instance not ready", "hash", hash)
return
}
@@ -476,13 +458,13 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
log.Error("proof buffer has wrong length", "hash", hash, "length", len(proofBuffer))
return
}
if len(instanceBuffer)%32 != 0 {
log.Warn("instance buffer has wrong length", "hash", hash, "length", len(instanceBuffer))
if len(icBuffer)%32 != 0 {
log.Warn("instance buffer has wrong length", "hash", hash, "length", len(icBuffer))
return
}
proof := utils.BufferToUint256Le(proofBuffer)
instance := utils.BufferToUint256Le(instanceBuffer)
instance := utils.BufferToUint256Le(icBuffer)
data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, instance)
if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err)
@@ -494,7 +476,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
finalizeTxHash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("finalizeBatchWithProof in layer1 failed", "hash", hash, "err", err)
}
return
@@ -517,53 +499,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
}
// Start the relayer process
func (r *Layer2Relayer) Start() {
go func() {
ctx, cancel := context.WithCancel(r.ctx)
go cutil.Loop(ctx, time.Second, r.ProcessSavedEvents)
go cutil.Loop(ctx, time.Second, r.ProcessCommittedBatches)
go cutil.Loop(ctx, time.Second, r.ProcessGasPriceOracle)
go func(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case confirmation := <-r.messageCh:
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupCh:
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleCh:
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer1", "confirmation", cfm)
}
}
}
}(ctx)
<-r.stopCh
cancel()
}()
}
// Stop the relayer module, for a graceful shutdown.
func (r *Layer2Relayer) Stop() {
close(r.stopCh)
}
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is message relay transaction
@@ -627,3 +562,32 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case confirmation := <-r.messageSender.ConfirmChan():
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupSender.ConfirmChan():
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer1", "confirmation", cfm)
}
}
}
}
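With handleConfirmLoop started inside the constructor and terminated by ctx.Done(), the relayers no longer expose Start/Stop; shutting one down means cancelling the context it was built with. A minimal sketch matching how the tests that follow wire it up:
// Sketch: cancelling the constructor's context stops handleConfirmLoop.
ctx, cancel := context.WithCancel(context.Background())
l2Relayer, err := NewLayer2Relayer(ctx, l2Client, db, cfg.L2Config.RelayerConfig)
if err != nil {
	log.Crit("failed to create L2 relayer", "err", err)
}
defer cancel() // exits the confirm loop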

View File

@@ -1,4 +1,4 @@
package l2
package relayer
import (
"context"
@@ -13,6 +13,9 @@ import (
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/bridge/sender"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -32,12 +35,7 @@ var (
}
)
func TestCreateNewRelayer(t *testing.T) {
// Start docker containers.
base.RunImages(t)
l2Cli, err := base.L2Client()
assert.NoError(t, err)
func testCreateNewRelayer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
@@ -46,17 +44,10 @@ func TestCreateNewRelayer(t *testing.T) {
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
relayer.Start()
assert.NotNil(t, relayer)
}
func TestL2RelayerProcessSaveEvents(t *testing.T) {
// Start docker containers.
base.RunImages(t)
l2Cli, err := base.L2Client()
assert.NoError(t, err)
func testL2RelayerProcessSaveEvents(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
@@ -66,7 +57,6 @@ func TestL2RelayerProcessSaveEvents(t *testing.T) {
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
err = db.SaveL2Messages(context.Background(), templateL2Message)
assert.NoError(t, err)
@@ -89,6 +79,12 @@ func TestL2RelayerProcessSaveEvents(t *testing.T) {
}
assert.NoError(t, db.InsertWrappedBlocks(traces))
parentBatch1 := &types.BlockBatch{
Index: 0,
Hash: common.Hash{}.Hex(),
StateRoot: common.Hash{}.Hex(),
}
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
@@ -106,12 +102,7 @@ func TestL2RelayerProcessSaveEvents(t *testing.T) {
assert.Equal(t, types.MsgSubmitted, msg.Status)
}
func TestL2RelayerProcessCommittedBatches(t *testing.T) {
// Start docker containers.
base.RunImages(t)
l2Cli, err := base.L2Client()
assert.NoError(t, err)
func testL2RelayerProcessCommittedBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
@@ -121,8 +112,13 @@ func TestL2RelayerProcessCommittedBatches(t *testing.T) {
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
parentBatch1 := &types.BlockBatch{
Index: 0,
Hash: common.Hash{}.Hex(),
StateRoot: common.Hash{}.Hex(),
}
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
@@ -147,12 +143,7 @@ func TestL2RelayerProcessCommittedBatches(t *testing.T) {
assert.Equal(t, types.RollupFinalizing, status)
}
func TestL2RelayerSkipBatches(t *testing.T) {
// Start docker containers.
base.RunImages(t)
l2Cli, err := base.L2Client()
assert.NoError(t, err)
func testL2RelayerSkipBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
@@ -162,7 +153,6 @@ func TestL2RelayerSkipBatches(t *testing.T) {
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string {
dbTx, err := db.Beginx()
@@ -218,6 +208,166 @@ func TestL2RelayerSkipBatches(t *testing.T) {
}
}
func testL2RelayerMsgConfirm(t *testing.T) {
// Set up the database and defer closing it.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
assert.NoError(t, db.SaveL2Messages(context.Background(), []*types.L2Message{
{MsgHash: "msg-1", Nonce: 0}, {MsgHash: "msg-2", Nonce: 1},
}))
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
l2Relayer.processingMessage.Store("msg-1", "msg-1")
l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{
ID: "msg-1",
IsSuccessful: true,
})
l2Relayer.processingMessage.Store("msg-2", "msg-2")
l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{
ID: "msg-2",
IsSuccessful: false,
})
// Check the database for the updated status using TryTimes.
utils.TryTimes(5, func() bool {
msg1, err1 := db.GetL2MessageByMsgHash("msg-1")
msg2, err2 := db.GetL2MessageByMsgHash("msg-2")
return err1 == nil && msg1.Status == types.MsgConfirmed &&
err2 == nil && msg2.Status == types.MsgRelayFailed
})
}
func testL2RelayerRollupConfirm(t *testing.T) {
// Set up the database and defer closing it.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
batches := make([]*types.BatchData, 6)
for i := 0; i < 6; i++ {
batches[i] = genBatchData(t, uint64(i))
}
dbTx, err := db.Beginx()
assert.NoError(t, err)
for _, batch := range batches {
assert.NoError(t, db.NewBatchInDBTx(dbTx, batch))
}
assert.NoError(t, dbTx.Commit())
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
processingKeys := []string{"committed-1", "committed-2", "finalized-1", "finalized-2"}
isSuccessful := []bool{true, false, true, false}
for i, key := range processingKeys[:2] {
batchHashes := []string{batches[i*2].Hash().Hex(), batches[i*2+1].Hash().Hex()}
l2Relayer.processingBatchesCommitment.Store(key, batchHashes)
l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{
ID: key,
IsSuccessful: isSuccessful[i],
})
}
for i, key := range processingKeys[2:] {
batchHash := batches[i+4].Hash().Hex()
l2Relayer.processingFinalization.Store(key, batchHash)
l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
ID: key,
IsSuccessful: isSuccessful[i+2],
TxHash: common.HexToHash("0x56789abcdef1234"),
})
}
// Check the database for the updated status using TryTimes.
utils.TryTimes(5, func() bool {
expectedStatuses := []types.RollupStatus{
types.RollupCommitted,
types.RollupCommitted,
types.RollupCommitFailed,
types.RollupCommitFailed,
types.RollupFinalized,
types.RollupFinalizeFailed,
}
for i, batch := range batches[:6] {
batchInDB, err := db.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()})
if err != nil || len(batchInDB) != 1 || batchInDB[0].RollupStatus != expectedStatuses[i] {
return false
}
}
return true
})
}
func testL2RelayerGasOracleConfirm(t *testing.T) {
// Set up the database and defer closing it.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
batches := make([]*types.BatchData, 2)
for i := 0; i < 2; i++ {
batches[i] = genBatchData(t, uint64(i))
}
dbTx, err := db.Beginx()
assert.NoError(t, err)
for _, batch := range batches {
assert.NoError(t, db.NewBatchInDBTx(dbTx, batch))
}
assert.NoError(t, dbTx.Commit())
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
isSuccessful := []bool{true, false}
for i, batch := range batches {
l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ID: batch.Hash().Hex(),
IsSuccessful: isSuccessful[i],
})
}
// Check the database for the updated status using TryTimes.
utils.TryTimes(5, func() bool {
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleFailed}
for i, batch := range batches {
gasOracle, err := db.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()})
if err != nil || len(gasOracle) != 1 || gasOracle[0].OracleStatus != expectedStatuses[i] {
return false
}
}
return true
})
}
func genBatchData(t *testing.T, index uint64) *types.BatchData {
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)

bridge/relayer/params.go Normal file
View File

@@ -0,0 +1,11 @@
package relayer
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
defaultL2MessageRelayMinGasLimit = 200000
)
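With gasPriceDiffPrecision at 1,000,000, the default diff of 50,000 corresponds to 50,000 / 1,000,000 = 5%. A hedged sketch of how such a threshold can gate gas-oracle updates; the exact comparison in the relayer may differ:
// Illustrative only: report whether newGasPrice moved more than gasPriceDiff away from lastGasPrice.
func gasPriceExceedsDiff(lastGasPrice, newGasPrice, gasPriceDiff uint64) bool {
	if lastGasPrice == 0 {
		return true // always push the first sample
	}
	delta := lastGasPrice * gasPriceDiff / gasPriceDiffPrecision // e.g. 5% of the last price
	return newGasPrice >= lastGasPrice+delta || newGasPrice+delta <= lastGasPrice
}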

View File

@@ -0,0 +1,113 @@
package relayer
import (
"encoding/json"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
base *docker.App
// l2geth client
l2Cli *ethclient.Client
// block trace
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
// batch data
batchData1 *types.BatchData
batchData2 *types.BatchData
)
func setupEnv(t *testing.T) (err error) {
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = base.DBConfig
// Create l2geth client.
l2Cli, err = base.L2Client()
assert.NoError(t, err)
templateBlockTrace1, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
if err != nil {
return err
}
// unmarshal blockTrace
wrappedBlock1 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err
}
parentBatch1 := &types.BlockBatch{
Index: 0,
Hash: "0x0cc6b102c2924402c14b2e3a19baccc316252bfdc44d9ec62e942d34e39ec729",
StateRoot: "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
}
batchData1 = types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json")
if err != nil {
return err
}
// unmarshal blockTrace
wrappedBlock2 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err
}
parentBatch2 := &types.BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
StateRoot: batchData1.Batch.NewStateRoot.String(),
}
batchData2 = types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil)
log.Info("batchHash", "batchhash1", batchData1.Hash().Hex(), "batchhash2", batchData2.Hash().Hex())
return err
}
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
m.Run()
base.Free()
}
func TestFunctions(t *testing.T) {
if err := setupEnv(t); err != nil {
t.Fatal(err)
}
// Run l1 relayer test cases.
t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer)
t.Run("TestL1RelayerProcessSaveEvents", testL1RelayerProcessSaveEvents)
t.Run("TestL1RelayerMsgConfirm", testL1RelayerMsgConfirm)
t.Run("TestL1RelayerGasOracleConfirm", testL1RelayerGasOracleConfirm)
// Run l2 relayer test cases.
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("TestL2RelayerMsgConfirm", testL2RelayerMsgConfirm)
t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
}

View File

@@ -23,10 +23,6 @@ type accountPool struct {
// newAccounts creates an accountPool instance.
func newAccountPool(ctx context.Context, minBalance *big.Int, client *ethclient.Client, privs []*ecdsa.PrivateKey) (*accountPool, error) {
if minBalance == nil {
minBalance = big.NewInt(0)
minBalance.SetString("100000000000000000000", 10)
}
accs := &accountPool{
client: client,
minBalance: minBalance,

View File

@@ -6,21 +6,19 @@ import (
"errors"
"fmt"
"math/big"
"reflect"
"strings"
"sync"
"sync/atomic"
"time"
cmapV2 "github.com/orcaman/concurrent-map/v2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/config"
"scroll-tech/bridge/utils"
)
const (
@@ -37,6 +35,12 @@ const (
var (
// ErrNoAvailableAccount indicates no available account error in the account pool.
ErrNoAvailableAccount = errors.New("sender has no available account to send transaction")
// ErrFullPending indicates that the sender's pending pool is full.
ErrFullPending = errors.New("sender's pending pool is full")
)
var (
defaultPendingLimit = 10
)
// Confirmation struct used to indicate transaction confirmation details
@@ -74,9 +78,9 @@ type Sender struct {
// account fields.
auths *accountPool
blockNumber uint64 // Current block number on chain.
baseFeePerGas uint64 // Current base fee per gas on chain
pendingTxs sync.Map // Mapping from nonce to pending transaction
blockNumber uint64 // Current block number on chain.
baseFeePerGas uint64 // Current base fee per gas on chain
pendingTxs cmapV2.ConcurrentMap[string, *PendingTransaction] // Mapping from nonce to pending transaction
confirmCh chan *Confirmation
stopCh chan struct{}
@@ -116,6 +120,11 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
}
}
// initialize pending limit with a default value
if config.PendingLimit == 0 {
config.PendingLimit = defaultPendingLimit
}
sender := &Sender{
ctx: ctx,
config: config,
@@ -125,7 +134,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
confirmCh: make(chan *Confirmation, 128),
blockNumber: header.Number.Uint64(),
baseFeePerGas: baseFeePerGas,
pendingTxs: sync.Map{},
pendingTxs: cmapV2.New[*PendingTransaction](),
stopCh: make(chan struct{}),
}
@@ -134,6 +143,21 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
return sender, nil
}
// PendingCount returns the current number of pending txs.
func (s *Sender) PendingCount() int {
return s.pendingTxs.Count()
}
// PendingLimit returns the maximum number of pending txs the sender can handle.
func (s *Sender) PendingLimit() int {
return s.config.PendingLimit
}
// IsFull returns true if the sender's pending tx pool is full.
func (s *Sender) IsFull() bool {
return s.pendingTxs.Count() >= s.config.PendingLimit
}
// Stop stops the sender module.
func (s *Sender) Stop() {
close(s.stopCh)
@@ -145,6 +169,12 @@ func (s *Sender) ConfirmChan() <-chan *Confirmation {
return s.confirmCh
}
// SendConfirmation sends a confirmation to the confirmation channel.
// Note: This function is only used in tests.
func (s *Sender) SendConfirmation(cfm *Confirmation) {
s.confirmCh <- cfm
}
// NumberOfAccounts returns the count of accounts.
func (s *Sender) NumberOfAccounts() int {
return len(s.auths.accounts)
@@ -159,21 +189,24 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
// SendTransaction sends a signed L2-to-L1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) {
if s.IsFull() {
return common.Hash{}, ErrFullPending
}
// We occupy the ID first, in case other threads call with the same ID at the same time
if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
}
// get
auth := s.auths.getAccount()
if auth == nil {
s.pendingTxs.Delete(ID) // release the ID on failure
s.pendingTxs.Remove(ID) // release the ID on failure
return common.Hash{}, ErrNoAvailableAccount
}
defer s.auths.releaseAccount(auth)
defer func() {
if err != nil {
s.pendingTxs.Delete(ID) // release the ID on failure
s.pendingTxs.Remove(ID) // release the ID on failure
}
}()
@@ -194,7 +227,7 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
submitAt: atomic.LoadUint64(&s.blockNumber),
feeData: feeData,
}
s.pendingTxs.Store(ID, pending)
s.pendingTxs.Set(ID, pending)
return tx.Hash(), nil
}
@@ -335,17 +368,17 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
}
}
s.pendingTxs.Range(func(key, value interface{}) bool {
for item := range s.pendingTxs.IterBuffered() {
key, pending := item.Key, item.Val
// skip placeholder entries: a nil value is stored only to occupy the pending task's ID
if value == nil || reflect.ValueOf(value).IsNil() {
return true
if pending == nil {
continue
}
pending := value.(*PendingTransaction)
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
if receipt.BlockNumber.Uint64() <= confirmed {
s.pendingTxs.Delete(key)
s.pendingTxs.Remove(key)
// send confirm message
s.confirmCh <- &Confirmation{
ID: pending.id,
@@ -376,7 +409,7 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
// We need to stop the program and manually handle the situation.
if strings.Contains(err.Error(), "nonce") {
// This key can be deleted
s.pendingTxs.Delete(key)
s.pendingTxs.Remove(key)
// Try get receipt by the latest replaced tx hash
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
@@ -398,8 +431,7 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
pending.submitAt = number
}
}
return true
})
}
}
// Loop is the main event loop
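With the pool now bounded, SendTransaction fails fast with ErrFullPending once PendingCount() reaches the configured PendingLimit (default 10). A hypothetical caller-side helper sketching the retry pattern the sender tests use below; sendWithRetry is illustrative and not part of this change:

// sendWithRetry backs off while the sender has no free account or its pending
// pool is full, and otherwise returns the result of SendTransaction.
func sendWithRetry(s *Sender, id string, to common.Address, value *big.Int) (common.Hash, error) {
	for {
		hash, err := s.SendTransaction(id, &to, value, nil, 0)
		if errors.Is(err, ErrNoAvailableAccount) || errors.Is(err, ErrFullPending) {
			time.Sleep(time.Second) // wait for an account or a pending slot to free up
			continue
		}
		return hash, err
	}
}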

View File

@@ -1,4 +1,4 @@
package sender_test
package sender
import (
"context"
@@ -13,6 +13,7 @@ import (
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
@@ -21,7 +22,6 @@ import (
"scroll-tech/common/docker"
"scroll-tech/bridge/config"
"scroll-tech/bridge/sender"
)
const TXBatch = 50
@@ -30,6 +30,7 @@ var (
privateKeys []*ecdsa.PrivateKey
cfg *config.Config
base *docker.App
txTypes = []string{"LegacyTx", "AccessListTx", "DynamicFeeTx"}
)
func TestMain(m *testing.M) {
@@ -50,107 +51,166 @@ func setupEnv(t *testing.T) {
// Load default private key.
privateKeys = []*ecdsa.PrivateKey{priv}
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.CheckBalanceTime = 1
}
func TestSender(t *testing.T) {
// Setup
setupEnv(t)
t.Run("test min gas limit", func(t *testing.T) { testMinGasLimit(t) })
t.Run("test new sender", testNewSender)
t.Run("test pending limit", testPendLimit)
t.Run("test min gas limit", testMinGasLimit)
t.Run("test resubmit transaction", func(t *testing.T) { testResubmitTransaction(t) })
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
t.Run("test 3 account sender", func(t *testing.T) { testBatchSender(t, 3) })
t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
}
func testNewSender(t *testing.T) {
for _, txType := range txTypes {
// exit by Stop()
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy1.TxType = txType
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKeys)
assert.NoError(t, err)
newSender1.Stop()
// exit by ctx.Done()
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy2.TxType = txType
subCtx, cancel := context.WithCancel(context.Background())
_, err = NewSender(subCtx, &cfgCopy2, privateKeys)
assert.NoError(t, err)
cancel()
}
}
func testPendLimit(t *testing.T) {
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
cfgCopy.PendingLimit = 2
newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
for i := 0; i < 2*newSender.PendingLimit(); i++ {
_, err = newSender.SendTransaction(strconv.Itoa(i), &common.Address{}, big.NewInt(1), nil, 0)
assert.True(t, err == nil || (err != nil && err.Error() == "sender's pending pool is full"))
}
assert.True(t, newSender.PendingCount() <= newSender.PendingLimit())
newSender.Stop()
}
}
func testMinGasLimit(t *testing.T) {
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
senderCfg.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
assert.NoError(t, err)
defer newSender.Stop()
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
client, err := ethclient.Dial(senderCfg.Endpoint)
assert.NoError(t, err)
client, err := ethclient.Dial(cfgCopy.Endpoint)
assert.NoError(t, err)
// MinGasLimit = 0
txHash0, err := newSender.SendTransaction("0", &common.Address{}, big.NewInt(1), nil, 0)
assert.NoError(t, err)
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
assert.NoError(t, err)
assert.Greater(t, tx0.Gas(), uint64(0))
// MinGasLimit = 0
txHash0, err := newSender.SendTransaction("0", &common.Address{}, big.NewInt(1), nil, 0)
assert.NoError(t, err)
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
assert.NoError(t, err)
assert.Greater(t, tx0.Gas(), uint64(0))
// MinGasLimit = 100000
txHash1, err := newSender.SendTransaction("1", &common.Address{}, big.NewInt(1), nil, 100000)
assert.NoError(t, err)
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
assert.NoError(t, err)
assert.Equal(t, tx1.Gas(), uint64(150000))
// MinGasLimit = 100000
txHash1, err := newSender.SendTransaction("1", &common.Address{}, big.NewInt(1), nil, 100000)
assert.NoError(t, err)
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
assert.NoError(t, err)
assert.Equal(t, tx1.Gas(), uint64(150000))
newSender.Stop()
}
}
func testResubmitTransaction(t *testing.T) {
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
auth := s.auths.getAccount()
tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
assert.NoError(t, err)
_, err = s.resubmitTransaction(feeData, auth, tx)
assert.NoError(t, err)
s.Stop()
}
}
func testBatchSender(t *testing.T, batchSize int) {
for len(privateKeys) < batchSize {
priv, err := crypto.GenerateKey()
if err != nil {
t.Fatal(err)
for _, txType := range txTypes {
for len(privateKeys) < batchSize {
priv, err := crypto.GenerateKey()
assert.NoError(t, err)
privateKeys = append(privateKeys, priv)
}
privateKeys = append(privateKeys, priv)
}
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
senderCfg.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
if err != nil {
t.Fatal(err)
}
defer newSender.Stop()
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.Confirmations = rpc.LatestBlockNumber
cfgCopy.PendingLimit = batchSize * TXBatch
cfgCopy.TxType = txType
newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
// send transactions
var (
eg errgroup.Group
idCache = cmap.New()
confirmCh = newSender.ConfirmChan()
)
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
index := idx
eg.Go(func() error {
for i := 0; i < TXBatch; i++ {
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := strconv.Itoa(i + index*1000)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
if errors.Is(err, sender.ErrNoAvailableAccount) {
<-time.After(time.Second)
continue
// send transactions
var (
eg errgroup.Group
idCache = cmap.New()
confirmCh = newSender.ConfirmChan()
)
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
index := idx
eg.Go(func() error {
for i := 0; i < TXBatch; i++ {
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := strconv.Itoa(i + index*1000)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
if errors.Is(err, ErrNoAvailableAccount) || errors.Is(err, ErrFullPending) {
<-time.After(time.Second)
continue
}
assert.NoError(t, err)
idCache.Set(id, struct{}{})
}
if err != nil {
return err
}
idCache.Set(id, struct{}{})
}
return nil
})
}
if err := eg.Wait(); err != nil {
t.Error(err)
}
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
// avoid 10 mins cause testcase panic
after := time.After(80 * time.Second)
for {
select {
case cmsg := <-confirmCh:
assert.Equal(t, true, cmsg.IsSuccessful)
_, exist := idCache.Pop(cmsg.ID)
assert.Equal(t, true, exist)
// Receive all confirmed txs.
if idCache.Count() == 0 {
return
}
case <-after:
t.Error("newSender test failed because of timeout")
return
return nil
})
}
assert.NoError(t, eg.Wait())
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
// avoid the default 10-minute test timeout causing a panic
after := time.After(80 * time.Second)
isDone := false
for !isDone {
select {
case cmsg := <-confirmCh:
assert.Equal(t, true, cmsg.IsSuccessful)
_, exist := idCache.Pop(cmsg.ID)
assert.Equal(t, true, exist)
// Receive all confirmed txs.
if idCache.Count() == 0 {
isDone = true
}
case <-after:
t.Error("newSender test failed because of timeout")
isDone = true
}
}
newSender.Stop()
}
}

View File

@@ -11,9 +11,9 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/cmd/app"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock_bridge"
@@ -26,9 +26,7 @@ var (
// private key
privateKey *ecdsa.PrivateKey
bridgeApp *app.BridgeApp
base *docker.App
base *docker.App
// clients
l1Client *ethclient.Client
@@ -53,32 +51,99 @@ var (
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
bridgeApp = app.NewBridgeApp(base, "../config.json")
// Load config.
cfg = bridgeApp.Config
m.Run()
bridgeApp.Free()
base.Free()
}
func setupEnv(t *testing.T) {
// Start l1geth l2geth and postgres docker containers.
base.RunImages(t)
var err error
privateKey, err = crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
assert.NoError(t, err)
// Create l1geth and l2geth client.
l1Client, err = base.L1Client()
messagePrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121213"))
assert.NoError(t, err)
l2Client, err = base.L2Client()
rollupPrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121214"))
assert.NoError(t, err)
gasOraclePrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121215"))
assert.NoError(t, err)
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L1Config.Confirmations = rpc.LatestBlockNumber
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
cfg.L1Config.RelayerConfig.GasOracleSenderPrivateKeys = []*ecdsa.PrivateKey{gasOraclePrivateKey}
cfg.L2Config.Confirmations = rpc.LatestBlockNumber
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
cfg.L2Config.RelayerConfig.GasOracleSenderPrivateKeys = []*ecdsa.PrivateKey{gasOraclePrivateKey}
base.RunImages(t)
// Create l1geth container.
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
// Create l2geth container.
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
// Create db container.
cfg.DBConfig = base.DBConfig
// Create l1geth and l2geth client.
l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)
assert.NoError(t, err)
l2Client, err = ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
// Create l1 and l2 auth
l1Auth = prepareAuth(t, base.L1gethImg.ChainID(), privateKey)
l2Auth = prepareAuth(t, base.L2gethImg.ChainID(), privateKey)
l1Auth = prepareAuth(t, l1Client, privateKey)
l2Auth = prepareAuth(t, l2Client, privateKey)
// send some balance to message and rollup sender
transferEther(t, l1Auth, l1Client, messagePrivateKey)
transferEther(t, l1Auth, l1Client, rollupPrivateKey)
transferEther(t, l1Auth, l1Client, gasOraclePrivateKey)
transferEther(t, l2Auth, l2Client, messagePrivateKey)
transferEther(t, l2Auth, l2Client, rollupPrivateKey)
transferEther(t, l2Auth, l2Client, gasOraclePrivateKey)
}
func transferEther(t *testing.T, auth *bind.TransactOpts, client *ethclient.Client, privateKey *ecdsa.PrivateKey) {
targetAddress := crypto.PubkeyToAddress(privateKey.PublicKey)
gasPrice, err := client.SuggestGasPrice(context.Background())
assert.NoError(t, err)
gasPrice.Mul(gasPrice, big.NewInt(2))
// Get pending nonce
nonce, err := client.PendingNonceAt(context.Background(), auth.From)
assert.NoError(t, err)
// 200 ether should be enough
value, ok := big.NewInt(0).SetString("0xad78ebc5ac6200000", 0)
assert.Equal(t, ok, true)
tx := types.NewTx(&types.LegacyTx{
Nonce: nonce,
To: &targetAddress,
Value: value,
Gas: 500000,
GasPrice: gasPrice,
})
signedTx, err := auth.Signer(auth.From, tx)
assert.NoError(t, err)
err = client.SendTransaction(context.Background(), signedTx)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), client, signedTx)
assert.NoError(t, err)
if receipt.Status != types.ReceiptStatusSuccessful {
t.Fatalf("Call failed")
}
}
func prepareContracts(t *testing.T) {
@@ -116,7 +181,9 @@ func prepareContracts(t *testing.T) {
cfg.L2Config.RelayerConfig.GasPriceOracleContractAddress = l2MessengerAddress
}
func prepareAuth(t *testing.T, chainID *big.Int, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
func prepareAuth(t *testing.T, client *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID)
assert.NoError(t, err)
auth.Value = big.NewInt(0) // in wei

View File

@@ -11,8 +11,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -30,14 +30,13 @@ func testImportL1GasPrice(t *testing.T) {
l1Cfg := cfg.L1Config
// Create L1Relayer
l1Relayer, err := l1.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
defer l1Relayer.Stop()
// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := l1.NewWatcher(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())
@@ -81,9 +80,8 @@ func testImportL2GasPrice(t *testing.T) {
l2Cfg := cfg.L2Config
// Create L2Relayer
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// add fake blocks
traces := []*types.WrappedBlock{

View File

@@ -13,8 +13,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -33,16 +33,14 @@ func testRelayL1MessageSucceed(t *testing.T) {
l2Cfg := cfg.L2Config
// Create L1Relayer
l1Relayer, err := l1.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
defer l1Relayer.Stop()
// Create L1Watcher
confirmations := rpc.LatestBlockNumber
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// Create L2Watcher
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
// send message through l1 messenger contract
nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})
@@ -56,7 +54,7 @@ func testRelayL1MessageSucceed(t *testing.T) {
}
// l1 watch process events
l1Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
l1Watcher.FetchContractEvent()
// check db status
msg, err := db.GetL1MessageByQueueIndex(nonce.Uint64())
@@ -79,7 +77,7 @@ func testRelayL1MessageSucceed(t *testing.T) {
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
l2Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
l2Watcher.FetchContractEvent()
msg, err = db.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgConfirmed)

View File

@@ -13,8 +13,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -33,15 +33,15 @@ func testRelayL2MessageSucceed(t *testing.T) {
// Create L2Watcher
confirmations := rpc.LatestBlockNumber
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
// Create L2Relayer
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// send message through l2 messenger contract
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
@@ -55,7 +55,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
}
// l2 watch process events
l2Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
l2Watcher.FetchContractEvent()
// check db status
msg, err := db.GetL2MessageByNonce(nonce.Uint64())
@@ -123,7 +123,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch CommitBatch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
@@ -144,7 +144,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch FinalizeBatch events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
@@ -165,7 +165,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
err = l1Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)

View File

@@ -12,8 +12,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/watcher"
"scroll-tech/database"
"scroll-tech/database/migrate"
@@ -30,13 +30,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// add some blocks to db
var wrappedBlocks []*types.WrappedBlock
@@ -96,7 +95,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
@@ -126,7 +125,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)

View File

@@ -1,4 +1,4 @@
package utils_test
package utils
import (
"context"
@@ -11,8 +11,6 @@ import (
"github.com/scroll-tech/go-ethereum/common/math"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/bridge/utils"
)
var (
@@ -65,15 +63,14 @@ func TestMarshalJSON(t *testing.T) {
for i, test := range tests {
var num rpc.BlockNumber
want, err := json.Marshal(test.expected)
assert.Nil(t, err)
assert.NoError(t, err)
if !test.mustFail {
err = json.Unmarshal([]byte(test.input), &num)
assert.Nil(t, err)
assert.NoError(t, err)
got, err := json.Marshal(&num)
assert.Nil(t, err)
assert.NoError(t, err)
if string(want) != string(got) {
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
}
}
}
@@ -88,20 +85,50 @@ func (e MockEthClient) BlockNumber(ctx context.Context) (uint64, error) {
}
func (e MockEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
return &types.Header{Number: new(big.Int).SetUint64(e.val)}, nil
var blockNumber int64
switch number.Int64() {
case int64(rpc.LatestBlockNumber):
blockNumber = int64(e.val)
case int64(rpc.SafeBlockNumber):
blockNumber = int64(e.val) - 6
case int64(rpc.FinalizedBlockNumber):
blockNumber = int64(e.val) - 12
default:
blockNumber = number.Int64()
}
if blockNumber < 0 {
blockNumber = 0
}
return &types.Header{Number: new(big.Int).SetInt64(blockNumber)}, nil
}
func TestGetLatestConfirmedBlockNumber(t *testing.T) {
ctx := context.Background()
client := MockEthClient{}
client.val = 5
confirmed, err := utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
assert.Nil(t, err)
assert.Equal(t, uint64(0), confirmed)
testCases := []struct {
blockNumber uint64
confirmation rpc.BlockNumber
expectedResult uint64
}{
{5, 6, 0},
{7, 6, 1},
{10, 2, 8},
{0, 1, 0},
{3, 0, 3},
{15, 15, 0},
{16, rpc.SafeBlockNumber, 10},
{22, rpc.FinalizedBlockNumber, 10},
{10, rpc.LatestBlockNumber, 10},
{5, rpc.SafeBlockNumber, 0},
{11, rpc.FinalizedBlockNumber, 0},
}
client.val = 7
confirmed, err = utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
assert.Nil(t, err)
assert.Equal(t, uint64(1), confirmed)
for _, testCase := range testCases {
client.val = testCase.blockNumber
confirmed, err := GetLatestConfirmedBlockNumber(ctx, &client, testCase.confirmation)
assert.NoError(t, err)
assert.Equal(t, testCase.expectedResult, confirmed)
}
}
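The table above implies the confirmation semantics: a non-negative confirmation count is subtracted from the latest height (clamped at zero), while the LatestBlockNumber, SafeBlockNumber and FinalizedBlockNumber tags defer to the header the client reports for that tag. A minimal sketch consistent with this mock; the real utils.GetLatestConfirmedBlockNumber may differ in detail:

// getLatestConfirmedBlockNumber sketches the behaviour the test table expects.
func getLatestConfirmedBlockNumber(ctx context.Context, client MockEthClient, confirmations rpc.BlockNumber) (uint64, error) {
	if confirmations < 0 { // latest / safe / finalized tags
		header, err := client.HeaderByNumber(ctx, big.NewInt(int64(confirmations)))
		if err != nil {
			return 0, err
		}
		return header.Number.Uint64(), nil
	}
	latest, err := client.BlockNumber(ctx)
	if err != nil {
		return 0, err
	}
	if latest < uint64(confirmations) {
		return 0, nil // not enough blocks yet
	}
	return latest - uint64(confirmations), nil
}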

View File

@@ -29,19 +29,6 @@ func ComputeMessageHash(
return common.BytesToHash(crypto.Keccak256(data))
}
// BufferToUint256Be convert bytes array to uint256 array assuming big-endian
func BufferToUint256Be(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
buffer256[i] = big.NewInt(0)
for j := 0; j < 32; j++ {
buffer256[i] = buffer256[i].Lsh(buffer256[i], 8)
buffer256[i] = buffer256[i].Add(buffer256[i], big.NewInt(int64(buffer[i*32+j])))
}
}
return buffer256
}
// BufferToUint256Le convert bytes array to uint256 array assuming little-endian
func BufferToUint256Le(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
@@ -76,23 +63,3 @@ func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
// @todo: add unit test.
func UnpackLogIntoMap(c *abi.ABI, out map[string]interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoMap(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:])
}

View File

@@ -1,34 +1,32 @@
package utils_test
package utils
import (
"math/big"
"testing"
"scroll-tech/bridge/utils"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func TestKeccak2(t *testing.T) {
hash := utils.Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"))
hash := Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"))
if hash != common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") {
t.Fatalf("Invalid keccak, want %s, got %s", "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", hash.Hex())
}
hash = utils.Keccak2(common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"), common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"))
hash = Keccak2(common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"), common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"))
if hash != common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30") {
t.Fatalf("Invalid keccak, want %s, got %s", "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", hash.Hex())
}
hash = utils.Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
hash = Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
if hash != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
t.Fatalf("Invalid keccak, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", hash.Hex())
}
}
func TestComputeMessageHash(t *testing.T) {
hash := utils.ComputeMessageHash(
hash := ComputeMessageHash(
common.HexToAddress("0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63"),
common.HexToAddress("0x4592D8f8D7B001e72Cb26A73e4Fa1806a51aC79d"),
big.NewInt(0),
@@ -37,3 +35,15 @@ func TestComputeMessageHash(t *testing.T) {
)
assert.Equal(t, "0xda253c04595a49017bb54b1b46088c69752b5ad2f0c47971ac76b8b25abec202", hash.String())
}
func TestBufferToUint256Le(t *testing.T) {
input := []byte{
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}
expectedOutput := []*big.Int{big.NewInt(1)}
result := BufferToUint256Le(input)
assert.Equal(t, expectedOutput, result)
}
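Little-endian here means the first byte of each 32-byte word is the least significant, so the input above decodes to 1 (a 0x02 at index 1 would add 512). A standalone sketch consistent with the test, not copied from the package:

// bufferToUint256Le interprets each 32-byte chunk of buffer as a
// little-endian unsigned 256-bit integer.
func bufferToUint256Le(buffer []byte) []*big.Int {
	out := make([]*big.Int, len(buffer)/32)
	for i := range out {
		v := big.NewInt(0)
		for j := 31; j >= 0; j-- { // most significant byte first
			v.Lsh(v, 8)
			v.Add(v, big.NewInt(int64(buffer[i*32+j])))
		}
		out[i] = v
	}
	return out
}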

View File

@@ -1,10 +1,9 @@
package l2
package watcher
import (
"context"
"fmt"
"math"
"reflect"
"sync"
"time"
@@ -13,22 +12,22 @@ import (
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/database"
bridgeabi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
)
var (
bridgeL2BatchesGasOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/gas/over/threshold/total", metrics.ScrollRegistry)
bridgeL2BatchesTxsOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/txs/over/threshold/total", metrics.ScrollRegistry)
bridgeL2BatchesCommitTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/commit/total", metrics.ScrollRegistry)
bridgeL2BatchesBlocksCreatedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/blocks/created/total", metrics.ScrollRegistry)
bridgeL2BatchesCommitsSentTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/commits/sent/total", metrics.ScrollRegistry)
bridgeL2BatchesCreatedRateMeter = geth_metrics.NewRegisteredMeter("bridge/l2/batches/blocks/created/rate", metrics.ScrollRegistry)
bridgeL2BatchesTxsCreatedRateMeter = geth_metrics.NewRegisteredMeter("bridge/l2/batches/txs/created/rate", metrics.ScrollRegistry)
bridgeL2BatchesGasCreatedRateMeter = geth_metrics.NewRegisteredMeter("bridge/l2/batches/gas/created/rate", metrics.ScrollRegistry)
bridgeL2BatchesTxsCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/txs/created/per/batch", metrics.ScrollRegistry)
bridgeL2BatchesGasCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/gas/created/per/batch", metrics.ScrollRegistry)
)
// AddBatchInfoToDB inserts the batch information to the BlockBatch table and updates the batch_hash
@@ -83,15 +82,13 @@ type BatchProposer struct {
proofGenerationFreq uint64
batchDataBuffer []*types.BatchData
relayer *Layer2Relayer
relayer *relayer.Layer2Relayer
piCfg *types.PublicInputHashConfig
stopCh chan struct{}
}
// NewBatchProposer will return a new instance of BatchProposer.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *Layer2Relayer, orm database.OrmFactory) *BatchProposer {
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *relayer.Layer2Relayer, orm database.OrmFactory) *BatchProposer {
p := &BatchProposer{
mutex: sync.Mutex{},
ctx: ctx,
@@ -107,42 +104,17 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, rela
proofGenerationFreq: cfg.ProofGenerationFreq,
piCfg: cfg.PublicInputConfig,
relayer: relayer,
stopCh: make(chan struct{}),
}
// for graceful restart.
p.recoverBatchDataBuffer()
// try to commit the leftover pending batches
p.tryCommitBatches()
p.TryCommitBatches()
return p
}
// Start the Listening process
func (p *BatchProposer) Start() {
go func() {
if reflect.ValueOf(p.orm).IsNil() {
panic("must run BatchProposer with DB")
}
ctx, cancel := context.WithCancel(p.ctx)
go utils.Loop(ctx, 2*time.Second, func() {
p.tryProposeBatch()
p.tryCommitBatches()
})
<-p.stopCh
cancel()
}()
}
// Stop the Watcher module, for a graceful shutdown.
func (p *BatchProposer) Stop() {
p.stopCh <- struct{}{}
}
func (p *BatchProposer) recoverBatchDataBuffer() {
// batches are sorted by batch index in increasing order
batchHashes, err := p.orm.GetPendingBatches(math.MaxInt32)
@@ -214,7 +186,8 @@ func (p *BatchProposer) recoverBatchDataBuffer() {
}
}
func (p *BatchProposer) tryProposeBatch() {
// TryProposeBatch will try to propose a batch.
func (p *BatchProposer) TryProposeBatch() {
p.mutex.Lock()
defer p.mutex.Unlock()
@@ -243,7 +216,8 @@ func (p *BatchProposer) tryProposeBatch() {
}
}
func (p *BatchProposer) tryCommitBatches() {
// TryCommitBatches will try to commit the pending batches.
func (p *BatchProposer) TryCommitBatches() {
p.mutex.Lock()
defer p.mutex.Unlock()
@@ -283,7 +257,7 @@ func (p *BatchProposer) tryCommitBatches() {
log.Error("SendCommitTx failed", "error", err)
} else {
// pop the processed batches from the buffer
bridgeL2BatchesCommitTotalCounter.Inc(1)
bridgeL2BatchesCommitsSentTotalCounter.Inc(1)
p.batchDataBuffer = p.batchDataBuffer[index:]
}
}
@@ -293,15 +267,49 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
return false
}
approximatePayloadSize := func(hash string) (uint64, error) {
traces, err := p.orm.GetL2WrappedBlocks(map[string]interface{}{"hash": hash})
if err != nil {
return 0, err
}
if len(traces) != 1 {
return 0, fmt.Errorf("unexpected traces length, expected = 1, actual = %d", len(traces))
}
size := 0
for _, tx := range traces[0].Transactions {
size += len(tx.Data)
}
return uint64(size), nil
}
firstSize, err := approximatePayloadSize(blocks[0].Hash)
if err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
return false
}
if firstSize > p.commitCalldataSizeLimit {
log.Warn("oversized payload even for only 1 block", "height", blocks[0].Number, "size", firstSize)
// note: we should probably fail here once we can ensure this will not happen
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
return false
}
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
return true
}
if blocks[0].GasUsed > p.batchGasThreshold {
bridgeL2BatchesGasOverThresholdTotalCounter.Inc(1)
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
} else {
bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(blocks[0].GasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(1)
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
}
return true
}
@@ -312,24 +320,31 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
} else {
bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(blocks[0].GasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(1)
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
}
return true
}
var gasUsed, txNum uint64
var gasUsed, txNum, payloadSize uint64
reachThreshold := false
// add blocks into the batch until one of the thresholds (gas, tx count, calldata size) is reached
for i, block := range blocks {
if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) {
size, err := approximatePayloadSize(block.Hash)
if err != nil {
log.Error("failed to create batch", "number", block.Number, "err", err)
return false
}
if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) || (payloadSize+size > p.commitCalldataSizeLimit) {
blocks = blocks[:i]
reachThreshold = true
break
}
gasUsed += block.GasUsed
txNum += block.TxNum
payloadSize += size
}
// if too little gas has been gathered but we don't want to halt, check the first block in the batch:
@@ -342,9 +357,9 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
if err := p.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
} else {
bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(txNum))
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(gasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(int64(len(blocks)))
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(txNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(gasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(int64(len(blocks)))
}
return true

View File

@@ -0,0 +1,199 @@
package watcher
import (
"context"
"fmt"
"math"
"strings"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/common/types"
)
func testBatchProposerProposeBatch(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
p := &BatchProposer{
batchGasThreshold: 1000,
batchTxNumThreshold: 10,
batchTimeSec: 300,
commitCalldataSizeLimit: 500,
orm: db,
}
patchGuard := gomonkey.ApplyMethodFunc(p.orm, "GetL2WrappedBlocks", func(fields map[string]interface{}, args ...string) ([]*types.WrappedBlock, error) {
hash, _ := fields["hash"].(string)
if hash == "blockWithLongData" {
longData := strings.Repeat("0", 1000)
return []*types.WrappedBlock{{
Transactions: []*geth_types.TransactionData{{
Data: longData,
}},
}}, nil
}
return []*types.WrappedBlock{{
Transactions: []*geth_types.TransactionData{{
Data: "short",
}},
}}, nil
})
defer patchGuard.Reset()
patchGuard.ApplyPrivateMethod(p, "createBatchForBlocks", func(*BatchProposer, []*types.BlockInfo) error {
return nil
})
block1 := &types.BlockInfo{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix()) - 200}
block2 := &types.BlockInfo{Number: 2, GasUsed: 200, TxNum: 2, BlockTimestamp: uint64(time.Now().Unix())}
block3 := &types.BlockInfo{Number: 3, GasUsed: 300, TxNum: 11, BlockTimestamp: uint64(time.Now().Unix())}
block4 := &types.BlockInfo{Number: 4, GasUsed: 1001, TxNum: 3, BlockTimestamp: uint64(time.Now().Unix())}
blockOutdated := &types.BlockInfo{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Add(-400 * time.Second).Unix())}
blockWithLongData := &types.BlockInfo{Hash: "blockWithLongData", Number: 5, GasUsed: 500, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix())}
testCases := []struct {
description string
blocks []*types.BlockInfo
expectedRes bool
}{
{"Empty block list", []*types.BlockInfo{}, false},
{"Single block exceeding gas threshold", []*types.BlockInfo{block4}, true},
{"Single block exceeding transaction number threshold", []*types.BlockInfo{block3}, true},
{"Multiple blocks meeting thresholds", []*types.BlockInfo{block1, block2, block3}, true},
{"Multiple blocks not meeting thresholds", []*types.BlockInfo{block1, block2}, false},
{"Outdated and valid block", []*types.BlockInfo{blockOutdated, block2}, true},
{"Single block with long data", []*types.BlockInfo{blockWithLongData}, true},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
assert.Equal(t, tc.expectedRes, p.proposeBatch(tc.blocks), "Failed on: %s", tc.description)
})
}
}
func testBatchProposerBatchGeneration(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
db.Close()
}()
// Insert traces into db.
assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock1}))
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
loopToFetchEvent(subCtx, wc)
batch, err := db.GetLatestBatch()
assert.NoError(t, err)
// Create a new batch.
batchData := types.NewBatchData(&types.BlockBatch{
Index: 0,
Hash: batch.Hash,
StateRoot: batch.StateRoot,
}, []*types.WrappedBlock{wrappedBlock1}, nil)
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, relayer, db)
proposer.TryProposeBatch()
infos, err := db.GetUnbatchedL2Blocks(map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", 100))
assert.NoError(t, err)
assert.Equal(t, 0, len(infos))
exist, err := db.BatchRecordExist(batchData.Hash().Hex())
assert.NoError(t, err)
assert.Equal(t, true, exist)
}
func testBatchProposerGracefulRestart(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
// Insert traces into db.
assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock2}))
// Insert block batch into db.
batchData1 := types.NewBatchData(&types.BlockBatch{
Index: 0,
Hash: common.Hash{}.String(),
StateRoot: common.Hash{}.String(),
}, []*types.WrappedBlock{wrappedBlock1}, nil)
parentBatch2 := &types.BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
StateRoot: batchData1.Batch.NewStateRoot.String(),
}
batchData2 := types.NewBatchData(parentBatch2, []*types.WrappedBlock{wrappedBlock2}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData2))
assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData1.Batch.Blocks[0].BlockNumber}, batchData1.Hash().Hex()))
assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData2.Batch.Blocks[0].BlockNumber}, batchData2.Hash().Hex()))
assert.NoError(t, dbTx.Commit())
assert.NoError(t, db.UpdateRollupStatus(context.Background(), batchData1.Hash().Hex(), types.RollupFinalized))
batchHashes, err := db.GetPendingBatches(math.MaxInt32)
assert.NoError(t, err)
assert.Equal(t, 1, len(batchHashes))
assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])
// test p.recoverBatchDataBuffer().
_ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, relayer, db)
batchHashes, err = db.GetPendingBatches(math.MaxInt32)
assert.NoError(t, err)
assert.Equal(t, 0, len(batchHashes))
exist, err := db.BatchRecordExist(batchData2.Hash().Hex())
assert.NoError(t, err)
assert.Equal(t, true, exist)
}

bridge/watcher/common.go Normal file
View File

@@ -0,0 +1,11 @@
package watcher
import "github.com/scroll-tech/go-ethereum/common"
const contractEventsBlocksFetchLimit = int64(10)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}
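contractEventsBlocksFetchLimit caps how many blocks a single eth_getLogs query may span. A hedged sketch of the chunking this implies; fetchInChunks is illustrative, while the real FetchContractEvent also parses and persists the logs for each window:

// fetchInChunks walks [fromBlock, toBlock] in windows of at most
// contractEventsBlocksFetchLimit blocks so each query stays small, handing
// each window to a caller-supplied fetch function.
func fetchInChunks(fromBlock, toBlock int64, fetch func(from, to int64) error) error {
	for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
		to := from + contractEventsBlocksFetchLimit - 1
		if to > toBlock {
			to = toBlock
		}
		if err := fetch(from, to); err != nil {
			return err
		}
	}
	return nil
}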

View File

@@ -1,9 +1,8 @@
package l1
package watcher
import (
"context"
"math/big"
"time"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
@@ -15,12 +14,10 @@ import (
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
cutil "scroll-tech/common/utils"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
@@ -34,20 +31,14 @@ var (
bridgeL1MsgsRollupEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}
type rollupEvent struct {
batchHash common.Hash
txHash common.Hash
status types.RollupStatus
}
// Watcher will listen for smart contract events from Eth L1.
type Watcher struct {
// L1WatcherClient will listen for smart contract events from Eth L1.
type L1WatcherClient struct {
ctx context.Context
client *ethclient.Client
db database.OrmFactory
@@ -68,13 +59,10 @@ type Watcher struct {
processedMsgHeight uint64
// The height of the block that the watcher has retrieved header rlp
processedBlockHeight uint64
stopCh chan bool
}
// NewWatcher returns a new instance of Watcher. The instance will be not fully prepared,
// and still needs to be finalized and ran by calling `watcher.Start`.
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *Watcher {
// NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *L1WatcherClient {
savedHeight, err := db.GetLayer1LatestWatchedHeight()
if err != nil {
log.Warn("Failed to fetch height from db", "err", err)
@@ -93,9 +81,7 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
savedL1BlockHeight = startHeight
}
stopCh := make(chan bool)
return &Watcher{
return &L1WatcherClient{
ctx: ctx,
client: client,
db: db,
@@ -112,51 +98,29 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
processedMsgHeight: uint64(savedHeight),
processedBlockHeight: savedL1BlockHeight,
stopCh: stopCh,
}
}
// Start the Watcher module.
func (w *Watcher) Start() {
go func() {
ctx, cancel := context.WithCancel(w.ctx)
go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
} else {
if err := w.FetchBlockHeader(number); err != nil {
log.Error("Failed to fetch L1 block header", "lastest", number, "err", err)
}
}
})
go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
} else {
if err := w.FetchContractEvent(number); err != nil {
log.Error("Failed to fetch bridge contract", "err", err)
}
}
})
<-w.stopCh
cancel()
}()
// ProcessedBlockHeight gets processedBlockHeight
// Currently only used in unit tests
func (w *L1WatcherClient) ProcessedBlockHeight() uint64 {
return w.processedBlockHeight
}
// Stop the Watcher module, for a graceful shutdown.
func (w *Watcher) Stop() {
w.stopCh <- true
// Confirmations gets confirmations
// Currently only used in unit tests
func (w *L1WatcherClient) Confirmations() rpc.BlockNumber {
return w.confirmations
}
const contractEventsBlocksFetchLimit = int64(10)
// SetConfirmations sets the confirmations for L1WatcherClient
// Currently only used in unit tests
func (w *L1WatcherClient) SetConfirmations(confirmations rpc.BlockNumber) {
w.confirmations = confirmations
}
// FetchBlockHeader pulls the latest L1 blocks and saves them in the DB
func (w *Watcher) FetchBlockHeader(blockHeight uint64) error {
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
fromBlock := int64(w.processedBlockHeight) + 1
toBlock := int64(blockHeight)
if toBlock < fromBlock {
@@ -202,10 +166,15 @@ func (w *Watcher) FetchBlockHeader(blockHeight uint64) error {
}
// FetchContractEvent pulls the latest event logs from the given contract addresses and saves them in the DB
func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
func (w *L1WatcherClient) FetchContractEvent() error {
defer func() {
log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
return err
}
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight)
@@ -318,7 +287,7 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
return nil
}
func (w *Watcher) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) {
func (w *L1WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) {
// Need to use the contract ABI to parse the event logs
// Can only be tested after we have our contracts set up

View File

@@ -0,0 +1,514 @@
package watcher
import (
"context"
"errors"
"math/big"
"testing"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
commonTypes "scroll-tech/common/types"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func setupL1Watcher(t *testing.T) (*L1WatcherClient, database.OrmFactory) {
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
client, err := ethclient.Dial(base.L1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
assert.NoError(t, watcher.FetchContractEvent())
return watcher, db
}
func testFetchContractEvent(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer db.Close()
assert.NoError(t, watcher.FetchContractEvent())
}
func testL1WatcherClientFetchBlockHeader(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer db.Close()
convey.Convey("test toBlock < fromBlock", t, func() {
var blockHeight uint64
if watcher.ProcessedBlockHeight() <= 0 {
blockHeight = 0
} else {
blockHeight = watcher.ProcessedBlockHeight() - 1
}
err := watcher.FetchBlockHeader(blockHeight)
assert.NoError(t, err)
})
convey.Convey("test get header from client error", t, func() {
var c *ethclient.Client
patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
return nil, ethereum.NotFound
})
defer patchGuard.Reset()
var blockHeight uint64 = 10
err := watcher.FetchBlockHeader(blockHeight)
assert.Error(t, err)
})
convey.Convey("insert l1 block error", t, func() {
var c *ethclient.Client
patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
if height == nil {
height = big.NewInt(100)
}
t.Log(height)
return &types.Header{
BaseFee: big.NewInt(100),
}, nil
})
defer patchGuard.Reset()
patchGuard.ApplyMethodFunc(db, "InsertL1Blocks", func(ctx context.Context, blocks []*commonTypes.L1BlockInfo) error {
return errors.New("insert failed")
})
var blockHeight uint64 = 10
err := watcher.FetchBlockHeader(blockHeight)
assert.Error(t, err)
})
convey.Convey("fetch block header success", t, func() {
var c *ethclient.Client
patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
if height == nil {
height = big.NewInt(100)
}
t.Log(height)
return &types.Header{
BaseFee: big.NewInt(100),
}, nil
})
defer patchGuard.Reset()
patchGuard.ApplyMethodFunc(db, "InsertL1Blocks", func(ctx context.Context, blocks []*commonTypes.L1BlockInfo) error {
return nil
})
var blockHeight uint64 = 10
err := watcher.FetchBlockHeader(blockHeight)
assert.NoError(t, err)
})
}
func testL1WatcherClientFetchContractEvent(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer db.Close()
watcher.SetConfirmations(rpc.SafeBlockNumber)
convey.Convey("get latest confirmed block number failure", t, func() {
var c *ethclient.Client
patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
return nil, ethereum.NotFound
})
defer patchGuard.Reset()
err := watcher.FetchContractEvent()
assert.Error(t, err)
})
var c *ethclient.Client
patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
if height == nil {
height = big.NewInt(100)
}
t.Log(height)
return &types.Header{
Number: big.NewInt(100),
BaseFee: big.NewInt(100),
}, nil
})
defer patchGuard.Reset()
convey.Convey("filter logs failure", t, func() {
targetErr := errors.New("call filter failure")
patchGuard.ApplyMethodFunc(c, "FilterLogs", func(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) {
return nil, targetErr
})
err := watcher.FetchContractEvent()
assert.EqualError(t, err, targetErr.Error())
})
patchGuard.ApplyMethodFunc(c, "FilterLogs", func(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) {
return []types.Log{
{
Address: common.HexToAddress("0x0000000000000000000000000000000000000000"),
},
}, nil
})
convey.Convey("parse bridge event logs failure", t, func() {
targetErr := errors.New("parse log failure")
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []geth_types.Log) ([]*commonTypes.L1Message, []relayedMessage, []rollupEvent, error) {
return nil, nil, nil, targetErr
})
err := watcher.FetchContractEvent()
assert.Equal(t, err.Error(), targetErr.Error())
})
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []geth_types.Log) ([]*commonTypes.L1Message, []relayedMessage, []rollupEvent, error) {
rollupEvents := []rollupEvent{
{
batchHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
txHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
status: commonTypes.RollupFinalized,
},
{
batchHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
txHash: common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"),
status: commonTypes.RollupCommitted,
},
}
relayedMessageEvents := []relayedMessage{
{
msgHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
txHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
isSuccessful: true,
},
{
msgHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
txHash: common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"),
isSuccessful: false,
},
}
return nil, relayedMessageEvents, rollupEvents, nil
})
convey.Convey("db get rollup status by hash list failure", t, func() {
targetErr := errors.New("get db failure")
patchGuard.ApplyMethodFunc(db, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
return nil, targetErr
})
err := watcher.FetchContractEvent()
assert.Equal(t, err.Error(), targetErr.Error())
})
convey.Convey("rollup status mismatch batch hashes length", t, func() {
patchGuard.ApplyMethodFunc(db, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
s := []commonTypes.RollupStatus{
commonTypes.RollupFinalized,
}
return s, nil
})
err := watcher.FetchContractEvent()
assert.NoError(t, err)
})
patchGuard.ApplyMethodFunc(db, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
s := []commonTypes.RollupStatus{
commonTypes.RollupPending,
commonTypes.RollupCommitting,
}
return s, nil
})
convey.Convey("db update RollupFinalized status failure", t, func() {
targetErr := errors.New("UpdateFinalizeTxHashAndRollupStatus RollupFinalized failure")
patchGuard.ApplyMethodFunc(db, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
return targetErr
})
err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error())
})
patchGuard.ApplyMethodFunc(db, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
return nil
})
convey.Convey("db update RollupCommitted status failure", t, func() {
targetErr := errors.New("UpdateCommitTxHashAndRollupStatus RollupCommitted failure")
patchGuard.ApplyMethodFunc(db, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
return targetErr
})
err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error())
})
patchGuard.ApplyMethodFunc(db, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
return nil
})
convey.Convey("db update layer2 status and layer1 hash failure", t, func() {
targetErr := errors.New("UpdateLayer2StatusAndLayer1Hash failure")
patchGuard.ApplyMethodFunc(db, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
return targetErr
})
err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error())
})
patchGuard.ApplyMethodFunc(db, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
return nil
})
convey.Convey("db save l1 message failure", t, func() {
targetErr := errors.New("SaveL1Messages failure")
patchGuard.ApplyMethodFunc(db, "SaveL1Messages", func(context.Context, []*commonTypes.L1Message) error {
return targetErr
})
err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error())
})
patchGuard.ApplyMethodFunc(db, "SaveL1Messages", func(context.Context, []*commonTypes.L1Message) error {
return nil
})
convey.Convey("FetchContractEvent success", t, func() {
err := watcher.FetchContractEvent()
assert.NoError(t, err)
})
}
func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer db.Close()
logs := []geth_types.Log{
{
Topics: []common.Hash{bridge_abi.L1QueueTransactionEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack QueueTransaction log failure", t, func() {
targetErr := errors.New("UnpackLog QueueTransaction failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})
convey.Convey("L1QueueTransactionEventSignature success", t, func() {
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1QueueTransactionEvent)
tmpOut.QueueIndex = big.NewInt(100)
tmpOut.Data = []byte("test data")
tmpOut.Sender = common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
tmpOut.Value = big.NewInt(1000)
tmpOut.Target = common.HexToAddress("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
tmpOut.GasLimit = big.NewInt(10)
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
assert.Len(t, l2Messages, 1)
assert.Equal(t, l2Messages[0].Value, big.NewInt(1000).String())
})
}
func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer db.Close()
logs := []geth_types.Log{
{
Topics: []common.Hash{bridge_abi.L1RelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack RelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog RelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})
convey.Convey("L1RelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1RelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, rollupEvents)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}
func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer db.Close()
logs := []geth_types.Log{
{
Topics: []common.Hash{bridge_abi.L1FailedRelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog FailedRelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})
convey.Convey("L1FailedRelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1FailedRelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, rollupEvents)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}
func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer db.Close()
logs := []geth_types.Log{
{
Topics: []common.Hash{bridge_abi.L1CommitBatchEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack CommitBatch log failure", t, func() {
targetErr := errors.New("UnpackLog CommitBatch failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})
convey.Convey("L1CommitBatchEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1CommitBatchEvent)
tmpOut.BatchHash = msgHash
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Len(t, rollupEvents, 1)
assert.Equal(t, rollupEvents[0].batchHash, msgHash)
assert.Equal(t, rollupEvents[0].status, commonTypes.RollupCommitted)
})
}
func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer db.Close()
logs := []geth_types.Log{
{
Topics: []common.Hash{bridge_abi.L1FinalizeBatchEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack FinalizeBatch log failure", t, func() {
targetErr := errors.New("UnpackLog FinalizeBatch failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})
convey.Convey("L1FinalizeBatchEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridge_abi.L1FinalizeBatchEvent)
tmpOut.BatchHash = msgHash
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Len(t, rollupEvents, 1)
assert.Equal(t, rollupEvents[0].batchHash, msgHash)
assert.Equal(t, rollupEvents[0].status, commonTypes.RollupFinalized)
})
}
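The cases above monkey-patch methods on a nil *ethclient.Client and private methods on the watcher itself with gomonkey. Such runtime patching fails when the compiler inlines the patched functions, which is presumably why build/run_tests.sh (added later in this diff) runs the tests with -gcflags="-l". A condensed sketch of the stubbing pattern, with placeholder header values:

```go
package watcher

import (
	"context"
	"math/big"

	"github.com/agiledragon/gomonkey/v2"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

// stubHeaderByNumber replaces (*ethclient.Client).HeaderByNumber for the
// duration of a test; callers should `defer stubHeaderByNumber().Reset()`.
func stubHeaderByNumber() *gomonkey.Patches {
	var c *ethclient.Client // methods are patched on the type, so a nil receiver is enough
	return gomonkey.ApplyMethodFunc(c, "HeaderByNumber",
		func(ctx context.Context, height *big.Int) (*types.Header, error) {
			return &types.Header{Number: big.NewInt(100), BaseFee: big.NewInt(100)}, nil
		})
}
```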

View File

@@ -1,12 +1,10 @@
package l2
package watcher
import (
"context"
"errors"
"fmt"
"math/big"
"reflect"
"time"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
@@ -19,11 +17,10 @@ import (
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
cutil "scroll-tech/common/utils"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
@@ -32,22 +29,16 @@ import (
// Metrics
var (
bridgeL2MsgsSyncHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2TracesFetchedHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/traces/fetched/height", metrics.ScrollRegistry)
bridgeL2TracesFetchedGapGauge = geth_metrics.NewRegisteredGauge("bridge/l2/traces/fetched/gap", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsSentEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL2MsgsAppendEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry)
bridgeL2MsgsRelayedEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}
// WatcherClient provide APIs which support others to subscribe to various event from l2geth
type WatcherClient struct {
// L2WatcherClient provides APIs that allow other components to subscribe to various events from l2geth
type L2WatcherClient struct {
ctx context.Context
event.Feed
@@ -68,18 +59,17 @@ type WatcherClient struct {
processedMsgHeight uint64
stopped uint64
stopCh chan struct{}
}
// NewL2WatcherClient takes an l2geth instance and creates a new L2WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, orm database.OrmFactory) *WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, orm database.OrmFactory) *L2WatcherClient {
savedHeight, err := orm.GetLayer2LatestWatchedHeight()
if err != nil {
log.Warn("fetch height from db failed", "err", err)
savedHeight = 0
}
w := WatcherClient{
w := L2WatcherClient{
ctx: ctx,
Client: client,
orm: orm,
@@ -93,7 +83,6 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
messageQueueABI: bridge_abi.L2MessageQueueABI,
withdrawTrieRootSlot: withdrawTrieRootSlot,
stopCh: make(chan struct{}),
stopped: 0,
}
@@ -105,7 +94,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
return &w
}
func (w *WatcherClient) initializeGenesis() error {
func (w *L2WatcherClient) initializeGenesis() error {
if count, err := w.orm.GetBatchCount(); err != nil {
return fmt.Errorf("failed to get batch count: %v", err)
} else if count > 0 {
@@ -143,46 +132,10 @@ func (w *WatcherClient) initializeGenesis() error {
return nil
}
// Start the Listening process
func (w *WatcherClient) Start() {
go func() {
if reflect.ValueOf(w.orm).IsNil() {
panic("must run L2 watcher with DB")
}
ctx, cancel := context.WithCancel(w.ctx)
go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
} else {
w.tryFetchRunningMissingBlocks(ctx, number)
}
})
go cutil.LoopWithContext(ctx, 2*time.Second, func(subCtx context.Context) {
number, err := utils.GetLatestConfirmedBlockNumber(subCtx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
} else {
w.FetchContractEvent(number)
}
})
<-w.stopCh
cancel()
}()
}
// Stop the Watcher module, for a graceful shutdown.
func (w *WatcherClient) Stop() {
w.stopCh <- struct{}{}
}
const blockTracesFetchLimit = uint64(10)
// try fetch missing blocks if inconsistent
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
// TryFetchRunningMissingBlocks tries to fetch missing blocks if the local DB is behind the chain
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
// Get the newest block in the DB; blocks must exist at this point.
// Don't use "block_trace" table "trace" column's BlockTrace.Number,
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
@@ -210,8 +163,8 @@ func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockH
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
bridgeL2TracesFetchedHeightGauge.Update(int64(to))
bridgeL2TracesFetchedGapGauge.Update(int64(blockHeight - to))
bridgeL2BlocksFetchedHeightGauge.Update(int64(to))
bridgeL2BlocksFetchedGapGauge.Update(int64(blockHeight - to))
}
}
@@ -238,7 +191,7 @@ func txsToTxsData(txs geth_types.Transactions) []*geth_types.TransactionData {
return txsData
}
func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var blocks []*types.WrappedBlock
for number := from; number <= to; number++ {
@@ -271,14 +224,18 @@ func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uin
return nil
}
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
func (w *L2WatcherClient) FetchContractEvent() {
defer func() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
return
}
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight)
@@ -354,7 +311,7 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
}
}
func (w *WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L2Message, []relayedMessage, error) {
func (w *L2WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L2Message, []relayedMessage, error) {
// Need to use the contract ABI to parse event logs
// Can only be tested after our contracts are set up
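With Start and Stop removed and the blockHeight parameter dropped, callers are now expected to drive the L2 watcher with their own polling loops, as the loopToFetchEvent helper in the tests below does. A sketch of what a caller-side replacement for the removed Start() might look like; the 2-second interval mirrors the old loop, everything else is illustrative:

```go
package watcher

import (
	"context"
	"time"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/rpc"

	"scroll-tech/bridge/utils"
	cutils "scroll-tech/common/utils"
)

// runL2Watcher is a sketch only: it polls for contract events and for missing
// blocks until ctx is cancelled, replacing the removed Start/Stop pair.
func runL2Watcher(ctx context.Context, w *L2WatcherClient, client *ethclient.Client, confirmations rpc.BlockNumber) {
	go cutils.Loop(ctx, 2*time.Second, w.FetchContractEvent)
	go cutils.Loop(ctx, 2*time.Second, func() {
		number, err := utils.GetLatestConfirmedBlockNumber(ctx, client, confirmations)
		if err != nil {
			log.Error("failed to get block number", "err", err)
			return
		}
		w.TryFetchRunningMissingBlocks(ctx, number)
	})
}
```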

View File

@@ -0,0 +1,442 @@
package watcher
import (
"context"
"crypto/ecdsa"
"errors"
"math/big"
"strconv"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"
"scroll-tech/bridge/utils"
cutils "scroll-tech/common/utils"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func setupL2Watcher(t *testing.T) *L2WatcherClient {
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2cfg := cfg.L2Config
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
return watcher
}
func testCreateNewWatcherAndStop(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
l2db.Close()
}()
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, l2db)
loopToFetchEvent(subCtx, wc)
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKeys)
assert.NoError(t, err)
// Create several transactions and commit to block
numTransactions := 3
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
for i := 0; i < numTransactions; i++ {
_, err = newSender.SendTransaction(strconv.Itoa(1000+i), &toAddress, big.NewInt(1000000000), nil, 0)
assert.NoError(t, err)
<-newSender.ConfirmChan()
}
blockNum, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
}
func testMonitorBridgeContract(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
db.Close()
}()
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
loopToFetchEvent(subCtx, wc)
previousHeight, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
// deploy mock bridge
_, tx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)
rc := prepareWatcherClient(l2Cli, db, address)
loopToFetchEvent(subCtx, rc)
// Call mock_bridge instance sendMessage to trigger emit events
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// extra block mined
toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message = []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// allow time for the watcher to process the events
<-time.After(6 * time.Second)
var latestHeight uint64
latestHeight, err = l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
t.Log("Latest height is", latestHeight)
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("Height in DB is", height)
assert.Greater(t, height, int64(previousHeight))
msgs, err := db.GetL2Messages(map[string]interface{}{"status": types.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
}
func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
ctx := context.Background()
subCtx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
db.Close()
}()
previousHeight, err := l2Cli.BlockNumber(context.Background()) // shadow the global previousHeight
assert.NoError(t, err)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
_, trx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
assert.NoError(t, err)
wc := prepareWatcherClient(l2Cli, db, address)
loopToFetchEvent(subCtx, wc)
// Call mock_bridge instance sendMessage to trigger emit events multiple times
numTransactions := 4
var tx *geth_types.Transaction
for i := 0; i < numTransactions; i++ {
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
}
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// extra block mined
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// allow time for the watcher to process the events
<-time.After(6 * time.Second)
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
msgs, err := db.GetL2Messages(map[string]interface{}{"status": types.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
}
func testFetchRunningMissingBlocks(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
// deploy mock bridge
_, tx, _, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)
// allow time for the deployment block to be processed
<-time.After(3 * time.Second)
latestHeight, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
wc := prepareWatcherClient(l2Cli, db, address)
wc.TryFetchRunningMissingBlocks(context.Background(), latestHeight)
fetchedHeight, err := db.GetL2BlocksLatestHeight()
assert.NoError(t, err)
assert.Equal(t, uint64(fetchedHeight), latestHeight)
}
func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *L2WatcherClient {
confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(53077))
assert.NoError(t, err)
auth.Value = big.NewInt(0) // in wei
assert.NoError(t, err)
auth.GasPrice, err = l2Cli.SuggestGasPrice(context.Background())
assert.NoError(t, err)
return auth
}
func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
go cutils.Loop(subCtx, 2*time.Second, watcher.FetchContractEvent)
}
func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
watcher := setupL2Watcher(t)
logs := []geth_types.Log{
{
Topics: []common.Hash{
bridge_abi.L2SentMessageEventSignature,
},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack SentMessage log failure", t, func() {
targetErr := errors.New("UnpackLog SentMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})
convey.Convey("L2SentMessageEventSignature success", t, func() {
tmpSendAddr := common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
tmpTargetAddr := common.HexToAddress("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
tmpValue := big.NewInt(1000)
tmpMessageNonce := big.NewInt(100)
tmpMessage := []byte("test for L2SentMessageEventSignature")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
tmpOut := out.(*bridge_abi.L2SentMessageEvent)
tmpOut.Sender = tmpSendAddr
tmpOut.Value = tmpValue
tmpOut.Target = tmpTargetAddr
tmpOut.MessageNonce = tmpMessageNonce
tmpOut.Message = tmpMessage
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.Error(t, err)
assert.Empty(t, relayedMessages)
assert.Empty(t, l2Messages)
})
}
func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
watcher := setupL2Watcher(t)
logs := []geth_types.Log{
{
Topics: []common.Hash{bridge_abi.L2RelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack RelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog RelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})
convey.Convey("L2RelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
tmpOut := out.(*bridge_abi.L2RelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}
func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) {
watcher := setupL2Watcher(t)
logs := []geth_types.Log{
{
Topics: []common.Hash{bridge_abi.L2FailedRelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog FailedRelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})
convey.Convey("L2FailedRelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
tmpOut := out.(*bridge_abi.L2FailedRelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}
func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
watcher := setupL2Watcher(t)
logs := []geth_types.Log{
{
Topics: []common.Hash{bridge_abi.L2AppendMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack AppendMessage log failure", t, func() {
targetErr := errors.New("UnpackLog AppendMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})
convey.Convey("L2AppendMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
tmpOut := out.(*bridge_abi.L2AppendMessageEvent)
tmpOut.MessageHash = msgHash
tmpOut.Index = big.NewInt(100)
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})
}

View File

@@ -0,0 +1,105 @@
package watcher
import (
"encoding/json"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
base *docker.App
// l2geth client
l2Cli *ethclient.Client
// block trace
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
)
func setupEnv(t *testing.T) (err error) {
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = base.DBConfig
// Create l2geth client.
l2Cli, err = base.L2Client()
assert.NoError(t, err)
templateBlockTrace1, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
if err != nil {
return err
}
// unmarshal blockTrace
wrappedBlock1 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err
}
templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json")
if err != nil {
return err
}
// unmarshal blockTrace
wrappedBlock2 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err
}
return err
}
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
m.Run()
base.Free()
}
func TestFunction(t *testing.T) {
if err := setupEnv(t); err != nil {
t.Fatal(err)
}
// Run l1 watcher test cases.
t.Run("TestStartWatcher", testFetchContractEvent)
t.Run("TestL1WatcherClientFetchBlockHeader", testL1WatcherClientFetchBlockHeader)
t.Run("TestL1WatcherClientFetchContractEvent", testL1WatcherClientFetchContractEvent)
t.Run("TestParseBridgeEventLogsL1QueueTransactionEventSignature", testParseBridgeEventLogsL1QueueTransactionEventSignature)
t.Run("TestParseBridgeEventLogsL1RelayedMessageEventSignature", testParseBridgeEventLogsL1RelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL1FailedRelayedMessageEventSignature", testParseBridgeEventLogsL1FailedRelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL1CommitBatchEventSignature", testParseBridgeEventLogsL1CommitBatchEventSignature)
t.Run("TestParseBridgeEventLogsL1FinalizeBatchEventSignature", testParseBridgeEventLogsL1FinalizeBatchEventSignature)
// Run l2 watcher test cases.
t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)
t.Run("TestParseBridgeEventLogsL2SentMessageEventSignature", testParseBridgeEventLogsL2SentMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2RelayedMessageEventSignature", testParseBridgeEventLogsL2RelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2FailedRelayedMessageEventSignature", testParseBridgeEventLogsL2FailedRelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2AppendMessageEventSignature", testParseBridgeEventLogsL2AppendMessageEventSignature)
// Run batch proposer test cases.
t.Run("TestBatchProposerProposeBatch", testBatchProposerProposeBatch)
t.Run("TestBatchProposerBatchGeneration", testBatchProposerBatchGeneration)
t.Run("TestBatchProposerGracefulRestart", testBatchProposerGracefulRestart)
}

View File

@@ -13,7 +13,7 @@ RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .
RUN cargo build --release
RUN find ./ | grep libzktrie.so | xargs -i cp {} /app/target/release/
RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
# Download Go dependencies

View File

@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build event_watcher
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
# Pull event_watcher into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/event_watcher /bin/
ENTRYPOINT ["event_watcher"]

View File

@@ -2,4 +2,4 @@ assets/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*

View File

@@ -11,16 +11,16 @@ COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build bridge
# Build gas_oracle
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd && go build -v -p 4 -o /bin/bridge
cd /src/bridge/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
# Pull bridge into a second stage deploy alpine container
# Pull gas_oracle into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/bridge /bin/
COPY --from=builder /bin/gas_oracle /bin/
ENTRYPOINT ["bridge"]
ENTRYPOINT ["gas_oracle"]

View File

@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*

View File

@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build msg_relayer
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/msg_relayer/ && go build -v -p 4 -o /bin/msg_relayer
# Pull msg_relayer into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/msg_relayer /bin/
ENTRYPOINT ["msg_relayer"]

View File

@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*

View File

@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build rollup_relayer
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/rollup_relayer /bin/
ENTRYPOINT ["rollup_relayer"]

View File

@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*

build/run_tests.sh Executable file
View File

@@ -0,0 +1,26 @@
#!/bin/bash
set -uex
profile_name=$1
exclude_dirs=("scroll-tech/bridge/cmd" "scroll-tech/bridge/tests" "scroll-tech/bridge/mock_bridge" "scroll-tech/coordinator/cmd")
all_packages=$(go list ./... | grep -v "^scroll-tech/${profile_name}$")
coverpkg="scroll-tech/${profile_name}"
for pkg in $all_packages; do
exclude_pkg=false
for exclude_dir in "${exclude_dirs[@]}"; do
if [[ $pkg == $exclude_dir* ]]; then
exclude_pkg=true
break
fi
done
if [ "$exclude_pkg" = false ]; then
coverpkg="$coverpkg,$pkg/..."
fi
done
echo "coverage.${profile_name}.txt"
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="$coverpkg" -coverprofile=../coverage.${profile_name}.txt -covermode=atomic ./...

common/bytecode/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*.go
*.sol

common/bytecode/Makefile Normal file
View File

@@ -0,0 +1,9 @@
.PHONY: all erc20 greeter
all: erc20 greeter
erc20:
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./erc20/ERC20Mock.json --pkg erc20 --out ./erc20/ERC20Mock.go
greeter:
go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./greeter/Greeter.json --pkg greeter --out ./greeter/Greeter.go

common/bytecode/README.md Normal file
View File

@@ -0,0 +1,40 @@
## How to pre-deploy contracts?
* Please refer to https://github.com/scroll-tech/genesis-creator.
1. Set up the environment
```bash
git clone git@github.com:scroll-tech/genesis-creator.git
cd genesis-creator
go get -v github.com/scroll-tech/go-ethereum@develop && go mod tidy
make abi && make genesis-creator
make l2geth-docker
```
2. Start docker and write the pre-deployed contracts into the genesis file.
```bash
make start-docker
./bin/genesis-creator -genesis ${SCROLLPATH}/common/docker/l2geth/genesis.json -contract [erc20|greeter]
```
3. Rebuild l2geth docker.
```bash
cd ${SCROLLPATH}
make dev_docker
```
## How to get a contract ABI?
* The steps for other contracts are the same as for erc20, e.g.:
1. Install solc.
*Refer to https://docs.soliditylang.org/en/latest/installing-solidity.html*
2. Get the ABI file.
```bash
cd genesis-creator
solc --combined-json "abi" --optimize ${SCROLLPATH}/common/bytecode/erc20/ERC20Mock.sol | jq > ${SCROLLPATH}/common/bytecode/erc20/ERC20Mock.json
```
3. Translate the ABI to Go.
```bash
cd ${SCROLLPATH}
make -C common/bytecode all
```
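
Once `make -C common/bytecode all` has produced the Go bindings, they can be used from tests roughly as sketched below. The import path, the `NewERC20Mock` constructor name, the endpoint, and the addresses are assumptions based on the abigen invocation and ABI above; they are not shown in this diff.

```go
// Sketch: reading a balance from a pre-deployed ERC20Mock via the generated bindings.
package main

import (
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"

	"scroll-tech/common/bytecode/erc20" // assumed import path of the generated package
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // l2geth endpoint, illustrative
	if err != nil {
		log.Fatal(err)
	}
	tokenAddr := common.HexToAddress("0x...")          // address written into genesis by genesis-creator
	token, err := erc20.NewERC20Mock(tokenAddr, client) // abigen-style constructor, assumed name
	if err != nil {
		log.Fatal(err)
	}
	holder := common.HexToAddress("0x...") // the initialAccount from the constructor args
	bal, err := token.BalanceOf(&bind.CallOpts{}, holder)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("balance:", bal)
}
```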

View File

@@ -0,0 +1,387 @@
{
"contracts": {
"tests/contracts/erc20/erc20.sol:ERC20Mock": {
"abi": [
{
"inputs": [
{
"internalType": "string",
"name": "name",
"type": "string"
},
{
"internalType": "string",
"name": "symbol",
"type": "string"
},
{
"internalType": "address",
"name": "initialAccount",
"type": "address"
},
{
"internalType": "uint256",
"name": "initialBalance",
"type": "uint256"
}
],
"stateMutability": "payable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Approval",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "from",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "to",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Transfer",
"type": "event"
},
{
"inputs": [
{
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"internalType": "address",
"name": "spender",
"type": "address"
}
],
"name": "allowance",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "approve",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "approveInternal",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "account",
"type": "address"
}
],
"name": "balanceOf",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "account",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "burn",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "decimals",
"outputs": [
{
"internalType": "uint8",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "subtractedValue",
"type": "uint256"
}
],
"name": "decreaseAllowance",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "addedValue",
"type": "uint256"
}
],
"name": "increaseAllowance",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "account",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "mint",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "name",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "symbol",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "totalSupply",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "transfer",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "transferFrom",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "transferInternal",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]
}
},
"version": "0.8.16+commit.07a7930e.Darwin.appleclang"
}

View File

@@ -0,0 +1,72 @@
{
"contracts": {
"greeter/Greeter.sol:Greeter": {
"abi": [
{
"inputs": [
{
"internalType": "uint256",
"name": "num",
"type": "uint256"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"inputs": [],
"name": "retrieve",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "retrieve_failing",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "num",
"type": "uint256"
}
],
"name": "set_value",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "num",
"type": "uint256"
}
],
"name": "set_value_failing",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]
}
},
"version": "0.8.16+commit.07a7930e.Darwin.appleclang"
}

View File

@@ -31,6 +31,8 @@ type Cmd struct {
checkFuncs cmap.ConcurrentMap //map[string]checkFunc
// open log flag.
openLog bool
// error channel
ErrChan chan error
}
@@ -64,7 +66,7 @@ func (c *Cmd) runCmd() {
// RunCmd parallel running when parallel is true.
func (c *Cmd) RunCmd(parallel bool) {
fmt.Println("cmd: ", c.args)
fmt.Println("cmd:", c.args)
if parallel {
go c.runCmd()
} else {
@@ -72,12 +74,17 @@ func (c *Cmd) RunCmd(parallel bool) {
}
}
// OpenLog enables or disables cmd log output via this API.
func (c *Cmd) OpenLog(open bool) {
c.openLog = open
}
func (c *Cmd) Write(data []byte) (int, error) {
out := string(data)
if verbose {
fmt.Printf("%s: %v", c.name, out)
if verbose || c.openLog {
fmt.Printf("%s:\n\t%v", c.name, out)
} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
fmt.Printf("%s: %v", c.name, out)
fmt.Printf("%s:\n\t%v", c.name, out)
}
go c.checkFuncs.IterCb(func(_ string, value interface{}) {
check := value.(checkFunc)

View File

@@ -39,9 +39,9 @@ type App struct {
L2gethImg GethImgInstance
DBImg ImgInstance
dbClient *sql.DB
DBConfig *database.DBConfig
dbFile string
dbClient *sql.DB
DBConfig *database.DBConfig
DBConfigFile string
// common time stamp.
Timestamp int
@@ -51,11 +51,11 @@ type App struct {
func NewDockerApp() *App {
timestamp := time.Now().Nanosecond()
app := &App{
Timestamp: timestamp,
L1gethImg: newTestL1Docker(),
L2gethImg: newTestL2Docker(),
DBImg: newTestDBDocker("postgres"),
dbFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
Timestamp: timestamp,
L1gethImg: newTestL1Docker(),
L2gethImg: newTestL2Docker(),
DBImg: newTestDBDocker("postgres"),
DBConfigFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
}
if err := app.mockDBConfig(); err != nil {
panic(err)
@@ -79,14 +79,14 @@ func (b *App) RunDBImage(t *testing.T) {
var isRun bool
// try 10 times until the db is ready.
utils.TryTimes(10, func() bool {
db, _ := sqlx.Open("postgres", b.DBImg.Endpoint())
isRun = db != nil && db.Ping() == nil
db, err := sqlx.Open("postgres", b.DBImg.Endpoint())
isRun = err == nil && db != nil && db.Ping() == nil
return isRun
})
assert.Equal(t, true, isRun)
}
// Free clear all running images
// Free clears all running images, double-checks, and recycles the docker containers.
func (b *App) Free() {
if b.L1gethImg.IsRunning() {
_ = b.L1gethImg.Stop()
@@ -96,7 +96,7 @@ func (b *App) Free() {
}
if b.DBImg.IsRunning() {
_ = b.DBImg.Stop()
_ = os.Remove(b.dbFile)
_ = os.Remove(b.DBConfigFile)
if !utils.IsNil(b.dbClient) {
_ = b.dbClient.Close()
b.dbClient = nil
@@ -145,7 +145,7 @@ func (b *App) L2Client() (*ethclient.Client, error) {
}
// DBClient creates and returns a *sql.DB instance.
func (b App) DBClient(t *testing.T) *sql.DB {
func (b *App) DBClient(t *testing.T) *sql.DB {
if !utils.IsNil(b.dbClient) {
return b.dbClient
}
@@ -177,7 +177,7 @@ func (b *App) mockDBConfig() error {
return err
}
return os.WriteFile(b.dbFile, data, 0644) //nolint:gosec
return os.WriteFile(b.DBConfigFile, data, 0644) //nolint:gosec
}
func newTestL1Docker() GethImgInstance {

View File

@@ -84,7 +84,7 @@ func (i *ImgDB) IsRunning() bool {
}
func (i *ImgDB) prepare() []string {
cmd := []string{"docker", "run", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
cmd := []string{"docker", "run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
envs := []string{
"-e", "POSTGRES_PASSWORD=" + i.password,
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),

View File

@@ -59,9 +59,8 @@ func (i *ImgGeth) Start() error {
// try 10 times to get the chainID until it succeeds.
utils.TryTimes(10, func() bool {
client, _ := ethclient.Dial(i.Endpoint())
if client != nil {
var err error
client, err := ethclient.Dial(i.Endpoint())
if err == nil && client != nil {
i.chainID, err = client.ChainID(context.Background())
return err == nil && i.chainID != nil
}
@@ -147,7 +146,7 @@ func (i *ImgGeth) Stop() error {
}
func (i *ImgGeth) prepare() []string {
cmds := []string{"docker", "run", "--name", i.name}
cmds := []string{"docker", "run", "--rm", "--name", i.name}
var ports []string
if i.httpPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)

File diff suppressed because one or more lines are too long


@@ -1,4 +1,4 @@
FROM scrolltech/l2geth:prealpha-v5.1
FROM scrolltech/l2geth:scroll-v3.1.4
RUN mkdir -p /l2geth/keystore

File diff suppressed because one or more lines are too long


@@ -5,9 +5,9 @@ go 1.18
require (
github.com/docker/docker v20.10.21+incompatible
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.6
github.com/lib/pq v1.10.7
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.16
github.com/mattn/go-isatty v0.0.18
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04
@@ -32,8 +32,10 @@ require (
github.com/ethereum/go-ethereum v1.11.4 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/go-kit/kit v0.9.0 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
@@ -51,13 +53,14 @@ require (
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/gomega v1.26.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
@@ -65,7 +68,9 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.5.2 // indirect
@@ -73,19 +78,19 @@ require (
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.7.0 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.9.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.6.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.1.0 // indirect
golang.org/x/tools v0.8.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect


@@ -108,8 +108,9 @@ github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1T
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
@@ -119,8 +120,9 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
@@ -244,8 +246,8 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
@@ -259,11 +261,13 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
@@ -297,8 +301,9 @@ github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q=
github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
@@ -335,10 +340,14 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -380,10 +389,10 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
@@ -444,8 +453,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -467,8 +476,8 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -519,11 +528,12 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -531,14 +541,14 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -569,13 +579,12 @@ golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=


@@ -1,9 +1,11 @@
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char};
use libc::c_char;
use std::cell::OnceCell;
use std::panic;
use std::ptr::null;
use types::eth::BlockTrace;
use zkevm::circuit::AGG_DEGREE;
use zkevm::utils::{load_or_create_params, load_or_create_seed};
use zkevm::utils::{load_params, load_seed};
use zkevm::{circuit::DEGREE, prover::Prover};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -15,9 +17,9 @@ pub unsafe extern "C" fn init_prover(params_path: *const c_char, seed_path: *con
let params_path = c_char_to_str(params_path);
let seed_path = c_char_to_str(seed_path);
let params = load_or_create_params(params_path, *DEGREE).unwrap();
let agg_params = load_or_create_params(params_path, *AGG_DEGREE).unwrap();
let seed = load_or_create_seed(seed_path).unwrap();
let params = load_params(params_path, *DEGREE).unwrap();
let agg_params = load_params(params_path, *AGG_DEGREE).unwrap();
let seed = load_seed(seed_path).unwrap();
let p = Prover::from_params_and_seed(params, agg_params, seed);
PROVER.set(p).unwrap();
}
@@ -27,13 +29,15 @@ pub unsafe extern "C" fn init_prover(params_path: *const c_char, seed_path: *con
pub unsafe extern "C" fn create_agg_proof(trace_char: *const c_char) -> *const c_char {
let trace_vec = c_char_to_vec(trace_char);
let trace = serde_json::from_slice::<BlockTrace>(&trace_vec).unwrap();
let proof = PROVER
.get_mut()
.unwrap()
.create_agg_circuit_proof(&trace)
.unwrap();
let proof_bytes = serde_json::to_vec(&proof).unwrap();
vec_to_c_char(proof_bytes)
let proof_result = panic::catch_unwind(|| {
let proof = PROVER
.get_mut()
.unwrap()
.create_agg_circuit_proof(&trace)
.unwrap();
serde_json::to_vec(&proof).unwrap()
});
proof_result.map_or(null(), vec_to_c_char)
}
/// # Safety
@@ -41,11 +45,13 @@ pub unsafe extern "C" fn create_agg_proof(trace_char: *const c_char) -> *const c
pub unsafe extern "C" fn create_agg_proof_multi(trace_char: *const c_char) -> *const c_char {
let trace_vec = c_char_to_vec(trace_char);
let traces = serde_json::from_slice::<Vec<BlockTrace>>(&trace_vec).unwrap();
let proof = PROVER
.get_mut()
.unwrap()
.create_agg_circuit_proof_batch(traces.as_slice())
.unwrap();
let proof_bytes = serde_json::to_vec(&proof).unwrap();
vec_to_c_char(proof_bytes)
let proof_result = panic::catch_unwind(|| {
let proof = PROVER
.get_mut()
.unwrap()
.create_agg_circuit_proof_batch(traces.as_slice())
.unwrap();
serde_json::to_vec(&proof).unwrap()
});
proof_result.map_or(null(), vec_to_c_char)
}
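Because create_agg_proof and create_agg_proof_multi now catch panics and return a null pointer instead of aborting, any caller on the other side of the FFI boundary has to treat NULL as a failed proof. A hypothetical cgo-side sketch of that check (the wrapper, its package, and the linked library name are assumptions, not the repo's actual binding):

```go
package prover

/*
#cgo LDFLAGS: -lzkp
#include <stdlib.h>
char* create_agg_proof_multi(char* trace_char);
*/
import "C"

import (
	"errors"
	"unsafe"
)

// CreateAggProofMulti feeds JSON-encoded traces to the Rust prover and treats a
// NULL result (a panic caught by catch_unwind on the Rust side) as an error
// instead of letting the whole process crash.
func CreateAggProofMulti(tracesJSON []byte) ([]byte, error) {
	cTraces := C.CString(string(tracesJSON))
	defer C.free(unsafe.Pointer(cTraces))

	cProof := C.create_agg_proof_multi(cTraces)
	if cProof == nil {
		return nil, errors.New("prover panicked while generating the aggregation proof")
	}
	return []byte(C.GoString(cProof)), nil
}
```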


@@ -2,9 +2,10 @@ use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use std::fs::File;
use std::io::Read;
use std::panic;
use zkevm::circuit::{AGG_DEGREE, DEGREE};
use zkevm::prover::AggCircuitProof;
use zkevm::utils::load_or_create_params;
use zkevm::utils::load_params;
use zkevm::verifier::Verifier;
static mut VERIFIER: Option<&Verifier> = None;
@@ -20,8 +21,8 @@ pub unsafe extern "C" fn init_verifier(params_path: *const c_char, agg_vk_path:
let mut agg_vk = vec![];
f.read_to_end(&mut agg_vk).unwrap();
let params = load_or_create_params(params_path, *DEGREE).unwrap();
let agg_params = load_or_create_params(params_path, *AGG_DEGREE).unwrap();
let params = load_params(params_path, *DEGREE).unwrap();
let agg_params = load_params(params_path, *AGG_DEGREE).unwrap();
let v = Box::new(Verifier::from_params(params, agg_params, Some(agg_vk)));
VERIFIER = Some(Box::leak(v))
@@ -32,9 +33,11 @@ pub unsafe extern "C" fn init_verifier(params_path: *const c_char, agg_vk_path:
pub unsafe extern "C" fn verify_agg_proof(proof: *const c_char) -> c_char {
let proof_vec = c_char_to_vec(proof);
let agg_proof = serde_json::from_slice::<AggCircuitProof>(proof_vec.as_slice()).unwrap();
let verified = VERIFIER
.unwrap()
.verify_agg_circuit_proof(agg_proof)
.is_ok();
verified as c_char
let verified = panic::catch_unwind(|| {
VERIFIER
.unwrap()
.verify_agg_circuit_proof(agg_proof)
.is_ok()
});
verified.unwrap_or(false) as c_char
}


@@ -1,21 +0,0 @@
package testdata
import (
"encoding/json"
"os"
"github.com/scroll-tech/go-ethereum/core/types"
)
// GetTrace returns trace by file name.
func GetTrace(file string) *types.BlockTrace {
data, err := os.ReadFile(file)
if err != nil {
panic(err)
}
trace := &types.BlockTrace{}
if err = json.Unmarshal(data, &trace); err != nil {
panic(err)
}
return trace
}


@@ -5,6 +5,8 @@ import (
"database/sql"
"fmt"
"time"
"scroll-tech/common/types/message"
)
// L1BlockStatus represents current l1 block processing status
@@ -162,6 +164,8 @@ type SessionInfo struct {
ID string `json:"id"`
Rollers map[string]*RollerStatus `json:"rollers"`
StartTimestamp int64 `json:"start_timestamp"`
Attempts uint8 `json:"attempts,omitempty"`
ProveType message.ProveType `json:"prove_type,omitempty"`
}
// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
@@ -255,3 +259,16 @@ type BlockBatch struct {
CommittedAt *time.Time `json:"committed_at" db:"committed_at"`
FinalizedAt *time.Time `json:"finalized_at" db:"finalized_at"`
}
// AggTask is a wrapper type around db AggProveTask type.
type AggTask struct {
ID string `json:"id" db:"id"`
StartBatchIndex uint64 `json:"start_batch_index" db:"start_batch_index"`
StartBatchHash string `json:"start_batch_hash" db:"start_batch_hash"`
EndBatchIndex uint64 `json:"end_batch_index" db:"end_batch_index"`
EndBatchHash string `json:"end_batch_hash" db:"end_batch_hash"`
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
Proof []byte `json:"proof" db:"proof"`
CreatedTime *time.Time `json:"created_time" db:"created_time"`
UpdatedTime *time.Time `json:"updated_time" db:"updated_time"`
}


@@ -22,6 +22,27 @@ const (
StatusProofError
)
// ProveType represents the type of roller.
type ProveType uint8
func (r ProveType) String() string {
switch r {
case BasicProve:
return "Basic Prove"
case AggregatorProve:
return "Aggregator Prove"
default:
return "Illegal Prove type"
}
}
const (
// BasicProve is the default roller type; it only generates zk proofs from traces.
BasicProve ProveType = iota
// AggregatorProve generates a zk proof from other zk proofs, aggregating them into one proof.
AggregatorProve
)
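For illustration only: with the ProveType above, the coordinator can hand basic rollers traces and aggregator rollers sub-proofs through the TaskMsg shape shown later in this diff. A minimal sketch with local stand-in types (the real definitions live in scroll-tech/common/types/message):

```go
package main

import "fmt"

// Stand-ins for the message types in this diff; field values are placeholders.
type ProveType uint8

const (
	BasicProve ProveType = iota
	AggregatorProve
)

type TaskMsg struct {
	ID        string
	Type      ProveType
	Traces    []string // stands in for []*types.BlockTrace
	SubProofs [][]byte
}

func main() {
	// Basic rollers receive traces to prove.
	basic := TaskMsg{ID: "batch-1", Type: BasicProve, Traces: []string{"trace-0", "trace-1"}}
	// Aggregator rollers receive already-generated sub-proofs to fold into one proof.
	agg := TaskMsg{ID: "agg-1", Type: AggregatorProve, SubProofs: [][]byte{[]byte("proof-0"), []byte("proof-1")}}
	fmt.Println(len(basic.Traces), len(agg.SubProofs))
}
```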
// AuthMsg is the first message exchanged from the Roller to the Sequencer.
// It effectively acts as a registration, and makes the Roller identification
// known to the Sequencer.
@@ -36,10 +57,10 @@ type AuthMsg struct {
type Identity struct {
// Roller name
Name string `json:"name"`
// RollerType is the type of prover this roller runs (basic or aggregator)
RollerType ProveType `json:"roller_type,omitempty"`
// Unverified Unix timestamp of message creation
Timestamp uint32 `json:"timestamp"`
// Roller public key
PublicKey string `json:"publicKey"`
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion:
// curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha"
Version string `json:"version"`
@@ -56,13 +77,14 @@ func GenerateToken() (string, error) {
return hex.EncodeToString(b), nil
}
// Sign auth message
func (a *AuthMsg) Sign(priv *ecdsa.PrivateKey) error {
// SignWithKey signs the auth message with the given private key and sets the public key in the message's Identity
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
@@ -80,36 +102,27 @@ func (a *AuthMsg) Verify() (bool, error) {
return false, err
}
sig := common.FromHex(a.Signature)
// recover public key
if a.Identity.PublicKey == "" {
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
a.Identity.PublicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
}
return crypto.VerifySignature(common.FromHex(a.Identity.PublicKey), hash, sig[:len(sig)-1]), nil
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey return public key from signature
func (a *AuthMsg) PublicKey() (string, error) {
if a.Identity.PublicKey == "" {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
a.Identity.PublicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
return a.Identity.PublicKey, nil
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
return a.Identity.PublicKey, nil
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
@@ -188,14 +201,19 @@ func (a *ProofMsg) PublicKey() (string, error) {
// TaskMsg is a wrapper type around db ProveTask type.
type TaskMsg struct {
ID string `json:"id"`
Traces []*types.BlockTrace `json:"blockTraces"`
ID string `json:"id"`
Type ProveType `json:"type,omitempty"`
// Only basic rollers need traces; aggregator rollers don't.
Traces []*types.BlockTrace `json:"blockTraces,omitempty"`
// Only aggregator rollers need proofs to aggregate; basic rollers don't.
SubProofs [][]byte `json:"sub_proofs,omitempty"`
}
// ProofDetail is the message received from rollers. It contains the zk proof, the status of
// the proof generation (succeeded or failed), and an error message if proof generation failed.
type ProofDetail struct {
ID string `json:"id"`
Type ProveType `json:"type,omitempty"`
Status RespStatus `json:"status"`
Proof *AggProof `json:"proof"`
Error string `json:"error,omitempty"`


@@ -19,7 +19,12 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
Timestamp: uint32(time.Now().Unix()),
},
}
assert.NoError(t, authMsg.Sign(privkey))
assert.NoError(t, authMsg.SignWithKey(privkey))
// check public key.
pk, err := authMsg.PublicKey()
assert.NoError(t, err)
assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk)
ok, err := authMsg.Verify()
assert.NoError(t, err)
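The reworked Verify and PublicKey above always recover the public key from the signature instead of trusting a caller-supplied Identity.PublicKey. A self-contained sketch of that sign/recover/verify flow with go-ethereum's crypto package (the hashed payload stands in for Identity.Hash()):

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	priv, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}

	// Stand-in for Identity.Hash(): any 32-byte digest of the signed content.
	hash := crypto.Keccak256([]byte("roller-identity"))

	sig, err := crypto.Sign(hash, priv)
	if err != nil {
		panic(err)
	}

	// Recover the public key from the signature rather than trusting a field.
	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		panic(err)
	}
	recovered := common.Bytes2Hex(crypto.CompressPubkey(pk))
	expected := common.Bytes2Hex(crypto.CompressPubkey(&priv.PublicKey))
	fmt.Println("recovered key matches:", recovered == expected)

	// VerifySignature expects the signature without the recovery id byte.
	fmt.Println("signature valid:", crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]))
}
```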


@@ -8,10 +8,30 @@ import (
"github.com/urfave/cli/v2"
)
// MockAppName is the name type used to identify a mock app.
type MockAppName string
var (
// EventWatcherApp is the name of the mock event-watcher app.
EventWatcherApp MockAppName = "event-watcher-test"
// GasOracleApp is the name of the mock gas-oracle app.
GasOracleApp MockAppName = "gas-oracle-test"
// MessageRelayerApp is the name of the mock message-relayer app.
MessageRelayerApp MockAppName = "message-relayer-test"
// RollupRelayerApp is the name of the mock rollup-relayer app.
RollupRelayerApp MockAppName = "rollup-relayer-test"
// CoordinatorApp is the name of the mock coordinator app.
CoordinatorApp MockAppName = "coordinator-test"
// DBCliApp is the name of the mock database app.
DBCliApp MockAppName = "db_cli-test"
// RollerApp is the name of the mock roller app.
RollerApp MockAppName = "roller-test"
)
// RegisterSimulation registers an initializer function for the integration test.
func RegisterSimulation(app *cli.App, name string) {
func RegisterSimulation(app *cli.App, name MockAppName) {
// Run the app for integration-test
reexec.Register(name, func() {
reexec.Register(string(name), func() {
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
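RegisterSimulation registers each mock app under its typed name with docker's reexec package; the test binary then re-executes itself under that name so the mock app runs in a child process. A brief, illustrative sketch of the pattern (the registered function here is a stand-in for a real mock app):

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// In the repo this registration is done by RegisterSimulation(app, CoordinatorApp);
	// here a trivial function stands in for the mock app.
	reexec.Register("coordinator-test", func() {
		fmt.Println("running mock coordinator app")
		os.Exit(0)
	})
}

func main() {
	// If this process was re-executed under a registered name, reexec.Init runs
	// that function and returns true, so main must stop here.
	if reexec.Init() {
		return
	}

	// Re-launch the current binary as the registered mock app.
	cmd := reexec.Command("coordinator-test")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```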


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v3.0.2"
var tag = "v3.0.14"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {


@@ -13,7 +13,7 @@ The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs in layer 2 an
### batchWithdrawERC1155
```solidity
function batchWithdrawERC1155(address _token, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external nonpayable
function batchWithdrawERC1155(address _token, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external payable
```
Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
@@ -32,7 +32,7 @@ Batch withdraw a list of ERC1155 NFT to caller&#39;s account on layer 1.
### batchWithdrawERC1155
```solidity
function batchWithdrawERC1155(address _token, address _to, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external nonpayable
function batchWithdrawERC1155(address _token, address _to, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external payable
```
Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
@@ -319,7 +319,7 @@ Update layer 2 to layer 1 token mapping.
### withdrawERC1155
```solidity
function withdrawERC1155(address _token, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external nonpayable
function withdrawERC1155(address _token, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external payable
```
Withdraw some ERC1155 NFT to caller's account on layer 1.
@@ -338,7 +338,7 @@ Withdraw some ERC1155 NFT to caller&#39;s account on layer 1.
### withdrawERC1155
```solidity
function withdrawERC1155(address _token, address _to, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external nonpayable
function withdrawERC1155(address _token, address _to, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external payable
```
Withdraw some ERC1155 NFT to caller's account on layer 1.


@@ -13,7 +13,7 @@ The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs in layer 2 and
### batchWithdrawERC721
```solidity
function batchWithdrawERC721(address _token, uint256[] _tokenIds, uint256 _gasLimit) external nonpayable
function batchWithdrawERC721(address _token, uint256[] _tokenIds, uint256 _gasLimit) external payable
```
Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
@@ -31,7 +31,7 @@ Batch withdraw a list of ERC721 NFT to caller&#39;s account on layer 1.
### batchWithdrawERC721
```solidity
function batchWithdrawERC721(address _token, address _to, uint256[] _tokenIds, uint256 _gasLimit) external nonpayable
function batchWithdrawERC721(address _token, address _to, uint256[] _tokenIds, uint256 _gasLimit) external payable
```
Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
@@ -266,7 +266,7 @@ Update layer 2 to layer 1 token mapping.
### withdrawERC721
```solidity
function withdrawERC721(address _token, uint256 _tokenId, uint256 _gasLimit) external nonpayable
function withdrawERC721(address _token, uint256 _tokenId, uint256 _gasLimit) external payable
```
Withdraw some ERC721 NFT to caller's account on layer 1.
@@ -284,7 +284,7 @@ Withdraw some ERC721 NFT to caller&#39;s account on layer 1.
### withdrawERC721
```solidity
function withdrawERC721(address _token, address _to, uint256 _tokenId, uint256 _gasLimit) external nonpayable
function withdrawERC721(address _token, address _to, uint256 _tokenId, uint256 _gasLimit) external payable
```
Withdraw some ERC721 NFT to caller's account on layer 1.


@@ -216,12 +216,7 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, _to, _value, _messageNonce, _message);
// compute and deduct the messaging fee to fee vault.
uint256 _fee = IL1MessageQueue(_messageQueue).estimateCrossDomainMessageFee(
address(this),
_counterpart,
_xDomainCalldata,
_gasLimit
);
uint256 _fee = IL1MessageQueue(_messageQueue).estimateCrossDomainMessageFee(_gasLimit);
require(msg.value >= _fee + _value, "Insufficient msg.value");
if (_fee > 0) {
(bool _success, ) = feeVault.call{value: _fee}("");


@@ -85,7 +85,7 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
// but it seems not a big problem.
IERC20Upgradeable(_l1Token).safeTransfer(_to, _amount);
// @todo forward `_data` to `_to` in the near future
_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}


@@ -74,7 +74,7 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
(bool _success, ) = _to.call{value: _amount}("");
require(_success, "ETH transfer failed");
// @todo forward `_data` to `_to` in the near future.
_doCallback(_to, _data);
emit FinalizeWithdrawETH(_from, _to, _amount, _data);
}


@@ -99,7 +99,7 @@ contract L1StandardERC20Gateway is Initializable, ScrollGatewayBase, L1ERC20Gate
// but it seems not a big problem.
IERC20(_l1Token).safeTransfer(_to, _amount);
// @todo forward `_data` to `_to` in the near future
_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}


@@ -90,7 +90,7 @@ contract L1WETHGateway is Initializable, ScrollGatewayBase, L1ERC20Gateway {
IWETH(_l1Token).deposit{value: _amount}();
IERC20(_l1Token).safeTransfer(_to, _amount);
// @todo forward `_data` to `_to`.
_doCallback(_to, _data);
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}

Some files were not shown because too many files have changed in this diff.