Compare commits

..

4 Commits

Author | SHA1 | Message | Date
Péter Garamvölgyi | d783d01f4b | fix import_genesis_block.ts | 2022-11-09 13:32:46 +01:00
Péter Garamvölgyi | 3c982a9199 | Merge branch 'staging' into foundry-deployment-script | 2022-11-02 10:01:40 +01:00
Péter Garamvölgyi | 13bcda11b7 | Merge branch 'staging' into foundry-deployment-script | 2022-10-27 22:03:10 +02:00
Thegaram | 632ca10157 | use Foundry for deployment scripts | 2022-10-26 21:22:05 -05:00
230 changed files with 117698 additions and 71073 deletions

View File

@@ -1,7 +0,0 @@
1. Purpose or design rationale of this PR
2. Does this PR involve a new deployment, and involve a new git tag & docker image tag? If so, has `tag` in `common/version.go` been updated?
3. Is this PR a breaking change? If so, has a `breaking-change` label been attached?

View File

@@ -31,11 +31,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
uses: pontem-network/get-solc@master
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint

View File

@@ -24,21 +24,12 @@ jobs:
check:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint

View File

@@ -24,11 +24,6 @@ jobs:
check:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:

View File

@@ -26,7 +26,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2022-08-23
override: true
components: rustfmt, clippy
- name: Install Go
@@ -35,15 +35,24 @@ jobs:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Cache cargo registry
uses: actions/cache@v2
with:
workspaces: "common/libzkp/impl -> target"
path: ~/.cargo/registry
key: ${{ runner.os }}-roller-cargo-registry-${{ hashFiles('roller/core/prover/rust/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v2
with:
path: ~/.cargo/git
key: ${{ runner.os }}-roller-cargo-index-${{ hashFiles('roller/core/prover/rust/Cargo.lock') }}
- name: Cache cargo target
uses: actions/cache@v2
with:
path: /home/runner/work/scroll/scroll/roller/core/prover/rust/target
key: ${{ runner.os }}-roller-cargo-build-target-${{ hashFiles('roller/core/prover/rust/Cargo.lock') }}
- name: Test
run: |
make roller
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./prover/lib
export CHAIN_ID=534353
go test -v ./...
check:
runs-on: ubuntu-latest

6
.gitignore vendored
View File

@@ -1,9 +1,3 @@
.idea
assets/params*
assets/seed
coverage.txt
build/bin
*.integration.txt
# misc
sftp-config.json

View File

@@ -1,5 +0,0 @@
repos:
- repo: git@github.com:scroll-tech/keydead.git
rev: main
hooks:
- id: keydead

179
Jenkinsfile vendored
View File

@@ -1,6 +1,8 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
def boolean test_result = false
pipeline {
agent any
options {
@@ -8,114 +10,105 @@ pipeline {
}
tools {
go 'go-1.18'
nodejs "nodejs"
}
environment {
GO111MODULE = 'on'
PATH="/home/ubuntu/.cargo/bin:$PATH"
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:./coordinator/verifier/lib"
CHAIN_ID='534353'
// LOG_DOCKER = 'true'
}
stages {
stage('Build') {
parallel {
stage('Build Prerequisite') {
steps {
sh 'make dev_docker'
sh 'make -C bridge mock_abi'
}
}
stage('Check Bridge Compilation') {
steps {
sh 'make -C bridge bridge'
}
}
stage('Check Coordinator Compilation') {
steps {
sh 'export PATH=/home/ubuntu/go/bin:$PATH'
sh 'make -C coordinator coordinator'
}
}
stage('Check Database Compilation') {
steps {
sh 'make -C database db_cli'
}
}
stage('Check Bridge Docker Build') {
steps {
sh 'make -C bridge docker'
}
}
stage('Check Coordinator Docker Build') {
steps {
sh 'make -C coordinator docker'
}
}
stage('Check Database Docker Build') {
steps {
sh 'make -C database docker'
}
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
}
}
}
stage('Parallel Test') {
parallel{
stage('Test bridge package') {
steps {
sh 'go test -v -race -coverprofile=coverage.bridge.txt -covermode=atomic -p 1 scroll-tech/bridge/...'
}
}
stage('Test common package') {
steps {
sh 'go test -v -race -coverprofile=coverage.common.txt -covermode=atomic -p 1 scroll-tech/common/...'
}
}
stage('Test coordinator package') {
steps {
sh 'go test -v -race -coverprofile=coverage.coordinator.txt -covermode=atomic -p 1 scroll-tech/coordinator/...'
}
}
stage('Test database package') {
steps {
sh 'go test -v -race -coverprofile=coverage.db.txt -covermode=atomic -p 1 scroll-tech/database/...'
}
}
stage('Integration test') {
steps {
sh 'go test -v -race -tags="mock_prover mock_verifier" -coverprofile=coverage.integration.txt -covermode=atomic -p 1 scroll-tech/integration-test/...'
}
}
stage('Race test bridge package') {
steps {
sh "cd bridge && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
}
}
stage('Race test coordinator package') {
steps {
sh "cd coordinator && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
}
}
stage('Race test database package') {
steps {
sh "cd database && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
}
}
steps {
//start to build project
sh '''#!/bin/bash
export PATH=/home/ubuntu/go/bin:$PATH
make dev_docker
make -C bridge mock_abi
make -C bridge bridge
make -C bridge docker
make -C coordinator coordinator
make -C coordinator docker
'''
}
}
stage('Compare Coverage') {
stage('Test') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
}
}
steps {
sh "./build/post-test-report-coverage.sh"
script {
currentBuild.result = 'SUCCESS'
sh "docker ps -aq | xargs -r docker stop"
sh "docker container prune -f"
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
sh '''
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database/migrate
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database/docker
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/abi
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/l1
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/l2
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/sender
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/common/docker
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/coordinator
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/coordinator/verifier
cd ..
'''
script {
for (i in ['bridge', 'coordinator', 'database']) {
sh "cd $i && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|l2\\|l1\\|common\\|coordinator')"
}
}
script { test_result = true }
}
}
}
stage('Docker') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
}
}
steps {
withCredentials([usernamePassword(credentialsId: "${credentialDocker}", passwordVariable: 'dockerPassword', usernameVariable: 'dockerUser')]) {
script {
if (test_result == true) {
sh 'docker login --username=${dockerUser} --password=${dockerPassword}'
for (i in ['bridge', 'coordinator']) {
sh "docker build -t ${imagePrefix}/$i:${GIT_COMMIT} -f build/dockerfiles/${i}.Dockerfile ."
sh "docker push ${imagePrefix}/$i:${GIT_COMMIT}"
sh "docker rmi ${imagePrefix}/$i:${GIT_COMMIT}"
}
}
}
}
step([$class: 'CompareCoverageAction', publishResultAs: 'Comment', scmVars: [GIT_URL: env.GIT_URL]])
}
}
}
post {
always {
publishCoverage adapters: [coberturaReportAdapter(path: 'cobertura.xml', thresholds: [[thresholdTarget: 'Aggregated Report', unhealthyThreshold: 40.0]])], checksName: '', sourceFileResolver: sourceFiles('NEVER_STORE')
post {
always {
cleanWs()
slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
}

View File

@@ -1,7 +1,5 @@
.PHONY: check update dev_docker clean
ZKP_VERSION=release-1220
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -16,11 +14,11 @@ lint: ## The code's format and security checks.
update: ## update dependencies
go work sync
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
cd $(PWD)/roller/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
cd $(PWD)/bridge/ && go mod tidy
cd $(PWD)/common/ && go mod tidy
cd $(PWD)/coordinator/ && go mod tidy
cd $(PWD)/database/ && go mod tidy
cd $(PWD)/roller/ && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
@@ -29,17 +27,7 @@ update: ## update dependencies
dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/
test_zkp: ## Test zkp proving and verifying: roller/prover generates the proof and coordinator/verifier verifies it
mkdir -p test_params
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params19 -O ./test_params/params19
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params26 -O ./test_params/params26
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_seed -O test_seed
rm -rf ./roller/assets/test_params && mv test_params ./roller/assets/ && mv test_seed ./roller/assets/
cd ./roller && make test-gpu-prover
rm -rf ./coordinator/assets/test_params && mv ./roller/assets/test_params ./coordinator/assets/ && mv ./roller/assets/agg_proof ./coordinator/assets/
cd ./coordinator && make test-gpu-verifier
docker build -t scroll_l2geth ./common/docker/l2geth/.
clean: ## Empty out the bin folder
@rm -rf build/bin

View File

@@ -5,8 +5,12 @@ IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
mock_abi:
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go
solc --bin mock_bridge/Mock_Bridge.sol -o mock_bridge/ --overwrite
solc --abi mock_bridge/Mock_Bridge.sol -o mock_bridge/ --overwrite
abigen --bin=mock_bridge/Mock_Bridge.bin \
--abi=mock_bridge/Mock_Bridge.abi \
--pkg=mock_bridge --out=mock_bridge/Mock_Bridge.go
awk '{sub("github.com/ethereum","github.com/scroll-tech")}1' mock_bridge/Mock_Bridge.go > temp && mv temp mock_bridge/Mock_Bridge.go
bridge: ## Builds the Bridge instance.
go build -o $(PWD)/build/bin/bridge ./cmd
@@ -21,7 +25,7 @@ clean: ## Empty out the bin folder
@rm -rf build/bin
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridge.Dockerfile
docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridge.Dockerfile
docker_push:
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}

View File

@@ -1,12 +1,13 @@
# Bridge
[![Actions Status](https://scroll-tech/bridge/workflows/Continuous%20Integration/badge.svg)](https://scroll-tech/bridge/actions)
[![codecov](https://codecov.io/gh/scroll-tech/bridge/branch/master/graph/badge.svg)](https://codecov.io/gh/scroll-tech/bridge)
This repo contains the Scroll bridge.
In addition, launching the bridge launches a separate instance of l2geth and sets up a communication channel between the two over JSON-RPC sockets.
Note that no private key inside the sender instance may be duplicated.
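To illustrate the duplicate-key constraint above, here is a minimal sketch of a validation helper; `validateSenderKeys` and the hex-string key representation are assumptions for illustration, not part of this changeset:

```go
package main

import (
	"fmt"
	"strings"
)

// validateSenderKeys returns an error if the same private key (as a hex
// string) appears more than once across the configured sender key lists.
func validateSenderKeys(keyLists ...[]string) error {
	seen := make(map[string]struct{})
	for _, list := range keyLists {
		for _, k := range list {
			key := strings.ToLower(strings.TrimPrefix(k, "0x"))
			if _, dup := seen[key]; dup {
				return fmt.Errorf("duplicated sender private key found")
			}
			seen[key] = struct{}{}
		}
	}
	return nil
}

func main() {
	// Throwaway placeholder keys for illustration only.
	messageKeys := []string{strings.Repeat("12", 32)}
	rollupKeys := []string{strings.Repeat("34", 32)}
	if err := validateSenderKeys(messageKeys, rollupKeys); err != nil {
		panic(err)
	}
	fmt.Println("no duplicated sender keys")
}
```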
## Dependency
+ install `abigen`
@@ -22,7 +23,38 @@ make clean
make bridge
```
## db operation
* init, show version, rollback, check status, and migrate the db
```bash
# DB_DSN: db data source name
export DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
# DB_DRIVER: db driver name
export DB_DRIVER="postgres"
# TEST_DB_DRIVER, TEST_DB_DSN: required when executing db test cases
export TEST_DB_DRIVER="postgres"
export TEST_DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
# init db
./build/bin/bridge reset [--config ./config.json]
# show db version
./build/bin/bridge version [--config ./config.json]
# rollback db
./build/bin/bridge rollback [--version version] [--config ./config.json]
# show db status
./build/bin/bridge status [--config ./config.json]
# migrate db
./build/bin/bridge migrate [--config ./config.json]
```
## Start
* use default ports and config.json
```bash

File diff suppressed because one or more lines are too long

View File

@@ -17,20 +17,29 @@ func TestPackRelayMessageWithProof(t *testing.T) {
assert.NoError(err)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockHeight: big.NewInt(0),
BatchIndex: big.NewInt(0),
BlockNumber: big.NewInt(0),
MerkleProof: make([]byte, 0),
}
_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
assert.NoError(err)
}
func TestPackCommitBatch(t *testing.T) {
func TestPackCommitBlock(t *testing.T) {
assert := assert.New(t)
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
assert.NoError(err)
header := bridge_abi.IZKRollupBlockHeader{
BlockHash: common.Hash{},
ParentHash: common.Hash{},
BaseFee: big.NewInt(0),
StateRoot: common.Hash{},
BlockHeight: 0,
GasUsed: 0,
Timestamp: 0,
ExtraData: make([]byte, 0),
}
txns := make([]bridge_abi.IZKRollupLayer2Transaction, 5)
for i := 0; i < 5; i++ {
txns[i] = bridge_abi.IZKRollupLayer2Transaction{
@@ -41,35 +50,13 @@ func TestPackCommitBatch(t *testing.T) {
GasPrice: big.NewInt(0),
Value: big.NewInt(0),
Data: make([]byte, 0),
R: big.NewInt(0),
S: big.NewInt(0),
V: 0,
}
}
header := bridge_abi.IZKRollupLayer2BlockHeader{
BlockHash: common.Hash{},
ParentHash: common.Hash{},
BaseFee: big.NewInt(0),
StateRoot: common.Hash{},
BlockHeight: 0,
GasUsed: 0,
Timestamp: 0,
ExtraData: make([]byte, 0),
Txs: txns,
}
batch := bridge_abi.IZKRollupLayer2Batch{
BatchIndex: 0,
ParentHash: common.Hash{},
Blocks: []bridge_abi.IZKRollupLayer2BlockHeader{header},
}
_, err = l1RollupABI.Pack("commitBatch", batch)
_, err = l1RollupABI.Pack("commitBlock", header, txns)
assert.NoError(err)
}
func TestPackFinalizeBatchWithProof(t *testing.T) {
func TestPackFinalizeBlockWithProof(t *testing.T) {
assert := assert.New(t)
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
@@ -82,7 +69,7 @@ func TestPackFinalizeBatchWithProof(t *testing.T) {
instance[i] = big.NewInt(0)
}
_, err = l1RollupABI.Pack("finalizeBatchWithProof", common.Hash{}, proof, instance)
_, err = l1RollupABI.Pack("finalizeBlockWithProof", common.Hash{}, proof, instance)
assert.NoError(err)
}

View File

@@ -1,130 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
)
var (
app *cli.App
)
func init() {
// Set up Bridge app info.
app = cli.NewApp()
app.Action = action
app.Name = "bridge"
app.Usage = "The Scroll Bridge"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `bridge-test` app for integration-test.
utils.RegisterSimulation(app, "bridge-test")
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// Start metrics server.
metrics.Serve(context.Background(), ctx)
// Init db connection.
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
var (
l1Backend *l1.Backend
l2Backend *l2.Backend
)
// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
if err != nil {
return err
}
l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
if err != nil {
return err
}
defer func() {
l1Backend.Stop()
l2Backend.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start all modules.
if err = l1Backend.Start(); err != nil {
log.Crit("couldn't start l1 backend", "error", err)
}
if err = l2Backend.Start(); err != nil {
log.Crit("couldn't start l2 backend", "error", err)
}
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
l2Backend.APIs())
if err != nil {
log.Crit("Could not start RPC api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
log.Info("Start bridge successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run runs the bridge cmd instance.
func Run() {
// Run the bridge.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,19 +0,0 @@
package app
import (
"fmt"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/version"
)
func TestRunBridge(t *testing.T) {
bridge := cmd.NewCmd(t, "bridge-test", "--version")
defer bridge.WaitExit()
// wait result
bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
bridge.RunApp(nil)
}

View File

@@ -1,31 +0,0 @@
package app
import (
"github.com/urfave/cli/v2"
)
var (
apiFlags = []cli.Flag{
&httpEnabledFlag,
&httpListenAddrFlag,
&httpPortFlag,
}
// httpEnabledFlag enable rpc server.
httpEnabledFlag = cli.BoolFlag{
Name: "http",
Usage: "Enable the HTTP-RPC server",
Value: false,
}
// httpListenAddrFlag set the http address.
httpListenAddrFlag = cli.StringFlag{
Name: "http.addr",
Usage: "HTTP-RPC server listening interface",
Value: "localhost",
}
// httpPortFlag set http.port.
httpPortFlag = cli.IntFlag{
Name: "http.port",
Usage: "HTTP-RPC server listening port",
Value: 8290,
}
)

88
bridge/cmd/db_client.go Normal file
View File

@@ -0,0 +1,88 @@
package main
import (
"scroll-tech/bridge/config"
"github.com/jmoiron/sqlx"
"github.com/urfave/cli/v2"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func initDB(file string) (*sqlx.DB, error) {
cfg, err := config.NewConfig(file)
if err != nil {
return nil, err
}
dbCfg := cfg.DBConfig
factory, err := database.NewOrmFactory(dbCfg)
if err != nil {
return nil, err
}
log.Debug("Got db config from env", "driver name", dbCfg.DriverName, "dsn", dbCfg.DSN)
return factory.GetDB(), nil
}
// ResetDB cleans or resets the database.
func ResetDB(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
var version int64
err = migrate.Rollback(db.DB, &version)
if err != nil {
return err
}
log.Info("successful to reset", "init version", version)
return nil
}
// CheckDBStatus checks the db status.
func CheckDBStatus(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
return migrate.Status(db.DB)
}
// DBVersion returns the latest version.
func DBVersion(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
version, err := migrate.Current(db.DB)
log.Info("show database version", "db version", version)
return err
}
// MigrateDB migrates the db.
func MigrateDB(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
return migrate.Migrate(db.DB)
}
// RollbackDB rolls back the db to the given version.
func RollbackDB(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
version := ctx.Int64("version")
return migrate.Rollback(db.DB, &version)
}

93
bridge/cmd/flags.go Normal file
View File

@@ -0,0 +1,93 @@
package main
import "github.com/urfave/cli/v2"
var (
commonFlags = []cli.Flag{
&configFileFlag,
&verbosityFlag,
&logFileFlag,
&logJSONFormat,
&logDebugFlag,
}
// configFileFlag load json type config file.
configFileFlag = cli.StringFlag{
Name: "config",
Usage: "JSON configuration file",
Value: "./config.json",
}
// verbosityFlag log level.
verbosityFlag = cli.IntFlag{
Name: "verbosity",
Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail",
Value: 3,
}
// logFileFlag decides where the logger output is sent. If this flag is left
// empty, it will log to stdout.
logFileFlag = cli.StringFlag{
Name: "log.file",
Usage: "Tells the sequencer where to write log entries",
}
logJSONFormat = cli.BoolFlag{
Name: "log.json",
Usage: "Tells the sequencer whether log format is json or not",
Value: true,
}
logDebugFlag = cli.BoolFlag{
Name: "log.debug",
Usage: "Prepends log messages with call-site location (file and line number)",
}
apiFlags = []cli.Flag{
&httpEnabledFlag,
&httpListenAddrFlag,
&httpPortFlag,
}
// httpEnabledFlag enable rpc server.
httpEnabledFlag = cli.BoolFlag{
Name: "http",
Usage: "Enable the HTTP-RPC server",
Value: false,
}
// httpListenAddrFlag set the http address.
httpListenAddrFlag = cli.StringFlag{
Name: "http.addr",
Usage: "HTTP-RPC server listening interface",
Value: "localhost",
}
// httpPortFlag set http.port.
httpPortFlag = cli.IntFlag{
Name: "http.port",
Usage: "HTTP-RPC server listening port",
Value: 8290,
}
l1Flags = []cli.Flag{
&l1ChainIDFlag,
&l1UrlFlag,
}
l1ChainIDFlag = cli.IntFlag{
Name: "l1.chainID",
Usage: "l1 chain id",
Value: 4,
}
l1UrlFlag = cli.StringFlag{
Name: "l1.endpoint",
Usage: "The endpoint connect to l1chain node",
Value: "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
}
l2Flags = []cli.Flag{
&l2ChainIDFlag,
&l2UrlFlag,
}
l2ChainIDFlag = cli.IntFlag{
Name: "l2.chainID",
Usage: "l2 chain id",
Value: 53077,
}
l2UrlFlag = cli.StringFlag{
Name: "l2.endpoint",
Usage: "The endpoint connect to l2chain node",
Value: "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
}
)

View File

@@ -1,7 +1,191 @@
package main
import "scroll-tech/bridge/cmd/app"
import (
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/database"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
)
func main() {
app.Run()
// Set up Bridge app info.
app := cli.NewApp()
app.Action = action
app.Name = "bridge"
app.Usage = "The Scroll Bridge"
app.Version = version.Version
app.Flags = append(app.Flags, commonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Flags = append(app.Flags, l1Flags...)
app.Flags = append(app.Flags, l2Flags...)
app.Before = func(ctx *cli.Context) error {
return utils.Setup(&utils.LogConfig{
LogFile: ctx.String(logFileFlag.Name),
LogJSONFormat: ctx.Bool(logJSONFormat.Name),
LogDebug: ctx.Bool(logDebugFlag.Name),
Verbosity: ctx.Int(verbosityFlag.Name),
})
}
app.Commands = []*cli.Command{
{
Name: "reset",
Usage: "Clean and reset database.",
Action: ResetDB,
Flags: []cli.Flag{
&configFileFlag,
},
},
{
Name: "status",
Usage: "Check migration status.",
Action: CheckDBStatus,
Flags: []cli.Flag{
&configFileFlag,
},
},
{
Name: "version",
Usage: "Display the current database version.",
Action: DBVersion,
Flags: []cli.Flag{
&configFileFlag,
},
},
{
Name: "migrate",
Usage: "Migrate the database to the latest version.",
Action: MigrateDB,
Flags: []cli.Flag{
&configFileFlag,
},
},
{
Name: "rollback",
Usage: "Roll back the database to a previous <version>. Rolls back a single migration if no version specified.",
Action: RollbackDB,
Flags: []cli.Flag{
&configFileFlag,
&cli.IntFlag{
Name: "version",
Usage: "Rollback to the specified version.",
Value: 0,
},
},
},
}
// Run the bridge.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func applyConfig(ctx *cli.Context, cfg *config.Config) {
if ctx.IsSet(l1ChainIDFlag.Name) {
cfg.L1Config.ChainID = ctx.Int64(l1ChainIDFlag.Name)
}
if ctx.IsSet(l1UrlFlag.Name) {
cfg.L1Config.Endpoint = ctx.String(l1UrlFlag.Name)
}
if ctx.IsSet(l2ChainIDFlag.Name) {
cfg.L2Config.ChainID = ctx.Int64(l2ChainIDFlag.Name)
}
if ctx.IsSet(l2UrlFlag.Name) {
cfg.L2Config.Endpoint = ctx.String(l2UrlFlag.Name)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(configFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
applyConfig(ctx, cfg)
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
var (
l1Backend *l1.Backend
l2Backend *l2.Backend
)
// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
if err != nil {
return err
}
l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
if err != nil {
return err
}
defer func() {
l1Backend.Stop()
l2Backend.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", err)
}
}()
// Start all modules.
if err = l1Backend.Start(); err != nil {
log.Crit("couldn't start l1 backend", "error", err)
}
if err = l2Backend.Start(); err != nil {
log.Crit("couldn't start l2 backend", "error", err)
}
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
srv := rpc.NewServer()
apis := l2Backend.APIs()
for _, api := range apis {
if err = srv.RegisterName(api.Namespace, api.Service); err != nil {
log.Crit("register namespace failed", "namespace", api.Namespace, "error", err)
}
}
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
rpc.DefaultHTTPTimeouts,
srv)
if err != nil {
log.Crit("Could not start RPC api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}

View File

@@ -1,9 +1,9 @@
{
"l1_config": {
"confirmations": "0x6",
"confirmations": 6,
"chain_id": 4,
"endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
"l1_messenger_address": "0x0000000000000000000000000000000000000000",
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"start_height": 0,
"relayer_config": {
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
@@ -11,53 +11,39 @@
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"check_pending_time": 3,
"escalate_blocks": 100,
"confirmations": "0x1",
"confirmations": 1,
"escalate_multiple_num": 11,
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000
"tx_type": "AccessListTx"
},
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
]
"private_key": "abc123abc123abc123abc123abc123abc123abc123abc123abc123abc123abc1"
}
},
"l2_config": {
"confirmations": "0x1",
"confirmations": 1,
"chain_id": 53077,
"proof_generation_freq": 1,
"skipped_opcodes": [
"CREATE2",
"DELEGATECALL"
],
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"l2_messenger_address": "0x0000000000000000000000000000000000000000",
"relayer_config": {
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
"check_pending_time": 10,
"escalate_blocks": 100,
"confirmations": "0x6",
"confirmations": 6,
"escalate_multiple_num": 11,
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000
"tx_type": "DynamicFeeTx"
},
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
],
"rollup_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
]
},
"batch_proposer_config": {
"proof_generation_freq": 1,
"batch_gas_threshold": 3000000,
"batch_tx_num_threshold": 135,
"batch_time_sec": 300,
"batch_blocks_limit": 100,
"skipped_opcodes": [
"CREATE2",
"DELEGATECALL"
]
"private_key": "1212121212121212121212121212121212121212121212121212121212121212"
}
},
"db_config": {

View File

@@ -5,14 +5,84 @@ import (
"os"
"path/filepath"
"scroll-tech/database"
"github.com/scroll-tech/go-ethereum/common"
"scroll-tech/common/utils"
db_config "scroll-tech/database"
)
// SenderConfig The config for transaction sender
type SenderConfig struct {
// The RPC endpoint of the ethereum or scroll public node.
Endpoint string `json:"endpoint"`
// The interval at which the sender checks pending txs.
CheckPendingTime uint64 `json:"check_pending_time"`
// The number of blocks to wait before escalating the gas price of the transaction.
EscalateBlocks uint64 `json:"escalate_blocks"`
// The number of blocks between a block being confirmed and the latest block.
Confirmations uint64 `json:"confirmations"`
// The numerator of gas price escalate multiple.
EscalateMultipleNum uint64 `json:"escalate_multiple_num"`
// The denominator of gas price escalate multiple.
EscalateMultipleDen uint64 `json:"escalate_multiple_den"`
// The maximum gas price that can be used to send a transaction.
MaxGasPrice uint64 `json:"max_gas_price"`
// the transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
TxType string `json:"tx_type"`
}
// L1Config loads l1eth configuration items.
type L1Config struct {
// Confirmations block height confirmations number.
Confirmations uint64 `json:"confirmations"`
// l1 chainID.
ChainID int64 `json:"chain_id"`
// l1 eth node url.
Endpoint string `json:"endpoint"`
// The start height to sync event from layer 1
StartHeight uint64 `json:"start_height"`
// The messenger contract address deployed on layer 1 chain.
L1MessengerAddress common.Address `json:"l1_messenger_address,omitempty"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
}
// L2Config loads l2geth configuration items.
type L2Config struct {
// Confirmations block height confirmations number.
Confirmations uint64 `json:"confirmations"`
// l2geth chainId.
ChainID int64 `json:"chain_id"`
// l2geth node url.
Endpoint string `json:"endpoint"`
// The messenger contract address deployed on layer 2 chain.
L2MessengerAddress common.Address `json:"l2_messenger_address,omitempty"`
// Proof generation frequency, generating proof every k blocks
ProofGenerationFreq uint64 `json:"proof_generation_freq"`
// Skip generating a proof when these opcodes appear
SkippedOpcodes []string `json:"skipped_opcodes"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
}
// RelayerConfig loads relayer configuration items.
type RelayerConfig struct {
// RollupContractAddress store the rollup contract address.
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
// MessengerContractAddress store the scroll messenger contract address.
MessengerContractAddress common.Address `json:"messenger_contract_address"`
// sender config
SenderConfig *SenderConfig `json:"sender_config"`
// The private key of the relayer
PrivateKey string `json:"private_key"`
}
// Config load configuration items.
type Config struct {
L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"`
DBConfig *database.DBConfig `json:"db_config"`
L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"`
DBConfig *db_config.DBConfig `json:"db_config"`
}
// NewConfig returns a new instance of Config.
@@ -28,5 +98,9 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}
// override values with env fields
cfg.DBConfig.DSN = utils.GetEnvWithDefault("DB_DSN", cfg.DBConfig.DSN)
cfg.DBConfig.DriverName = utils.GetEnvWithDefault("DB_DRIVER", cfg.DBConfig.DriverName)
return cfg, nil
}

View File

@@ -1,39 +0,0 @@
package config_test
import (
"encoding/json"
"fmt"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/config"
)
func TestConfig(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.True(t, assert.NoError(t, err), "failed to load config")
assert.True(t, len(cfg.L2Config.BatchProposerConfig.SkippedOpcodes) > 0)
assert.True(t, len(cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys) > 0)
assert.True(t, len(cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys) > 0)
assert.True(t, len(cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys) > 0)
data, err := json.Marshal(cfg)
assert.NoError(t, err)
tmpJosn := fmt.Sprintf("/tmp/%d_bridge_config.json", time.Now().Nanosecond())
defer func() { _ = os.Remove(tmpJosn) }()
assert.NoError(t, os.WriteFile(tmpJosn, data, 0644))
cfg2, err := config.NewConfig(tmpJosn)
assert.NoError(t, err)
assert.Equal(t, cfg.L1Config, cfg2.L1Config)
assert.Equal(t, cfg.L2Config, cfg2.L2Config)
assert.Equal(t, cfg.DBConfig, cfg2.DBConfig)
}

View File

@@ -1,22 +0,0 @@
package config
import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/rpc"
)
// L1Config loads l1eth configuration items.
type L1Config struct {
// Confirmations block height confirmations number.
Confirmations rpc.BlockNumber `json:"confirmations"`
// l1 eth node url.
Endpoint string `json:"endpoint"`
// The start height to sync event from layer 1
StartHeight uint64 `json:"start_height"`
// The messenger contract address deployed on layer 1 chain.
L1MessengerAddress common.Address `json:"l1_messenger_address"`
// The rollup contract address deployed on layer 1 chain.
RollupContractAddress common.Address `json:"rollup_contract_address"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
}

View File

@@ -1,75 +0,0 @@
package config
import (
"encoding/json"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/scroll-tech/go-ethereum/common"
)
// L2Config loads l2geth configuration items.
type L2Config struct {
// Confirmations block height confirmations number.
Confirmations rpc.BlockNumber `json:"confirmations"`
// l2geth node url.
Endpoint string `json:"endpoint"`
// The messenger contract address deployed on layer 2 chain.
L2MessengerAddress common.Address `json:"l2_messenger_address,omitempty"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
// The batch_proposer config
BatchProposerConfig *BatchProposerConfig `json:"batch_proposer_config"`
}
// BatchProposerConfig loads l2watcher batch_proposer configuration items.
type BatchProposerConfig struct {
// Proof generation frequency, generating proof every k blocks
ProofGenerationFreq uint64 `json:"proof_generation_freq"`
// Txnum threshold in a batch
BatchTxNumThreshold uint64 `json:"batch_tx_num_threshold"`
// Gas threshold in a batch
BatchGasThreshold uint64 `json:"batch_gas_threshold"`
// Time to wait before generating a batch even if gas_threshold is not met
BatchTimeSec uint64 `json:"batch_time_sec"`
// Max number of blocks in a batch
BatchBlocksLimit uint64 `json:"batch_blocks_limit"`
// Skip generating a proof when these opcodes appear
SkippedOpcodes map[string]struct{} `json:"-"`
}
// batchProposerConfigAlias RelayerConfig alias name
type batchProposerConfigAlias BatchProposerConfig
// UnmarshalJSON unmarshal BatchProposerConfig config struct.
func (b *BatchProposerConfig) UnmarshalJSON(input []byte) error {
var jsonConfig struct {
batchProposerConfigAlias
SkippedOpcodes []string `json:"skipped_opcodes,omitempty"`
}
if err := json.Unmarshal(input, &jsonConfig); err != nil {
return err
}
*b = BatchProposerConfig(jsonConfig.batchProposerConfigAlias)
b.SkippedOpcodes = make(map[string]struct{}, len(jsonConfig.SkippedOpcodes))
for _, opcode := range jsonConfig.SkippedOpcodes {
b.SkippedOpcodes[opcode] = struct{}{}
}
return nil
}
// MarshalJSON marshal BatchProposerConfig in order to transfer skipOpcodes.
func (b *BatchProposerConfig) MarshalJSON() ([]byte, error) {
jsonConfig := struct {
batchProposerConfigAlias
SkippedOpcodes []string `json:"skipped_opcodes,omitempty"`
}{batchProposerConfigAlias(*b), nil}
// Load skipOpcodes.
for op := range b.SkippedOpcodes {
jsonConfig.SkippedOpcodes = append(jsonConfig.SkippedOpcodes, op)
}
return json.Marshal(&jsonConfig)
}
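The removed BatchProposerConfig uses an alias type to convert the JSON `skipped_opcodes` list into a set and back. A standalone sketch of the same round-trip pattern follows, using a simplified stand-in type (`opcodeSet` is hypothetical, not part of the codebase):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// opcodeSet mirrors the list-to-set conversion above: the JSON form is a
// string list, the in-memory form is a set for O(1) lookups.
type opcodeSet struct {
	Skipped map[string]struct{} `json:"-"`
}

type opcodeSetAlias struct {
	SkippedOpcodes []string `json:"skipped_opcodes,omitempty"`
}

func (o *opcodeSet) UnmarshalJSON(input []byte) error {
	var alias opcodeSetAlias
	if err := json.Unmarshal(input, &alias); err != nil {
		return err
	}
	o.Skipped = make(map[string]struct{}, len(alias.SkippedOpcodes))
	for _, op := range alias.SkippedOpcodes {
		o.Skipped[op] = struct{}{}
	}
	return nil
}

func (o *opcodeSet) MarshalJSON() ([]byte, error) {
	alias := opcodeSetAlias{}
	for op := range o.Skipped {
		alias.SkippedOpcodes = append(alias.SkippedOpcodes, op)
	}
	return json.Marshal(&alias)
}

func main() {
	var cfg opcodeSet
	if err := json.Unmarshal([]byte(`{"skipped_opcodes":["CREATE2","DELEGATECALL"]}`), &cfg); err != nil {
		panic(err)
	}
	_, ok := cfg.Skipped["CREATE2"]
	fmt.Println("CREATE2 skipped:", ok) // true
	out, _ := json.Marshal(&cfg)
	fmt.Println(string(out))
}
```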

View File

@@ -1,108 +0,0 @@
package config
import (
"crypto/ecdsa"
"encoding/json"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rpc"
)
// SenderConfig The config for transaction sender
type SenderConfig struct {
// The RPC endpoint of the ethereum or scroll public node.
Endpoint string `json:"endpoint"`
// The interval at which the sender checks pending txs.
CheckPendingTime uint64 `json:"check_pending_time"`
// The number of blocks to wait before escalating the gas price of the transaction.
EscalateBlocks uint64 `json:"escalate_blocks"`
// The number of blocks between a block being confirmed and the latest block.
Confirmations rpc.BlockNumber `json:"confirmations"`
// The numerator of gas price escalate multiple.
EscalateMultipleNum uint64 `json:"escalate_multiple_num"`
// The denominator of gas price escalate multiple.
EscalateMultipleDen uint64 `json:"escalate_multiple_den"`
// The maximum gas price that can be used to send a transaction.
MaxGasPrice uint64 `json:"max_gas_price"`
// The transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
TxType string `json:"tx_type"`
// The min balance used when checking and topping up the sender's accounts.
MinBalance *big.Int `json:"min_balance,omitempty"`
}
// RelayerConfig loads relayer configuration items.
// What we need to pay attention to is that
// `MessageSenderPrivateKeys` and `RollupSenderPrivateKeys` cannot have common private keys.
type RelayerConfig struct {
// RollupContractAddress store the rollup contract address.
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
// MessengerContractAddress store the scroll messenger contract address.
MessengerContractAddress common.Address `json:"messenger_contract_address"`
// sender config
SenderConfig *SenderConfig `json:"sender_config"`
// The private key of the relayer
MessageSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
RollupSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
}
// relayerConfigAlias RelayerConfig alias name
type relayerConfigAlias RelayerConfig
// UnmarshalJSON unmarshal relayer_config struct.
func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
var jsonConfig struct {
relayerConfigAlias
// The private key of the relayer
MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
RollupSenderPrivateKeys []string `json:"rollup_sender_private_keys,omitempty"`
}
if err := json.Unmarshal(input, &jsonConfig); err != nil {
return err
}
// Get messenger private key list.
*r = RelayerConfig(jsonConfig.relayerConfigAlias)
for _, privStr := range jsonConfig.MessageSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
return fmt.Errorf("incorrect private_key_list format, err: %v", err)
}
r.MessageSenderPrivateKeys = append(r.MessageSenderPrivateKeys, priv)
}
// Get rollup private key
for _, privStr := range jsonConfig.RollupSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
return fmt.Errorf("incorrect roller_private_key format, err: %v", err)
}
r.RollupSenderPrivateKeys = append(r.RollupSenderPrivateKeys, priv)
}
return nil
}
// MarshalJSON marshal RelayerConfig config, transfer private keys.
func (r *RelayerConfig) MarshalJSON() ([]byte, error) {
jsonConfig := struct {
relayerConfigAlias
// The private key of the relayer
MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
RollupSenderPrivateKeys []string `json:"rollup_sender_private_keys,omitempty"`
}{relayerConfigAlias(*r), nil, nil}
// Transfer message sender private keys to hex type.
for _, priv := range r.MessageSenderPrivateKeys {
jsonConfig.MessageSenderPrivateKeys = append(jsonConfig.MessageSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
}
// Transfer rollup sender private keys to hex type.
for _, priv := range r.RollupSenderPrivateKeys {
jsonConfig.RollupSenderPrivateKeys = append(jsonConfig.RollupSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
}
return json.Marshal(&jsonConfig)
}
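The removed RelayerConfig decodes hex-encoded private keys with `common.FromHex` and `crypto.ToECDSA`. A minimal standalone sketch of that conversion, assuming the same go-ethereum helpers and reusing the placeholder key from config.json:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// Throwaway example key, matching the placeholder used in config.json.
	privStr := "1212121212121212121212121212121212121212121212121212121212121212"
	priv, err := crypto.ToECDSA(common.FromHex(privStr))
	if err != nil {
		panic(fmt.Errorf("incorrect private key format: %w", err))
	}
	// Derive the sender address from the decoded key.
	fmt.Println("sender address:", crypto.PubkeyToAddress(priv.PublicKey).Hex())
}
```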

View File

@@ -3,43 +3,40 @@ module scroll-tech/bridge
go 1.18
require (
github.com/iden3/go-iden3-crypto v0.0.13
github.com/ethereum/go-ethereum v1.10.13
github.com/gorilla/websocket v1.4.2
github.com/jmoiron/sqlx v1.3.5
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
modernc.org/mathutil v1.4.1
github.com/urfave/cli/v2 v2.3.0
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
)
require (
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/ethereum/go-ethereum v1.10.26 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.12 // indirect
github.com/lib/pq v1.10.6 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.4.3 // indirect
github.com/russross/blackfriday/v2 v2.0.1 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -71,10 +71,10 @@ github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOC
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -82,9 +82,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
@@ -93,9 +92,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dchest/blake512 v1.0.0/go.mod h1:FV1x7xPPLWukZlpDpWQ88rF/SFwZ5qbskrzhLMB92JI=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
@@ -110,9 +108,8 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.10.13 h1:DEYFP9zk+Gruf3ae1JOJVhNmxK28ee+sMELPLgYTXpA=
github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
@@ -139,6 +136,8 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@@ -189,9 +188,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
@@ -204,13 +202,12 @@ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iU
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/iden3/go-iden3-crypto v0.0.12 h1:dXZF+R9iI07DK49LHX/EKC3jTa0O2z+TUyvxjGK7V38=
github.com/iden3/go-iden3-crypto v0.0.12/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo=
github.com/iden3/go-iden3-crypto v0.0.13 h1:ixWRiaqDULNyIDdOWz2QQJG5t4PpNHkQk2P6GV94cok=
github.com/iden3/go-iden3-crypto v0.0.13/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
@@ -223,12 +220,14 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@@ -249,9 +248,8 @@ github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH6
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -262,6 +260,9 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
@@ -282,6 +283,9 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -316,7 +320,6 @@ github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHu
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -336,30 +339,23 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d h1:eh1i1M9BKPCQckNFQsV/yfazQ895hevkWr8GuqhKNrk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d/go.mod h1:SkQ1431r0LkrExTELsapw6JQHYpki8O1mSsObTSmBWU=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -393,16 +389,13 @@ github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.10.2 h1:x3p8awjp/2arX+Nl/G2040AZpOCHS/eMJJ1/a+mye4Y=
github.com/urfave/cli/v2 v2.10.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
@@ -424,8 +417,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -493,9 +486,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -540,8 +532,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -552,8 +544,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -663,7 +655,5 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=

View File

@@ -4,9 +4,11 @@ import (
"context"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database"
"scroll-tech/database/orm"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
)
@@ -16,22 +18,28 @@ type Backend struct {
cfg *config.L1Config
watcher *Watcher
relayer *Layer1Relayer
orm database.OrmFactory
orm orm.Layer1MessageOrm
}
// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*Backend, error) {
func New(ctx context.Context, cfg *config.L1Config, orm orm.Layer1MessageOrm) (*Backend, error) {
client, err := ethclient.Dial(cfg.Endpoint)
if err != nil {
return nil, err
}
relayer, err := NewLayer1Relayer(ctx, orm, cfg.RelayerConfig)
l1MessengerABI, err := bridge_abi.L1MessengerMetaData.GetAbi()
if err != nil {
log.Warn("new L1MessengerABI failed", "err", err)
return nil, err
}
relayer, err := NewLayer1Relayer(ctx, client, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RollupContractAddress, orm)
watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, l1MessengerABI, orm)
return &Backend{
cfg: cfg,

View File

@@ -1,65 +0,0 @@
package l1
import (
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
// docker consider handler.
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
)
func setupEnv(t *testing.T) {
// Load config.
var err error
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
cfg.L1Config.Endpoint = l1gethImg.Endpoint()
// Create l2geth container.
l2gethImg = docker.NewTestL2Docker(t)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
cfg.DBConfig.DSN = dbImg.Endpoint()
}
func free(t *testing.T) {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
if l1gethImg != nil {
assert.NoError(t, l1gethImg.Stop())
}
if l2gethImg != nil {
assert.NoError(t, l2gethImg.Stop())
}
}
func TestL1(t *testing.T) {
setupEnv(t)
t.Run("testCreateNewL1Relayer", testCreateNewL1Relayer)
t.Run("testStartWatcher", testStartWatcher)
t.Cleanup(func() {
free(t)
})
}
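
The removed harness above wires three containers (l1geth, l2geth, and the database) into the bridge config before the subtests run and tears them down in free(t) afterwards. A slightly more compact arrangement of the same idea, assuming the docker helpers shown above keep the same signatures, registers each teardown with t.Cleanup as soon as the container starts, so nothing is leaked if setup fails halfway:

package l1

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"scroll-tech/bridge/config"
	"scroll-tech/common/docker"
)

// setupEnv starts the containers needed by the L1 tests and registers their
// teardown immediately, so a failure halfway through leaks nothing.
func setupEnv(t *testing.T) *config.Config {
	cfg, err := config.NewConfig("../config.json")
	assert.NoError(t, err)

	l1gethImg := docker.NewTestL1Docker(t)
	t.Cleanup(func() { assert.NoError(t, l1gethImg.Stop()) })
	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
	cfg.L1Config.Endpoint = l1gethImg.Endpoint()

	l2gethImg := docker.NewTestL2Docker(t)
	t.Cleanup(func() { assert.NoError(t, l2gethImg.Stop()) })
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
	cfg.L2Config.Endpoint = l2gethImg.Endpoint()

	dbImg := docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
	t.Cleanup(func() { assert.NoError(t, dbImg.Stop()) })
	cfg.DBConfig.DSN = dbImg.Endpoint()

	return cfg
}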

View File

@@ -2,7 +2,6 @@ package l1
import (
"context"
"errors"
"math/big"
"time"
@@ -11,6 +10,7 @@ import (
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database/orm"
@@ -21,16 +21,17 @@ import (
)
// Layer1Relayer is responsible for
// 1. fetch pending L1Message from db
// 1. fetch pending Layer1Message from db
// 2. relay pending message to layer 2 node
//
// Actions are triggered by new head from layer 1 geth node.
// @todo It's better to be triggered by watcher.
type Layer1Relayer struct {
ctx context.Context
client *ethclient.Client
sender *sender.Sender
db orm.L1MessageOrm
db orm.Layer1MessageOrm
cfg *config.RelayerConfig
// channel used to communicate with transaction sender
@@ -41,22 +42,28 @@ type Layer1Relayer struct {
}
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
func NewLayer1Relayer(ctx context.Context, db orm.L1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1ConfirmNum int64, db orm.Layer1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
if err != nil {
log.Warn("new L2MessengerABI failed", "err", err)
return nil, err
}
sender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
prv, err := crypto.HexToECDSA(cfg.PrivateKey)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
log.Error("new sender failed", "main address", addr.String(), "err", err)
log.Error("Failed to import private key from config file")
return nil, err
}
sender, err := sender.NewSender(ctx, cfg.SenderConfig, prv)
if err != nil {
log.Error("new sender failed", "err", err)
return nil, err
}
return &Layer1Relayer{
ctx: ctx,
client: ethClient,
sender: sender,
db: db,
l2MessengerABI: l2MessengerABI,
@@ -69,29 +76,17 @@ func NewLayer1Relayer(ctx context.Context, db orm.L1MessageOrm, cfg *config.Rela
// ProcessSavedEvents relays saved unprocessed cross-domain transactions to the desired blockchain
func (r *Layer1Relayer) ProcessSavedEvents() {
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending, 100)
msgs, err := r.db.GetL1UnprocessedMessages()
if err != nil {
log.Error("Failed to fetch unprocessed L1 messages", "err", err)
return
}
if len(msgs) > 0 {
log.Info("Processing L1 messages", "count", len(msgs))
if len(msgs) == 0 {
return
}
for _, msg := range msgs {
if err = r.processSavedEvent(msg); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
}
return
}
}
}
func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
msg := msgs[0]
// @todo add support to relay multiple messages
from := common.HexToAddress(msg.Sender)
sender := common.HexToAddress(msg.Sender)
target := common.HexToAddress(msg.Target)
value, ok := big.NewInt(0).SetString(msg.Value, 10)
if !ok {
@@ -103,30 +98,24 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
deadline := big.NewInt(int64(msg.Deadline))
msgNonce := big.NewInt(int64(msg.Nonce))
calldata := common.Hex2Bytes(msg.Calldata)
data, err := r.l2MessengerABI.Pack("relayMessage", from, target, value, fee, deadline, msgNonce, calldata)
data, err := r.l2MessengerABI.Pack("relayMessage", sender, target, value, fee, deadline, msgNonce, calldata)
if err != nil {
log.Error("Failed to pack relayMessage", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return err
return
}
hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
}
hash, err := r.sender.SendTransaction(msg.Layer1Hash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil {
return err
log.Error("Failed to send relayMessage tx to L2", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
return
}
log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
log.Info("relayMessage to layer2", "layer1 hash", msg.Layer1Hash, "tx hash", hash)
err = r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
err = r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.Layer1Hash, hash.String(), orm.MsgSubmitted)
if err != nil {
log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.msgHash", msg.MsgHash, "msg.height", msg.Height, "err", err)
log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.layer1hash", msg.Layer1Hash, "msg.height", msg.Height, "err", err)
}
return err
}
// Start the relayer process
@@ -142,17 +131,12 @@ func (r *Layer1Relayer) Start() {
// number, err := r.client.BlockNumber(r.ctx)
// log.Info("receive header", "height", number)
r.ProcessSavedEvents()
case cfm := <-r.confirmationCh:
if !cfm.IsSuccessful {
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, orm.MsgConfirmed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
case cfm := <-r.confirmationCh: // @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, cfm.TxHash.String(), orm.MsgConfirmed)
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
case <-r.stopCh:
return
}
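
Both sides of this diff end up doing the same thing when a message is relayed: pack the stored message fields into relayMessage calldata with the L2 messenger ABI, send the transaction, and record the returned hash. The snippet below sketches just the packing step; the ABI fragment and the field values are illustrative placeholders (the repository uses the generated L2MessengerMetaData binding), but the argument order matches the Pack call above.

package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
)

// Placeholder ABI fragment for relayMessage; the real ABI comes from bridge_abi.
const l2MessengerABIJSON = `[{"inputs":[
  {"name":"_sender","type":"address"},
  {"name":"_target","type":"address"},
  {"name":"_value","type":"uint256"},
  {"name":"_fee","type":"uint256"},
  {"name":"_deadline","type":"uint256"},
  {"name":"_nonce","type":"uint256"},
  {"name":"_message","type":"bytes"}],
  "name":"relayMessage","outputs":[],"stateMutability":"nonpayable","type":"function"}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(l2MessengerABIJSON))
	if err != nil {
		panic(err)
	}

	// Field values mirror what the relayer reads from the stored message row.
	sender := common.HexToAddress("0x1111111111111111111111111111111111111111")
	target := common.HexToAddress("0x2222222222222222222222222222222222222222")
	value, ok := new(big.Int).SetString("1000000000000000000", 10) // value is stored as a decimal string
	if !ok {
		panic("invalid value string")
	}
	fee := big.NewInt(0)
	deadline := big.NewInt(1700000000)
	nonce := big.NewInt(1)
	calldata := common.Hex2Bytes("deadbeef")

	data, err := parsed.Pack("relayMessage", sender, target, value, fee, deadline, nonce, calldata)
	if err != nil {
		panic(err)
	}
	fmt.Printf("relayMessage calldata: 0x%x\n", data)
}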

View File

@@ -1,27 +1,58 @@
package l1
package l1_test
import (
"context"
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database/migrate"
"scroll-tech/database"
"scroll-tech/bridge/mock"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/common/utils"
)
// testCreateNewRelayer tests creating a new relayer instance and stopping it
func testCreateNewL1Relayer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
var TEST_CONFIG = &mock.TestConfig{
L1GethTestConfig: mock.L1GethTestConfig{
HPort: 0,
WPort: 8570,
},
DbTestconfig: mock.DbTestconfig{
DbName: "testl1relayer_db",
DbPort: 5440,
DB_CONFIG: &database.DBConfig{
DriverName: utils.GetEnvWithDefault("TEST_DB_DRIVER", "postgres"),
DSN: utils.GetEnvWithDefault("TEST_DB_DSN", "postgres://postgres:123456@localhost:5440/testl1relayer_db?sslmode=disable"),
},
},
}
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
// TestCreateNewRelayer tests creating a new relayer instance and stopping it
func TestCreateNewL1Relayer(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
l1docker := mock.NewTestL1Docker(t, TEST_CONFIG)
defer l1docker.Stop()
client, err := ethclient.Dial(l1docker.Endpoint())
assert.NoError(t, err)
dbImg := mock.GetDbDocker(t, TEST_CONFIG)
dbImg.Start()
defer dbImg.Stop()
db, err := database.NewOrmFactory(TEST_CONFIG.DB_CONFIG)
assert.NoError(t, err)
relayer, err := l1.NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
relayer.Start()
defer relayer.Stop()
}

View File

@@ -5,52 +5,33 @@ import (
"math/big"
"time"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/database"
"scroll-tech/database/orm"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
)
var (
bridgeL1MsgSyncHeightGauge = metrics.NewRegisteredGauge("bridge/l1/msg/sync/height", nil)
const (
// keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
sentMessageEventSignature = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}
type rollupEvent struct {
batchID common.Hash
txHash common.Hash
status orm.RollupStatus
}
// Watcher will listen for smart contract events from Eth L1.
type Watcher struct {
ctx context.Context
client *ethclient.Client
db database.OrmFactory
db orm.Layer1MessageOrm
// The number of new blocks to wait for a block to be confirmed
confirmations rpc.BlockNumber
confirmations uint64
messengerAddress common.Address
messengerABI *abi.ABI
rollupAddress common.Address
rollupABI *abi.ABI
// The height of the block that the watcher has retrieved event logs
processedMsgHeight uint64
@@ -59,7 +40,7 @@ type Watcher struct {
// NewWatcher returns a new instance of Watcher. The instance will not be fully prepared,
// and still needs to be finalized and run by calling `watcher.Start`.
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress common.Address, rollupAddress common.Address, db database.OrmFactory) *Watcher {
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations uint64, messengerAddress common.Address, messengerABI *abi.ABI, db orm.Layer1MessageOrm) *Watcher {
savedHeight, err := db.GetLayer1LatestWatchedHeight()
if err != nil {
log.Warn("Failed to fetch height from db", "err", err)
@@ -77,265 +58,123 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
db: db,
confirmations: confirmations,
messengerAddress: messengerAddress,
messengerABI: bridge_abi.L1MessengerMetaABI,
rollupAddress: rollupAddress,
rollupABI: bridge_abi.RollupMetaABI,
messengerABI: messengerABI,
processedMsgHeight: uint64(savedHeight),
stop: stop,
}
}
// Start the Watcher module.
func (w *Watcher) Start() {
func (r *Watcher) Start() {
go func() {
// trigger by timer
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for ; true; <-ticker.C {
for {
select {
case <-w.stop:
return
default:
number, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
case <-ticker.C:
blockNumber, err := r.client.BlockNumber(r.ctx)
if err != nil {
log.Error("failed to get block number", "err", err)
continue
log.Error("Failed to get block number", "err", err)
}
if err := w.FetchContractEvent(number); err != nil {
if err := r.fetchContractEvent(blockNumber); err != nil {
log.Error("Failed to fetch bridge contract", "err", err)
}
case <-r.stop:
return
}
}
}()
}
// Stop the Watcher module, for a graceful shutdown.
func (w *Watcher) Stop() {
w.stop <- true
func (r *Watcher) Stop() {
r.stop <- true
}
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pull latest event logs from given contract address and save in DB
func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
defer func() {
log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
func (r *Watcher) fetchContractEvent(blockHeight uint64) error {
fromBlock := int64(r.processedMsgHeight) + 1
toBlock := int64(blockHeight) - int64(r.confirmations)
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight)
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
to := from + contractEventsBlocksFetchLimit - 1
if to > toBlock {
to = toBlock
}
// warning: uint int conversion...
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.rollupAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
query.Topics[0][3] = common.HexToHash(bridge_abi.CommitBatchEventSignature)
query.Topics[0][4] = common.HexToHash(bridge_abi.FinalizedBatchEventSignature)
logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil {
log.Warn("Failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
bridgeL1MsgSyncHeightGauge.Update(to)
continue
}
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("Failed to parse emitted events log", "err", err)
return err
}
log.Info("L1 events types", "SentMessageCount", len(sentMessageEvents), "RelayedMessageCount", len(relayedMessageEvents), "RollupEventCount", len(rollupEvents))
// use rollup event to update rollup results db status
var batchIDs []string
for _, event := range rollupEvents {
batchIDs = append(batchIDs, event.batchID.String())
}
statuses, err := w.db.GetRollupStatusByIDList(batchIDs)
if err != nil {
log.Error("Failed to GetRollupStatusByIDList", "err", err)
return err
}
if len(statuses) != len(batchIDs) {
log.Error("RollupStatus.Length mismatch with BatchIDs.Length", "RollupStatus.Length", len(statuses), "BatchIDs.Length", len(batchIDs))
return nil
}
for index, event := range rollupEvents {
batchID := event.batchID.String()
status := statuses[index]
// only update when db status is before event status
if event.status > status {
if event.status == orm.RollupFinalized {
err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
} else if event.status == orm.RollupCommitted {
err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
}
if err != nil {
log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
return err
}
}
}
// Update relayed message first to make sure we don't forget to update submitted message.
// Since, we always start sync from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
}
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err
}
}
if err = w.db.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
return err
}
w.processedMsgHeight = uint64(to)
bridgeL1MsgSyncHeightGauge.Update(to)
if toBlock < fromBlock {
return nil
}
return nil
// warning: uint int conversion...
query := ethereum.FilterQuery{
FromBlock: big.NewInt(fromBlock), // inclusive
ToBlock: big.NewInt(toBlock), // inclusive
Addresses: []common.Address{
r.messengerAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 1)
query.Topics[0][0] = common.HexToHash(sentMessageEventSignature)
logs, err := r.client.FilterLogs(r.ctx, query)
if err != nil {
log.Warn("Failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
return nil
}
log.Info("Received new L1 messages", "fromBlock", fromBlock, "toBlock", toBlock,
"cnt", len(logs))
eventLogs, err := parseBridgeEventLogs(logs, r.messengerABI)
if err != nil {
log.Error("Failed to parse emitted events log", "err", err)
return err
}
err = r.db.SaveLayer1Messages(r.ctx, eventLogs)
if err == nil {
r.processedMsgHeight = uint64(toBlock)
}
return err
}
func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
func parseBridgeEventLogs(logs []types.Log, messengerABI *abi.ABI) ([]*orm.Layer1Message, error) {
// Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up
var l1Messages []*orm.L1Message
var relayedMessages []relayedMessage
var rollupEvents []rollupEvent
var parsedlogs []*orm.Layer1Message
for _, vLog := range logs {
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SentMessageEventSignature):
event := struct {
Target common.Address
Sender common.Address
Value *big.Int // uint256
Fee *big.Int // uint256
Deadline *big.Int // uint256
Message []byte
MessageNonce *big.Int // uint256
GasLimit *big.Int // uint256
}{}
event := struct {
Target common.Address
Sender common.Address
Value *big.Int // uint256
Fee *big.Int // uint256
Deadline *big.Int // uint256
Message []byte
MessageNonce *big.Int // uint256
GasLimit *big.Int // uint256
}{}
err := w.messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
if err != nil {
log.Warn("Failed to unpack layer1 SentMessage event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
// target is in topics[1]
event.Target = common.HexToAddress(vLog.Topics[1].String())
l1Messages = append(l1Messages, &orm.L1Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Fee: event.Fee.String(),
GasLimit: event.GasLimit.Uint64(),
Deadline: event.Deadline.Uint64(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer1Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
// MsgHash is in topics[1]
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MsgHash,
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
// MsgHash is in topics[1]
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MsgHash,
txHash: vLog.TxHash,
isSuccessful: false,
})
case common.HexToHash(bridge_abi.CommitBatchEventSignature):
event := struct {
BatchID common.Hash
BatchHash common.Hash
BatchIndex *big.Int
ParentHash common.Hash
}{}
// BatchID is in topics[1]
event.BatchID = common.HexToHash(vLog.Topics[1].String())
err := w.rollupABI.UnpackIntoInterface(&event, "CommitBatch", vLog.Data)
if err != nil {
log.Warn("Failed to unpack layer1 CommitBatch event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
rollupEvents = append(rollupEvents, rollupEvent{
batchID: event.BatchID,
txHash: vLog.TxHash,
status: orm.RollupCommitted,
})
case common.HexToHash(bridge_abi.FinalizedBatchEventSignature):
event := struct {
BatchID common.Hash
BatchHash common.Hash
BatchIndex *big.Int
ParentHash common.Hash
}{}
// BatchID is in topics[1]
event.BatchID = common.HexToHash(vLog.Topics[1].String())
err := w.rollupABI.UnpackIntoInterface(&event, "FinalizeBatch", vLog.Data)
if err != nil {
log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
rollupEvents = append(rollupEvents, rollupEvent{
batchID: event.BatchID,
txHash: vLog.TxHash,
status: orm.RollupFinalized,
})
default:
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
err := messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
if err != nil {
log.Warn("Failed to unpack layer1 SentMessage event", "err", err)
return parsedlogs, err
}
// target is in topics[1]
event.Target = common.HexToAddress(vLog.Topics[1].String())
parsedlogs = append(parsedlogs, &orm.Layer1Message{
Nonce: event.MessageNonce.Uint64(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Fee: event.Fee.String(),
GasLimit: event.GasLimit.Uint64(),
Deadline: event.Deadline.Uint64(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer1Hash: vLog.TxHash.Hex(),
})
}
return l1Messages, relayedMessages, rollupEvents, nil
return parsedlogs, nil
}
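
Stripped of the database writes, both watcher variants follow the same event-polling recipe: cap the range at a confirmed block height, query the messenger address for the event topic in fixed-size block windows, and decode whatever comes back. The standalone sketch below shows that recipe with go-ethereum's FilterLogs; the endpoint, messenger address, and confirmation depth are placeholders, and the topic hash is the SentMessage signature constant listed above.

package main

import (
	"context"
	"log"
	"math/big"

	// the root package of the go-ethereum fork is named "ethereum"
	ethereum "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

// Mirrors contractEventsBlocksFetchLimit: query at most this many blocks per call.
const blocksPerQuery = int64(10)

func main() {
	ctx := context.Background()

	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}

	latest, err := client.BlockNumber(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Only look at blocks that are old enough to be considered confirmed.
	confirmations := uint64(6) // placeholder confirmation depth
	toBlock := int64(latest) - int64(confirmations)

	messenger := common.HexToAddress("0x0000000000000000000000000000000000001234") // placeholder
	// keccak256 of the SentMessage event signature, as listed in the watcher above.
	sentMessageTopic := common.HexToHash("806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4")

	processed := int64(0) // last block already handled, e.g. loaded from the db
	for from := processed + 1; from <= toBlock; from += blocksPerQuery {
		to := from + blocksPerQuery - 1
		if to > toBlock {
			to = toBlock
		}
		query := ethereum.FilterQuery{
			FromBlock: big.NewInt(from), // inclusive
			ToBlock:   big.NewInt(to),   // inclusive
			Addresses: []common.Address{messenger},
			Topics:    [][]common.Hash{{sentMessageTopic}},
		}
		logs, err := client.FilterLogs(ctx, query)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("blocks [%d, %d]: %d SentMessage logs", from, to, len(logs))
		processed = to
	}
}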

View File

@@ -1,29 +0,0 @@
package l1
import (
"context"
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func testStartWatcher(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
client, err := ethclient.Dial(l1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
watcher.Start()
defer watcher.Stop()
}

View File

@@ -3,11 +3,14 @@ package l2
import (
"context"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
)
@@ -27,12 +30,29 @@ func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*B
return nil, err
}
relayer, err := NewLayer2Relayer(ctx, orm, cfg.RelayerConfig)
l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
if err != nil {
log.Warn("new L2MessengerABI failed", "err", err)
return nil, err
}
skippedOpcodes := make(map[string]struct{}, len(cfg.SkippedOpcodes))
for _, op := range cfg.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
proofGenerationFreq := cfg.ProofGenerationFreq
if proofGenerationFreq == 0 {
log.Warn("receive 0 proof_generation_freq, change to 1")
proofGenerationFreq = 1
}
relayer, err := NewLayer2Relayer(ctx, client, proofGenerationFreq, skippedOpcodes, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.BatchProposerConfig, cfg.L2MessengerAddress, orm)
l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, proofGenerationFreq, skippedOpcodes, cfg.L2MessengerAddress, l2MessengerABI, orm)
return &Backend{
cfg: cfg,
@@ -66,3 +86,8 @@ func (l2 *Backend) APIs() []rpc.API {
},
}
}
// MockBlockResult for test case
func (l2 *Backend) MockBlockResult(blockResult *types.BlockResult) {
l2.l2Watcher.Send(blockResult)
}

View File

@@ -1,140 +0,0 @@
package l2
import (
"fmt"
"sync"
"time"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database"
"scroll-tech/database/orm"
"scroll-tech/bridge/config"
)
type batchProposer struct {
mutex sync.Mutex
orm database.OrmFactory
batchTimeSec uint64
batchGasThreshold uint64
batchTxNumThreshold uint64
batchBlocksLimit uint64
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
}
func newBatchProposer(cfg *config.BatchProposerConfig, orm database.OrmFactory) *batchProposer {
return &batchProposer{
mutex: sync.Mutex{},
orm: orm,
batchTimeSec: cfg.BatchTimeSec,
batchGasThreshold: cfg.BatchGasThreshold,
batchTxNumThreshold: cfg.BatchTxNumThreshold,
batchBlocksLimit: cfg.BatchBlocksLimit,
proofGenerationFreq: cfg.ProofGenerationFreq,
skippedOpcodes: cfg.SkippedOpcodes,
}
}
func (w *batchProposer) tryProposeBatch() {
w.mutex.Lock()
defer w.mutex.Unlock()
blocks, err := w.orm.GetUnbatchedBlocks(
map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", w.batchBlocksLimit),
)
if err != nil {
log.Error("failed to get unbatched blocks", "err", err)
return
}
if len(blocks) == 0 {
return
}
if blocks[0].GasUsed > w.batchGasThreshold {
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
if blocks[0].TxNum > w.batchTxNumThreshold {
log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
var (
length = len(blocks)
gasUsed, txNum uint64
)
// add blocks into batch until reach batchGasThreshold
for i, block := range blocks {
if (gasUsed+block.GasUsed > w.batchGasThreshold) || (txNum+block.TxNum > w.batchTxNumThreshold) {
blocks = blocks[:i]
break
}
gasUsed += block.GasUsed
txNum += block.TxNum
}
// if too little gas has been gathered, but we don't want to halt, we then check the first block in the batch:
// if it's not old enough we will skip proposing the batch,
// otherwise we will still propose a batch
if length == len(blocks) && blocks[0].BlockTimestamp+w.batchTimeSec > uint64(time.Now().Unix()) {
return
}
if err = w.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
}
}
func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {
dbTx, err := w.orm.Beginx()
if err != nil {
return err
}
var dbTxErr error
defer func() {
if dbTxErr != nil {
if err := dbTx.Rollback(); err != nil {
log.Error("dbTx.Rollback()", "err", err)
}
}
}()
var (
batchID string
startBlock = blocks[0]
endBlock = blocks[len(blocks)-1]
txNum, gasUsed uint64
blockIDs = make([]uint64, len(blocks))
)
for i, block := range blocks {
txNum += block.TxNum
gasUsed += block.GasUsed
blockIDs[i] = block.Number
}
batchID, dbTxErr = w.orm.NewBatchInDBTx(dbTx, startBlock, endBlock, startBlock.ParentHash, txNum, gasUsed)
if dbTxErr != nil {
return dbTxErr
}
if dbTxErr = w.orm.SetBatchIDForBlocksInDBTx(dbTx, blockIDs, batchID); dbTxErr != nil {
return dbTxErr
}
dbTxErr = dbTx.Commit()
return dbTxErr
}
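
The interesting part of the removed proposer is the selection rule in tryProposeBatch: grow the batch block by block until the gas or transaction thresholds would be exceeded, always batch a single oversized block on its own, and, when everything fits, only propose once the oldest block has waited longer than batchTimeSec. The sketch below isolates that rule as a pure function over a stand-in block struct so it can be reasoned about (and tested) without the orm types; the threshold values in main are only examples.

package main

import (
	"fmt"
	"time"
)

// blockInfo is a stand-in for orm.BlockInfo with just the fields the rule needs.
type blockInfo struct {
	Number         uint64
	GasUsed        uint64
	TxNum          uint64
	BlockTimestamp uint64
}

// selectBatch returns the prefix of blocks that should form the next batch,
// or nil if the proposer should wait for more blocks.
func selectBatch(blocks []blockInfo, gasThreshold, txNumThreshold, batchTimeSec uint64, now time.Time) []blockInfo {
	if len(blocks) == 0 {
		return nil
	}
	// A single oversized block still has to be batched on its own.
	if blocks[0].GasUsed > gasThreshold || blocks[0].TxNum > txNumThreshold {
		return blocks[:1]
	}
	var gasUsed, txNum uint64
	selected := blocks
	for i, b := range blocks {
		if gasUsed+b.GasUsed > gasThreshold || txNum+b.TxNum > txNumThreshold {
			selected = blocks[:i]
			break
		}
		gasUsed += b.GasUsed
		txNum += b.TxNum
	}
	// If every pending block fits, only propose when the oldest one is older than batchTimeSec.
	if len(selected) == len(blocks) && blocks[0].BlockTimestamp+batchTimeSec > uint64(now.Unix()) {
		return nil
	}
	return selected
}

func main() {
	blocks := []blockInfo{
		{Number: 1, GasUsed: 1_000_000, TxNum: 10, BlockTimestamp: uint64(time.Now().Add(-time.Minute).Unix())},
		{Number: 2, GasUsed: 2_500_000, TxNum: 20, BlockTimestamp: uint64(time.Now().Unix())},
	}
	batch := selectBatch(blocks, 3_000_000, 135, 30, time.Now())
	fmt.Println("blocks in batch:", len(batch))
}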

View File

@@ -1,62 +0,0 @@
package l2
import (
"encoding/json"
"fmt"
"math/big"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/bridge/config"
"scroll-tech/common/utils"
)
func testBatchProposer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
trace2 := &types.BlockTrace{}
trace3 := &types.BlockTrace{}
data, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
err = json.Unmarshal(data, trace2)
assert.NoError(t, err)
data, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
err = json.Unmarshal(data, trace3)
assert.NoError(t, err)
// Insert traces into db.
assert.NoError(t, db.InsertBlockTraces([]*types.BlockTrace{trace2, trace3}))
id := utils.ComputeBatchID(trace3.Header.Hash(), trace2.Header.ParentHash, big.NewInt(1))
proposer := newBatchProposer(&config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, db)
proposer.tryProposeBatch()
infos, err := db.GetUnbatchedBlocks(map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", 100))
assert.NoError(t, err)
assert.Equal(t, true, len(infos) == 0)
exist, err := db.BatchRecordExist(id)
assert.NoError(t, err)
assert.Equal(t, true, exist)
}

View File

@@ -10,8 +10,7 @@ import (
"github.com/scroll-tech/go-ethereum/log"
)
//nolint:unused
func blockTraceIsValid(trace *types.BlockTrace) bool {
func blockTraceIsValid(trace *types.BlockResult) bool {
if trace == nil {
log.Warn("block trace is empty")
return false
@@ -23,7 +22,6 @@ func blockTraceIsValid(trace *types.BlockTrace) bool {
return flag
}
//nolint:unused
func structLogResIsValid(txLogs []*types.StructLogRes) bool {
res := true
for i := 0; i < len(txLogs); i++ {
@@ -50,7 +48,6 @@ func structLogResIsValid(txLogs []*types.StructLogRes) bool {
return res
}
//nolint:unused
func codeIsValid(txLog *types.StructLogRes, n int) bool {
extraData := txLog.ExtraData
if extraData == nil {
@@ -63,7 +60,6 @@ func codeIsValid(txLog *types.StructLogRes, n int) bool {
return true
}
//nolint:unused
func stateIsValid(txLog *types.StructLogRes, n int) bool {
extraData := txLog.ExtraData
if extraData == nil {
@@ -77,7 +73,7 @@ func stateIsValid(txLog *types.StructLogRes, n int) bool {
}
// TraceHasUnsupportedOpcodes checks whether the trace contains unsupported opcodes
func TraceHasUnsupportedOpcodes(opcodes map[string]struct{}, trace *types.BlockTrace) bool {
func TraceHasUnsupportedOpcodes(opcodes map[string]struct{}, trace *types.BlockResult) bool {
if trace == nil {
return false
}
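
Whichever trace type is used, TraceHasUnsupportedOpcodes reduces to a set-membership scan over the executed opcodes against the configured skip set (the same map[string]struct{} that l2.New builds from cfg.SkippedOpcodes). A self-contained sketch of that check, using plain strings instead of the trace structures, looks like this; the opcode names are placeholders.

package main

import "fmt"

// hasUnsupportedOpcode reports whether any executed opcode is in the skip set.
// The skip set uses the same map[string]struct{} representation as the config.
func hasUnsupportedOpcode(executed []string, skipped map[string]struct{}) bool {
	for _, op := range executed {
		if _, ok := skipped[op]; ok {
			return true
		}
	}
	return false
}

func main() {
	// Build the set from a config-style slice of opcode names (placeholders).
	skippedList := []string{"SELFDESTRUCT", "CREATE2"}
	skipped := make(map[string]struct{}, len(skippedList))
	for _, op := range skippedList {
		skipped[op] = struct{}{}
	}

	trace := []string{"PUSH1", "MSTORE", "CREATE2", "RETURN"}
	fmt.Println("unsupported opcode present:", hasUnsupportedOpcode(trace, skipped))
}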

View File

@@ -1,87 +0,0 @@
package l2
import (
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
// docker consider handler.
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
// l2geth client
l2Cli *ethclient.Client
)
func setupEnv(t *testing.T) (err error) {
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
cfg.L1Config.Endpoint = l1gethImg.Endpoint()
// Create l2geth container.
l2gethImg = docker.NewTestL2Docker(t)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
cfg.DBConfig.DSN = dbImg.Endpoint()
// Create l2geth client.
l2Cli, err = ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
return err
}
func free(t *testing.T) {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
if l1gethImg != nil {
assert.NoError(t, l1gethImg.Stop())
}
if l2gethImg != nil {
assert.NoError(t, l2gethImg.Stop())
}
}
func TestFunction(t *testing.T) {
if err := setupEnv(t); err != nil {
t.Fatal(err)
}
// Run l2 watcher test cases.
t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
// Run l2 relayer test cases.
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
t.Run("testL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("testL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("testL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("testBatchProposer", testBatchProposer)
t.Cleanup(func() {
free(t)
})
}

View File

@@ -2,20 +2,18 @@ package l2
import (
"context"
"errors"
"fmt"
"math/big"
"runtime"
"sync"
"strconv"
"time"
// not sure if this will cause problems when relaying with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/errgroup"
"modernc.org/mathutil"
"scroll-tech/database"
"scroll-tech/database/orm"
@@ -23,7 +21,6 @@ import (
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
"scroll-tech/bridge/sender"
"scroll-tech/bridge/utils"
)
// Layer2Relayer is responsible for
@@ -33,120 +30,110 @@ import (
// Actions are triggered by new head from layer 1 geth node.
// @todo It's better to be triggered by watcher.
type Layer2Relayer struct {
ctx context.Context
ctx context.Context
client *ethclient.Client
sender *sender.Sender
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
db database.OrmFactory
cfg *config.RelayerConfig
messageSender *sender.Sender
messageCh <-chan *sender.Confirmation
l1MessengerABI *abi.ABI
l1RollupABI *abi.ABI
rollupSender *sender.Sender
rollupCh <-chan *sender.Confirmation
l1RollupABI *abi.ABI
// a list of processing message, indexed by layer2 hash
processingMessage map[string]string
// A list of processing message.
// key(string): confirmation ID, value(string): layer2 hash.
processingMessage sync.Map
// a list of processing block, indexed by block height
processingBlock map[string]uint64
// A list of processing batch commitment.
// key(string): confirmation ID, value(string): batch id.
processingCommitment sync.Map
// a list of processing proof, indexed by block height
processingProof map[string]uint64
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch id.
processingFinalization sync.Map
stopCh chan struct{}
// channel used to communicate with transaction sender
confirmationCh <-chan *sender.Confirmation
stopCh chan struct{}
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
// @todo use different sender for relayer, block commit and proof finalize
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, proofGenFreq uint64, skippedOpcodes map[string]struct{}, l2ConfirmNum int64, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
l1MessengerABI, err := bridge_abi.L1MessengerMetaData.GetAbi()
if err != nil {
log.Error("Failed to create messenger sender", "err", err)
log.Error("Get L1MessengerABI failed", "err", err)
return nil, err
}
rollupSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.RollupSenderPrivateKeys)
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
if err != nil {
log.Error("Failed to create rollup sender", "err", err)
log.Error("Get RollupABI failed", "err", err)
return nil, err
}
prv, err := crypto.HexToECDSA(cfg.PrivateKey)
if err != nil {
log.Error("Failed to import private key from config file")
return nil, err
}
// @todo use different sender for relayer, block commit and proof finalize
sender, err := sender.NewSender(ctx, cfg.SenderConfig, prv)
if err != nil {
log.Error("Failed to create sender", "err", err)
return nil, err
}
return &Layer2Relayer{
ctx: ctx,
db: db,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
l1MessengerABI: bridge_abi.L1MessengerMetaABI,
rollupSender: rollupSender,
rollupCh: rollupSender.ConfirmChan(),
l1RollupABI: bridge_abi.RollupMetaABI,
cfg: cfg,
processingMessage: sync.Map{},
processingCommitment: sync.Map{},
processingFinalization: sync.Map{},
stopCh: make(chan struct{}),
ctx: ctx,
client: ethClient,
sender: sender,
db: db,
l1MessengerABI: l1MessengerABI,
l1RollupABI: l1RollupABI,
cfg: cfg,
proofGenerationFreq: proofGenFreq,
skippedOpcodes: skippedOpcodes,
processingMessage: map[string]string{},
processingBlock: map[string]uint64{},
processingProof: map[string]uint64{},
stopCh: make(chan struct{}),
confirmationCh: sender.ConfirmChan(),
}, nil
}
const processMsgLimit = 100
// ProcessSavedEvents relays saved unprocessed cross-domain transactions to the desired blockchain
func (r *Layer2Relayer) ProcessSavedEvents() {
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
return
}
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2Messages(
map[string]interface{}{"status": orm.MsgPending},
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
)
msgs, err := r.db.GetL2UnprocessedMessages()
if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
return
}
// process messages in batches
batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
for size := 0; len(msgs) > 0; msgs = msgs[size:] {
if size = len(msgs); size > batchSize {
size = batchSize
}
var g errgroup.Group
for _, msg := range msgs[:size] {
msg := msg
g.Go(func() error {
return r.processSavedEvent(msg, batch.Index)
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process l2 saved event", "err", err)
}
return
}
if len(msgs) == 0 {
return
}
msg := msgs[0]
// @todo add support to relay multiple messages
latestFinalizeHeight, err := r.db.GetLatestFinalizedBlock()
if err != nil {
log.Error("GetLatestFinalizedBlock failed", "err", err)
return
}
if latestFinalizeHeight < msg.Height {
// log.Warn("corresponding block not finalized", "status", status)
return
}
}
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
// @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockHeight: big.NewInt(int64(msg.Height)),
BatchIndex: big.NewInt(0).SetUint64(index),
BlockNumber: big.NewInt(int64(msg.Height)),
MerkleProof: make([]byte, 0),
}
from := common.HexToAddress(msg.Sender)
sender := common.HexToAddress(msg.Sender)
target := common.HexToAddress(msg.Target)
value, ok := big.NewInt(0).SetString(msg.Value, 10)
if !ok {
@@ -158,239 +145,222 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) erro
deadline := big.NewInt(int64(msg.Deadline))
msgNonce := big.NewInt(int64(msg.Nonce))
calldata := common.Hex2Bytes(msg.Calldata)
data, err := r.l1MessengerABI.Pack("relayMessageWithProof", from, target, value, fee, deadline, msgNonce, calldata, proof)
data, err := r.l1MessengerABI.Pack("relayMessageWithProof", sender, target, value, fee, deadline, msgNonce, calldata, proof)
if err != nil {
log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return err
return
}
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
}
hash, err := r.sender.SendTransaction(msg.Layer2Hash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
}
return err
log.Error("Failed to send relayMessageWithProof tx to L1", "err", err)
return
}
log.Info("relayMessageWithProof to layer1", "msgHash", msg.MsgHash, "txhash", hash.String())
log.Info("relayMessageWithProof to layer1", "layer2hash", msg.Layer2Hash, "txhash", hash.String())
// save status in db
// @todo handle db error
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.Layer2Hash, hash.String(), orm.MsgSubmitted)
if err != nil {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "layer2hash", msg.Layer2Hash, "err", err)
}
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
return nil
r.processingMessage[msg.Layer2Hash] = msg.Layer2Hash
}
// ProcessPendingBatches submits batch data to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBatches() {
// batches are sorted by batch index in increasing order
batchesInDB, err := r.db.GetPendingBatches(1)
// ProcessPendingBlocks submits block data to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBlocks() {
// blocks are sorted by height in increasing order
blocksInDB, err := r.db.GetPendingBlocks()
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
log.Error("Failed to fetch pending L2 blocks", "err", err)
return
}
if len(batchesInDB) == 0 {
if len(blocksInDB) == 0 {
return
}
id := batchesInDB[0]
// @todo add support to relay multiple batches
height := blocksInDB[0]
// @todo add support to relay multiple blocks
batches, err := r.db.GetBlockBatches(map[string]interface{}{"id": id})
if err != nil || len(batches) == 0 {
log.Error("Failed to GetBlockBatches", "batch_id", id, "err", err)
return
}
batch := batches[0]
traces, err := r.db.GetBlockTraces(map[string]interface{}{"batch_id": id}, "ORDER BY number ASC")
if err != nil || len(traces) == 0 {
log.Error("Failed to GetBlockTraces", "batch_id", id, "err", err)
return
}
layer2Batch := &bridge_abi.IZKRollupLayer2Batch{
BatchIndex: batch.Index,
ParentHash: common.HexToHash(batch.ParentHash),
Blocks: make([]bridge_abi.IZKRollupLayer2BlockHeader, len(traces)),
}
parentHash := common.HexToHash(batch.ParentHash)
for i, trace := range traces {
layer2Batch.Blocks[i] = bridge_abi.IZKRollupLayer2BlockHeader{
BlockHash: trace.Header.Hash(),
ParentHash: parentHash,
BaseFee: trace.Header.BaseFee,
StateRoot: trace.StorageTrace.RootAfter,
BlockHeight: trace.Header.Number.Uint64(),
GasUsed: 0,
Timestamp: trace.Header.Time,
ExtraData: make([]byte, 0),
Txs: make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.Transactions)),
}
for j, tx := range trace.Transactions {
layer2Batch.Blocks[i].Txs[j] = bridge_abi.IZKRollupLayer2Transaction{
Caller: tx.From,
Nonce: tx.Nonce,
Gas: tx.Gas,
GasPrice: tx.GasPrice.ToInt(),
Value: tx.Value.ToInt(),
Data: common.Hex2Bytes(tx.Data),
R: tx.R.ToInt(),
S: tx.S.ToInt(),
V: tx.V.ToInt().Uint64(),
}
if tx.To != nil {
layer2Batch.Blocks[i].Txs[j].Target = *tx.To
}
layer2Batch.Blocks[i].GasUsed += trace.ExecutionResults[j].Gas
}
// for next iteration
parentHash = layer2Batch.Blocks[i].BlockHash
}
data, err := r.l1RollupABI.Pack("commitBatch", layer2Batch)
// will fetch missing block result from l2geth
trace, err := r.getOrFetchBlockResultByHeight(height)
if err != nil {
log.Error("Failed to pack commitBatch", "id", id, "index", batch.Index, "err", err)
log.Error("getOrFetchBlockResultByHeight failed",
"height", height,
"err", err,
)
return
}
if trace == nil {
return
}
parentHash, err := r.getOrFetchBlockHashByHeight(height - 1)
if err != nil {
log.Error("getOrFetchBlockHashByHeight for parent block failed",
"parent height", height-1,
"err", err,
)
return
}
if parentHash == nil {
log.Error("parent hash is empty",
"height", height,
"err", err,
)
return
}
txID := id + "-commit"
// add suffix `-commit` to avoid duplication with finalize tx in unit tests
hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
header := bridge_abi.IZKRollupBlockHeader{
BlockHash: trace.BlockTrace.Hash,
ParentHash: *parentHash,
BaseFee: trace.BlockTrace.BaseFee.ToInt(),
StateRoot: trace.StorageTrace.RootAfter,
BlockHeight: trace.BlockTrace.Number.ToInt().Uint64(),
GasUsed: 0,
Timestamp: trace.BlockTrace.Time,
ExtraData: make([]byte, 0),
}
txns := make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.BlockTrace.Transactions))
for i, tx := range trace.BlockTrace.Transactions {
txns[i] = bridge_abi.IZKRollupLayer2Transaction{
Caller: tx.From,
Nonce: tx.Nonce,
Gas: tx.Gas,
GasPrice: tx.GasPrice.ToInt(),
Value: tx.Value.ToInt(),
Data: common.Hex2Bytes(tx.Data),
}
if tx.To != nil {
txns[i].Target = *tx.To
}
header.GasUsed += trace.ExecutionResults[i].Gas
}
data, err := r.l1RollupABI.Pack("commitBlock", header, txns)
if err != nil {
log.Error("Failed to pack commitBlock", "height", height, "err", err)
return
}
log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
hash, err := r.sender.SendTransaction(strconv.FormatUint(height, 10), &r.cfg.RollupContractAddress, big.NewInt(0), data)
if err != nil {
log.Error("Failed to send commitBlock tx to layer1 ", "height", height, "err", err)
return
}
log.Info("commitBlock in layer1", "height", height, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
err = r.db.UpdateRollupTxHashAndStatus(r.ctx, height, hash.String(), orm.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
log.Error("UpdateRollupTxHashAndStatus failed", "height", height, "err", err)
}
r.processingCommitment.Store(txID, id)
r.processingBlock[strconv.FormatUint(height, 10)] = height
}
// ProcessCommittedBatches submits proofs to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() {
// set skipped batches in a single db operation
if count, err := r.db.UpdateSkippedBatches(); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
} else if count > 0 {
log.Info("Skipping batches", "count", count)
}
// batches are sorted by batch index in increasing order
batches, err := r.db.GetCommittedBatches(1)
// ProcessCommittedBlocks submits proofs to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBlocks() {
// blocks are sorted by height in increasing order
blocksInDB, err := r.db.GetCommittedBlocks()
if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err)
log.Error("Failed to fetch committed L2 blocks", "err", err)
return
}
if len(batches) == 0 {
if len(blocksInDB) == 0 {
return
}
id := batches[0]
// @todo add support to relay multiple batches
height := blocksInDB[0]
// @todo add support to relay multiple blocks
status, err := r.db.GetProvingStatusByID(id)
status, err := r.db.GetBlockStatusByNumber(height)
if err != nil {
log.Error("GetProvingStatusByID failed", "id", id, "err", err)
log.Error("GetBlockStatusByNumber failed", "height", height, "err", err)
return
}
switch status {
case orm.ProvingTaskUnassigned, orm.ProvingTaskAssigned:
case orm.BlockUnassigned, orm.BlockAssigned:
// The proof for this block is not ready yet.
return
case orm.ProvingTaskProved:
case orm.BlockProved:
// It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for the sake of completeness
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
case orm.BlockFailed, orm.BlockSkipped:
if err = r.db.UpdateRollupStatus(r.ctx, height, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "height", height, "err", err)
}
case orm.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "id", id)
case orm.BlockVerified:
log.Info("Start to roll up zk proof", "height", height)
success := false
defer func() {
// TODO: need to revisit this and add more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "id", id)
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "height", height)
if err = r.db.UpdateRollupStatus(r.ctx, height, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "height", height, "err", err)
}
}
}()
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByID(id)
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByNumber(height)
if err != nil {
log.Warn("fetch get proof by id failed", "id", id, "err", err)
log.Warn("fetch get proof by height failed", "height", height, "err", err)
return
}
if proofBuffer == nil || instanceBuffer == nil {
log.Warn("proof or instance not ready", "id", id)
log.Warn("proof or instance not ready", "height", height)
return
}
if len(proofBuffer)%32 != 0 {
log.Error("proof buffer has wrong length", "id", id, "length", len(proofBuffer))
log.Error("proof buffer has wrong length", "height", height, "length", len(proofBuffer))
return
}
if len(instanceBuffer)%32 != 0 {
log.Warn("instance buffer has wrong length", "id", id, "length", len(instanceBuffer))
log.Warn("instance buffer has wrong length", "height", height, "length", len(instanceBuffer))
return
}
proof := utils.BufferToUint256Le(proofBuffer)
instance := utils.BufferToUint256Le(instanceBuffer)
data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(id), proof, instance)
if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err)
return
}
proof := bufferToUint256Le(proofBuffer)
instance := bufferToUint256Le(instanceBuffer)
txID := id + "-finalize"
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
hash := &txHash
// it must be in the db
hash, err := r.db.GetHashByNumber(height)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("finalizeBatchWithProof in layer1 failed", "id", id, "err", err)
}
log.Warn("fetch missing block result by height failed", "height", height, "err", err)
}
if hash == nil {
// only happens when trace validation failed
return
}
log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
data, err := r.l1RollupABI.Pack("finalizeBlockWithProof", hash, proof, instance)
if err != nil {
log.Error("Pack finalizeBlockWithProof failed", err)
return
}
txHash, err := r.sender.SendTransaction(strconv.FormatUint(height, 10), &r.cfg.RollupContractAddress, big.NewInt(0), data)
hash = &txHash
if err != nil {
log.Error("finalizeBlockWithProof in layer1 failed",
"height", height,
"err", err,
)
return
}
log.Info("finalizeBlockWithProof in layer1", "height", height, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
err = r.db.UpdateFinalizeTxHashAndStatus(r.ctx, height, hash.String(), orm.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
log.Warn("UpdateFinalizeTxHashAndStatus failed", "height", height, "err", err)
}
success = true
r.processingFinalization.Store(txID, id)
r.processingProof[strconv.FormatUint(height, 10)] = height
default:
log.Error("encounter unreachable case in ProcessCommittedBatches",
log.Error("encounter unreachable case in ProcessCommittedBlocks",
"block_status", status,
)
}
@@ -398,42 +368,23 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// Start the relayer process
func (r *Layer2Relayer) Start() {
loop := func(ctx context.Context, f func()) {
go func() {
// trigger by timer
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
f()
r.ProcessSavedEvents()
r.ProcessPendingBlocks()
r.ProcessCommittedBlocks()
case confirmation := <-r.confirmationCh:
r.handleConfirmation(confirmation)
case <-r.stopCh:
return
}
}
}
go func() {
ctx, cancel := context.WithCancel(r.ctx)
go loop(ctx, r.ProcessSavedEvents)
go loop(ctx, r.ProcessPendingBatches)
go loop(ctx, r.ProcessCommittedBatches)
go func(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case confirmation := <-r.messageCh:
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupCh:
r.handleConfirmation(confirmation)
}
}
}(ctx)
<-r.stopCh
cancel()
}()
}
@@ -442,44 +393,130 @@ func (r *Layer2Relayer) Stop() {
close(r.stopCh)
}
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
if !confirmation.IsSuccessful {
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
return
func (r *Layer2Relayer) getOrFetchBlockHashByHeight(height uint64) (*common.Hash, error) {
hash, err := r.db.GetHashByNumber(height)
if err != nil {
block, err := r.client.BlockByNumber(r.ctx, big.NewInt(int64(height)))
if err != nil {
return nil, err
}
x := block.Hash()
return &x, err
}
return hash, nil
}
func (r *Layer2Relayer) getOrFetchBlockResultByHeight(height uint64) (*types.BlockResult, error) {
tracesInDB, err := r.db.GetBlockResults(map[string]interface{}{"number": height})
if err != nil {
log.Warn("GetBlockResults failed", "height", height, "err", err)
return nil, err
}
if len(tracesInDB) == 0 {
return r.fetchMissingBlockResultByHeight(height)
}
return tracesInDB[0], nil
}
func (r *Layer2Relayer) fetchMissingBlockResultByHeight(height uint64) (*types.BlockResult, error) {
header, err := r.client.HeaderByNumber(r.ctx, big.NewInt(int64(height)))
if err != nil {
return nil, err
}
trace, err := r.client.GetBlockResultByHash(r.ctx, header.Hash())
if err != nil {
return nil, err
}
if blockTraceIsValid(trace) {
// skip verify for unsupported block
skip := false
if height%r.proofGenerationFreq != 0 {
log.Info("skip proof generation", "block", height)
skip = true
} else if TraceHasUnsupportedOpcodes(r.skippedOpcodes, trace) {
log.Info("block has unsupported opcodes, skip proof generation", "block", height)
skip = true
}
if skip {
if err = r.db.InsertBlockResultsWithStatus(r.ctx, []*types.BlockResult{trace}, orm.BlockSkipped); err != nil {
log.Error("failed to store missing blockResult", "err", err)
}
} else {
if err = r.db.InsertBlockResultsWithStatus(r.ctx, []*types.BlockResult{trace}, orm.BlockUnassigned); err != nil {
log.Error("failed to store missing blockResult", "err", err)
}
}
return trace, nil
}
return nil, nil
}
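TraceHasUnsupportedOpcodes is called here and in the watcher further down, but its body is not part of this diff. A hedged sketch of the kind of membership test it presumably performs; the opcode list is flattened to a plain []string so nothing is assumed about the trace's exact field layout:
package sketch
// hasUnsupportedOpcodes reports whether any executed opcode appears in the
// skip set built from cfg.L2Config.SkippedOpcodes.
func hasUnsupportedOpcodes(skipped map[string]struct{}, executedOpcodes []string) bool {
	for _, op := range executedOpcodes {
		if _, ok := skipped[op]; ok {
			return true
		}
	}
	return false
}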
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is message relay transaction
if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
if layer2Hash, ok := r.processingMessage[confirmation.ID]; ok {
transactionType = "MessageRelay"
// @todo handle db error
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), orm.MsgConfirmed, confirmation.TxHash.String())
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, layer2Hash, confirmation.TxHash.String(), orm.MsgConfirmed)
if err != nil {
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "err", err)
}
r.processingMessage.Delete(confirmation.ID)
delete(r.processingMessage, confirmation.ID)
}
// check whether it is block commitment transaction
if batchID, ok := r.processingCommitment.Load(confirmation.ID); ok {
transactionType = "BatchCommitment"
if blockHeight, ok := r.processingBlock[confirmation.ID]; ok {
transactionType = "BlockCommitment"
// @todo handle db error
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupCommitted)
err := r.db.UpdateRollupTxHashAndStatus(r.ctx, blockHeight, confirmation.TxHash.String(), orm.RollupCommitted)
if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
log.Warn("UpdateRollupTxHashAndStatus failed", "err", err)
}
r.processingCommitment.Delete(confirmation.ID)
delete(r.processingBlock, confirmation.ID)
}
// check whether it is proof finalization transaction
if batchID, ok := r.processingFinalization.Load(confirmation.ID); ok {
if blockHeight, ok := r.processingProof[confirmation.ID]; ok {
transactionType = "ProofFinalization"
// @todo handle db error
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupFinalized)
err := r.db.UpdateFinalizeTxHashAndStatus(r.ctx, blockHeight, confirmation.TxHash.String(), orm.RollupFinalized)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
log.Warn("UpdateFinalizeTxHashAndStatus failed", "err", err)
}
delete(r.processingProof, confirmation.ID)
// try to delete block trace
err = r.db.DeleteTraceByNumber(blockHeight)
if err != nil {
log.Warn("DeleteTraceByNumber failed", "err", err)
}
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
//nolint:unused
func bufferToUint256Be(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
buffer256[i] = big.NewInt(0)
for j := 0; j < 32; j++ {
buffer256[i] = buffer256[i].Lsh(buffer256[i], 8)
buffer256[i] = buffer256[i].Add(buffer256[i], big.NewInt(int64(buffer[i*32+j])))
}
}
return buffer256
}
func bufferToUint256Le(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
v := big.NewInt(0)
shft := big.NewInt(1)
for j := 0; j < 32; j++ {
v = new(big.Int).Add(v, new(big.Int).Mul(shft, big.NewInt(int64(buffer[i*32+j]))))
shft = new(big.Int).Mul(shft, big.NewInt(256))
}
buffer256[i] = v
}
return buffer256
}
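A quick sanity check of the little-endian packing above: within each 32-byte word, byte 0 is the least significant byte, so a word whose byte at index 1 is set decodes to 256. A minimal test sketch, assuming it lives in the same package as bufferToUint256Le:
package l2
import (
	"math/big"
	"testing"
)
func TestBufferToUint256Le(t *testing.T) {
	buf := make([]byte, 64)
	buf[0] = 1  // first word decodes to 1
	buf[33] = 1 // second word: byte index 1 within the word, i.e. 256
	words := bufferToUint256Le(buf)
	if len(words) != 2 {
		t.Fatalf("expected 2 words, got %d", len(words))
	}
	if words[0].Cmp(big.NewInt(1)) != 0 || words[1].Cmp(big.NewInt(256)) != 0 {
		t.Fatalf("unexpected decoding: %s, %s", words[0], words[1])
	}
}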

View File

@@ -1,23 +1,29 @@
package l2
package l2_test
import (
"context"
"encoding/json"
"math/big"
"os"
"testing"
"time"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/mock"
)
var (
templateL2Message = []*orm.L2Message{
templateLayer2Message = []*orm.Layer2Message{
{
Nonce: 1,
Height: 1,
@@ -31,230 +37,198 @@ var (
Layer2Hash: "hash0",
},
}
l1Docker docker.ImgInstance
l2Docker docker.ImgInstance
dbDocker docker.ImgInstance
)
func testCreateNewRelayer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer2Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
relayer.Start()
func setupEnv(t *testing.T) {
l1Docker = mock.NewTestL1Docker(t, TEST_CONFIG)
l2Docker = mock.NewTestL2Docker(t, TEST_CONFIG)
dbDocker = mock.GetDbDocker(t, TEST_CONFIG)
}
func testL2RelayerProcessSaveEvents(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
func TestRelayerFunction(t *testing.T) {
// Setup
setupEnv(t)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
t.Run("TestCreateNewRelayer", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1Docker.Endpoint()
err = db.SaveL2Messages(context.Background(), templateL2Message)
assert.NoError(t, err)
client, err := ethclient.Dial(l2Docker.Endpoint())
assert.NoError(t, err)
traces := []*types.BlockTrace{
{
Header: &types.Header{
Number: big.NewInt(int64(templateL2Message[0].Height)),
db, err := database.NewOrmFactory(TEST_CONFIG.DB_CONFIG)
assert.NoError(t, err)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
relayer, err := l2.NewLayer2Relayer(context.Background(), client, cfg.L2Config.ProofGenerationFreq, skippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
relayer.Start()
defer relayer.Stop()
})
t.Run("TestL2RelayerProcessSaveEvents", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1Docker.Endpoint()
client, err := ethclient.Dial(l2Docker.Endpoint())
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
relayer, err := l2.NewLayer2Relayer(context.Background(), client, cfg.L2Config.ProofGenerationFreq, skippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
err = db.SaveLayer2Messages(context.Background(), templateLayer2Message)
assert.NoError(t, err)
blocks := []*orm.RollupResult{
{
Number: 3,
Status: orm.RollupFinalized,
RollupTxHash: "Rollup Test Hash",
FinalizeTxHash: "Finalized Hash",
},
},
{
Header: &types.Header{
Number: big.NewInt(int64(templateL2Message[0].Height + 1)),
}
err = db.InsertPendingBlocks(context.Background(), []uint64{uint64(blocks[0].Number)})
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), uint64(blocks[0].Number), orm.RollupFinalized)
assert.NoError(t, err)
relayer.ProcessSavedEvents()
msg, err := db.GetLayer2MessageByNonce(templateLayer2Message[0].Nonce)
assert.NoError(t, err)
assert.Equal(t, orm.MsgSubmitted, msg.Status)
})
t.Run("TestL2RelayerProcessPendingBlocks", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1Docker.Endpoint()
client, err := ethclient.Dial(l2Docker.Endpoint())
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
relayer, err := l2.NewLayer2Relayer(context.Background(), client, cfg.L2Config.ProofGenerationFreq, skippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
// this block result has block number 0x4; it needs to be changed to match the test case
// In this test scenario, the db will store two blocks with heights 0x4 and 0x3
var results []*types.BlockResult
templateBlockResult, err := os.ReadFile("../../common/testdata/blockResult_relayer_parent.json")
assert.NoError(t, err)
blockResult := &types.BlockResult{}
err = json.Unmarshal(templateBlockResult, blockResult)
assert.NoError(t, err)
results = append(results, blockResult)
templateBlockResult, err = os.ReadFile("../../common/testdata/blockResult_relayer.json")
assert.NoError(t, err)
blockResult = &types.BlockResult{}
err = json.Unmarshal(templateBlockResult, blockResult)
assert.NoError(t, err)
results = append(results, blockResult)
err = db.InsertBlockResultsWithStatus(context.Background(), results, orm.BlockUnassigned)
assert.NoError(t, err)
blocks := []*orm.RollupResult{
{
Number: 4,
Status: 1,
RollupTxHash: "Rollup Test Hash",
FinalizeTxHash: "Finalized Hash",
},
},
}
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{Number: templateL2Message[0].Height},
&orm.BlockInfo{Number: templateL2Message[0].Height + 1},
"0f", 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
templateL2Message[0].Height,
templateL2Message[0].Height + 1}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupFinalized)
assert.NoError(t, err)
relayer.ProcessSavedEvents()
msg, err := db.GetL2MessageByNonce(templateL2Message[0].Nonce)
assert.NoError(t, err)
assert.Equal(t, orm.MsgSubmitted, msg.Status)
}
func testL2RelayerProcessPendingBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
// this block result has block number 0x4; it needs to be changed to match the test case
// In this test scenario, the db will store two blocks with heights 0x4 and 0x3
var traces []*types.BlockTrace
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
blockTrace := &types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
assert.NoError(t, err)
traces = append(traces, blockTrace)
templateBlockTrace, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
blockTrace = &types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
assert.NoError(t, err)
traces = append(traces, blockTrace)
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{Number: traces[0].Header.Number.Uint64()},
&orm.BlockInfo{Number: traces[1].Header.Number.Uint64()},
"ff", 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[1].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
// err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupPending)
// assert.NoError(t, err)
relayer.ProcessPendingBatches()
// Check if Rollup Result is changed successfully
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
}
func testL2RelayerProcessCommittedBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupCommitted)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
}
func testL2RelayerSkipBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
createBatch := func(rollupStatus orm.RollupStatus, provingStatus orm.ProvingStatus) string {
dbTx, err := db.Beginx()
}
err = db.InsertPendingBlocks(context.Background(), []uint64{uint64(blocks[0].Number)})
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = dbTx.Commit()
err = db.UpdateRollupStatus(context.Background(), uint64(blocks[0].Number), orm.RollupPending)
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchID, rollupStatus)
relayer.ProcessPendingBlocks()
// Check if Rollup Result is changed successfully
status, err := db.GetRollupStatus(uint64(blocks[0].Number))
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
})
t.Run("TestL2RelayerProcessCommittedBlocks", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1Docker.Endpoint()
client, err := ethclient.Dial(l2Docker.Endpoint())
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
relayer, err := l2.NewLayer2Relayer(context.Background(), client, cfg.L2Config.ProofGenerationFreq, skippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
templateBlockResult, err := os.ReadFile("../../common/testdata/blockResult_relayer.json")
assert.NoError(t, err)
blockResult := &types.BlockResult{}
err = json.Unmarshal(templateBlockResult, blockResult)
assert.NoError(t, err)
err = db.InsertBlockResultsWithStatus(context.Background(), []*types.BlockResult{blockResult}, orm.BlockVerified)
assert.NoError(t, err)
blocks := []*orm.RollupResult{
{
Number: 4,
Status: 1,
RollupTxHash: "Rollup Test Hash",
FinalizeTxHash: "Finalized Hash",
},
}
err = db.InsertPendingBlocks(context.Background(), []uint64{uint64(blocks[0].Number)})
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), uint64(blocks[0].Number), orm.RollupCommitted)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
tStateProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByNumber(context.Background(), uint64(blocks[0].Number), tProof, tStateProof, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, provingStatus)
relayer.ProcessCommittedBlocks()
status, err := db.GetRollupStatus(uint64(blocks[0].Number))
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
})
return batchID
}
skipped := []string{
createBatch(orm.RollupCommitted, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitted, orm.ProvingTaskFailed),
}
notSkipped := []string{
createBatch(orm.RollupPending, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitting, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalizing, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalized, orm.ProvingTaskSkipped),
createBatch(orm.RollupPending, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitting, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalizing, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalized, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitted, orm.ProvingTaskVerified),
}
relayer.ProcessCommittedBatches()
for _, id := range skipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizationSkipped, status)
}
for _, id := range notSkipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.NotEqual(t, orm.RollupFinalizationSkipped, status)
}
// Teardown
t.Cleanup(func() {
assert.NoError(t, l1Docker.Stop())
assert.NoError(t, l2Docker.Stop())
assert.NoError(t, dbDocker.Stop())
})
}

View File

@@ -4,38 +4,30 @@ import (
"context"
"fmt"
"math/big"
"reflect"
"time"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
"scroll-tech/database"
"scroll-tech/database/orm"
"scroll-tech/bridge/config"
)
// Metrics
var (
bridgeL2MsgSyncHeightGauge = metrics.NewRegisteredGauge("bridge/l2/msg/sync/height", nil)
)
const (
// BufferSize for the BlockResult channel
BufferSize = 16
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}
// BlockResultCacheSize for the latest handled blockresults in memory.
BlockResultCacheSize = 64
// keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
sentMessageEventSignature = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
)
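The sentMessageEventSignature constant is the keccak256 of the canonical event signature quoted in the comment, stored here without the 0x prefix. A small sketch showing how to recompute it with go-ethereum's crypto package:
package main
import (
	"fmt"
	"github.com/scroll-tech/go-ethereum/crypto"
)
func main() {
	sig := "SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)"
	// Keccak256Hash returns a common.Hash; Hex() adds the 0x prefix.
	fmt.Println(crypto.Keccak256Hash([]byte(sig)).Hex())
	// expected: 0x806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4
}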
// WatcherClient provides APIs that allow others to subscribe to various events from l2geth
type WatcherClient struct {
@@ -46,21 +38,21 @@ type WatcherClient struct {
orm database.OrmFactory
confirmations rpc.BlockNumber
messengerAddress common.Address
messengerABI *abi.ABI
confirmations uint64
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
messengerAddress common.Address
messengerABI *abi.ABI
// The height of the block up to which the watcher has retrieved event logs
processedMsgHeight uint64
stopped uint64
stopCh chan struct{}
batchProposer *batchProposer
}
// NewL2WatcherClient takes an l2geth instance and creates a WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, proofGenFreq uint64, skippedOpcodes map[string]struct{}, messengerAddress common.Address, messengerABI *abi.ABI, orm database.OrmFactory) *WatcherClient {
savedHeight, err := orm.GetLayer2LatestWatchedHeight()
if err != nil {
log.Warn("fetch height from db failed", "err", err)
@@ -68,90 +60,54 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
}
return &WatcherClient{
ctx: ctx,
Client: client,
orm: orm,
processedMsgHeight: uint64(savedHeight),
confirmations: confirmations,
messengerAddress: messengerAddress,
messengerABI: bridge_abi.L2MessengerMetaABI,
stopCh: make(chan struct{}),
stopped: 0,
batchProposer: newBatchProposer(bpCfg, orm),
ctx: ctx,
Client: client,
orm: orm,
processedMsgHeight: uint64(savedHeight),
confirmations: confirmations,
proofGenerationFreq: proofGenFreq,
skippedOpcodes: skippedOpcodes,
messengerAddress: messengerAddress,
messengerABI: messengerABI,
stopCh: make(chan struct{}),
stopped: 0,
}
}
// Start the Listening process
func (w *WatcherClient) Start() {
go func() {
if reflect.ValueOf(w.orm).IsNil() {
if w.orm == nil {
panic("must run L2 watcher with DB")
}
ctx, cancel := context.WithCancel(w.ctx)
// trigger by timer
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
// trace fetcher loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
number, err := utils.GetLatestConfirmedBlockNumber(ctx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
continue
}
w.tryFetchRunningMissingBlocks(ctx, number)
for {
select {
case <-ticker.C:
// get current height
number, err := w.BlockNumber(w.ctx)
if err != nil {
log.Error("Failed to get_BlockNumber", "err", err)
continue
}
}
}(ctx)
// event fetcher loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
number, err := utils.GetLatestConfirmedBlockNumber(ctx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
continue
}
w.FetchContractEvent(number)
if err = w.tryFetchRunningMissingBlocks(w.ctx, number); err != nil {
log.Error("Failed to fetchRunningMissingBlocks", "err", err)
}
}
}(ctx)
// batch proposer loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
w.batchProposer.tryProposeBatch()
// @todo handle error
err = w.fetchContractEvent(number)
if err != nil {
log.Error("Failed to fetchContractEvent", "err", err)
}
}
}(ctx)
<-w.stopCh
cancel()
case <-w.stopCh:
return
}
}
}()
}
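utils.GetLatestConfirmedBlockNumber, used in the goroutines above, is not part of this diff; conceptually it resolves "chain head minus N confirmations". A simplified, hypothetical sketch of that idea using only the ethclient API — not the repository's actual implementation, which also handles special rpc.BlockNumber values:
package sketch
import (
	"context"
	"github.com/scroll-tech/go-ethereum/ethclient"
)
// latestConfirmedBlockNumber returns the newest block number that already has
// `confirmations` blocks built on top of it (0 if the chain is still too short).
func latestConfirmedBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
	head, err := client.BlockNumber(ctx)
	if err != nil {
		return 0, err
	}
	if head < confirmations {
		return 0, nil
	}
	return head - confirmations, nil
}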
@@ -160,205 +116,150 @@ func (w *WatcherClient) Stop() {
w.stopCh <- struct{}{}
}
const blockTracesFetchLimit = uint64(10)
// try to fetch missing blocks if the DB is inconsistent with the chain
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, backTrackFrom uint64) error {
// Get the newest block in the DB; there must already be blocks at that point.
// Don't use "block_trace" table "trace" column's BlockTrace.Number,
// Don't use "block_result" table "content" column's BlockTrace.Number,
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
heightInDB, err := w.orm.GetBlockTracesLatestHeight()
heightInDB, err := w.orm.GetBlockResultsLatestHeight()
if err != nil {
log.Error("failed to GetBlockTracesLatestHeight", "err", err)
return
return fmt.Errorf("Failed to GetBlockResults in DB: %v", err)
}
// Can't get trace from genesis block, so the default start number is 1.
var from = uint64(1)
backTrackTo := uint64(0)
if heightInDB > 0 {
from = uint64(heightInDB) + 1
backTrackTo = uint64(heightInDB)
}
for ; from <= blockHeight; from += blockTracesFetchLimit {
to := from + blockTracesFetchLimit - 1
// start backtracking
heights := []uint64{}
toSkipped := []*types.BlockResult{}
toUnassigned := []*types.BlockResult{}
if to > blockHeight {
to = blockHeight
var (
header *types.Header
trace *types.BlockResult
)
for number := backTrackFrom; number > backTrackTo; number-- {
header, err = w.HeaderByNumber(ctx, big.NewInt(int64(number)))
if err != nil {
return fmt.Errorf("Failed to get HeaderByNumber: %v. number: %v", err, number)
}
// Get block traces and insert into db.
if err = w.getAndStoreBlockTraces(ctx, from, to); err != nil {
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
trace, err = w.GetBlockResultByHash(ctx, header.Hash())
if err != nil {
return fmt.Errorf("Failed to GetBlockResultByHash: %v. number: %v", err, number)
}
}
}
log.Info("Retrieved block result", "height", header.Number, "hash", header.Hash())
func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var traces []*types.BlockTrace
for number := from; number <= to; number++ {
log.Debug("retrieving block trace", "height", number)
trace, err2 := w.GetBlockTraceByNumber(ctx, big.NewInt(int64(number)))
if err2 != nil {
return fmt.Errorf("failed to GetBlockResultByHash: %v. number: %v", err2, number)
}
log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash().String())
traces = append(traces, trace)
}
if len(traces) > 0 {
if err := w.orm.InsertBlockTraces(traces); err != nil {
return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
heights = append(heights, number)
if number%w.proofGenerationFreq != 0 {
toSkipped = append(toSkipped, trace)
} else if TraceHasUnsupportedOpcodes(w.skippedOpcodes, trace) {
toSkipped = append(toSkipped, trace)
} else {
toUnassigned = append(toUnassigned, trace)
}
}
if len(heights) > 0 {
if err = w.orm.InsertPendingBlocks(ctx, heights); err != nil {
return fmt.Errorf("Failed to batch insert PendingBlocks: %v", err)
}
}
if len(toSkipped) > 0 {
if err = w.orm.InsertBlockResultsWithStatus(ctx, toSkipped, orm.BlockSkipped); err != nil {
return fmt.Errorf("failed to batch insert PendingBlocks: %v", err)
}
}
if len(toUnassigned) > 0 {
if err = w.orm.InsertBlockResultsWithStatus(ctx, toUnassigned, orm.BlockUnassigned); err != nil {
return fmt.Errorf("failed to batch insert PendingBlocks: %v", err)
}
}
return nil
}
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
defer func() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight)
toBlock := int64(blockHeight) - int64(w.confirmations)
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
to := from + contractEventsBlocksFetchLimit - 1
if to > toBlock {
to = toBlock
}
// warning: uint int conversion...
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 3)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("failed to get event logs", "err", err)
return
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
bridgeL2MsgSyncHeightGauge.Update(to)
continue
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return
}
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
}
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return
}
}
if err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
log.Error("failed to save l2 messages", "err", err)
return
}
w.processedMsgHeight = uint64(to)
bridgeL2MsgSyncHeightGauge.Update(to)
if toBlock < fromBlock {
return nil
}
// warning: uint int conversion...
query := ethereum.FilterQuery{
FromBlock: big.NewInt(fromBlock), // inclusive
ToBlock: big.NewInt(toBlock), // inclusive
Addresses: []common.Address{
w.messengerAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 1)
query.Topics[0][0] = common.HexToHash(sentMessageEventSignature)
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("Failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
return nil
}
log.Info("Received new L2 messages", "fromBlock", fromBlock, "toBlock", toBlock,
"cnt", len(logs))
eventLogs, err := parseBridgeEventLogs(logs, w.messengerABI)
if err != nil {
log.Error("Failed to parse emitted event log", "err", err)
return err
}
err = w.orm.SaveLayer2Messages(w.ctx, eventLogs)
if err == nil {
w.processedMsgHeight = uint64(toBlock)
}
return err
}
func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, error) {
func parseBridgeEventLogs(logs []types.Log, messengerABI *abi.ABI) ([]*orm.Layer2Message, error) {
// Need to use the contract ABI to parse the event logs
// Can only be tested after we have our contracts set up
var l2Messages []*orm.L2Message
var relayedMessages []relayedMessage
var parsedlogs []*orm.Layer2Message
for _, vLog := range logs {
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SentMessageEventSignature):
event := struct {
Target common.Address
Sender common.Address
Value *big.Int // uint256
Fee *big.Int // uint256
Deadline *big.Int // uint256
Message []byte
MessageNonce *big.Int // uint256
GasLimit *big.Int // uint256
}{}
event := struct {
Target common.Address
Sender common.Address
Value *big.Int // uint256
Fee *big.Int // uint256
Deadline *big.Int // uint256
Message []byte
MessageNonce *big.Int // uint256
GasLimit *big.Int // uint256
}{}
err := w.messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
if err != nil {
log.Error("failed to unpack layer2 SentMessage event", "err", err)
return l2Messages, relayedMessages, err
}
// target is in topics[1]
event.Target = common.HexToAddress(vLog.Topics[1].String())
l2Messages = append(l2Messages, &orm.L2Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Fee: event.Fee.String(),
GasLimit: event.GasLimit.Uint64(),
Deadline: event.Deadline.Uint64(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
// MsgHash is in topics[1]
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MsgHash,
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
// MsgHash is in topics[1]
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MsgHash,
txHash: vLog.TxHash,
isSuccessful: false,
})
default:
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
err := messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
if err != nil {
log.Error("Failed to unpack layer2 SentMessage event", "err", err)
return parsedlogs, err
}
// target is in topics[1]
event.Target = common.HexToAddress(vLog.Topics[1].String())
parsedlogs = append(parsedlogs, &orm.Layer2Message{
Nonce: event.MessageNonce.Uint64(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Fee: event.Fee.String(),
GasLimit: event.GasLimit.Uint64(),
Deadline: event.Deadline.Uint64(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
}
return l2Messages, relayedMessages, nil
return parsedlogs, nil
}

View File

@@ -1,5 +1,33 @@
package l2
import (
"errors"
"github.com/scroll-tech/go-ethereum/rpc"
)
// WatcherAPI is the watcher API service
type WatcherAPI interface {
ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error)
}
// ReplayBlockResultByHash is a temporary interface for easy testing.
func (r *WatcherClient) ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error) {
orm := r.orm
params := make(map[string]interface{})
if number, ok := blockNrOrHash.Number(); ok {
params["number"] = int64(number)
}
if hash, ok := blockNrOrHash.Hash(); ok {
params["hash"] = hash.String()
}
if len(params) == 0 {
return false, errors.New("empty params")
}
trace, err := orm.GetBlockResults(params)
if err != nil {
return false, err
}
r.Send(&trace[0])
return true, nil
}
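A caller builds the rpc.BlockNumberOrHash argument with go-ethereum's helper constructors. A minimal usage sketch against the WatcherAPI interface defined above (block number 4 and the hash argument are placeholders):
package sketch
import (
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/rpc"
	"scroll-tech/bridge/l2"
)
// replayByNumberAndHash shows the two ways to build the argument.
func replayByNumberAndHash(w l2.WatcherAPI, h common.Hash) error {
	// by block number
	if _, err := w.ReplayBlockResultByHash(rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(4))); err != nil {
		return err
	}
	// by block hash (canonical flag false: the hash need not be canonical)
	_, err := w.ReplayBlockResultByHash(rpc.BlockNumberOrHashWithHash(h, false))
	return err
}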

View File

@@ -1,206 +1,319 @@
package l2
package l2_test
import (
"context"
"crypto/ecdsa"
"encoding/json"
"math/big"
"strconv"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/l2"
"scroll-tech/common/docker"
"scroll-tech/common/utils"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
db_config "scroll-tech/database"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock"
"scroll-tech/bridge/mock_bridge"
)
func testCreateNewWatcherAndStop(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
const TEST_BUFFER = 500
var TEST_CONFIG = &mock.TestConfig{
L1GethTestConfig: mock.L1GethTestConfig{
HPort: 0,
WPort: 8571,
},
L2GethTestConfig: mock.L2GethTestConfig{
HPort: 0,
WPort: 8567,
},
DbTestconfig: mock.DbTestconfig{
DbName: "testwatcher_db",
DbPort: 5438,
DB_CONFIG: &db_config.DBConfig{
DriverName: utils.GetEnvWithDefault("TEST_DB_DRIVER", "postgres"),
DSN: utils.GetEnvWithDefault("TEST_DB_DSN", "postgres://postgres:123456@localhost:5438/testwatcher_db?sslmode=disable"),
},
},
}
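utils.GetEnvWithDefault (from scroll-tech/common/utils) lets CI override the test DB driver and DSN above. Its behavior is the usual "env var or fallback" lookup; a hedged sketch, not necessarily the repository's exact implementation:
package sketch
import "os"
// getEnvWithDefault returns the value of key, or fallback when it is unset or empty.
func getEnvWithDefault(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}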
var (
// previousHeight stores the previous chain height
previousHeight uint64
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
l2Backend *l2.Backend
)
func setenv(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
l1gethImg = mock.NewTestL1Docker(t, TEST_CONFIG)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
l2Backend, l2gethImg, dbImg = mock.L2gethDocker(t, cfg, TEST_CONFIG)
}
l2cfg := cfg.L2Config
rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
rc.Start()
defer rc.Stop()
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKeys)
assert.NoError(t, err)
// Create several transactions and commit to block
numTransactions := 3
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
for i := 0; i < numTransactions; i++ {
_, err = newSender.SendTransaction(strconv.Itoa(1000+i), &toAddress, big.NewInt(1000000000), nil)
func TestWatcherFunction(t *testing.T) {
setenv(t)
t.Run("TestL2Backend", func(t *testing.T) {
err := l2Backend.Start()
assert.NoError(t, err)
l2Backend.Stop()
})
t.Run("TestCreateNewWatcherAndStop", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
<-newSender.ConfirmChan()
}
blockNum, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
}
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
func testMonitorBridgeContract(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
messengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
assert.NoError(t, err)
previousHeight, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
l2db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
proofGenerationFreq := cfg.L2Config.ProofGenerationFreq
if proofGenerationFreq == 0 {
proofGenerationFreq = 1
}
rc := l2.NewL2WatcherClient(context.Background(), client, cfg.L2Config.Confirmations, proofGenerationFreq, skippedOpcodes, cfg.L2Config.L2MessengerAddress, messengerABI, l2db)
rc.Start()
// deploy mock bridge
_, tx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)
// Create several transactions and commit to block
numTransactions := 3
rc := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
rc.Start()
defer rc.Stop()
for i := 0; i < numTransactions; i++ {
tx := mock.SendTxToL2Client(t, client, cfg.L2Config.RelayerConfig.PrivateKey)
// wait for mining
_, err = bind.WaitMined(context.Background(), client, tx)
assert.NoError(t, err)
}
// Call the mock_bridge instance's sendMessage to emit events
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
<-time.After(10 * time.Second)
blockNum, err := client.BlockNumber(context.Background())
assert.NoError(t, err)
assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
rc.Stop()
l2db.Close()
})
// extra block mined
toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message = []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
t.Run("TestMonitorBridgeContract", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
t.Log("confirmations:", cfg.L2Config.Confirmations)
// wait for the events to be processed
<-time.After(6 * time.Second)
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
var latestHeight uint64
latestHeight, err = l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
t.Log("Latest height is", latestHeight)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
previousHeight, err = client.BlockNumber(context.Background())
assert.NoError(t, err)
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("Height in DB is", height)
assert.Greater(t, height, int64(previousHeight))
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
}
auth := prepareAuth(t, client, cfg.L2Config.RelayerConfig.PrivateKey)
func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// deploy mock bridge
_, tx, instance, err := mock_bridge.DeployMockBridge(auth, client)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), client, tx)
assert.NoError(t, err)
previousHeight, err := l2Cli.BlockNumber(context.Background()) // shadow the global previousHeight
assert.NoError(t, err)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
rc := prepareRelayerClient(client, db, address)
rc.Start()
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
_, trx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
assert.NoError(t, err)
rc := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
rc.Start()
defer rc.Stop()
// Call the mock_bridge instance's sendMessage to emit events multiple times
numTransactions := 4
var tx *types.Transaction
for i := 0; i < numTransactions; i++ {
// Call the mock_bridge instance's sendMessage to emit events
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr)
nonce, err := client.PendingNonceAt(context.Background(), addr)
assert.NoError(t, err)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), client, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// extra block mined
addr = common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := client.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message = []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), client, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// wait for the events to be processed
<-time.After(6 * time.Second)
var latestHeight uint64
latestHeight, err = client.BlockNumber(context.Background())
assert.NoError(t, err)
t.Log("Latest height is", latestHeight)
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("Height in DB is", height)
assert.Greater(t, height, int64(previousHeight))
msgs, err := db.GetL2UnprocessedMessages()
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
rc.Stop()
db.Close()
})
t.Run("TestFetchMultipleSentMessageInOneBlock", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
previousHeight, err := client.BlockNumber(context.Background()) // shadow the global previousHeight
assert.NoError(t, err)
auth := prepareAuth(t, client, cfg.L2Config.RelayerConfig.PrivateKey)
_, trx, instance, err := mock_bridge.DeployMockBridge(auth, client)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), client, trx)
assert.NoError(t, err)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
rc := prepareRelayerClient(client, db, address)
rc.Start()
// Call the mock_bridge instance's sendMessage to emit events multiple times
numTransactions := 4
var tx *types.Transaction
for i := 0; i < numTransactions; i++ {
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := client.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
assert.NoError(t, err)
}
receipt, err := bind.WaitMined(context.Background(), client, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// extra block mined
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := client.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
assert.NoError(t, err)
}
receipt, err = bind.WaitMined(context.Background(), client, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// wait for the watcher to process the events
<-time.After(6 * time.Second)
// extra block mined
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
msgs, err := db.GetL2UnprocessedMessages()
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
// wait for the watcher to process the events
<-time.After(6 * time.Second)
rc.Stop()
db.Close()
})
// Teardown
t.Cleanup(func() {
assert.NoError(t, l1gethImg.Stop())
assert.NoError(t, l2gethImg.Stop())
assert.NoError(t, dbImg.Stop())
})
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
}
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, bpCfg, contractAddr, db)
func TestTraceHasUnsupportedOpcodes(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
delegateTrace, err := os.ReadFile("../../common/testdata/blockResult_delegate.json")
assert.NoError(t, err)
trace := &types.BlockResult{}
assert.NoError(t, json.Unmarshal(delegateTrace, &trace))
unsupportedOpcodes := make(map[string]struct{})
for _, val := range cfg.L2Config.SkippedOpcodes {
unsupportedOpcodes[val] = struct{}{}
}
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
func prepareRelayerClient(client *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *l2.WatcherClient {
messengerABI, _ := bridge_abi.L1MessengerMetaData.GetAbi()
return l2.NewL2WatcherClient(context.Background(), client, 0, 1, map[string]struct{}{}, contractAddr, messengerABI, db)
}
func prepareAuth(t *testing.T, client *ethclient.Client, private string) *bind.TransactOpts {
privateKey, err := crypto.HexToECDSA(private)
assert.NoError(t, err)
publicKey := privateKey.Public()
publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
assert.True(t, ok)
fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA)
nonce, err := client.PendingNonceAt(context.Background(), fromAddress)
assert.NoError(t, err)
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(53077))
assert.NoError(t, err)
auth.Value = big.NewInt(0) // in wei
assert.NoError(t, err)
auth.GasPrice, err = l2Cli.SuggestGasPrice(context.Background())
auth.Nonce = big.NewInt(int64(nonce))
auth.Value = big.NewInt(0) // in wei
auth.GasLimit = uint64(30000000) // in units
auth.GasPrice, err = client.SuggestGasPrice(context.Background())
assert.NoError(t, err)
return auth
}

bridge/mock/mock.go

@@ -0,0 +1,251 @@
package mock
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"encoding/binary"
"encoding/json"
"io"
"math/big"
"net"
"os"
"testing"
"time"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/gorilla/websocket"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/common/message"
"scroll-tech/coordinator"
coordinator_config "scroll-tech/coordinator/config"
bridge_config "scroll-tech/bridge/config"
"scroll-tech/bridge/l2"
"scroll-tech/common/docker"
docker_db "scroll-tech/database/docker"
)
// PerformHandshake performs the roller handshake over an existing websocket connection to the roller manager.
func PerformHandshake(t *testing.T, c *websocket.Conn) {
// Try to perform handshake
pk, sk := generateKeyPair()
authMsg := &message.AuthMessage{
Identity: message.Identity{
Name: "testRoller",
Timestamp: time.Now().UnixNano(),
PublicKey: common.Bytes2Hex(pk),
},
Signature: "",
}
hash, err := authMsg.Identity.Hash()
assert.NoError(t, err)
sig, err := secp256k1.Sign(hash, sk)
assert.NoError(t, err)
authMsg.Signature = common.Bytes2Hex(sig)
b, err := json.Marshal(authMsg)
assert.NoError(t, err)
msg := &message.Msg{
Type: message.Register,
Payload: b,
}
b, err = json.Marshal(msg)
assert.NoError(t, err)
assert.NoError(t, c.WriteMessage(websocket.BinaryMessage, b))
}
func generateKeyPair() (pubkey, privkey []byte) {
key, err := ecdsa.GenerateKey(secp256k1.S256(), rand.Reader)
if err != nil {
panic(err)
}
pubkey = elliptic.Marshal(secp256k1.S256(), key.X, key.Y)
privkey = make([]byte, 32)
blob := key.D.Bytes()
copy(privkey[32-len(blob):], blob)
return pubkey, privkey
}
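As a companion to the handshake above, here is a minimal sketch of how the receiving side could verify the signed identity. It only uses packages already imported in this file; the helper name verifyAuth is illustrative and not part of this change:
// verifyAuth is a hypothetical helper: it recomputes the identity hash and
// checks the advertised 65-byte secp256k1 signature against the public key.
func verifyAuth(authMsg *message.AuthMessage) bool {
    hash, err := authMsg.Identity.Hash()
    if err != nil {
        return false
    }
    sig := common.FromHex(authMsg.Signature)
    pk := common.FromHex(authMsg.Identity.PublicKey)
    if len(sig) != 65 || len(pk) == 0 {
        return false
    }
    // crypto.VerifySignature takes the 64-byte [R || S] part, without the recovery id.
    return crypto.VerifySignature(pk, hash, sig[:64])
}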
// SetupMockVerifier sets up a mocked verifier for a test case.
func SetupMockVerifier(t *testing.T, verifierEndpoint string) {
err := os.RemoveAll(verifierEndpoint)
assert.NoError(t, err)
l, err := net.Listen("unix", verifierEndpoint)
assert.NoError(t, err)
go func() {
conn, err := l.Accept()
assert.NoError(t, err)
// Simply read all incoming messages and send a true boolean straight back.
for {
// Read the 4-byte little-endian length prefix
buf := make([]byte, 4)
_, err = io.ReadFull(conn, buf)
assert.NoError(t, err)
// Read the message body (Uint32 here: calling Uint64 on a 4-byte buffer would panic)
msgLength := binary.LittleEndian.Uint32(buf)
buf = make([]byte, msgLength)
_, err = io.ReadFull(conn, buf)
assert.NoError(t, err)
// Return boolean
buf = []byte{1}
_, err = conn.Write(buf)
assert.NoError(t, err)
}
}()
}
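For clarity, a sketch of the client side of this framing: write the 4-byte little-endian length prefix, then the payload, and read back the single result byte. The prefix width mirrors the mock above and is an assumption about the real verifier protocol; all packages used are already imported in this file.
// callMockVerifier is a hypothetical client for the mock verifier above.
func callMockVerifier(endpoint string, payload []byte) (bool, error) {
    conn, err := net.Dial("unix", endpoint)
    if err != nil {
        return false, err
    }
    defer conn.Close()
    // Write the length prefix followed by the payload.
    lenBuf := make([]byte, 4)
    binary.LittleEndian.PutUint32(lenBuf, uint32(len(payload)))
    if _, err = conn.Write(lenBuf); err != nil {
        return false, err
    }
    if _, err = conn.Write(payload); err != nil {
        return false, err
    }
    // The mock replies with a single byte: 1 means "verified".
    result := make([]byte, 1)
    if _, err = io.ReadFull(conn, result); err != nil {
        return false, err
    }
    return result[0] == 1, nil
}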
// L1GethTestConfig is the http and web socket port of l1geth docker
type L1GethTestConfig struct {
HPort int
WPort int
}
// L2GethTestConfig is the http and web socket port of l2geth docker
type L2GethTestConfig struct {
HPort int
WPort int
}
// DbTestconfig is the test config of database docker
type DbTestconfig struct {
DbName string
DbPort int
DB_CONFIG *database.DBConfig
}
// TestConfig is the config for test
type TestConfig struct {
L1GethTestConfig
L2GethTestConfig
DbTestconfig
}
// NewTestL1Docker starts and returns l1geth docker
func NewTestL1Docker(t *testing.T, tcfg *TestConfig) docker.ImgInstance {
img_geth := docker.NewImgGeth(t, "scroll_l1geth", "", "", tcfg.L1GethTestConfig.HPort, tcfg.L1GethTestConfig.WPort)
assert.NoError(t, img_geth.Start())
return img_geth
}
// NewTestL2Docker starts and returns l2geth docker
func NewTestL2Docker(t *testing.T, tcfg *TestConfig) docker.ImgInstance {
img_geth := docker.NewImgGeth(t, "scroll_l2geth", "", "", tcfg.L2GethTestConfig.HPort, tcfg.L2GethTestConfig.WPort)
assert.NoError(t, img_geth.Start())
return img_geth
}
// GetDbDocker starts and returns database docker
func GetDbDocker(t *testing.T, tcfg *TestConfig) docker.ImgInstance {
img_db := docker_db.NewImgDB(t, "postgres", "123456", tcfg.DbName, tcfg.DbPort)
assert.NoError(t, img_db.Start())
return img_db
}
// L2gethDocker returns a mock l2geth client backed by docker containers for tests
func L2gethDocker(t *testing.T, cfg *bridge_config.Config, tcfg *TestConfig) (*l2.Backend, docker.ImgInstance, docker.ImgInstance) {
// initialize l2geth docker image
img_geth := NewTestL2Docker(t, tcfg)
cfg.L2Config.Endpoint = img_geth.Endpoint()
// initialize db docker image
img_db := GetDbDocker(t, tcfg)
db, err := database.NewOrmFactory(&database.DBConfig{
DriverName: "postgres",
DSN: img_db.Endpoint(),
})
assert.NoError(t, err)
client, err := l2.New(context.Background(), cfg.L2Config, db)
assert.NoError(t, err)
return client, img_geth, img_db
}
// SetupRollerManager returns a coordinator.Manager for the testcase
func SetupRollerManager(t *testing.T, cfg *coordinator_config.Config, orm database.OrmFactory) *coordinator.Manager {
// Load config file.
ctx := context.Background()
SetupMockVerifier(t, cfg.RollerManagerConfig.VerifierEndpoint)
rollerManager, err := coordinator.New(ctx, cfg.RollerManagerConfig, orm)
assert.NoError(t, err)
// Start rollermanager modules.
err = rollerManager.Start()
assert.NoError(t, err)
return rollerManager
}
// ClearDB clears db
func ClearDB(t *testing.T, db_cfg *database.DBConfig) {
factory, err := database.NewOrmFactory(db_cfg)
assert.NoError(t, err)
db := factory.GetDB()
version0 := int64(0)
err = migrate.Rollback(db.DB, &version0)
assert.NoError(t, err)
err = migrate.Migrate(db.DB)
assert.NoError(t, err)
err = db.DB.Close()
assert.NoError(t, err)
}
// PrepareDB returns a DB handle for the testcase
func PrepareDB(t *testing.T, db_cfg *database.DBConfig) database.OrmFactory {
db, err := database.NewOrmFactory(db_cfg)
assert.NoError(t, err)
return db
}
// SendTxToL2Client sends a default tx via the l2geth client
func SendTxToL2Client(t *testing.T, client *ethclient.Client, private string) *types.Transaction {
privateKey, err := crypto.HexToECDSA(private)
assert.NoError(t, err)
publicKey := privateKey.Public()
publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
assert.True(t, ok)
fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA)
nonce, err := client.PendingNonceAt(context.Background(), fromAddress)
assert.NoError(t, err)
value := big.NewInt(1000000000) // in wei
gasLimit := uint64(30000000) // in units
gasPrice, err := client.SuggestGasPrice(context.Background())
assert.NoError(t, err)
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
tx := types.NewTransaction(nonce, toAddress, value, gasLimit, gasPrice, nil)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
signedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), privateKey)
assert.NoError(t, err)
assert.NoError(t, client.SendTransaction(context.Background(), signedTx))
return signedTx
}
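To show how these helpers fit together, here is a hypothetical test wiring up the l2geth and database containers; the port numbers and database name are placeholders, and the omitted body would exercise the watcher or relayer against l2geth.Endpoint():
package mytest

import (
    "testing"

    "scroll-tech/bridge/mock"
    "scroll-tech/database"
)

func TestWithDockerBackends(t *testing.T) {
    tcfg := &mock.TestConfig{
        L2GethTestConfig: mock.L2GethTestConfig{HPort: 0, WPort: 8568}, // placeholder ports
        DbTestconfig: mock.DbTestconfig{
            DbName:    "testmock_db", // placeholder name
            DbPort:    5436,
            DB_CONFIG: &database.DBConfig{DriverName: "postgres"},
        },
    }
    l2geth := mock.NewTestL2Docker(t, tcfg)
    defer func() { _ = l2geth.Stop() }()

    imgDb := mock.GetDbDocker(t, tcfg)
    defer func() { _ = imgDb.Stop() }()

    tcfg.DB_CONFIG.DSN = imgDb.Endpoint()
    mock.ClearDB(t, tcfg.DB_CONFIG)
    db := mock.PrepareDB(t, tcfg.DB_CONFIG)
    defer db.Close()

    // ... run watcher / relayer tests against l2geth.Endpoint() and db ...
}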


@@ -1,187 +0,0 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract MockBridgeL1 {
/*********************************
* Events from L1ScrollMessenger *
*********************************/
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
event MessageDropped(bytes32 indexed msgHash);
event RelayedMessage(bytes32 indexed msgHash);
event FailedRelayedMessage(bytes32 indexed msgHash);
/************************
* Events from ZKRollup *
************************/
/// @notice Emitted when a new batch is committed.
/// @param _batchHash The hash of the batch
/// @param _batchIndex The index of the batch
/// @param _parentHash The hash of parent batch
event CommitBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
/// @notice Emitted when a batch is reverted.
/// @param _batchId The identification of the batch.
event RevertBatch(bytes32 indexed _batchId);
/// @notice Emitted when a batch is finalized.
/// @param _batchHash The hash of the batch
/// @param _batchIndex The index of the batch
/// @param _parentHash The hash of parent batch
event FinalizeBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
/***********
* Structs *
***********/
struct L2MessageProof {
uint256 batchIndex;
uint256 blockHeight;
bytes merkleProof;
}
/// @dev The transaction struct
struct Layer2Transaction {
address caller;
uint64 nonce;
address target;
uint64 gas;
uint256 gasPrice;
uint256 value;
bytes data;
// signature
uint256 r;
uint256 s;
uint64 v;
}
/// @dev The block header struct
struct Layer2BlockHeader {
bytes32 blockHash;
bytes32 parentHash;
uint256 baseFee;
bytes32 stateRoot;
uint64 blockHeight;
uint64 gasUsed;
uint64 timestamp;
bytes extraData;
Layer2Transaction[] txs;
}
/// @dev The batch struct, the batch hash is always the last block hash of `blocks`.
struct Layer2Batch {
uint64 batchIndex;
// The hash of the last block in the parent batch
bytes32 parentHash;
Layer2BlockHeader[] blocks;
}
struct Layer2BatchStored {
bytes32 batchHash;
bytes32 parentHash;
uint64 batchIndex;
bool verified;
}
/*************
* Variables *
*************/
/// @notice Message nonce, used to avoid replay attacks.
uint256 public messageNonce;
/// @notice Mapping from batch id to batch struct.
mapping(bytes32 => Layer2BatchStored) public batches;
/************************************
* Functions from L1ScrollMessenger *
************************************/
function sendMessage(
address _to,
uint256 _fee,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
uint256 _nonce = messageNonce;
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
messageNonce += 1;
}
function relayMessageWithProof(
address _from,
address _to,
uint256 _value,
uint256 _fee,
uint256 _deadline,
uint256 _nonce,
bytes memory _message,
L2MessageProof memory
) external {
bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
emit RelayedMessage(_msghash);
}
/***************************
* Functions from ZKRollup *
***************************/
function commitBatch(Layer2Batch memory _batch) external {
bytes32 _batchHash = _batch.blocks[_batch.blocks.length - 1].blockHash;
bytes32 _batchId = _computeBatchId(_batchHash, _batch.parentHash, _batch.batchIndex);
Layer2BatchStored storage _batchStored = batches[_batchId];
_batchStored.batchHash = _batchHash;
_batchStored.parentHash = _batch.parentHash;
_batchStored.batchIndex = _batch.batchIndex;
emit CommitBatch(_batchId, _batchHash, _batch.batchIndex, _batch.parentHash);
}
function revertBatch(bytes32 _batchId) external {
emit RevertBatch(_batchId);
}
function finalizeBatchWithProof(
bytes32 _batchId,
uint256[] memory,
uint256[] memory
) external {
Layer2BatchStored storage _batch = batches[_batchId];
uint256 _batchIndex = _batch.batchIndex;
emit FinalizeBatch(_batchId, _batch.batchHash, _batchIndex, _batch.parentHash);
}
/// @dev Internal function to compute a unique batch id for mapping.
/// @param _batchHash The hash of the batch.
/// @param _parentHash The hash of the parent batch.
/// @param _batchIndex The index of the batch.
/// @return Return the computed batch id.
function _computeBatchId(
bytes32 _batchHash,
bytes32 _parentHash,
uint256 _batchIndex
) internal pure returns (bytes32) {
return keccak256(abi.encode(_batchHash, _parentHash, _batchIndex));
}
}
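For reference, the batch id emitted above can be reproduced off-chain. A minimal Go sketch, assuming only that `abi.encode` of two bytes32 values and a uint256 is the concatenation of three 32-byte words (which holds for static types):
package main

import (
    "fmt"
    "math/big"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
)

// computeBatchID mirrors MockBridgeL1._computeBatchId:
// keccak256(abi.encode(batchHash, parentHash, batchIndex)).
func computeBatchID(batchHash, parentHash common.Hash, batchIndex *big.Int) common.Hash {
    return crypto.Keccak256Hash(
        batchHash.Bytes(),
        parentHash.Bytes(),
        common.LeftPadBytes(batchIndex.Bytes(), 32), // uint256 is left-padded to 32 bytes
    )
}

func main() {
    id := computeBatchID(common.Hash{0x01}, common.Hash{0x02}, big.NewInt(7))
    fmt.Println("batch id:", id.Hex())
}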


@@ -1,67 +0,0 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract MockBridgeL2 {
/*********************************
* Events from L2ScrollMessenger *
*********************************/
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
event MessageDropped(bytes32 indexed msgHash);
event RelayedMessage(bytes32 indexed msgHash);
event FailedRelayedMessage(bytes32 indexed msgHash);
/*************
* Variables *
*************/
/// @notice Message nonce, used to avoid replay attacks.
uint256 public messageNonce;
/************************************
* Functions from L2ScrollMessenger *
************************************/
function sendMessage(
address _to,
uint256 _fee,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
uint256 _nonce = messageNonce;
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
bytes32 _msghash = keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message));
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
messageNonce = _nonce + 1;
}
function relayMessageWithProof(
address _from,
address _to,
uint256 _value,
uint256 _fee,
uint256 _deadline,
uint256 _nonce,
bytes memory _message
) external {
bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
emit RelayedMessage(_msghash);
}
}


@@ -0,0 +1,42 @@
//SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract Mock_Bridge {
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
/// @notice Message nonce, used to avoid replay attacks.
uint256 public messageNonce;
function sendMessage(
address _to,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
// @todo compute fee
uint256 _fee = 0;
uint256 _nonce = messageNonce;
require(msg.value >= _fee, "cannot pay fee");
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
unchecked {
messageNonce = _nonce + 1;
}
}
}
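A hedged sketch of how a watcher could pick up these events without the generated bindings, by filtering logs on the keccak hash of the event signature; the endpoint and contract address below are placeholders:
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/scroll-tech/go-ethereum"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
    client, err := ethclient.Dial("ws://localhost:8546") // placeholder endpoint
    if err != nil {
        log.Fatal(err)
    }
    // The first topic of every SentMessage log is the keccak256 of its canonical signature.
    sentMessageTopic := crypto.Keccak256Hash(
        []byte("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)"),
    )
    query := ethereum.FilterQuery{
        Addresses: []common.Address{common.HexToAddress("0x0000000000000000000000000000000000000000")}, // placeholder Mock_Bridge address
        Topics:    [][]common.Hash{{sentMessageTopic}},
    }
    logs, err := client.FilterLogs(context.Background(), query)
    if err != nil {
        log.Fatal(err)
    }
    for _, l := range logs {
        fmt.Println("SentMessage in block", l.BlockNumber, "tx", l.TxHash.Hex())
    }
}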


@@ -1,169 +0,0 @@
package sender
import (
"context"
"crypto/ecdsa"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
)
type accountPool struct {
client *ethclient.Client
minBalance *big.Int
accounts map[common.Address]*bind.TransactOpts
accsCh chan *bind.TransactOpts
}
// newAccounts creates an accountPool instance.
func newAccountPool(ctx context.Context, minBalance *big.Int, client *ethclient.Client, privs []*ecdsa.PrivateKey) (*accountPool, error) {
if minBalance == nil {
minBalance = big.NewInt(0)
minBalance.SetString("100000000000000000000", 10)
}
accs := &accountPool{
client: client,
minBalance: minBalance,
accounts: make(map[common.Address]*bind.TransactOpts, len(privs)),
accsCh: make(chan *bind.TransactOpts, len(privs)+2),
}
// get chainID from client
chainID, err := client.ChainID(ctx)
if err != nil {
return nil, err
}
for _, privStr := range privs {
auth, err := bind.NewKeyedTransactorWithChainID(privStr, chainID)
if err != nil {
log.Error("failed to create account", "chainID", chainID.String(), "err", err)
return nil, err
}
// Set pending nonce
nonce, err := client.PendingNonceAt(ctx, auth.From)
if err != nil {
return nil, err
}
auth.Nonce = big.NewInt(int64(nonce))
accs.accounts[auth.From] = auth
accs.accsCh <- auth
}
return accs, accs.checkAndSetBalances(ctx)
}
// getAccount get auth from channel.
func (a *accountPool) getAccount() *bind.TransactOpts {
select {
case auth := <-a.accsCh:
return auth
default:
return nil
}
}
// releaseAccount set used auth into channel.
func (a *accountPool) releaseAccount(auth *bind.TransactOpts) {
a.accsCh <- auth
}
// resetNonce resets the nonce if sending a signed tx failed.
func (a *accountPool) resetNonce(ctx context.Context, auth *bind.TransactOpts) {
nonce, err := a.client.PendingNonceAt(ctx, auth.From)
if err != nil {
log.Warn("failed to reset nonce", "address", auth.From.String(), "err", err)
return
}
auth.Nonce = big.NewInt(int64(nonce))
}
// checkAndSetBalances checks every account balance and tops up accounts that are below the min balance.
func (a *accountPool) checkAndSetBalances(ctx context.Context) error {
var (
root *bind.TransactOpts
maxBls = big.NewInt(0)
lostAuths []*bind.TransactOpts
)
for addr, auth := range a.accounts {
bls, err := a.client.BalanceAt(ctx, addr, nil)
if err != nil || bls.Cmp(a.minBalance) < 0 {
if err != nil {
log.Warn("failed to get balance", "address", addr.String(), "err", err)
return err
}
lostAuths = append(lostAuths, auth)
continue
} else if bls.Cmp(maxBls) > 0 { // Find the biggest balance account.
root, maxBls = auth, bls
}
}
if root == nil {
return fmt.Errorf("no account has enough balance")
}
if len(lostAuths) == 0 {
return nil
}
var (
tx *types.Transaction
err error
)
for _, auth := range lostAuths {
tx, err = a.createSignedTx(root, &auth.From, a.minBalance)
if err != nil {
return err
}
err = a.client.SendTransaction(ctx, tx)
if err != nil {
log.Error("Failed to send balance to account", "err", err)
return err
}
log.Debug("send balance to account", "account", auth.From.String(), "balance", a.minBalance.String())
}
// wait until mined
if _, err = bind.WaitMined(ctx, a.client, tx); err != nil {
return err
}
// Reset root's nonce.
a.resetNonce(ctx, root)
return nil
}
func (a *accountPool) createSignedTx(from *bind.TransactOpts, to *common.Address, value *big.Int) (*types.Transaction, error) {
gasPrice, err := a.client.SuggestGasPrice(context.Background())
if err != nil {
return nil, err
}
gasPrice.Mul(gasPrice, big.NewInt(2))
// Get pending nonce
nonce, err := a.client.PendingNonceAt(context.Background(), from.From)
if err != nil {
return nil, err
}
tx := types.NewTx(&types.LegacyTx{
Nonce: nonce,
To: to,
Value: value,
Gas: 500000,
GasPrice: gasPrice,
})
signedTx, err := from.Signer(from.From, tx)
if err != nil {
return nil, err
}
return signedTx, nil
}


@@ -3,16 +3,14 @@ package sender
import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
"math/big"
"reflect"
"strings"
"sync"
"sync/atomic"
"time"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/math"
@@ -20,8 +18,6 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/config"
)
@@ -36,11 +32,6 @@ const (
LegacyTxType = "LegacyTx"
)
var (
// ErrNoAvailableAccount indicates no available account error in the account pool.
ErrNoAvailableAccount = errors.New("sender has no available account to send transaction")
)
// DefaultSenderConfig The default config
var DefaultSenderConfig = config.SenderConfig{
Endpoint: "",
@@ -72,7 +63,6 @@ type PendingTransaction struct {
submitAt uint64
id string
feeData *FeeData
signer *bind.TransactOpts
tx *types.Transaction
}
@@ -83,20 +73,20 @@ type Sender struct {
chainID *big.Int // The chain id of the endpoint
ctx context.Context
// account fields.
auths *accountPool
mu sync.Mutex
auth *bind.TransactOpts
blockNumber uint64 // Current block number on chain.
baseFeePerGas uint64 // Current base fee per gas on chain
pendingTxs sync.Map // Mapping from nonce to pending transaction
confirmCh chan *Confirmation
sendTxErrCh chan error
stopCh chan struct{}
}
// NewSender returns a new instance of transaction sender
// txConfirmationCh is used to notify confirmed transaction
func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.PrivateKey) (*Sender, error) {
func NewSender(ctx context.Context, config *config.SenderConfig, prv *ecdsa.PrivateKey) (*Sender, error) {
if config == nil {
config = &DefaultSenderConfig
}
@@ -111,10 +101,20 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
return nil, err
}
auths, err := newAccountPool(ctx, config.MinBalance, client, privs)
auth, err := bind.NewKeyedTransactorWithChainID(prv, chainID)
if err != nil {
return nil, fmt.Errorf("failed to create account pool, err: %v", err)
log.Error("failed to create account", "chainID", chainID.String(), "err", err)
return nil, err
}
log.Info("sender", "chainID", chainID.String(), "address", auth.From.String())
// set nonce
nonce, err := client.PendingNonceAt(ctx, auth.From)
if err != nil {
log.Error("failed to get pending nonce", "address", auth.From.String(), "err", err)
return nil, err
}
auth.Nonce = big.NewInt(int64(nonce))
// get header by number
header, err := client.HeaderByNumber(ctx, nil)
@@ -122,29 +122,20 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
return nil, err
}
var baseFeePerGas uint64
if config.TxType == DynamicFeeTxType {
if header.BaseFee != nil {
baseFeePerGas = header.BaseFee.Uint64()
} else {
return nil, errors.New("DynamicFeeTxType not supported, header.BaseFee nil")
}
}
sender := &Sender{
ctx: ctx,
config: config,
client: client,
chainID: chainID,
auths: auths,
auth: auth,
sendTxErrCh: make(chan error, 4),
confirmCh: make(chan *Confirmation, 128),
blockNumber: header.Number.Uint64(),
baseFeePerGas: baseFeePerGas,
baseFeePerGas: header.BaseFee.Uint64(),
pendingTxs: sync.Map{},
stopCh: make(chan struct{}),
}
go sender.loop(ctx)
go sender.loop()
return sender, nil
}
@@ -160,14 +151,8 @@ func (s *Sender) ConfirmChan() <-chan *Confirmation {
return s.confirmCh
}
// NumberOfAccounts return the count of accounts.
func (s *Sender) NumberOfAccounts() int {
return len(s.auths.accounts)
}
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte) (*FeeData, error) {
// estimate gas limit
gasLimit, err := s.client.EstimateGas(s.ctx, geth.CallMsg{From: auth.From, To: target, Value: value, Data: data})
func (s *Sender) getFeeData(msg ethereum.CallMsg) (*FeeData, error) {
gasLimit, err := s.client.EstimateGas(s.ctx, msg)
if err != nil {
return nil, err
}
@@ -201,38 +186,35 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
// SendTransaction send a signed L2tL1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte) (hash common.Hash, err error) {
// We occupy the ID, in case some other threads call with the same ID in the same time
if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
if _, ok := s.pendingTxs.Load(ID); ok {
return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
}
// get
auth := s.auths.getAccount()
if auth == nil {
s.pendingTxs.Delete(ID) // release the ID on failure
return common.Hash{}, ErrNoAvailableAccount
}
defer s.auths.releaseAccount(auth)
defer func() {
if err != nil {
s.pendingTxs.Delete(ID) // release the ID on failure
}
}()
var (
// estimate gas limit
call = ethereum.CallMsg{
From: s.auth.From,
To: target,
Gas: 0,
GasPrice: nil,
GasFeeCap: nil,
GasTipCap: nil,
Value: value,
Data: data,
AccessList: make(types.AccessList, 0),
}
feeData *FeeData
tx *types.Transaction
)
// estimate gas fee
if feeData, err = s.getFeeData(auth, target, value, data); err != nil {
if feeData, err = s.getFeeData(call); err != nil {
return
}
if tx, err = s.createAndSendTx(auth, feeData, target, value, data, nil); err == nil {
if tx, err = s.createAndSendTx(feeData, target, value, data); err == nil {
// add pending transaction to queue
pending := &PendingTransaction{
tx: tx,
id: ID,
signer: auth,
submitAt: atomic.LoadUint64(&s.blockNumber),
feeData: feeData,
}
@@ -243,17 +225,14 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
return
}
func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (tx *types.Transaction, err error) {
func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, value *big.Int, data []byte) (tx *types.Transaction, err error) {
s.mu.Lock()
defer s.mu.Unlock()
var (
nonce = auth.Nonce.Uint64()
nonce = s.auth.Nonce.Uint64()
txData types.TxData
)
// this is a resubmit call, override the nonce
if overrideNonce != nil {
nonce = *overrideNonce
}
// lock here to avoid blocking when calling `SuggestGasPrice`
switch s.config.TxType {
case LegacyTxType:
@@ -301,29 +280,25 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, targ
}
// sign and send
tx, err = auth.Signer(auth.From, types.NewTx(txData))
tx, err = s.auth.Signer(s.auth.From, types.NewTx(txData))
if err != nil {
log.Error("failed to sign tx", "err", err)
return
}
if err = s.client.SendTransaction(s.ctx, tx); err != nil {
log.Error("failed to send tx", "tx hash", tx.Hash().String(), "err", err)
// Check whether the error message contains "nonce" and reset the nonce;
// only reset the nonce when the failure is not from a resubmit
if strings.Contains(err.Error(), "nonce") && overrideNonce == nil {
s.auths.resetNonce(context.Background(), auth)
}
s.sendTxErrCh <- err
return
}
// update nonce when it is not from resubmit
if overrideNonce == nil {
auth.Nonce = big.NewInt(int64(nonce + 1))
}
// update nonce
s.auth.Nonce = big.NewInt(int64(nonce + 1))
return
}
func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts, tx *types.Transaction) (*types.Transaction, error) {
func (s *Sender) resubmitTransaction(feeData *FeeData, tx *types.Transaction) (*types.Transaction, error) {
// @todo move query out of lock scope
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
@@ -359,34 +334,19 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
feeData.gasTipCap = gasTipCap
}
nonce := tx.Nonce()
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
return s.createAndSendTx(feeData, tx.To(), tx.Value(), tx.Data())
}
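Only the escalation inputs are visible in this hunk, so as a rough sketch of the intended arithmetic (an assumption based on the config field names, not the exact code): the resubmitted fee is the previous one scaled by EscalateMultipleNum/EscalateMultipleDen and capped at MaxGasPrice.
// escalateFee sketches the bump applied to gasPrice (or gasFeeCap/gasTipCap for dynamic-fee txs).
func escalateFee(old, num, den, maxGasPrice *big.Int) *big.Int {
    bumped := new(big.Int).Mul(old, num)
    bumped.Div(bumped, den)
    if bumped.Cmp(maxGasPrice) > 0 { // never exceed the configured ceiling
        bumped.Set(maxGasPrice)
    }
    return bumped
}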
// checkPendingTransaction checks the confirmation status of pending transactions against the latest confirmed block number.
// If a transaction hasn't been confirmed after a certain number of blocks, it will be resubmitted with an increased gas price.
func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64) {
// CheckPendingTransaction checks pending transactions, given the number of blocks to wait before confirmation.
func (s *Sender) CheckPendingTransaction(header *types.Header) {
number := header.Number.Uint64()
atomic.StoreUint64(&s.blockNumber, number)
if s.config.TxType == DynamicFeeTxType {
if header.BaseFee != nil {
atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
} else {
log.Error("DynamicFeeTxType not supported, header.BaseFee nil")
}
}
atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
s.pendingTxs.Range(func(key, value interface{}) bool {
// ignore empty id, since we use empty id to occupy pending task
if value == nil || reflect.ValueOf(value).IsNil() {
return true
}
pending := value.(*PendingTransaction)
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
if receipt.BlockNumber.Uint64() <= confirmed {
if number >= receipt.BlockNumber.Uint64()+s.config.Confirmations {
s.pendingTxs.Delete(key)
// send confirm message
s.confirmCh <- &Confirmation{
@@ -397,43 +357,9 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
}
} else if s.config.EscalateBlocks+pending.submitAt < number {
var tx *types.Transaction
tx, err := s.resubmitTransaction(pending.feeData, pending.signer, pending.tx)
tx, err := s.resubmitTransaction(pending.feeData, pending.tx)
if err != nil {
// If account pool is empty, it will try again in next loop.
if !errors.Is(err, ErrNoAvailableAccount) {
log.Error("failed to resubmit transaction, reset submitAt", "tx hash", pending.tx.Hash().String(), "err", err)
}
// This means one of the old transactions is confirmed.
// One scenario is:
// 1. Initially, we replace the tx three times and submit it to the local node.
// Currently, we keep the last tx hash in memory.
// 2. Another node packed the 2-nd or 3-rd tx, and the local node has received that block by now.
// 3. When we resubmit the 4-th tx, we get a nonce error.
// 4. We need to check the status of the 3-rd tx stored in our memory:
// 4.1 If the 3-rd tx is packed, we get a receipt and the 3-rd is marked as confirmed.
// 4.2 If the 2-nd tx is packed, we get nothing from the `TransactionReceipt` call. Since we
// cannot do anything about it, we just log some information. In this case, the caller
// of `sender.SendTransaction` should write extra code to handle the situation.
// Another scenario is the private key leaking and someone sending a transaction with the same nonce.
// We need to stop the program and handle the situation manually.
if strings.Contains(err.Error(), "nonce") {
// This key can be deleted
s.pendingTxs.Delete(key)
// Try get receipt by the latest replaced tx hash
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
// send confirm message
s.confirmCh <- &Confirmation{
ID: pending.id,
IsSuccessful: receipt.Status == types.ReceiptStatusSuccessful,
TxHash: pending.tx.Hash(),
}
} else {
// The receipt can be nil since the confirmed transaction may not be the latest one.
// We just ignore it, the caller of the sender pool should handle this situation.
log.Warn("Pending transaction is confirmed by one of the replaced transactions", "key", key, "signer", pending.signer.From, "nonce", pending.tx.Nonce())
}
}
log.Error("failed to resubmit transaction, reset submitAt", "tx hash", pending.tx.Hash().String(), "err", err)
} else {
// flush submitAt
pending.tx = tx
@@ -445,13 +371,10 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
}
// Loop is the main event loop
func (s *Sender) loop(ctx context.Context) {
checkTick := time.NewTicker(time.Duration(s.config.CheckPendingTime) * time.Second)
func (s *Sender) loop() {
t := time.Duration(s.config.CheckPendingTime) * time.Second
checkTick := time.NewTicker(t)
defer checkTick.Stop()
checkBalanceTicker := time.NewTicker(time.Minute * 10)
defer checkBalanceTicker.Stop()
for {
select {
case <-checkTick.C:
@@ -460,19 +383,18 @@ func (s *Sender) loop(ctx context.Context) {
log.Error("failed to get latest head", "err", err)
continue
}
confirmed, err := utils.GetLatestConfirmedBlockNumber(s.ctx, s.client, s.config.Confirmations)
if err != nil {
log.Error("failed to get latest confirmed block number", "err", err)
continue
s.CheckPendingTransaction(header)
case err := <-s.sendTxErrCh:
// redress nonce
if strings.Contains(err.Error(), "nonce") {
if nonce, err := s.client.PendingNonceAt(s.ctx, s.auth.From); err != nil {
log.Error("failed to get pending nonce", "address", s.auth.From.String(), "err", err)
} else {
s.mu.Lock()
s.auth.Nonce = big.NewInt(int64(nonce))
s.mu.Unlock()
}
}
s.checkPendingTransaction(header, confirmed)
case <-checkBalanceTicker.C:
// Check and set balance.
_ = s.auths.checkAndSetBalances(ctx)
case <-ctx.Done():
return
case <-s.stopCh:
return
}


@@ -3,7 +3,6 @@ package sender_test
import (
"context"
"crypto/ecdsa"
"errors"
"math/big"
"strconv"
"testing"
@@ -14,113 +13,107 @@ import (
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock"
"scroll-tech/bridge/sender"
)
const TXBatch = 50
const TX_BATCH = 100
var (
privateKeys []*ecdsa.PrivateKey
cfg *config.Config
l2gethImg docker.ImgInstance
TestConfig = &mock.TestConfig{
L1GethTestConfig: mock.L1GethTestConfig{
HPort: 0,
WPort: 8576,
},
}
l1gethImg docker.ImgInstance
private *ecdsa.PrivateKey
)
func setupEnv(t *testing.T) {
var err error
cfg, err = config.NewConfig("../config.json")
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
priv, err := crypto.HexToECDSA("1212121212121212121212121212121212121212121212121212121212121212")
prv, err := crypto.HexToECDSA(cfg.L2Config.RelayerConfig.PrivateKey)
assert.NoError(t, err)
// Load default private key.
privateKeys = []*ecdsa.PrivateKey{priv}
l2gethImg = docker.NewTestL2Docker(t)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
private = prv
l1gethImg = mock.NewTestL1Docker(t, TestConfig)
}
func TestSender(t *testing.T) {
func TestFunction(t *testing.T) {
// Setup
setupEnv(t)
t.Run("test Run sender", func(t *testing.T) {
// set config
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
t.Run("test 3 account sender", func(t *testing.T) { testBatchSender(t, 3) })
t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
// create newSender
newSender, err := sender.NewSender(context.Background(), cfg.L2Config.RelayerConfig.SenderConfig, private)
assert.NoError(t, err)
defer newSender.Stop()
assert.NoError(t, err)
// send transactions
idCache := cmap.New()
confirmCh := newSender.ConfirmChan()
var (
eg errgroup.Group
errCh chan error
)
go func() {
for i := 0; i < TX_BATCH; i++ {
//toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
toAddr := common.BigToAddress(big.NewInt(int64(i + 1000)))
id := strconv.Itoa(i + 1000)
eg.Go(func() error {
txHash, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
if err != nil {
t.Error("failed to send tx", "err", err)
return err
}
t.Log("successful send a tx", "ID", id, "tx hash", txHash.String())
idCache.Set(id, struct{}{})
return nil
})
}
errCh <- eg.Wait()
}()
// avoid the 10-minute default timeout causing the testcase to panic
after := time.After(60 * time.Second)
for {
select {
case cmsg := <-confirmCh:
t.Log("get confirmations of", "ID: ", cmsg.ID, "status: ", cmsg.IsSuccessful)
assert.Equal(t, true, cmsg.IsSuccessful)
_, exist := idCache.Pop(cmsg.ID)
assert.Equal(t, true, exist)
// Receive all confirmed txs.
if idCache.Count() == 0 {
return
}
case err := <-errCh:
if err != nil {
t.Errorf("failed to send tx, err: %v", err)
return
}
assert.NoError(t, err)
case <-after:
t.Logf("newSender test failed because timeout")
t.FailNow()
}
}
})
// Teardown
t.Cleanup(func() {
assert.NoError(t, l2gethImg.Stop())
assert.NoError(t, l1gethImg.Stop())
})
}
func testBatchSender(t *testing.T, batchSize int) {
for len(privateKeys) < batchSize {
priv, err := crypto.GenerateKey()
if err != nil {
t.Fatal(err)
}
privateKeys = append(privateKeys, priv)
}
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
senderCfg.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
if err != nil {
t.Fatal(err)
}
defer newSender.Stop()
// send transactions
var (
eg errgroup.Group
idCache = cmap.New()
confirmCh = newSender.ConfirmChan()
)
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
index := idx
eg.Go(func() error {
for i := 0; i < TXBatch; i++ {
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := strconv.Itoa(i + index*1000)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
if errors.Is(err, sender.ErrNoAvailableAccount) {
<-time.After(time.Second)
continue
}
if err != nil {
return err
}
idCache.Set(id, struct{}{})
}
return nil
})
}
if err := eg.Wait(); err != nil {
t.Error(err)
}
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
// avoid the 10-minute default timeout causing the testcase to panic
after := time.After(80 * time.Second)
for {
select {
case cmsg := <-confirmCh:
assert.Equal(t, true, cmsg.IsSuccessful)
_, exist := idCache.Pop(cmsg.ID)
assert.Equal(t, true, exist)
// Receive all confirmed txs.
if idCache.Count() == 0 {
return
}
case <-after:
t.Error("newSender test failed because of timeout")
return
}
}
}


@@ -1,206 +0,0 @@
package tests
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/common/docker"
)
var (
// config
cfg *config.Config
// private key
privateKey *ecdsa.PrivateKey
// docker container handlers.
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
// clients
l1Client *ethclient.Client
l2Client *ethclient.Client
// auth
l1Auth *bind.TransactOpts
l2Auth *bind.TransactOpts
// l1 messenger contract
l1MessengerInstance *mock_bridge.MockBridgeL1
l1MessengerAddress common.Address
// l1 rollup contract
l1RollupInstance *mock_bridge.MockBridgeL1
l1RollupAddress common.Address
// l2 messenger contract
l2MessengerInstance *mock_bridge.MockBridgeL2
l2MessengerAddress common.Address
)
func setupEnv(t *testing.T) {
var err error
privateKey, err = crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
assert.NoError(t, err)
messagePrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121213"))
assert.NoError(t, err)
rollupPrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121214"))
assert.NoError(t, err)
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L1Config.Confirmations = rpc.LatestBlockNumber
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
cfg.L2Config.Confirmations = rpc.LatestBlockNumber
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
cfg.L1Config.Endpoint = l1gethImg.Endpoint()
// Create l2geth container.
l2gethImg = docker.NewTestL2Docker(t)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
cfg.DBConfig.DSN = dbImg.Endpoint()
// Create l1geth and l2geth client.
l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)
assert.NoError(t, err)
l2Client, err = ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
// Create l1 and l2 auth
l1Auth = prepareAuth(t, l1Client, privateKey)
l2Auth = prepareAuth(t, l2Client, privateKey)
// send some balance to message and rollup sender
transferEther(t, l1Auth, l1Client, messagePrivateKey)
transferEther(t, l1Auth, l1Client, rollupPrivateKey)
transferEther(t, l2Auth, l2Client, messagePrivateKey)
transferEther(t, l2Auth, l2Client, rollupPrivateKey)
}
func transferEther(t *testing.T, auth *bind.TransactOpts, client *ethclient.Client, privateKey *ecdsa.PrivateKey) {
targetAddress := crypto.PubkeyToAddress(privateKey.PublicKey)
gasPrice, err := client.SuggestGasPrice(context.Background())
assert.NoError(t, err)
gasPrice.Mul(gasPrice, big.NewInt(2))
// Get pending nonce
nonce, err := client.PendingNonceAt(context.Background(), auth.From)
assert.NoError(t, err)
// 200 ether should be enough
value, ok := big.NewInt(0).SetString("0xad78ebc5ac6200000", 0)
assert.Equal(t, ok, true)
tx := types.NewTx(&types.LegacyTx{
Nonce: nonce,
To: &targetAddress,
Value: value,
Gas: 500000,
GasPrice: gasPrice,
})
signedTx, err := auth.Signer(auth.From, tx)
assert.NoError(t, err)
err = client.SendTransaction(context.Background(), signedTx)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), client, signedTx)
assert.NoError(t, err)
if receipt.Status != types.ReceiptStatusSuccessful {
t.Fatalf("Call failed")
}
}
func free(t *testing.T) {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
if l1gethImg != nil {
assert.NoError(t, l1gethImg.Stop())
}
if l2gethImg != nil {
assert.NoError(t, l2gethImg.Stop())
}
}
func prepareContracts(t *testing.T) {
var err error
var tx *types.Transaction
// L1 messenger contract
_, tx, l1MessengerInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
assert.NoError(t, err)
l1MessengerAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)
// L1 rollup contract
_, tx, l1RollupInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
assert.NoError(t, err)
l1RollupAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)
// L2 messenger contract
_, tx, l2MessengerInstance, err = mock_bridge.DeployMockBridgeL2(l2Auth, l2Client)
assert.NoError(t, err)
l2MessengerAddress, err = bind.WaitDeployed(context.Background(), l2Client, tx)
assert.NoError(t, err)
cfg.L1Config.L1MessengerAddress = l1MessengerAddress
cfg.L1Config.RollupContractAddress = l1RollupAddress
cfg.L1Config.RelayerConfig.MessengerContractAddress = l2MessengerAddress
cfg.L2Config.L2MessengerAddress = l2MessengerAddress
cfg.L2Config.RelayerConfig.MessengerContractAddress = l1MessengerAddress
cfg.L2Config.RelayerConfig.RollupContractAddress = l1RollupAddress
}
func prepareAuth(t *testing.T, client *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID)
assert.NoError(t, err)
auth.Value = big.NewInt(0) // in wei
assert.NoError(t, err)
return auth
}
func TestFunction(t *testing.T) {
setupEnv(t)
// l1 rollup and watch rollup events
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
// l2 message
t.Run("testRelayL2MessageSucceed", testRelayL2MessageSucceed)
t.Cleanup(func() {
free(t)
})
}


@@ -1,174 +0,0 @@
package tests
import (
"context"
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
)
func testRelayL2MessageSucceed(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t)
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L2Watcher
confirmations := rpc.LatestBlockNumber
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
// send message through l2 messenger contract
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
assert.NoError(t, err)
sendTx, err := l2MessengerInstance.SendMessage(l2Auth, l1Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
assert.NoError(t, err)
sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
assert.NoError(t, err)
if sendReceipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// l2 watch process events
l2Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
// check db status
msg, err := db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgPending)
assert.Equal(t, msg.Sender, l2Auth.From.String())
assert.Equal(t, msg.Target, l1Auth.From.String())
// add fake blocks
traces := []*types.BlockTrace{
{
Header: &types.Header{
Number: sendReceipt.BlockNumber,
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
StorageTrace: &types.StorageTrace{},
},
}
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
// add fake batch
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
traces[0].Header.ParentHash.String(), 1, 194676)
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[0].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
// add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
// process pending batch and check status
l2Relayer.ProcessPendingBatches()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch CommitBatch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, status)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches()
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch FinalizeBatch events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, status)
// process l2 messages
l2Relayer.ProcessSavedEvents()
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgSubmitted)
relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, true, relayTxHash.Valid)
relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx)
assert.NoError(t, err)
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
err = l1Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgConfirmed)
}


@@ -1,132 +0,0 @@
package tests
import (
"context"
"math/big"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"testing"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
)
func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t)
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
// add some blocks to db
var traces []*types.BlockTrace
var parentHash common.Hash
for i := 1; i <= 10; i++ {
header := types.Header{
Number: big.NewInt(int64(i)),
ParentHash: parentHash,
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
}
traces = append(traces, &types.BlockTrace{
Header: &header,
StorageTrace: &types.StorageTrace{},
})
parentHash = header.Hash()
}
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
// add one batch to db
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
Number: traces[1].Header.Number.Uint64(),
Hash: traces[1].Header.Hash().String(),
ParentHash: traces[1].Header.ParentHash.String(),
},
traces[0].Header.ParentHash.String(), 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[1].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
// process pending batch and check status
l2Relayer.ProcessPendingBatches()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, status)
// add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches()
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, status)
}
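For reference, the batch lifecycle this test walks through can be restated compactly. A minimal sketch, assuming the action and status names are exactly those asserted above (the plain strings are only an illustration of the ordering, not the orm package's actual types):

package main

import "fmt"

// Restates the rollup status transitions exercised by the test above.
func main() {
	steps := []struct{ action, status string }{
		{"ProcessPendingBatches", "RollupCommitting"},
		{"FetchContractEvent (commit tx block)", "RollupCommitted"},
		{"ProcessCommittedBatches (after proof is verified)", "RollupFinalizing"},
		{"FetchContractEvent (finalize tx block)", "RollupFinalized"},
	}
	for _, s := range steps {
		fmt.Printf("%-50s -> %s\n", s.action, s.status)
	}
}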

View File

@@ -1,56 +0,0 @@
package utils
import (
"context"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
)
type ethClient interface {
BlockNumber(ctx context.Context) (uint64, error)
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
}
// GetLatestConfirmedBlockNumber returns the latest confirmed block number for the given rpc.BlockNumber confirmation type.
func GetLatestConfirmedBlockNumber(ctx context.Context, client ethClient, confirm rpc.BlockNumber) (uint64, error) {
switch {
case confirm == rpc.SafeBlockNumber || confirm == rpc.FinalizedBlockNumber:
var tag *big.Int
if confirm == rpc.FinalizedBlockNumber {
tag = big.NewInt(int64(rpc.FinalizedBlockNumber))
} else {
tag = big.NewInt(int64(rpc.SafeBlockNumber))
}
header, err := client.HeaderByNumber(ctx, tag)
if err != nil {
return 0, err
}
if !header.Number.IsInt64() {
return 0, fmt.Errorf("received invalid block confirm: %v", header.Number)
}
return header.Number.Uint64(), nil
case confirm == rpc.LatestBlockNumber:
number, err := client.BlockNumber(ctx)
if err != nil {
return 0, err
}
return number, nil
case confirm.Int64() >= 0: // If it's a non-negative integer, treat it as a fixed number of confirmations.
number, err := client.BlockNumber(ctx)
if err != nil {
return 0, err
}
cfmNum := uint64(confirm.Int64())
if number >= cfmNum {
return number - cfmNum, nil
}
return 0, nil
default:
return 0, fmt.Errorf("unknown confirmation type: %v", confirm)
}
}
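A minimal sketch of how the numeric-confirmation branch above behaves, using a stub client; the stub type and the numbers are illustrative (the safe/finalized branches query the corresponding header tag instead):

package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/core/types"

	"scroll-tech/bridge/utils"
)

// stubClient satisfies the ethClient interface above with a fixed tip height.
type stubClient struct{ latest uint64 }

func (c stubClient) BlockNumber(ctx context.Context) (uint64, error) { return c.latest, nil }

func (c stubClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
	return &types.Header{Number: new(big.Int).SetUint64(c.latest)}, nil
}

func main() {
	// With the latest block at 107 and 6 required confirmations,
	// the latest confirmed block is 107 - 6 = 101.
	confirmed, err := utils.GetLatestConfirmedBlockNumber(context.Background(), stubClient{latest: 107}, 6)
	fmt.Println(confirmed, err) // 101 <nil>
}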

View File

@@ -1,107 +0,0 @@
package utils_test
import (
"context"
"encoding/json"
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/go-ethereum/common/math"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/bridge/utils"
)
var (
tests = []struct {
input string
mustFail bool
expected rpc.BlockNumber
}{
{`"0x"`, true, rpc.BlockNumber(0)},
{`"0x0"`, false, rpc.BlockNumber(0)},
{`"0X1"`, false, rpc.BlockNumber(1)},
{`"0x00"`, true, rpc.BlockNumber(0)},
{`"0x01"`, true, rpc.BlockNumber(0)},
{`"0x1"`, false, rpc.BlockNumber(1)},
{`"0x12"`, false, rpc.BlockNumber(18)},
{`"0x7fffffffffffffff"`, false, rpc.BlockNumber(math.MaxInt64)},
{`"0x8000000000000000"`, true, rpc.BlockNumber(0)},
{"0", true, rpc.BlockNumber(0)},
{`"ff"`, true, rpc.BlockNumber(0)},
{`"safe"`, false, rpc.SafeBlockNumber},
{`"finalized"`, false, rpc.FinalizedBlockNumber},
{`"pending"`, false, rpc.PendingBlockNumber},
{`"latest"`, false, rpc.LatestBlockNumber},
{`"earliest"`, false, rpc.EarliestBlockNumber},
{`someString`, true, rpc.BlockNumber(0)},
{`""`, true, rpc.BlockNumber(0)},
{``, true, rpc.BlockNumber(0)},
}
)
func TestUnmarshalJSON(t *testing.T) {
for i, test := range tests {
var num rpc.BlockNumber
err := json.Unmarshal([]byte(test.input), &num)
if test.mustFail && err == nil {
t.Errorf("Test %d should fail", i)
continue
}
if !test.mustFail && err != nil {
t.Errorf("Test %d should pass but got err: %v", i, err)
continue
}
if num != test.expected {
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
}
}
}
func TestMarshalJSON(t *testing.T) {
for i, test := range tests {
var num rpc.BlockNumber
want, err := json.Marshal(test.expected)
assert.Nil(t, err)
if !test.mustFail {
err = json.Unmarshal([]byte(test.input), &num)
assert.Nil(t, err)
got, err := json.Marshal(&num)
assert.Nil(t, err)
if string(want) != string(got) {
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
}
}
}
}
type MockEthClient struct {
val uint64
}
func (e MockEthClient) BlockNumber(ctx context.Context) (uint64, error) {
return e.val, nil
}
func (e MockEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
return &types.Header{Number: new(big.Int).SetUint64(e.val)}, nil
}
func TestGetLatestConfirmedBlockNumber(t *testing.T) {
ctx := context.Background()
client := MockEthClient{}
client.val = 5
confirmed, err := utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
assert.Nil(t, err)
assert.Equal(t, uint64(0), confirmed)
client.val = 7
confirmed, err = utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
assert.Nil(t, err)
assert.Equal(t, uint64(1), confirmed)
}

View File

@@ -1,69 +0,0 @@
package utils
import (
"bytes"
"math/big"
"github.com/iden3/go-iden3-crypto/keccak256"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/math"
)
// Keccak2 computes the keccak256 hash of the concatenation of two bytes32 values
func Keccak2(a common.Hash, b common.Hash) common.Hash {
return common.BytesToHash(keccak256.Hash(append(a.Bytes()[:], b.Bytes()[:]...)))
}
func encodePacked(input ...[]byte) []byte {
return bytes.Join(input, nil)
}
// ComputeMessageHash computes the message hash
func ComputeMessageHash(
sender common.Address,
target common.Address,
value *big.Int,
fee *big.Int,
deadline *big.Int,
message []byte,
messageNonce *big.Int,
) common.Hash {
packed := encodePacked(
sender.Bytes(),
target.Bytes(),
math.U256Bytes(value),
math.U256Bytes(fee),
math.U256Bytes(deadline),
math.U256Bytes(messageNonce),
message,
)
return common.BytesToHash(keccak256.Hash(packed))
}
// BufferToUint256Be converts a byte array to a uint256 array, assuming big-endian encoding
func BufferToUint256Be(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
buffer256[i] = big.NewInt(0)
for j := 0; j < 32; j++ {
buffer256[i] = buffer256[i].Lsh(buffer256[i], 8)
buffer256[i] = buffer256[i].Add(buffer256[i], big.NewInt(int64(buffer[i*32+j])))
}
}
return buffer256
}
// BufferToUint256Le converts a byte array to a uint256 array, assuming little-endian encoding
func BufferToUint256Le(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
v := big.NewInt(0)
shft := big.NewInt(1)
for j := 0; j < 32; j++ {
v = new(big.Int).Add(v, new(big.Int).Mul(shft, big.NewInt(int64(buffer[i*32+j]))))
shft = new(big.Int).Mul(shft, big.NewInt(256))
}
buffer256[i] = v
}
return buffer256
}
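A small illustration of the endianness difference between the two helpers above; the buffer value is chosen only to make the contrast obvious:

package main

import (
	"fmt"

	"scroll-tech/bridge/utils"
)

func main() {
	// One 32-byte word whose last byte is 0x01 and all other bytes are zero.
	buf := make([]byte, 32)
	buf[31] = 0x01

	be := utils.BufferToUint256Be(buf) // big-endian: the last byte is least significant
	le := utils.BufferToUint256Le(buf) // little-endian: the last byte is most significant

	fmt.Println(be[0]) // 1
	fmt.Println(le[0]) // 2^248 (a 75-digit decimal number)
}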

View File

@@ -1,41 +0,0 @@
package utils_test
import (
"math/big"
"testing"
"scroll-tech/bridge/utils"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func TestKeccak2(t *testing.T) {
hash := utils.Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"))
if hash != common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") {
t.Fatalf("Invalid keccak, want %s, got %s", "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", hash.Hex())
}
hash = utils.Keccak2(common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"), common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"))
if hash != common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30") {
t.Fatalf("Invalid keccak, want %s, got %s", "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", hash.Hex())
}
hash = utils.Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
if hash != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
t.Fatalf("Invalid keccak, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", hash.Hex())
}
}
func TestComputeMessageHash(t *testing.T) {
hash := utils.ComputeMessageHash(
common.HexToAddress("0xd7227113b92e537aeda220d5a2f201b836e5879d"),
common.HexToAddress("0x47c02b023b6787ef4e503df42bbb1a94f451a1c0"),
big.NewInt(5000000000000000),
big.NewInt(0),
big.NewInt(1674204924),
common.Hex2Bytes("8eaac8a30000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000000000000000000000000000000011c37937e0800000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000"),
big.NewInt(30706),
)
assert.Equal(t, hash.String(), "0x920e59f62ca89a0f481d44961c55d299dd20c575693692d61fdf3ca579d8edf3")
}

View File

@@ -179,13 +179,6 @@ linters:
- depguard
- gocyclo
- unparam
- exportloopref
- sqlclosecheck
- rowserrcheck
- durationcheck
- bidichk
- typecheck
- unused
enable-all: false
disable:
@@ -207,7 +200,16 @@ issues:
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- gocyclo
- errcheck
- dupl
- gosec
# Exclude known linters from partially hard-vendored code,
# which is impossible to exclude via "nolint" comments.
- path: internal/hmac/
text: "weak cryptographic primitive"
linters:
- gosec
# Exclude some staticcheck messages
@@ -215,6 +217,18 @@ issues:
- staticcheck
text: "SA9003:"
- linters:
- golint
text: "package comment should be of the form"
- linters:
- golint
text: "don't use ALL_CAPS in Go names;"
- linters:
- golint
text: "don't use underscores in Go names;"
# Exclude lll issues for long lines with go:generate
- linters:
- lll

View File

@@ -1,26 +1,13 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
# Build bridge in a stock Go builder container
FROM scrolltech/go-builder:1.18 as builder
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
COPY ./ /
# Build bridge
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd && go build -v -p 4 -o /bin/bridge
RUN cd /bridge/cmd && go build -v -p 4 -o bridge
# Pull bridge into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/bridge /bin/
COPY --from=builder /bridge/cmd/bridge /bin/
ENTRYPOINT ["bridge"]

View File

@@ -1,5 +1,7 @@
assets/
coordinator/
docs/
integration-test/
l2geth/
roller/
rpc-gateway/
*target/*

View File

@@ -1,49 +1,13 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-12-10 as chef
WORKDIR app
# Build scroll in a stock Go builder container
FROM scrolltech/go-builder:1.18 as builder
FROM chef as planner
COPY ./common/libzkp/impl/ .
RUN cargo chef prepare --recipe-path recipe.json
COPY ./ /
FROM chef as zkp-builder
COPY ./common/libzkp/impl/rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
RUN cd /coordinator/cmd && go build -v -p 4 -o coordinator
COPY ./common/libzkp/impl .
RUN cargo build --release
RUN find ./ | grep libzktrie.so | xargs -i cp {} /app/target/release/
# Pull scroll into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /coordinator/cmd/coordinator /bin/
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-12-10 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build coordinator
FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.a ./coordinator/verifier/lib/
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/verifier/lib/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv verifier/lib /bin/
# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/verifier/lib
# ENV CHAIN_ID=534353
RUN mkdir -p /src/coordinator/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/verifier/lib
COPY --from=builder /bin/coordinator /bin/
ENTRYPOINT ["/bin/coordinator"]
ENTRYPOINT ["coordinator"]

View File

@@ -1,6 +1,8 @@
assets/
bridge/
contracts/
docs/
integration-test/
l2geth/
roller/
rpc-gateway/
*target/*

View File

@@ -1,26 +0,0 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build db_cli
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/database/cmd && go build -v -p 4 -o /bin/db_cli
# Pull db_cli into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/db_cli /bin/
ENTRYPOINT ["db_cli"]

View File

@@ -1,6 +0,0 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*

View File

@@ -1,16 +1,12 @@
GO_VERSION := 1.18
PYTHON_VERSION := 3.10
RUST_VERSION := nightly-2022-12-10
RUST_VERSION := nightly-2022-08-23
.PHONY: all go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder py-runner
.PHONY: all go-builder rust-builder rust-alpine-builder go-rust-builder py-runner
go-rust-builder:
docker build -t scrolltech/go-rust-builder:latest -f go-rust-builder.Dockerfile ./
docker image tag scrolltech/go-rust-builder:latest scrolltech/go-rust-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
go-alpine-builder:
docker build -t scrolltech/go-alpine-builder:latest -f go-alpine-builder.Dockerfile ./
docker image tag scrolltech/go-alpine-builder:latest scrolltech/go-alpine-builder:$(GO_VERSION)
go-builder:
docker build -t scrolltech/go-builder:latest -f go-builder.Dockerfile ./
docker image tag scrolltech/go-builder:latest scrolltech/go-builder:$(GO_VERSION)
rust-builder:
docker build -t scrolltech/rust-builder:latest -f rust-builder.Dockerfile ./
@@ -20,25 +16,23 @@ rust-alpine-builder:
docker build -t scrolltech/rust-alpine-builder:latest -f rust-alpine-builder.Dockerfile ./
docker image tag scrolltech/rust-alpine-builder:latest scrolltech/rust-alpine-builder:$(RUST_VERSION)
go-rust-alpine-builder:
docker build -t scrolltech/go-rust-alpine-builder:latest -f go-rust-alpine-builder.Dockerfile ./
docker image tag scrolltech/go-rust-alpine-builder:latest scrolltech/go-rust-alpine-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
go-rust-builder:
docker build -t scrolltech/go-rust-builder:latest -f go-rust-builder.Dockerfile ./
docker image tag scrolltech/go-rust-builder:latest scrolltech/go-rust-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
py-runner:
docker build -t scrolltech/py-runner:latest -f py-runner.Dockerfile ./
docker image tag scrolltech/py-runner:latest scrolltech/py-runner:$(PYTHON_VERSION)
all: go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder py-runner
all: go-builder rust-builder rust-alpine-builder go-rust-builder py-runner
publish:
docker push scrolltech/go-alpine-builder:latest
docker push scrolltech/go-alpine-builder:$(GO_VERSION)
docker push scrolltech/go-builder:latest
docker push scrolltech/go-builder:$(GO_VERSION)
docker push scrolltech/rust-builder:latest
docker push scrolltech/rust-builder:$(RUST_VERSION)
docker push scrolltech/rust-alpine-builder:latest
docker push scrolltech/rust-alpine-builder:$(RUST_VERSION)
docker push scrolltech/go-rust-alpine-builder:latest
docker push scrolltech/go-rust-alpine-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
docker push scrolltech/go-rust-builder:latest
docker push scrolltech/go-rust-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
docker push scrolltech/py-runner:latest

View File

@@ -4,4 +4,4 @@ FROM golang:1.18-alpine
# RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates openssl-dev
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates

View File

@@ -1,35 +0,0 @@
FROM golang:1.18-alpine
ARG CARGO_CHEF_TAG=0.1.41
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates openssl-dev
# RUN apk add --no-cache libc6-compat
# RUN apk add --no-cache gcompat
ENV RUSTUP_HOME=/usr/local/rustup \
CARGO_HOME=/usr/local/cargo \
PATH=/usr/local/cargo/bin:$PATH \
CARGO_NET_GIT_FETCH_WITH_CLI=true
RUN set -eux; \
apkArch="$(apk --print-arch)"; \
case "$apkArch" in \
x86_64) rustArch='x86_64-unknown-linux-musl' ;; \
aarch64) rustArch='aarch64-unknown-linux-musl' ;; \
*) echo >&2 "unsupported architecture: $apkArch"; exit 1 ;; \
esac; \
\
url="https://static.rust-lang.org/rustup/dist/${rustArch}/rustup-init"; \
wget "$url"; \
chmod +x rustup-init;
RUN ./rustup-init -y --no-modify-path --default-toolchain ${DEFAULT_RUST_TOOLCHAIN}; \
rm rustup-init; \
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
rustup --version; \
cargo --version; \
rustc --version;
RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
&& rm -rf $CARGO_HOME/registry/

View File

@@ -1,34 +1,31 @@
FROM ubuntu:20.04
FROM golang:1.18-alpine
ARG CARGO_CHEF_TAG=0.1.41
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
RUN apt-get update && ln -fs /usr/share/zoneinfo/America/New_York /etc/localtime
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates
# Install basic packages
RUN apt-get install build-essential curl wget git pkg-config -y
ENV RUSTUP_HOME=/usr/local/rustup \
CARGO_HOME=/usr/local/cargo \
PATH=/usr/local/cargo/bin:$PATH
# Install dev-packages
RUN apt-get install libclang-dev libssl-dev llvm -y
RUN set -eux; \
apkArch="$(apk --print-arch)"; \
case "$apkArch" in \
x86_64) rustArch='x86_64-unknown-linux-musl' ;; \
aarch64) rustArch='aarch64-unknown-linux-musl' ;; \
*) echo >&2 "unsupported architecture: $apkArch"; exit 1 ;; \
esac; \
\
url="https://static.rust-lang.org/rustup/dist/${rustArch}/rustup-init"; \
wget "$url"; \
chmod +x rustup-init;
# Install Rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo
# Add Toolchain
RUN rustup toolchain install nightly-2022-12-10
# TODO: make this ARG
ENV CARGO_CHEF_TAG=0.1.41
RUN ./rustup-init -y --no-modify-path --default-toolchain ${DEFAULT_RUST_TOOLCHAIN}; \
rm rustup-init; \
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
rustup --version; \
cargo --version; \
rustc --version;
RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
&& rm -rf $CARGO_HOME/registry/
# Install Go
RUN rm -rf /usr/local/go
# for 1.17
# RUN wget https://go.dev/dl/go1.17.13.linux-amd64.tar.gz
# RUN tar -C /usr/local -xzf go1.17.13.linux-amd64.tar.gz
# for 1.18
RUN wget https://go.dev/dl/go1.18.9.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go1.18.9.linux-amd64.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"

View File

@@ -1,19 +1,16 @@
ARG ALPINE_VERSION=3.15
FROM alpine:${ALPINE_VERSION}
ARG CARGO_CHEF_TAG=0.1.41
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
RUN apk add --no-cache \
ca-certificates \
openssl-dev \
gcc \
git \
musl-dev
ENV RUSTUP_HOME=/usr/local/rustup \
CARGO_HOME=/usr/local/cargo \
PATH=/usr/local/cargo/bin:$PATH \
CARGO_NET_GIT_FETCH_WITH_CLI=true
PATH=/usr/local/cargo/bin:$PATH
RUN set -eux; \
apkArch="$(apk --print-arch)"; \

View File

@@ -3,7 +3,7 @@ FROM ubuntu:20.04
RUN apt-get update && ln -fs /usr/share/zoneinfo/America/New_York /etc/localtime
# Install basic packages
RUN apt-get install build-essential curl wget git pkg-config -y
RUN apt-get install build-essential curl git pkg-config -y
# Install dev-packages
RUN apt-get install libclang-dev libssl-dev llvm -y
@@ -13,4 +13,4 @@ RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
# Add Toolchain
RUN rustup toolchain install nightly-2022-12-10
RUN rustup toolchain install nightly-2022-08-23

View File

@@ -1,14 +0,0 @@
#!/bin/bash
set -uex
${GOROOT}/bin/bin/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.db.txt > coverage.db.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.common.txt > coverage.common.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml
npx cobertura-merge -o cobertura.xml \
package1=coverage.bridge.xml \
package2=coverage.db.xml \
package3=coverage.common.xml \
package4=coverage.coordinator.xml \
package5=coverage.integration.xml

View File

@@ -1,63 +0,0 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
pipeline {
agent any
options {
timeout (20)
}
tools {
go 'go-1.18'
nodejs "nodejs"
}
environment {
GO111MODULE = 'on'
PATH="/home/ubuntu/.cargo/bin:$PATH"
// LOG_DOCKER = 'true'
}
stages {
stage('Tag') {
steps {
script {
TAGNAME = sh(returnStdout: true, script: 'git tag -l --points-at HEAD')
sh "echo ${TAGNAME}"
// ...
}
}
}
stage('Build') {
environment {
// Extract the username and password of our credentials into "DOCKER_CREDENTIALS_USR" and "DOCKER_CREDENTIALS_PSW".
// (NOTE 1: DOCKER_CREDENTIALS will be set to "your_username:your_password".)
// The new variables will always be YOUR_VARIABLE_NAME + _USR and _PSW.
// (NOTE 2: You can't print credentials in the pipeline for security reasons.)
DOCKER_CREDENTIALS = credentials('dockerhub')
}
steps {
withCredentials([usernamePassword(credentialsId: "${credentialDocker}", passwordVariable: 'dockerPassword', usernameVariable: 'dockerUser')]) {
// Use a scripted pipeline.
script {
stage('Push image') {
if (TAGNAME == ""){
return;
}
sh "docker login --username=${dockerUser} --password=${dockerPassword}"
sh "make -C bridge docker"
sh "make -C coordinator docker"
sh "docker tag scrolltech/bridge:latest scrolltech/bridge:${TAGNAME}"
sh "docker tag scrolltech/coordinator:latest scrolltech/coordinator:${TAGNAME}"
sh "docker push scrolltech/bridge:${TAGNAME}"
sh "docker push scrolltech/coordinator:${TAGNAME}"
}
}
}
}
}
}
post {
always {
cleanWs()
slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${TAGNAME} Tag build ${currentBuild.result}")
}
}
}

2
common/.gitignore vendored
View File

@@ -1,4 +1,2 @@
/build/bin
.idea
libzkp/impl/target
libzkp/interface/*.a

View File

@@ -5,4 +5,3 @@ test:
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
cd libzkp/impl && cargo fmt --all -- --check && cargo clippy --release -- -D warnings

View File

@@ -1,89 +0,0 @@
package cmd
import (
"os"
"os/exec"
"strings"
"sync"
"testing"
cmap "github.com/orcaman/concurrent-map"
)
var verbose bool
func init() {
v := os.Getenv("LOG_DOCKER")
if v == "true" || v == "TRUE" {
verbose = true
}
}
type checkFunc func(buf string)
// Cmd struct
type Cmd struct {
*testing.T
name string
args []string
mu sync.Mutex
cmd *exec.Cmd
checkFuncs cmap.ConcurrentMap //map[string]checkFunc
//stdout bytes.Buffer
Err error
}
// NewCmd creates a Cmd instance.
func NewCmd(t *testing.T, name string, args ...string) *Cmd {
return &Cmd{
T: t,
checkFuncs: cmap.New(),
name: name,
args: args,
}
}
// RegistFunc registers a check func.
func (t *Cmd) RegistFunc(key string, check checkFunc) {
t.checkFuncs.Set(key, check)
}
// UnRegistFunc unregisters a check func.
func (t *Cmd) UnRegistFunc(key string) {
t.checkFuncs.Pop(key)
}
func (t *Cmd) runCmd() {
cmd := exec.Command(t.args[0], t.args[1:]...) //nolint:gosec
cmd.Stdout = t
cmd.Stderr = t
_ = cmd.Run()
}
// RunCmd runs the command, in a separate goroutine when parallel is true.
func (t *Cmd) RunCmd(parallel bool) {
t.Log("cmd: ", t.args)
if parallel {
go t.runCmd()
} else {
t.runCmd()
}
}
func (t *Cmd) Write(data []byte) (int, error) {
out := string(data)
if verbose {
t.Logf("%s: %v", t.name, out)
} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
t.Logf("%s: %v", t.name, out)
}
go t.checkFuncs.IterCb(func(_ string, value interface{}) {
check := value.(checkFunc)
check(out)
})
return len(data), nil
}

View File

@@ -1,114 +0,0 @@
package cmd
import (
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/docker/docker/pkg/reexec"
"github.com/stretchr/testify/assert"
)
// RunApp exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (t *Cmd) RunApp(waitResult func() bool) {
t.Log("cmd: ", append([]string{t.name}, t.args...))
cmd := &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{t.name}, t.args...),
Stderr: t,
Stdout: t,
}
if waitResult != nil {
go func() {
_ = cmd.Run()
}()
waitResult()
} else {
_ = cmd.Run()
}
t.mu.Lock()
t.cmd = cmd
t.mu.Unlock()
}
// WaitExit waits until the process exits.
func (t *Cmd) WaitExit() {
// Wait all the check funcs are finished or test status is failed.
for !(t.Failed() || t.checkFuncs.IsEmpty()) {
<-time.After(time.Millisecond * 500)
}
// Send interrupt signal.
t.mu.Lock()
_ = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// Interrupt sends an interrupt signal.
func (t *Cmd) Interrupt() {
t.mu.Lock()
t.Err = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// WaitResult returns true if the keyword appears in the output before the timeout.
func (t *Cmd) WaitResult(timeout time.Duration, keyword string) bool {
if keyword == "" {
return false
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return true
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
return false
}
// ExpectWithTimeout waits for the keyword to appear within the given timeout.
func (t *Cmd) ExpectWithTimeout(parallel bool, timeout time.Duration, keyword string) {
if keyword == "" {
return
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
waitResult := func() {
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
}
if parallel {
go waitResult()
} else {
waitResult()
}
}

View File

@@ -1,42 +0,0 @@
package cmd_test
import (
"fmt"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"scroll-tech/common/cmd"
)
func TestCmd(t *testing.T) {
app := cmd.NewCmd(t, "curTime", "date", "+%Y-%m-%d")
tm := time.Now()
curTime := fmt.Sprintf("%d-%02d-%02d", tm.Year(), tm.Month(), tm.Day())
okCh := make(chan struct{}, 1)
app.RegistFunc(curTime, func(buf string) {
if strings.Contains(buf, curTime) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer app.UnRegistFunc(curTime)
// Run cmd.
app.RunCmd(true)
// Wait result.
select {
case <-okCh:
return
case <-time.After(time.Second):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", curTime))
}
}

84
common/docker/cmd.go Normal file
View File

@@ -0,0 +1,84 @@
package docker
import (
"errors"
"os/exec"
"strings"
"sync"
"testing"
)
type checkFunc func(buf string)
// Cmd struct
type Cmd struct {
*testing.T
checkFuncs sync.Map //map[string]checkFunc
//stdout bytes.Buffer
errMsg chan error
}
// NewCmd creates a Cmd instance.
func NewCmd(t *testing.T) *Cmd {
cmd := &Cmd{
T: t,
//stdout: bytes.Buffer{},
errMsg: make(chan error, 2),
}
cmd.RegistFunc("panic", func(buf string) {
if strings.Contains(buf, "panic") {
cmd.errMsg <- errors.New(buf)
}
})
return cmd
}
// RegistFunc registers a check func.
func (t *Cmd) RegistFunc(key string, check checkFunc) {
t.checkFuncs.Store(key, check)
}
// UnRegistFunc unregisters a check func.
func (t *Cmd) UnRegistFunc(key string) {
if _, ok := t.checkFuncs.Load(key); ok {
t.checkFuncs.Delete(key)
}
}
// RunCmd runs the command, in a separate goroutine when parallel is true.
func (t *Cmd) RunCmd(args []string, parallel bool) {
t.Log("RunCmd cmd", args)
if parallel {
go t.runCmd(args)
} else {
t.runCmd(args)
}
}
// ErrMsg returns the error output channel.
func (t *Cmd) ErrMsg() <-chan error {
return t.errMsg
}
func (t *Cmd) Write(data []byte) (int, error) {
out := string(data)
t.Log(out)
go func(content string) {
t.checkFuncs.Range(func(key, value interface{}) bool {
check := value.(checkFunc)
check(content)
return true
})
}(out)
return len(data), nil
}
func (t *Cmd) runCmd(args []string) {
cmd := exec.Command(args[0], args[1:]...) //nolint:gosec
cmd.Stdout = t
cmd.Stderr = t
_ = cmd.Run()
}
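A minimal usage sketch for the Cmd helper above, called from a test; the docker command, keyword, and timeout are illustrative:

package docker_test

import (
	"strings"
	"testing"
	"time"

	"scroll-tech/common/docker"
)

func TestCmdSketch(t *testing.T) {
	cmd := docker.NewCmd(t)

	// React to a keyword in the command's combined output.
	cmd.RegistFunc("CONTAINER ID", func(buf string) {
		if strings.Contains(buf, "CONTAINER ID") {
			t.Log("docker ps header seen")
		}
	})
	defer cmd.UnRegistFunc("CONTAINER ID")

	// Run asynchronously; all output is streamed through Cmd.Write,
	// which fans it out to the registered check funcs.
	cmd.RunCmd([]string{"docker", "ps"}, true)

	// NewCmd pre-registers a check that forwards any output line
	// containing "panic" to the ErrMsg channel.
	select {
	case err := <-cmd.ErrMsg():
		t.Fatal(err)
	case <-time.After(2 * time.Second):
	}
}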

View File

@@ -9,11 +9,22 @@ import (
"time"
"github.com/docker/docker/api/types"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
"github.com/docker/docker/client"
)
var (
cli *client.Client
)
func init() {
var err error
cli, err = client.NewClientWithOpts(client.FromEnv)
if err != nil {
panic(err)
}
cli.NegotiateAPIVersion(context.Background())
}
// ImgGeth is the geth image manager, covering both l1geth and l2geth.
type ImgGeth struct {
image string
@@ -26,21 +37,20 @@ type ImgGeth struct {
wsPort int
running bool
cmd *cmd.Cmd
*Cmd
}
// NewImgGeth return geth img instance.
func NewImgGeth(t *testing.T, image, volume, ipc string, hPort, wPort int) ImgInstance {
img := &ImgGeth{
return &ImgGeth{
image: image,
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
name: fmt.Sprintf("%s-%d", image, time.Now().Unix()),
volume: volume,
ipcPath: ipc,
httpPort: hPort,
wsPort: wPort,
Cmd: NewCmd(t),
}
img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
return img
}
// Start run image and check if it is running healthily.
@@ -49,7 +59,7 @@ func (i *ImgGeth) Start() error {
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.cmd.RunCmd(true)
i.Cmd.RunCmd(i.prepare(), true)
i.running = i.isOk()
if !i.running {
_ = i.Stop()
@@ -76,7 +86,7 @@ func (i *ImgGeth) Endpoint() string {
func (i *ImgGeth) isOk() bool {
keyword := "WebSocket enabled"
okCh := make(chan struct{}, 1)
i.cmd.RegistFunc(keyword, func(buf string) {
i.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
@@ -85,16 +95,13 @@ func (i *ImgGeth) isOk() bool {
}
}
})
defer i.cmd.UnRegistFunc(keyword)
defer i.UnRegistFunc(keyword)
select {
case <-okCh:
utils.TryTimes(3, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
i.id = GetContainerID(i.name)
return i.id != ""
case <-time.After(time.Second * 10):
case <-time.NewTimer(time.Second * 10).C:
return false
}
}

View File

@@ -4,25 +4,16 @@ import (
"context"
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //nolint:golint
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
)
func TestDocker(t *testing.T) {
t.Parallel()
t.Run("testL1Geth", testL1Geth)
t.Run("testL2Geth", testL2Geth)
t.Run("testDB", testDB)
}
func testL1Geth(t *testing.T) {
func TestL1Geth(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
img := NewTestL1Docker(t)
img := NewImgGeth(t, "scroll_l1geth", "", "", 8535, 0)
assert.NoError(t, img.Start())
defer img.Stop()
client, err := ethclient.Dial(img.Endpoint())
@@ -33,11 +24,12 @@ func testL1Geth(t *testing.T) {
t.Logf("chainId: %s", chainID.String())
}
func testL2Geth(t *testing.T) {
func TestL2Geth(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
img := NewTestL2Docker(t)
img := NewImgGeth(t, "scroll_l2geth", "", "", 8535, 0)
assert.NoError(t, img.Start())
defer img.Stop()
client, err := ethclient.Dial(img.Endpoint())
@@ -47,13 +39,3 @@ func testL2Geth(t *testing.T) {
assert.NoError(t, err)
t.Logf("chainId: %s", chainID.String())
}
func testDB(t *testing.T) {
driverName := "postgres"
dbImg := NewTestDBDocker(t, driverName)
defer dbImg.Stop()
db, err := sqlx.Open(driverName, dbImg.Endpoint())
assert.NoError(t, err)
assert.NoError(t, db.Ping())
}

View File

@@ -5,22 +5,8 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
)
var (
cli *client.Client
)
func init() {
var err error
cli, err = client.NewClientWithOpts(client.FromEnv)
if err != nil {
panic(err)
}
cli.NegotiateAPIVersion(context.Background())
}
// ImgInstance is an img instance.
type ImgInstance interface {
Start() error
@@ -36,7 +22,7 @@ func GetContainerID(name string) string {
Filters: filter,
})
if len(lst) > 0 {
return lst[0].ID
return lst[0].Names[0]
}
return ""
}

View File

@@ -1,4 +1,4 @@
FROM scrolltech/l2geth:prealpha-v5.1
FROM scrolltech/l2geth:prealpha-v2.2
RUN mkdir -p /l2geth/keystore

View File

@@ -1,78 +0,0 @@
package docker
import (
"context"
"crypto/rand"
"math/big"
"testing"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/utils"
)
var (
l1StartPort = 10000
l2StartPort = 20000
dbStartPort = 30000
)
// NewTestL1Docker starts and returns l1geth docker
func NewTestL1Docker(t *testing.T) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgL1geth := NewImgGeth(t, "scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
assert.NoError(t, imgL1geth.Start())
// try 3 times to get chainID until is ok.
utils.TryTimes(3, func() bool {
client, _ := ethclient.Dial(imgL1geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
return true
}
}
return false
})
return imgL1geth
}
// NewTestL2Docker starts and returns l2geth docker
func NewTestL2Docker(t *testing.T) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgL2geth := NewImgGeth(t, "scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
assert.NoError(t, imgL2geth.Start())
// try 3 times to get chainID until is ok.
utils.TryTimes(3, func() bool {
client, _ := ethclient.Dial(imgL2geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
return true
}
}
return false
})
return imgL2geth
}
// NewTestDBDocker starts and returns database docker
func NewTestDBDocker(t *testing.T, driverName string) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgDB := NewImgDB(t, driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
assert.NoError(t, imgDB.Start())
// try 5 times until the db is ready.
utils.TryTimes(5, func() bool {
db, _ := sqlx.Open(driverName, imgDB.Endpoint())
if db != nil {
return db.Ping() == nil
}
return false
})
return imgDB
}

View File

@@ -3,31 +3,27 @@ module scroll-tech/common
go 1.18
require (
github.com/docker/docker v20.10.21+incompatible
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.6
github.com/docker/docker v20.10.17+incompatible
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.14
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
)
require (
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/go-ethereum v1.10.26 // indirect
github.com/ethereum/go-ethereum v1.10.13 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
@@ -36,21 +32,19 @@ require (
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.13 // indirect
github.com/huin/goupnp v1.0.2 // indirect
github.com/iden3/go-iden3-crypto v0.0.12 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
@@ -64,10 +58,7 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.4.3 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
@@ -75,16 +66,14 @@ require (
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/net v0.6.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.0.0-20220812174116-3211cb980234 // indirect
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
golang.org/x/tools v0.3.0 // indirect
golang.org/x/tools v0.1.12 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect

View File

@@ -77,9 +77,8 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -88,8 +87,6 @@ github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3h
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
@@ -99,9 +96,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dchest/blake512 v1.0.0/go.mod h1:FV1x7xPPLWukZlpDpWQ88rF/SFwZ5qbskrzhLMB92JI=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
@@ -112,8 +108,8 @@ github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwu
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -125,9 +121,8 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.10.13 h1:DEYFP9zk+Gruf3ae1JOJVhNmxK28ee+sMELPLgYTXpA=
github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
@@ -157,8 +152,6 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@@ -214,12 +207,10 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKhEfPFg8W0Enm61B9Gs911H8iesGY5R8NDPtd1M=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -231,14 +222,12 @@ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iU
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/iden3/go-iden3-crypto v0.0.12 h1:dXZF+R9iI07DK49LHX/EKC3jTa0O2z+TUyvxjGK7V38=
github.com/iden3/go-iden3-crypto v0.0.12/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo=
github.com/iden3/go-iden3-crypto v0.0.13 h1:ixWRiaqDULNyIDdOWz2QQJG5t4PpNHkQk2P6GV94cok=
github.com/iden3/go-iden3-crypto v0.0.13/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8=
@@ -254,15 +243,12 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@@ -286,9 +272,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -299,9 +284,6 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
@@ -322,9 +304,6 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -364,15 +343,12 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -396,18 +372,11 @@ github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d h1:eh1i1M9BKPCQckNFQsV/yfazQ895hevkWr8GuqhKNrk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d/go.mod h1:SkQ1431r0LkrExTELsapw6JQHYpki8O1mSsObTSmBWU=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -450,15 +419,11 @@ github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hM
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.10.2 h1:x3p8awjp/2arX+Nl/G2040AZpOCHS/eMJJ1/a+mye4Y=
github.com/urfave/cli/v2 v2.10.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
@@ -481,8 +446,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -512,8 +477,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -540,8 +505,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E=
golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -556,9 +521,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -605,8 +569,8 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -617,9 +581,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -655,13 +618,13 @@ golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=

View File

@@ -1,36 +0,0 @@
[package]
name = "zkp"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["staticlib"]
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "scroll-dev-0220" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "scroll-dev-0220" }
[patch."https://github.com/scroll-tech/zktrie.git"]
zktrie = { git = "https://github.com/lispc/zktrie", branch = "scroll-dev-0215" }
[dependencies]
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="goerli-0215" }
types = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="goerli-0215" }
log = "0.4"
env_logger = "0.9.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"
libc = "0.2"
once_cell = "1.8.0"
[profile.test]
opt-level = 3
debug-assertions = true
[profile.release]
opt-level = 3
debug-assertions = true

View File

@@ -1 +0,0 @@
nightly-2022-12-10

View File

@@ -1,40 +0,0 @@
use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use std::fs::File;
use std::io::Read;
use zkevm::circuit::{AGG_DEGREE, DEGREE};
use zkevm::prover::AggCircuitProof;
use zkevm::utils::load_or_create_params;
use zkevm::verifier::Verifier;
static mut VERIFIER: Option<&Verifier> = None;
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_verifier(params_path: *const c_char, agg_vk_path: *const c_char) {
env_logger::init();
let params_path = c_char_to_str(params_path);
let agg_vk_path = c_char_to_str(agg_vk_path);
let mut f = File::open(agg_vk_path).unwrap();
let mut agg_vk = vec![];
f.read_to_end(&mut agg_vk).unwrap();
let params = load_or_create_params(params_path, *DEGREE).unwrap();
let agg_params = load_or_create_params(params_path, *AGG_DEGREE).unwrap();
let v = Box::new(Verifier::from_params(params, agg_params, Some(agg_vk)));
VERIFIER = Some(Box::leak(v))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_agg_proof(proof: *const c_char) -> c_char {
let proof_vec = c_char_to_vec(proof);
let agg_proof = serde_json::from_slice::<AggCircuitProof>(proof_vec.as_slice()).unwrap();
let verified = VERIFIER
.unwrap()
.verify_agg_circuit_proof(agg_proof)
.is_ok();
verified as c_char
}

View File

@@ -1,5 +0,0 @@
void init_prover(char *params_path, char *seed_path);
char* create_agg_proof(char *trace);
char* create_agg_proof_multi(char *trace);
void init_verifier(char *params_path, char *agg_vk_path);
char verify_agg_proof(char *proof);
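The five declarations above are the full C surface exported by the Rust staticlib. For illustration, the Go side would typically reach these entry points through cgo along the lines of the sketch below; the package name, the lib/ directory, the header path and the absence of error handling are assumptions made for brevity, not the repository's actual wiring.

package verifier

/*
#cgo LDFLAGS: -L${SRCDIR}/lib -lzkp -lm -ldl
#include <stdlib.h>
#include "./lib/libzkp.h"
*/
import "C"

import "unsafe"

// InitVerifier loads the KZG params and the aggregation vk once at startup.
func InitVerifier(paramsPath, aggVkPath string) {
	cParams := C.CString(paramsPath)
	cVk := C.CString(aggVkPath)
	defer C.free(unsafe.Pointer(cParams))
	defer C.free(unsafe.Pointer(cVk))
	C.init_verifier(cParams, cVk)
}

// VerifyAggProof hands the JSON-encoded aggregation proof to the library and
// interprets the returned byte as a boolean.
func VerifyAggProof(proofJSON []byte) bool {
	cProof := C.CString(string(proofJSON))
	defer C.free(unsafe.Pointer(cProof))
	return C.verify_agg_proof(cProof) != 0
}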

View File

@@ -2,32 +2,56 @@ package message
import (
"crypto/ecdsa"
"crypto/rand"
"encoding/hex"
"encoding/binary"
"encoding/json"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
"golang.org/x/crypto/blake2s"
)
// RespStatus represents status code from roller to scroll
// MsgType denotes the type of message being sent or received.
type MsgType uint16
const (
// Error message.
Error MsgType = iota
// Register message, sent by a roller when a connection is established.
Register
// BlockTrace message, sent by a sequencer to a roller to notify them
// they need to generate a proof.
BlockTrace
// Proof message, sent by a roller to a sequencer when they have finished
// proof generation of a given set of block traces.
Proof
)
// RespStatus is the status of the proof generation
type RespStatus uint32
const (
// StatusOk means generate proof success
// StatusOk indicates the proof generation succeeded
StatusOk RespStatus = iota
// StatusProofError means generate proof failed
// StatusProofError indicates the proof generation failed
StatusProofError
)
// AuthMsg is the first message exchanged from the Roller to the Sequencer.
// Msg is the top-level message container which contains the payload and the
// message identifier.
type Msg struct {
// Message type
Type MsgType `json:"type"`
// Message payload
Payload []byte `json:"payload"`
}
// AuthMessage is the first message exchanged from the Roller to the Sequencer.
// It effectively acts as a registration, and makes the Roller identification
// known to the Sequencer.
type AuthMsg struct {
type AuthMessage struct {
// Message fields
Identity *Identity `json:"message"`
Identity Identity `json:"message"`
// Roller signature
Signature string `json:"signature"`
}
@@ -36,28 +60,17 @@ type AuthMsg struct {
type Identity struct {
// Roller name
Name string `json:"name"`
// Unverified Unix timestamp of message creation
Timestamp uint32 `json:"timestamp"`
// Time of message creation
Timestamp int64 `json:"timestamp"`
// Roller public key
PublicKey string `json:"publicKey"`
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version.
// curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha"
// Version is common.Version+ZK_VERSION. Use the following to check the latest ZK_VERSION version.
// curl -sL https://api.github.com/repos/scroll-tech/common-rs/commits | jq -r ".[0].sha"
Version string `json:"version"`
// Random unique token generated by manager
Token string `json:"token"`
}
// GenerateToken generates token
func GenerateToken() (string, error) {
b := make([]byte, 16)
if _, err := rand.Read(b); err != nil {
return "", err
}
return hex.EncodeToString(b), nil
}
// Sign auth message
func (a *AuthMsg) Sign(priv *ecdsa.PrivateKey) error {
func (a *AuthMessage) Sign(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
@@ -68,155 +81,58 @@ func (a *AuthMsg) Sign(priv *ecdsa.PrivateKey) error {
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
a.Signature = common.Bytes2Hex(sig)
return nil
}
// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
// recover public key
if a.Identity.PublicKey == "" {
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
a.Identity.PublicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
}
return crypto.VerifySignature(common.FromHex(a.Identity.PublicKey), hash, sig[:len(sig)-1]), nil
}
// PublicKey return public key from signature
func (a *AuthMsg) PublicKey() (string, error) {
if a.Identity.PublicKey == "" {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
a.Identity.PublicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
return a.Identity.PublicKey, nil
}
return a.Identity.PublicKey, nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
bs, err := json.Marshal(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
hash := blake2s.Sum256(bs)
return hash[:], nil
}
// ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct {
*ProofDetail `json:"zkProof"`
// Roller signature
Signature string `json:"signature"`
// Roller public key
publicKey string
// BlockTraces is a wrapper type around types.BlockResult which adds an ID
// that identifies which proof generation session these block traces are
// associated to. This then allows the roller to add the ID back to their
// proof message once generated, and in turn helps the sequencer understand
// where to handle the proof.
type BlockTraces struct {
ID uint64 `json:"id"`
Traces *types.BlockResult `json:"blockTraces"`
}
// Sign signs the ProofMsg.
func (a *ProofMsg) Sign(priv *ecdsa.PrivateKey) error {
hash, err := a.ProofDetail.Hash()
if err != nil {
return err
}
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies ProofMsg.Signature.
func (a *ProofMsg) Verify() (bool, error) {
hash, err := a.ProofDetail.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
// recover public key
if a.publicKey == "" {
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
a.publicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
}
return crypto.VerifySignature(common.FromHex(a.publicKey), hash, sig[:len(sig)-1]), nil
}
// PublicKey return public key from signature
func (a *ProofMsg) PublicKey() (string, error) {
if a.publicKey == "" {
hash, err := a.ProofDetail.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
a.publicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
return a.publicKey, nil
}
return a.publicKey, nil
}
// TaskMsg is a wrapper type around db ProveTask type.
type TaskMsg struct {
ID string `json:"id"`
Traces []*types.BlockTrace `json:"blockTraces"`
}
// ProofDetail is the message received from rollers that contains zk proof, the status of
// ProofMsg is the message received from rollers that contains zk proof, the status of
// the proof generation succeeded, and an error message if proof generation failed.
type ProofDetail struct {
ID string `json:"id"`
type ProofMsg struct {
Status RespStatus `json:"status"`
Proof *AggProof `json:"proof"`
Error string `json:"error,omitempty"`
}
// Hash return proofMsg content hash.
func (z *ProofDetail) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(z)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
ID uint64 `json:"id"`
// FIXME: Maybe we need to allow Proof omitempty? Also in scroll.
Proof *AggProof `json:"proof"`
}
// AggProof includes the proof and public input that are required to verification and rollup.
type AggProof struct {
Proof []byte `json:"proof"`
Instance []byte `json:"instance"`
FinalPair []byte `json:"final_pair"`
Vk []byte `json:"vk"`
BlockCount uint `json:"block_count"`
Proof []byte `json:"proof"`
Instance []byte `json:"instance"`
FinalPair []byte `json:"final_pair"`
Vk []byte `json:"vk"`
}
// Marshal marshals the TraceProof as bytes
func (proof *AggProof) Marshal() ([]byte, error) {
jsonByt, err := json.Marshal(proof)
if err != nil {
return nil, err
}
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(len(jsonByt)))
return append(buf, jsonByt...), nil
}
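Marshal frames a proof as a 4-byte big-endian length prefix followed by the JSON body, so a stream consumer knows exactly how many bytes to read before decoding. The matching read side is not part of this diff; a minimal sketch of what it would look like (UnmarshalAggProof is a hypothetical helper, not a function in this package):

package message

import (
	"encoding/binary"
	"encoding/json"
	"io"
)

// UnmarshalAggProof is a hypothetical inverse of AggProof.Marshal: it reads the
// 4-byte big-endian length prefix, then decodes that many bytes of JSON.
func UnmarshalAggProof(r io.Reader) (*AggProof, error) {
	var lenBuf [4]byte
	if _, err := io.ReadFull(r, lenBuf[:]); err != nil {
		return nil, err
	}
	payload := make([]byte, binary.BigEndian.Uint32(lenBuf[:]))
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	proof := &AggProof{}
	if err := json.Unmarshal(payload, proof); err != nil {
		return nil, err
	}
	return proof, nil
}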

View File

@@ -1,33 +0,0 @@
package message
import (
"testing"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
)
func TestAuthMessageSignAndVerify(t *testing.T) {
privkey, err := crypto.GenerateKey()
assert.NoError(t, err)
authMsg := &AuthMsg{
Identity: &Identity{
Name: "testRoller",
Timestamp: uint32(time.Now().Unix()),
},
}
assert.NoError(t, authMsg.Sign(privkey))
ok, err := authMsg.Verify()
assert.NoError(t, err)
assert.Equal(t, true, ok)
// Check public key is ok.
pub, err := authMsg.PublicKey()
assert.NoError(t, err)
pubkey := crypto.CompressPubkey(&privkey.PublicKey)
assert.Equal(t, pub, common.Bytes2Hex(pubkey))
}

View File

@@ -1,53 +0,0 @@
package metrics
import (
"context"
"net"
"net/http"
"strconv"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/metrics/prometheus"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
)
// Serve starts the metrics server on the given address, will be closed when the given
// context is canceled.
func Serve(ctx context.Context, c *cli.Context) {
if !c.Bool(utils.MetricsEnabled.Name) {
return
}
address := net.JoinHostPort(
c.String(utils.MetricsAddr.Name),
strconv.Itoa(c.Int(utils.MetricsPort.Name)),
)
server := &http.Server{
Addr: address,
Handler: prometheus.Handler(metrics.DefaultRegistry),
ReadTimeout: rpc.DefaultHTTPTimeouts.ReadTimeout,
WriteTimeout: rpc.DefaultHTTPTimeouts.WriteTimeout,
IdleTimeout: rpc.DefaultHTTPTimeouts.IdleTimeout,
}
go func() {
<-ctx.Done()
if err := server.Close(); err != nil {
log.Error("Failed to close metrics server", "error", err)
}
}()
log.Info("Starting metrics server", "address", address)
go func() {
if err := server.ListenAndServe(); err != nil {
log.Error("start metrics server error", "error", err)
}
}()
}
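Serve reads its settings from the *cli.Context flags and closes the HTTP server when the supplied context is cancelled, so callers wire it into their CLI entrypoint. A rough usage sketch only; the app layout and the import path scroll-tech/common/metrics are assumptions:

package main

import (
	"context"
	"os"
	"os/signal"

	"github.com/urfave/cli/v2"

	"scroll-tech/common/metrics"
)

func main() {
	app := &cli.App{
		Name: "example",
		// The metrics flags (utils.MetricsEnabled, utils.MetricsAddr,
		// utils.MetricsPort) must be registered on the app for Serve to read them.
		Action: func(c *cli.Context) error {
			// Cancelling this context closes the metrics HTTP server.
			ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
			defer stop()
			metrics.Serve(ctx, c)
			<-ctx.Done()
			return nil
		},
	}
	_ = app.Run(os.Args)
}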

21936
common/testdata/blockResult_delegate.json vendored Normal file

File diff suppressed because one or more lines are too long

3305
common/testdata/blockResult_orm.json vendored Normal file

File diff suppressed because one or more lines are too long

21936
common/testdata/blockResult_relayer.json vendored Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1,544 +0,0 @@
{
"coinbase": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"header": {
"parentHash": "0xe17f08d25ef61a8ee12aa29704b901345a597f5e45a9a0f603ae0f70845b54dc",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"transactionsRoot": "0x3057754c197f33e1fe799e996db6232b5257412feea05b3c1754738f0b33fe32",
"receiptsRoot": "0xd95b673818fa493deec414e01e610d97ee287c9421c8eff4102b1647c1a184e4",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x2",
"number": "0x2",
"gasLimit": "0x355418d1e8184",
"gasUsed": "0xa410",
"timestamp": "0x63807b2a",
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000004b54a94f0df14333e63c8a13dfe6097c1a08b5fd2c225a8dc0f199dae245aead55d6f774a980a0c925be407748d56a14106afda7ddc1dec342e7ee3b0d58a8df01",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x1de9",
"hash": "0xc7b6c7022c8386cdaf6fcd3d4f8d03dce257ae3664a072fdce511ecefce73ad0"
},
"transactions": [
{
"type": 0,
"nonce": 0,
"txHash": "0xb2febc1213baec968f6575789108e175273b8da8f412468098893084229f1542",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514",
"s": "0x34cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b"
},
{
"type": 0,
"nonce": 1,
"txHash": "0xe6ac2ffc543d07f1e280912a2abe3aa659bf83773740681151297ada1bb211dd",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xf039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316",
"s": "0x5a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1"
}
],
"storageTrace": {
"rootBefore": "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
"rootAfter": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"proofs": {
"0x01bae6BF68E9A03Fb2bc0615b1bf0d69ce9411eD": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0xc0c4C8bAEA3f6Acb49b6E1fb9e2ADEcEeaCB0cA2": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
]
}
},
"executionResults": [
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
},
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
}
],
"mptwitness": [
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
},
{
"pathPart": "0x3",
"root": "0xaf16fd780a8c7616b95b20da69f4ff26e0253238e996f9516445d6d6bf92b725",
"path": [
{
"value": "0x5bbe97e7e66485b203f9dfea64eb7fa7df06959b12cbde2beba14f8f91133a13",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
},
{
"value": "0x2e591357b02ab3117c35ad94a4e1a724fdbd95d6463da1f6c8017e6d000ecf02",
"sibling": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
{
"value": "0x794953bb5d8aa00f90383ff435ce2ea58e30e1da1061e69455c38496766ec10f",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
}
]
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1,19 +0,0 @@
package utils
import (
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
)
// ComputeBatchID compute a unique hash for a batch using "endBlockHash" & "endBlockHash in last batch"
// & "batch height", following the logic in `_computeBatchId` in contracts/src/L1/rollup/ZKRollup.sol
func ComputeBatchID(endBlockHash common.Hash, lastEndBlockHash common.Hash, index *big.Int) string {
indexBytes := make([]byte, 32)
return crypto.Keccak256Hash(
endBlockHash.Bytes(),
lastEndBlockHash.Bytes(),
index.FillBytes(indexBytes),
).String()
}
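As a usage illustration (the import path and the two hash values are placeholders chosen for the example), the ID of batch index 1 would be derived like this:

package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"

	"scroll-tech/common/utils"
)

func main() {
	end := common.HexToHash("0xc7b6c7022c8386cdaf6fcd3d4f8d03dce257ae3664a072fdce511ecefce73ad0")
	lastEnd := common.HexToHash("0xe17f08d25ef61a8ee12aa29704b901345a597f5e45a9a0f603ae0f70845b54dc")
	// keccak256(endBlockHash ++ lastEndBlockHash ++ uint256(index)), matching the
	// Solidity _computeBatchId, returned as a hex string.
	id := utils.ComputeBatchID(end, lastEnd, big.NewInt(1))
	fmt.Println(id)
}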

Some files were not shown because too many files have changed in this diff.