Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-12 23:48:15 -05:00

Compare commits: 133 commits, prealpha-v...maskpp/res
| SHA1 |
| --- |
| 9f9530faf2 |
| ce7962a1f2 |
| 20e345453e |
| 8548d6b25d |
| d0cd0f654d |
| 2422bb8953 |
| db957af22d |
| a6ff1d01dd |
| 6aa02874c0 |
| 714343c2b8 |
| a26cc23c9a |
| 4ad17468d0 |
| 7f1eddb6f1 |
| 40f703e87d |
| fbcabcc5e2 |
| eb3f187926 |
| 7bd80f1656 |
| a7a4115fb6 |
| d5f0218f5f |
| 6c3f506e98 |
| b589ebeaba |
| 631915480b |
| ee15881a04 |
| 80058c55d7 |
| fa0c8f1ba2 |
| fd65128ec1 |
| fd17b869cd |
| 3297e1ef9c |
| 5fdd2c609c |
| 828b0592f7 |
| 0265e1a1fe |
| ddb6bf36f4 |
| bce93a020d |
| 5a336bd5a9 |
| 857442fb6b |
| 1fc0477f43 |
| c17f62718e |
| 5c9dd1bd32 |
| e676f5e44a |
| d9bc0842cc |
| 0156e42e18 |
| 7ee7f93f3f |
| da096f8dd2 |
| 5ca3d1a03e |
| a00d3e9a69 |
| 0e88b9aa94 |
| 33a912e7c1 |
| e48e76acdf |
| f5d02175f8 |
| bb76a00613 |
| 41d71fc274 |
| 02ea14d721 |
| ea9c1c6776 |
| 16576b6f53 |
| aa885f068f |
| 1f764a579d |
| 91ee767669 |
| 7eac41691e |
| d9516890b0 |
| ddb96bb732 |
| e419dd8d5c |
| c99c65bdfd |
| 18fd7f56a8 |
| a319dc1cff |
| 52bf3a55fc |
| 598e10e4fc |
| eed3f42731 |
| 5a4bea8ccd |
| 5b37b63d89 |
| 5e5c4f7701 |
| 09dc638652 |
| b598a01e7f |
| 0fcdb6f824 |
| 5a95dcf5ba |
| d0c63e75df |
| 676b8a2230 |
| a1cb3d3b87 |
| 47b4c54e05 |
| fe822a65b9 |
| 9bd4931f93 |
| 411cb19b62 |
| 8f55299941 |
| 0bdcce79ba |
| fcd29c305d |
| 54a6ab472a |
| b2a5baa2ad |
| dc6b71ca23 |
| e1247a7eb2 |
| 65699b89bb |
| a44956a05f |
| b85b4bafc2 |
| 2e3c80c580 |
| d24392feac |
| 5c6e20a774 |
| 9f9e23ff0e |
| fa93de97de |
| deedf7a5d0 |
| 73432127cd |
| a78160ddad |
| fff2517a76 |
| eba7647e21 |
| 51076d21c3 |
| 077ed9839a |
| bdcca55bd5 |
| 20b8e2bf6c |
| cc596c42b3 |
| 7da717b251 |
| bbdbf3995f |
| 7fb8bc6e29 |
| b8fae294e4 |
| 23bc381f5c |
| b4ade85a9c |
| d04522027c |
| 7422bea51f |
| abcc159390 |
| a545954dbc |
| dc6ef83fbd |
| e17647bc9f |
| feaa95aefe |
| 8ad8a1b6f0 |
| 22f6781c26 |
| b165402e81 |
| 9ee8d977cb |
| e1a6eb65f6 |
| 9096334eab |
| c360cf52b1 |
| 00dc075d9c |
| af77c9fa83 |
| 1c4ed0487a |
| e4761d9694 |
| fbc7e03c67 |
| 927011641d |
| 6eb71869e8 |
`.github/pull_request_template.md` (new file, vendored, +7 lines)

```diff
@@ -0,0 +1,7 @@
+1. Purpose or design rationale of this PR
+
+
+
+2. Does this PR involve a new deployment, and involve a new git tag & docker image tag? If so, has `tag` in `common/version.go` been updated?
+
+3. Is this PR a breaking change? If so, have it been attached a `breaking-change` label?
```
`.github/workflows/bridge.yml` (vendored, 6 lines changed)

```diff
@@ -31,7 +31,11 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc
-        uses: pontem-network/get-solc@master
+        uses: supplypike/setup-bin@v3
+        with:
+          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
+          name: 'solc'
+          version: '0.8.16'
       - name: Install Geth Tools
         uses: gacts/install-geth-tools@v1
       - name: Lint
```
`.gitignore` (vendored, 1 line changed)

```diff
@@ -3,6 +3,7 @@ assets/params*
 assets/seed
 coverage.txt
 build/bin
+*.integration.txt

 # misc
 sftp-config.json
```
`Jenkinsfile` (vendored, 89 lines changed)

```diff
@@ -8,6 +8,7 @@ pipeline {
     }
     tools {
         go 'go-1.18'
+        nodejs "nodejs"
     }
     environment {
         GO111MODULE = 'on'
@@ -16,17 +17,6 @@ pipeline {
     }
     stages {
         stage('Build') {
-            when {
-                anyOf {
-                    changeset "Jenkinsfile"
-                    changeset "build/**"
-                    changeset "go.work**"
-                    changeset "bridge/**"
-                    changeset "coordinator/**"
-                    changeset "common/**"
-                    changeset "database/**"
-                }
-            }
             parallel {
                 stage('Build Prerequisite') {
                     steps {
```
```diff
@@ -67,38 +57,63 @@ pipeline {
                     }
                 }
             }
-            stage('Test') {
-                when {
-                    anyOf {
-                        changeset "Jenkinsfile"
-                        changeset "build/**"
-                        changeset "go.work**"
-                        changeset "bridge/**"
-                        changeset "coordinator/**"
-                        changeset "common/**"
-                        changeset "database/**"
-                    }
-                }
-            }
+            stage('Parallel Test') {
+                parallel{
+                    stage('Test bridge package') {
+                        steps {
+                            sh 'go test -v -race -coverprofile=coverage.bridge.txt -covermode=atomic -p 1 scroll-tech/bridge/...'
+                        }
+                    }
+                    stage('Test common package') {
+                        steps {
+                            sh 'go test -v -race -coverprofile=coverage.common.txt -covermode=atomic -p 1 scroll-tech/common/...'
+                        }
+                    }
+                    stage('Test coordinator package') {
+                        steps {
+                            sh 'go test -v -race -coverprofile=coverage.coordinator.txt -covermode=atomic -p 1 scroll-tech/coordinator/...'
+                        }
+                    }
+                    stage('Test database package') {
+                        steps {
+                            sh 'go test -v -race -coverprofile=coverage.db.txt -covermode=atomic -p 1 scroll-tech/database/...'
+                        }
+                    }
+                    stage('Integration test') {
+                        steps {
+                            sh 'go test -v -race -tags="mock_prover mock_verifier" -coverprofile=coverage.integration.txt -covermode=atomic -p 1 scroll-tech/integration-test/...'
+                        }
+                    }
+                    stage('Race test bridge package') {
+                        steps {
+                            sh "cd bridge && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
+                        }
+                    }
+                    stage('Race test coordinator package') {
+                        steps {
+                            sh "cd coordinator && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
+                        }
+                    }
+                    stage('Race test database package') {
+                        steps {
+                            sh "cd database && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
+                        }
+                    }
+                }
+            }
             stage('Compare Coverage') {
                 steps {
-                    catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
-                        sh '''
-                            go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database/...
-                            go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/...
-                            go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/common/...
-                            go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/coordinator/...
-                            cd ..
-                        '''
-                        script {
-                            for (i in ['bridge', 'coordinator', 'database']) {
-                                sh "cd $i && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|l2\\|l1\\|common\\|coordinator')"
-                            }
-                        }
-                    }
                     sh "./build/post-test-report-coverage.sh"
                     script {
                         currentBuild.result = 'SUCCESS'
                     }
                     step([$class: 'CompareCoverageAction', publishResultAs: 'Comment', scmVars: [GIT_URL: env.GIT_URL]])
                 }
             }
         }
         post {
             always {
+                publishCoverage adapters: [coberturaReportAdapter(path: 'cobertura.xml', thresholds: [[thresholdTarget: 'Aggregated Report', unhealthyThreshold: 40.0]])], checksName: '', sourceFileResolver: sourceFiles('NEVER_STORE')
                 cleanWs()
                 slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
             }
```
```diff
@@ -5,7 +5,8 @@ IMAGE_VERSION=latest
 REPO_ROOT_DIR=./..

 mock_abi:
-	go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/Mock_Bridge.sol --pkg mock_bridge --out mock_bridge/Mock_Bridge.go
+	go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
+	go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go

 bridge: ## Builds the Bridge instance.
	go build -o $(PWD)/build/bin/bridge ./cmd
```
````diff
@@ -22,16 +22,6 @@ make clean
 make bridge
 ```

-## DB config
-
-* db settings in config
-
-```bash
-# DB_DSN: db data source name
-export DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
-# DB_DRIVER: db driver name
-export DB_DRIVER="postgres"
-```
 ## Start
 * use default ports and config.json
````
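The removed settings are plain `database/sql` inputs. As a standalone way to check that a DSN/driver pair is usable before running the bridge, something like the following works (a sketch that assumes the `lib/pq` driver; the repo's ORM layer may register a different one):

```go
package main

import (
	"database/sql"
	"log"
	"os"

	_ "github.com/lib/pq" // registers the "postgres" driver, matching DB_DRIVER=postgres
)

func main() {
	// The bridge reads these from config.json; the env vars shown above
	// override them (see the config.go hunk later in this diff).
	driver := os.Getenv("DB_DRIVER") // e.g. "postgres"
	dsn := os.Getenv("DB_DSN")      // e.g. "postgres://admin:123456@localhost/test_db?sslmode=disable"

	db, err := sql.Open(driver, dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// sql.Open is lazy; Ping forces an actual connection attempt.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("database reachable")
}
```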
```diff
@@ -9,20 +9,20 @@ import (
 )

 const (
-	// SENT_MESSAGE_EVENT_SIGNATURE = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
-	SENT_MESSAGE_EVENT_SIGNATURE = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
+	// SentMessageEventSignature = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
+	SentMessageEventSignature = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"

-	// RELAYED_MESSAGE_EVENT_SIGNATURE = keccak256("RelayedMessage(bytes32)")
-	RELAYED_MESSAGE_EVENT_SIGNATURE = "4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"
+	// RelayedMessageEventSignature = keccak256("RelayedMessage(bytes32)")
+	RelayedMessageEventSignature = "4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"

-	// FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE = keccak256("FailedRelayedMessage(bytes32)")
-	FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE = "99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"
+	// FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
+	FailedRelayedMessageEventSignature = "99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"

-	// COMMIT_BATCH_EVENT_SIGNATURE = keccak256("CommitBatch(bytes32,bytes32,uint256,bytes32)")
-	COMMIT_BATCH_EVENT_SIGNATURE = "a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"
+	// CommitBatchEventSignature = keccak256("CommitBatch(bytes32,bytes32,uint256,bytes32)")
+	CommitBatchEventSignature = "a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"

-	// FINALIZED_BATCH_EVENT_SIGNATURE = keccak256("FinalizeBatch(bytes32,bytes32,uint256,bytes32)")
-	FINALIZED_BATCH_EVENT_SIGNATURE = "e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"
+	// FinalizedBatchEventSignature = keccak256("FinalizeBatch(bytes32,bytes32,uint256,bytes32)")
+	FinalizedBatchEventSignature = "e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"
 )

 var (
```
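Each constant is the keccak256 digest of the event's canonical signature string, hex-encoded without a `0x` prefix, so a rename like this is easy to sanity-check by hashing the signature yourself. A minimal sketch against the fork's `crypto` package:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// keccak256 of the canonical event signature, as documented in the
	// comment next to each constant.
	sig := "RelayedMessage(bytes32)"
	fmt.Println(crypto.Keccak256Hash([]byte(sig)).Hex())
	// prints 0x4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c,
	// matching RelayedMessageEventSignature above
}
```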
`bridge/cmd/app/app.go` (new file, 130 lines)

```go
package app

import (
	"context"
	"fmt"
	"os"
	"os/signal"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/database"

	"scroll-tech/common/metrics"
	"scroll-tech/common/utils"
	"scroll-tech/common/version"

	"scroll-tech/bridge/config"
	"scroll-tech/bridge/l1"
	"scroll-tech/bridge/l2"
)

var (
	app *cli.App
)

func init() {
	// Set up Bridge app info.
	app = cli.NewApp()

	app.Action = action
	app.Name = "bridge"
	app.Usage = "The Scroll Bridge"
	app.Version = version.Version
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Flags = append(app.Flags, apiFlags...)

	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
	}

	// Register `bridge-test` app for integration-test.
	utils.RegisterSimulation(app, "bridge-test")
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}

	// Start metrics server.
	metrics.Serve(context.Background(), ctx)

	// Init db connection.
	var ormFactory database.OrmFactory
	if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
		log.Crit("failed to init db connection", "err", err)
	}

	var (
		l1Backend *l1.Backend
		l2Backend *l2.Backend
	)
	// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
	l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
	if err != nil {
		return err
	}
	l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
	if err != nil {
		return err
	}
	defer func() {
		l1Backend.Stop()
		l2Backend.Stop()
		err = ormFactory.Close()
		if err != nil {
			log.Error("can not close ormFactory", "error", err)
		}
	}()

	// Start all modules.
	if err = l1Backend.Start(); err != nil {
		log.Crit("couldn't start l1 backend", "error", err)
	}
	if err = l2Backend.Start(); err != nil {
		log.Crit("couldn't start l2 backend", "error", err)
	}

	// Register api and start rpc service.
	if ctx.Bool(httpEnabledFlag.Name) {
		handler, addr, err := utils.StartHTTPEndpoint(
			fmt.Sprintf(
				"%s:%d",
				ctx.String(httpListenAddrFlag.Name),
				ctx.Int(httpPortFlag.Name)),
			l2Backend.APIs())
		if err != nil {
			log.Crit("Could not start RPC api", "error", err)
		}
		defer func() {
			_ = handler.Shutdown(ctx.Context)
			log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
		}()
		log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
	}

	log.Info("Start bridge successfully")

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run run bridge cmd instance.
func Run() {
	// Run the bridge.
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```
`bridge/cmd/app/app_test.go` (new file, 19 lines)

```go
package app

import (
	"fmt"
	"testing"
	"time"

	"scroll-tech/common/cmd"
	"scroll-tech/common/version"
)

func TestRunBridge(t *testing.T) {
	bridge := cmd.NewCmd(t, "bridge-test", "--version")
	defer bridge.WaitExit()

	// wait result
	bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
	bridge.RunApp(nil)
}
```
```diff
@@ -1,6 +1,8 @@
-package main
+package app

-import "github.com/urfave/cli/v2"
+import (
+	"github.com/urfave/cli/v2"
+)

 var (
 	apiFlags = []cli.Flag{
@@ -26,22 +28,4 @@ var (
 		Usage: "HTTP-RPC server listening port",
 		Value: 8290,
 	}
-
-	l1Flags = []cli.Flag{
-		&l1UrlFlag,
-	}
-	l1UrlFlag = cli.StringFlag{
-		Name:  "l1.endpoint",
-		Usage: "The endpoint connect to l1chain node",
-		Value: "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
-	}
-
-	l2Flags = []cli.Flag{
-		&l2UrlFlag,
-	}
-	l2UrlFlag = cli.StringFlag{
-		Name:  "l2.endpoint",
-		Usage: "The endpoint connect to l2chain node",
-		Value: "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
-	}
 )
```
```diff
@@ -1,125 +1,7 @@
 package main

-import (
-	"fmt"
-	"os"
-	"os/signal"
-
-	"github.com/scroll-tech/go-ethereum/log"
-	"github.com/urfave/cli/v2"
-
-	"scroll-tech/database"
-
-	"scroll-tech/common/utils"
-	"scroll-tech/common/version"
-
-	"scroll-tech/bridge/config"
-	"scroll-tech/bridge/l1"
-	"scroll-tech/bridge/l2"
-)
+import "scroll-tech/bridge/cmd/app"

 func main() {
-	// Set up Bridge app info.
-	app := cli.NewApp()
-
-	app.Action = action
-	app.Name = "bridge"
-	app.Usage = "The Scroll Bridge"
-	app.Version = version.Version
-	app.Flags = append(app.Flags, utils.CommonFlags...)
-	app.Flags = append(app.Flags, apiFlags...)
-	app.Flags = append(app.Flags, l1Flags...)
-	app.Flags = append(app.Flags, l2Flags...)
-
-	app.Before = func(ctx *cli.Context) error {
-		return utils.LogSetup(ctx)
-	}
-	// Run the sequencer.
-	if err := app.Run(os.Args); err != nil {
-		_, _ = fmt.Fprintln(os.Stderr, err)
-		os.Exit(1)
-	}
-}
-
-func applyConfig(ctx *cli.Context, cfg *config.Config) {
-	if ctx.IsSet(l1UrlFlag.Name) {
-		cfg.L1Config.Endpoint = ctx.String(l1UrlFlag.Name)
-	}
-	if ctx.IsSet(l2UrlFlag.Name) {
-		cfg.L2Config.Endpoint = ctx.String(l2UrlFlag.Name)
-	}
-}
-
-func action(ctx *cli.Context) error {
-	// Load config file.
-	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
-	cfg, err := config.NewConfig(cfgFile)
-	if err != nil {
-		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
-	}
-	applyConfig(ctx, cfg)
-
-	// init db connection
-	var ormFactory database.OrmFactory
-	if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
-		log.Crit("failed to init db connection", "err", err)
-	}
-
-	var (
-		l1Backend *l1.Backend
-		l2Backend *l2.Backend
-	)
-	// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
-	l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
-	if err != nil {
-		return err
-	}
-	l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		l1Backend.Stop()
-		l2Backend.Stop()
-		err = ormFactory.Close()
-		if err != nil {
-			log.Error("can not close ormFactory", err)
-		}
-	}()
-
-	// Start all modules.
-	if err = l1Backend.Start(); err != nil {
-		log.Crit("couldn't start l1 backend", "error", err)
-	}
-	if err = l2Backend.Start(); err != nil {
-		log.Crit("couldn't start l2 backend", "error", err)
-	}
-
-	apis := l2Backend.APIs()
-	// Register api and start rpc service.
-	if ctx.Bool(httpEnabledFlag.Name) {
-		handler, addr, err := utils.StartHTTPEndpoint(
-			fmt.Sprintf(
-				"%s:%d",
-				ctx.String(httpListenAddrFlag.Name),
-				ctx.Int(httpPortFlag.Name)),
-			apis)
-		if err != nil {
-			log.Crit("Could not start HTTP api", "error", err)
-		}
-		defer func() {
-			_ = handler.Shutdown(ctx.Context)
-			log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
-		}()
-		log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
-	}
-
-	// Catch CTRL-C to ensure a graceful shutdown.
-	interrupt := make(chan os.Signal, 1)
-	signal.Notify(interrupt, os.Interrupt)
-
-	// Wait until the interrupt signal is received from an OS signal.
-	<-interrupt
-
-	return nil
+	app.Run()
 }
```
```diff
@@ -1,8 +1,9 @@
 {
   "l1_config": {
-    "confirmations": 6,
+    "confirmations": "0x6",
     "endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
     "l1_messenger_address": "0x0000000000000000000000000000000000000000",
     "rollup_contract_address": "0x0000000000000000000000000000000000000000",
+    "start_height": 0,
     "relayer_config": {
       "messenger_contract_address": "0x0000000000000000000000000000000000000000",
@@ -10,12 +11,13 @@
       "endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
       "check_pending_time": 3,
       "escalate_blocks": 100,
-      "confirmations": 1,
+      "confirmations": "0x1",
       "escalate_multiple_num": 11,
       "escalate_multiple_den": 10,
       "max_gas_price": 10000000000,
-      "tx_type": "AccessListTx",
-      "min_balance": 100000000000000000000
+      "tx_type": "LegacyTx",
+      "min_balance": 100000000000000000000,
+      "pending_limit": 500
     },
     "message_sender_private_keys": [
       "1212121212121212121212121212121212121212121212121212121212121212"
@@ -23,7 +25,7 @@
     }
   },
   "l2_config": {
-    "confirmations": 1,
+    "confirmations": "0x1",
     "endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
     "l2_messenger_address": "0x0000000000000000000000000000000000000000",
     "relayer_config": {
@@ -33,12 +35,13 @@
       "endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
      "check_pending_time": 10,
       "escalate_blocks": 100,
-      "confirmations": 6,
+      "confirmations": "0x6",
       "escalate_multiple_num": 11,
       "escalate_multiple_den": 10,
       "max_gas_price": 10000000000,
-      "tx_type": "DynamicFeeTx",
-      "min_balance": 100000000000000000000
+      "tx_type": "LegacyTx",
+      "min_balance": 100000000000000000000,
+      "pending_limit": 500
     },
     "message_sender_private_keys": [
       "1212121212121212121212121212121212121212121212121212121212121212"
@@ -50,6 +53,7 @@
     "batch_proposer_config": {
       "proof_generation_freq": 1,
       "batch_gas_threshold": 3000000,
+      "batch_tx_num_threshold": 135,
       "batch_time_sec": 300,
       "batch_blocks_limit": 100,
       "skipped_opcodes": [
```
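The `confirmations` values turn into quoted quantities because the config fields become `rpc.BlockNumber` (see the `L1Config`, `L2Config`, and `SenderConfig` hunks below), a type whose JSON unmarshaler accepts either a hex quantity or a named tag. A small sketch of that parsing behavior:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/rpc"
)

func main() {
	var cfg struct {
		Confirmations rpc.BlockNumber `json:"confirmations"`
	}
	// "0x6" parses to 6; named tags such as "latest" map to the
	// package's negative sentinel values.
	for _, raw := range []string{`{"confirmations":"0x6"}`, `{"confirmations":"latest"}`} {
		if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
			panic(err)
		}
		fmt.Println(cfg.Confirmations.Int64())
	}
}
```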
```diff
@@ -5,8 +5,6 @@ import (
 	"os"
 	"path/filepath"

-	"scroll-tech/common/utils"
-
 	"scroll-tech/database"
 )
@@ -30,9 +28,5 @@ func NewConfig(file string) (*Config, error) {
 		return nil, err
 	}

-	// cover value by env fields
-	cfg.DBConfig.DSN = utils.GetEnvWithDefault("DB_DSN", cfg.DBConfig.DSN)
-	cfg.DBConfig.DriverName = utils.GetEnvWithDefault("DB_DRIVER", cfg.DBConfig.DriverName)
-
 	return cfg, nil
 }
```
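The dropped override step leaned on a small helper in `scroll-tech/common/utils`. A plausible shape for `GetEnvWithDefault` (a reconstruction for illustration, not the repo's verbatim code):

```go
package utils

import "os"

// GetEnvWithDefault returns the value of the named environment variable,
// falling back to def when the variable is unset or empty.
// (Sketch of the helper referenced above; the real implementation may differ.)
func GetEnvWithDefault(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}
```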
```diff
@@ -1,17 +1,22 @@
 package config

-import "github.com/scroll-tech/go-ethereum/common"
+import (
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/rpc"
+)

 // L1Config loads l1eth configuration items.
 type L1Config struct {
 	// Confirmations block height confirmations number.
-	Confirmations uint64 `json:"confirmations"`
+	Confirmations rpc.BlockNumber `json:"confirmations"`
 	// l1 eth node url.
 	Endpoint string `json:"endpoint"`
+	// The start height to sync event from layer 1
+	StartHeight uint64 `json:"start_height"`
 	// The messenger contract address deployed on layer 1 chain.
-	L1MessengerAddress common.Address `json:"l1_messenger_address,omitempty"`
+	L1MessengerAddress common.Address `json:"l1_messenger_address"`
 	// The rollup contract address deployed on layer 1 chain.
 	RollupContractAddress common.Address `json:"rollup_contract_address"`
 	// The relayer config
 	RelayerConfig *RelayerConfig `json:"relayer_config"`
 }
```
```diff
@@ -3,13 +3,15 @@ package config
 import (
 	"encoding/json"

+	"github.com/scroll-tech/go-ethereum/rpc"
+
 	"github.com/scroll-tech/go-ethereum/common"
 )

 // L2Config loads l2geth configuration items.
 type L2Config struct {
 	// Confirmations block height confirmations number.
-	Confirmations uint64 `json:"confirmations"`
+	Confirmations rpc.BlockNumber `json:"confirmations"`
 	// l2geth node url.
 	Endpoint string `json:"endpoint"`
 	// The messenger contract address deployed on layer 2 chain.
@@ -24,6 +26,8 @@ type L2Config struct {
 type BatchProposerConfig struct {
 	// Proof generation frequency, generating proof every k blocks
 	ProofGenerationFreq uint64 `json:"proof_generation_freq"`
+	// Txnum threshold in a batch
+	BatchTxNumThreshold uint64 `json:"batch_tx_num_threshold"`
 	// Gas threshold in a batch
 	BatchGasThreshold uint64 `json:"batch_gas_threshold"`
 	// Time waited to generate a batch even if gas_threshold not met
```
```diff
@@ -8,6 +8,7 @@ import (
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/crypto"
+	"github.com/scroll-tech/go-ethereum/rpc"
 )

 // SenderConfig The config for transaction sender
@@ -19,7 +20,7 @@ type SenderConfig struct {
 	// The number of blocks to wait to escalate increase gas price of the transaction.
 	EscalateBlocks uint64 `json:"escalate_blocks"`
 	// The gap number between a block be confirmed and the latest block.
-	Confirmations uint64 `json:"confirmations"`
+	Confirmations rpc.BlockNumber `json:"confirmations"`
 	// The numerator of gas price escalate multiple.
 	EscalateMultipleNum uint64 `json:"escalate_multiple_num"`
 	// The denominator of gas price escalate multiple.
@@ -29,7 +30,8 @@ type SenderConfig struct {
 	// The transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
 	TxType string `json:"tx_type"`
 	// The min balance set for check and set balance for sender's accounts.
-	MinBalance *big.Int `json:"min_balance,omitempty"`
+	MinBalance   *big.Int `json:"min_balance,omitempty"`
+	PendingLimit int64    `json:"pending_limit,omitempty"`
 }

 // RelayerConfig loads relayer configuration items.
```
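`tx_type` selects which of go-ethereum's transaction envelopes the sender builds; the config change above standardizes on `LegacyTx`, which avoids access-list and dynamic-fee handling on endpoints that don't support them. An illustrative mapping of the three accepted values (the sender package's real construction sets many more fields):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/core/types"
)

// buildTx shows which envelope each tx_type value corresponds to.
// (Illustrative sketch only, not the bridge sender's actual logic.)
func buildTx(txType string, gasPrice *big.Int) *types.Transaction {
	switch txType {
	case "LegacyTx":
		return types.NewTx(&types.LegacyTx{GasPrice: gasPrice})
	case "AccessListTx":
		return types.NewTx(&types.AccessListTx{GasPrice: gasPrice})
	case "DynamicFeeTx":
		return types.NewTx(&types.DynamicFeeTx{GasFeeCap: gasPrice, GasTipCap: big.NewInt(1)})
	default:
		panic(fmt.Sprintf("unknown tx_type %q", txType))
	}
}

func main() {
	// Legacy transactions carry envelope type 0.
	fmt.Println(buildTx("LegacyTx", big.NewInt(10_000_000_000)).Type())
}
```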
```diff
@@ -5,10 +5,11 @@ go 1.18
 require (
 	github.com/iden3/go-iden3-crypto v0.0.13
 	github.com/orcaman/concurrent-map v1.0.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
 	github.com/stretchr/testify v1.8.0
 	github.com/urfave/cli/v2 v2.10.2
+	golang.org/x/sync v0.1.0
 	modernc.org/mathutil v1.4.1
 )

 require (
@@ -27,17 +28,18 @@ require (
 	github.com/mattn/go-isatty v0.0.14 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
 	github.com/rjeczalik/notify v0.9.1 // indirect
 	github.com/rogpeppe/go-internal v1.8.1 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/scroll-tech/zktrie v0.3.1 // indirect
+	github.com/scroll-tech/zktrie v0.4.3 // indirect
 	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
 	github.com/tklauser/go-sysconf v0.3.10 // indirect
 	github.com/tklauser/numcpus v0.4.0 // indirect
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
 	github.com/yusufpapurcu/wmi v1.2.2 // indirect
-	golang.org/x/crypto v0.4.0 // indirect
-	golang.org/x/sys v0.3.0 // indirect
+	golang.org/x/crypto v0.6.0 // indirect
+	golang.org/x/sys v0.5.0 // indirect
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
```
```diff
@@ -336,6 +336,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
 github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
 github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
@@ -348,11 +350,10 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea h1:KYlmCH4cDMGxQzaYoSK8+DF53POGpAmnzusAtBWzEjA=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
-github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
-github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
-github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
+github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
+github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
 github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -423,8 +424,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
-golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
+golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
+golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -539,8 +540,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -552,7 +553,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
+golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -662,5 +663,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
+modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
+modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
```
```diff
@@ -26,12 +26,12 @@ func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*B
 		return nil, err
 	}

-	relayer, err := NewLayer1Relayer(ctx, client, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
+	relayer, err := NewLayer1Relayer(ctx, orm, cfg.RelayerConfig)
 	if err != nil {
 		return nil, err
 	}

-	watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RelayerConfig.RollupContractAddress, orm)
+	watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RollupContractAddress, orm)

 	return &Backend{
 		cfg: cfg,
```
`bridge/l1/l1_test.go` (new file, 65 lines)

```go
package l1

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"scroll-tech/common/docker"

	"scroll-tech/bridge/config"
)

var (
	// config
	cfg *config.Config

	// docker consider handler.
	l1gethImg docker.ImgInstance
	l2gethImg docker.ImgInstance
	dbImg     docker.ImgInstance
)

func setupEnv(t *testing.T) {
	// Load config.
	var err error
	cfg, err = config.NewConfig("../config.json")
	assert.NoError(t, err)

	// Create l1geth container.
	l1gethImg = docker.NewTestL1Docker(t)
	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
	cfg.L1Config.Endpoint = l1gethImg.Endpoint()

	// Create l2geth container.
	l2gethImg = docker.NewTestL2Docker(t)
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
	cfg.L2Config.Endpoint = l2gethImg.Endpoint()

	// Create db container.
	dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
	cfg.DBConfig.DSN = dbImg.Endpoint()
}

func free(t *testing.T) {
	if dbImg != nil {
		assert.NoError(t, dbImg.Stop())
	}
	if l1gethImg != nil {
		assert.NoError(t, l1gethImg.Stop())
	}
	if l2gethImg != nil {
		assert.NoError(t, l2gethImg.Stop())
	}
}

func TestL1(t *testing.T) {
	setupEnv(t)

	t.Run("testCreateNewL1Relayer", testCreateNewL1Relayer)
	t.Run("testStartWatcher", testStartWatcher)

	t.Cleanup(func() {
		free(t)
	})
}
```
```diff
@@ -3,15 +3,17 @@ package l1
 import (
 	"context"
 	"errors"
+	"fmt"
 	"math/big"
+	"time"

 	// not sure if this will make problems when relay with l1geth

 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
-	"github.com/scroll-tech/go-ethereum/ethclient"
+	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/log"
+	"modernc.org/mathutil"
+
+	"scroll-tech/common/utils"

 	"scroll-tech/database/orm"
@@ -28,7 +30,6 @@ import (
 // @todo It's better to be triggered by watcher.
 type Layer1Relayer struct {
 	ctx    context.Context
-	client *ethclient.Client
 	sender *sender.Sender

 	db orm.L1MessageOrm
@@ -42,7 +43,7 @@ type Layer1Relayer struct {
 }

 // NewLayer1Relayer will return a new instance of Layer1RelayerClient
-func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1ConfirmNum int64, db orm.L1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
+func NewLayer1Relayer(ctx context.Context, db orm.L1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
 	l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
 	if err != nil {
 		log.Warn("new L2MessengerABI failed", "err", err)
@@ -51,41 +52,128 @@ func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1Confir
 	sender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
 	if err != nil {
-		log.Error("new sender failed", "err", err)
+		addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
+		log.Error("new sender failed", "main address", addr.String(), "err", err)
 		return nil, err
 	}

-	return &Layer1Relayer{
+	layer1 := &Layer1Relayer{
 		ctx:            ctx,
-		client:         ethClient,
 		sender:         sender,
 		db:             db,
 		l2MessengerABI: l2MessengerABI,
 		cfg:            cfg,
 		stopCh:         make(chan struct{}),
 		confirmationCh: sender.ConfirmChan(),
-	}, nil
-}
+	}
+
+	// Deal with broken transactions.
+	if err = layer1.prepare(ctx); err != nil {
+		return nil, err
+	}
+
+	return layer1, nil
+}
+
+// prepare to run check logic and until it's finished.
+func (r *Layer1Relayer) prepare(ctx context.Context) error {
+	go func(ctx context.Context) {
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case cfm := <-r.confirmationCh:
+				if !cfm.IsSuccessful {
+					log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
+				} else {
+					// @todo handle db error
+					err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, orm.MsgConfirmed, cfm.TxHash.String())
+					if err != nil {
+						log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
+					}
+					log.Info("transaction confirmed in layer2", "confirmation", cfm)
+				}
+			}
+		}
+	}(ctx)
+
+	if err := r.checkSubmittedMessages(); err != nil {
+		log.Error("failed to init layer1 submitted tx", "err", err)
+		return err
+	}
+
+	// Wait forever util sender is empty.
+	utils.TryTimes(-1, func() bool {
+		return r.sender.PendingCount() == 0
+	})
+	return nil
+}
+
+func (r *Layer1Relayer) checkSubmittedMessages() error {
+	var blockNumber uint64
+BEGIN:
+	msgs, err := r.db.GetL1Messages(
+		map[string]interface{}{"status": orm.MsgSubmitted},
+		fmt.Sprintf("AND height > %d", blockNumber),
+		fmt.Sprintf("ORDER BY height ASC LIMIT %d", 100),
+	)
+	if err != nil || len(msgs) == 0 {
+		return err
+	}
+
+	for msg := msgs[0]; len(msgs) > 0; { //nolint:staticcheck
+		// If pending txs pool is full, wait a while and retry.
+		if r.sender.IsFull() {
+			log.Warn("layer1 sender pending tx reaches pending limit")
+			time.Sleep(time.Millisecond * 500)
+			continue
+		}
+		msg, msgs = msgs[0], msgs[1:]
+
+		blockNumber = mathutil.MaxUint64(blockNumber, msg.Height)
+
+		data, err := r.packRelayMessage(msg)
+		if err != nil {
+			continue
+		}
+		err = r.sender.LoadOrSendTx(
+			common.HexToHash(msg.Layer2Hash),
+			msg.MsgHash,
+			&r.cfg.MessengerContractAddress,
+			big.NewInt(0),
+			data,
+		)
+		if err != nil {
+			log.Error("failed to load or send l1 submitted tx", "msg hash", msg.MsgHash, "err", err)
+		}
+	}
+	goto BEGIN
+}

 // ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
 func (r *Layer1Relayer) ProcessSavedEvents() {
 	// msgs are sorted by nonce in increasing order
-	msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending)
+	msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending, 100)
 	if err != nil {
 		log.Error("Failed to fetch unprocessed L1 messages", "err", err)
 		return
 	}

+	if len(msgs) > 0 {
+		log.Info("Processing L1 messages", "count", len(msgs))
+	}
+
 	for _, msg := range msgs {
 		if err = r.processSavedEvent(msg); err != nil {
 			if !errors.Is(err, sender.ErrNoAvailableAccount) {
-				log.Error("failed to process event", "err", err)
+				log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
 			}
 			return
 		}
 	}
 }

-func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
+func (r *Layer1Relayer) packRelayMessage(msg *orm.L1Message) ([]byte, error) {
 	// @todo add support to relay multiple messages
 	from := common.HexToAddress(msg.Sender)
 	target := common.HexToAddress(msg.Target)
```
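`prepare` blocks on `utils.TryTimes(-1, ...)` until the sender's pending queue drains. A plausible shape for that helper, where a negative count means retry indefinitely (hypothetical reconstruction; the actual utility may differ in interval and return handling):

```go
package utils

import "time"

// TryTimes calls fn until it returns true. A negative times retries
// forever; otherwise it gives up after the given number of attempts.
// (Sketch of the helper used above; the repo's implementation may differ.)
func TryTimes(times int, fn func() bool) bool {
	for i := 0; times < 0 || i < times; i++ {
		if fn() {
			return true
		}
		time.Sleep(500 * time.Millisecond)
	}
	return false
}
```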
```diff
@@ -103,10 +191,23 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
 	if err != nil {
 		log.Error("Failed to pack relayMessage", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
 		// TODO: need to skip this message by changing its status to MsgError
-		return err
+		return nil, err
+	}
+	return data, nil
+}
+
+func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
+	data, err := r.packRelayMessage(msg)
+	if err != nil {
+		return err
 	}

 	hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
+	if err != nil && err.Error() == "execution reverted: Message expired" {
+		return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgExpired)
+	}
+	if err != nil && err.Error() == "execution reverted: Message successfully executed" {
+		return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
+	}
 	if err != nil {
 		return err
 	}
```
```diff
@@ -1,45 +1,25 @@
-package l1_test
+package l1

 import (
 	"context"
 	"testing"

-	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/stretchr/testify/assert"

 	"scroll-tech/database/migrate"

-	"scroll-tech/bridge/config"
-	"scroll-tech/bridge/l1"
-
 	"scroll-tech/database"
-
-	"scroll-tech/common/docker"
 )

-// TestCreateNewRelayer test create new relayer instance and stop
-func TestCreateNewL1Relayer(t *testing.T) {
-	cfg, err := config.NewConfig("../config.json")
-	assert.NoError(t, err)
-	l1docker := docker.NewTestL1Docker(t)
-	defer l1docker.Stop()
-	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1docker.Endpoint()
-	cfg.L1Config.Endpoint = l1docker.Endpoint()
-
-	client, err := ethclient.Dial(l1docker.Endpoint())
-	assert.NoError(t, err)
-
-	dbImg := docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
-	defer dbImg.Stop()
-	cfg.DBConfig.DSN = dbImg.Endpoint()
-
+// testCreateNewRelayer test create new relayer instance and stop
+func testCreateNewL1Relayer(t *testing.T) {
 	// Create db handler and reset db.
 	db, err := database.NewOrmFactory(cfg.DBConfig)
 	assert.NoError(t, err)
 	assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
 	defer db.Close()

-	relayer, err := l1.NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
+	relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
 	assert.NoError(t, err)
 	defer relayer.Stop()
```
```diff
@@ -5,12 +5,14 @@ import (
 	"math/big"
 	"time"

-	"github.com/scroll-tech/go-ethereum"
+	geth "github.com/scroll-tech/go-ethereum"
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
+	"github.com/scroll-tech/go-ethereum/metrics"
+	"github.com/scroll-tech/go-ethereum/rpc"

 	"scroll-tech/database"
 	"scroll-tech/database/orm"
@@ -19,6 +21,10 @@ import (
 	"scroll-tech/bridge/utils"
 )

+var (
+	bridgeL1MsgSyncHeightGauge = metrics.NewRegisteredGauge("bridge/l1/msg/sync/height", nil)
+)
+
 type relayedMessage struct {
 	msgHash common.Hash
 	txHash  common.Hash
@@ -38,7 +44,7 @@ type Watcher struct {
 	db database.OrmFactory

 	// The number of new blocks to wait for a block to be confirmed
-	confirmations uint64
+	confirmations rpc.BlockNumber
 	messengerAddress common.Address
 	messengerABI     *abi.ABI
@@ -53,7 +59,7 @@ type Watcher struct {

 // NewWatcher returns a new instance of Watcher. The instance will be not fully prepared,
 // and still needs to be finalized and ran by calling `watcher.Start`.
-func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations uint64, messengerAddress common.Address, rollupAddress common.Address, db database.OrmFactory) *Watcher {
+func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress common.Address, rollupAddress common.Address, db database.OrmFactory) *Watcher {
 	savedHeight, err := db.GetLayer1LatestWatchedHeight()
 	if err != nil {
 		log.Warn("Failed to fetch height from db", "err", err)
@@ -82,22 +88,24 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
 // Start the Watcher module.
 func (w *Watcher) Start() {
 	go func() {
-		// trigger by timer
 		ticker := time.NewTicker(10 * time.Second)
 		defer ticker.Stop()

-		for {
+		for ; true; <-ticker.C {
 			select {
-			case <-ticker.C:
-				blockNumber, err := w.client.BlockNumber(w.ctx)
-				if err != nil {
-					log.Error("Failed to get block number", "err", err)
-				}
-				if err := w.fetchContractEvent(blockNumber); err != nil {
-					log.Error("Failed to fetch bridge contract", "err", err)
-				}
 			case <-w.stop:
 				return
+
+			default:
+				number, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
+				if err != nil {
+					log.Error("failed to get block number", "err", err)
+					continue
+				}
+
+				if err := w.FetchContractEvent(number); err != nil {
+					log.Error("Failed to fetch bridge contract", "err", err)
+				}
 			}
 		}
 	}()
@@ -111,105 +119,114 @@ func (w *Watcher) Stop() {
 const contractEventsBlocksFetchLimit = int64(10)

 // FetchContractEvent pull latest event logs from given contract address and save in DB
-func (w *Watcher) fetchContractEvent(blockHeight uint64) error {
+func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
+	defer func() {
+		log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
+	}()
+
 	fromBlock := int64(w.processedMsgHeight) + 1
-	toBlock := int64(blockHeight) - int64(w.confirmations)
+	toBlock := int64(blockHeight)

 	if toBlock < fromBlock {
 		return nil
 	}
+	for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
+		to := from + contractEventsBlocksFetchLimit - 1

-	if toBlock > fromBlock+contractEventsBlocksFetchLimit {
-		toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
-	}
+		if to > toBlock {
+			to = toBlock
+		}

-	// warning: uint int conversion...
-	query := ethereum.FilterQuery{
-		FromBlock: big.NewInt(fromBlock), // inclusive
-		ToBlock:   big.NewInt(toBlock),   // inclusive
-		Addresses: []common.Address{
-			w.messengerAddress,
-			w.rollupAddress,
-		},
-		Topics: make([][]common.Hash, 1),
-	}
-	query.Topics[0] = make([]common.Hash, 5)
-	query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
-	query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
-	query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
-	query.Topics[0][3] = common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE)
-	query.Topics[0][4] = common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE)
+		// warning: uint int conversion...
+		query := geth.FilterQuery{
+			FromBlock: big.NewInt(from), // inclusive
+			ToBlock:   big.NewInt(to),   // inclusive
+			Addresses: []common.Address{
+				w.messengerAddress,
+				w.rollupAddress,
+			},
+			Topics: make([][]common.Hash, 1),
+		}
+		query.Topics[0] = make([]common.Hash, 5)
+		query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
+		query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
+		query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
+		query.Topics[0][3] = common.HexToHash(bridge_abi.CommitBatchEventSignature)
+		query.Topics[0][4] = common.HexToHash(bridge_abi.FinalizedBatchEventSignature)

-	logs, err := w.client.FilterLogs(w.ctx, query)
-	if err != nil {
-		log.Warn("Failed to get event logs", "err", err)
-		return err
-	}
-	if len(logs) == 0 {
-		w.processedMsgHeight = uint64(toBlock)
-		return nil
-	}
-	log.Info("Received new L1 messages", "fromBlock", fromBlock, "toBlock", toBlock,
-		"cnt", len(logs))
+		logs, err := w.client.FilterLogs(w.ctx, query)
+		if err != nil {
+			log.Warn("Failed to get event logs", "err", err)
+			return err
+		}
+		if len(logs) == 0 {
+			w.processedMsgHeight = uint64(to)
+			bridgeL1MsgSyncHeightGauge.Update(to)
+			continue
+		}
+		log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))

-	sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
-	if err != nil {
-		log.Error("Failed to parse emitted events log", "err", err)
-		return err
-	}
+		sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
+		if err != nil {
+			log.Error("Failed to parse emitted events log", "err", err)
+			return err
+		}
+		log.Info("L1 events types", "SentMessageCount", len(sentMessageEvents), "RelayedMessageCount", len(relayedMessageEvents), "RollupEventCount", len(rollupEvents))

-	// use rollup event to update rollup results db status
-	var batchIDs []string
-	for _, event := range rollupEvents {
-		batchIDs = append(batchIDs, event.batchID.String())
-	}
-	statuses, err := w.db.GetRollupStatusByIDList(batchIDs)
-	if err != nil {
-		log.Error("Failed to GetRollupStatusByIDList", "err", err)
-		return err
-	}
-	if len(statuses) != len(batchIDs) {
-		log.Error("RollupStatus.Length mismatch with BatchIDs.Length")
-		return nil
-	}
+		// use rollup event to update rollup results db status
+		var batchIDs []string
+		for _, event := range rollupEvents {
+			batchIDs = append(batchIDs, event.batchID.String())
+		}
+		statuses, err := w.db.GetRollupStatusByIDList(batchIDs)
+		if err != nil {
+			log.Error("Failed to GetRollupStatusByIDList", "err", err)
+			return err
+		}
+		if len(statuses) != len(batchIDs) {
+			log.Error("RollupStatus.Length mismatch with BatchIDs.Length", "RollupStatus.Length", len(statuses), "BatchIDs.Length", len(batchIDs))
+			return nil
+		}

-	for index, event := range rollupEvents {
-		batchID := event.batchID.String()
-		status := statuses[index]
-		if event.status != status {
-			if event.status == orm.RollupFinalized {
-				err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
-			} else if event.status == orm.RollupCommitted {
-				err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
-			}
-			if err != nil {
-				log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
-				return err
-			}
-		}
-	}
+		for index, event := range rollupEvents {
+			batchID := event.batchID.String()
+			status := statuses[index]
+			// only update when db status is before event status
+			if event.status > status {
+				if event.status == orm.RollupFinalized {
+					err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
+				} else if event.status == orm.RollupCommitted {
+					err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
+				}
+				if err != nil {
+					log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
+					return err
+				}
+			}
+		}

-	// Update relayed message first to make sure we don't forget to update submitted message.
-	// Since, we always start sync from the latest unprocessed message.
-	for _, msg := range relayedMessageEvents {
-		if msg.isSuccessful {
-			// succeed
-			err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
-		} else {
-			// failed
-			err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
-		}
-		if err != nil {
-			log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
-			return err
-		}
-	}
+		// Update relayed message first to make sure we don't forget to update submitted message.
+		// Since, we always start sync from the latest unprocessed message.
+		for _, msg := range relayedMessageEvents {
+			if msg.isSuccessful {
+				// succeed
+				err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
+			} else {
+				// failed
+				err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
+			}
+			if err != nil {
+				log.Error("Failed to update layer1 status and layer2 hash", "err", err)
+				return err
+			}
+		}

-	err = w.db.SaveL1Messages(w.ctx, sentMessageEvents)
-	if err == nil {
-		w.processedMsgHeight = uint64(toBlock)
-	}
-	return err
+		if err = w.db.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
+			return err
+		}
+
+		w.processedMsgHeight = uint64(to)
+		bridgeL1MsgSyncHeightGauge.Update(to)
+	}
+
+	return nil
 }

 func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
@@ -221,7 +238,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
 	var rollupEvents []rollupEvent
 	for _, vLog := range logs {
 		switch vLog.Topics[0] {
-		case common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE):
+		case common.HexToHash(bridge_abi.SentMessageEventSignature):
 			event := struct {
 				Target common.Address
 				Sender common.Address
@@ -242,7 +259,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
 			event.Target = common.HexToAddress(vLog.Topics[1].String())
 			l1Messages = append(l1Messages, &orm.L1Message{
 				Nonce:      event.MessageNonce.Uint64(),
-				MsgHash:    utils.ComputeMessageHash(event.Target, event.Sender, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
+				MsgHash:    utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
 				Height:     vLog.BlockNumber,
 				Sender:     event.Sender.String(),
 				Value:      event.Value.String(),
@@ -253,7 +270,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
 				Calldata:   common.Bytes2Hex(event.Message),
 				Layer1Hash: vLog.TxHash.Hex(),
 			})
-		case common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE):
+		case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
 			event := struct {
 				MsgHash common.Hash
 			}{}
@@ -264,7 +281,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
 				txHash:       vLog.TxHash,
 				isSuccessful: true,
 			})
-		case common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE):
+		case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
 			event := struct {
 				MsgHash common.Hash
 			}{}
@@ -275,7 +292,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
 				txHash:       vLog.TxHash,
 				isSuccessful: false,
 			})
-		case common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE):
+		case common.HexToHash(bridge_abi.CommitBatchEventSignature):
 			event := struct {
 				BatchID   common.Hash
 				BatchHash common.Hash
@@ -295,7 +312,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
```
|
||||
txHash: vLog.TxHash,
|
||||
status: orm.RollupCommitted,
|
||||
})
|
||||
case common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE):
|
||||
case common.HexToHash(bridge_abi.FinalizedBatchEventSignature):
|
||||
event := struct {
|
||||
BatchID common.Hash
|
||||
BatchHash common.Hash
|
||||
|
||||
29
bridge/l1/watcher_test.go
Normal file
29
bridge/l1/watcher_test.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package l1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/migrate"
|
||||
)
|
||||
|
||||
func testStartWatcher(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
client, err := ethclient.Dial(l1gethImg.Endpoint())
|
||||
assert.NoError(t, err)
|
||||
|
||||
l1Cfg := cfg.L1Config
|
||||
|
||||
watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
|
||||
watcher.Start()
|
||||
defer watcher.Stop()
|
||||
}
|
||||
@@ -3,7 +3,6 @@ package l2
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
@@ -28,7 +27,7 @@ func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*B
|
||||
return nil, err
|
||||
}
|
||||
|
||||
relayer, err := NewLayer2Relayer(ctx, client, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
|
||||
relayer, err := NewLayer2Relayer(ctx, orm, cfg.RelayerConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -67,8 +66,3 @@ func (l2 *Backend) APIs() []rpc.API {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// MockBlockTrace for test case
|
||||
func (l2 *Backend) MockBlockTrace(blockTrace *types.BlockTrace) {
|
||||
l2.l2Watcher.Send(blockTrace)
|
||||
}
|
||||
|
||||
@@ -18,9 +18,10 @@ type batchProposer struct {
|
||||
|
||||
orm database.OrmFactory
|
||||
|
||||
batchTimeSec uint64
|
||||
batchGasThreshold uint64
|
||||
batchBlocksLimit uint64
|
||||
batchTimeSec uint64
|
||||
batchGasThreshold uint64
|
||||
batchTxNumThreshold uint64
|
||||
batchBlocksLimit uint64
|
||||
|
||||
proofGenerationFreq uint64
|
||||
skippedOpcodes map[string]struct{}
|
||||
@@ -32,13 +33,14 @@ func newBatchProposer(cfg *config.BatchProposerConfig, orm database.OrmFactory)
|
||||
orm: orm,
|
||||
batchTimeSec: cfg.BatchTimeSec,
|
||||
batchGasThreshold: cfg.BatchGasThreshold,
|
||||
batchTxNumThreshold: cfg.BatchTxNumThreshold,
|
||||
batchBlocksLimit: cfg.BatchBlocksLimit,
|
||||
proofGenerationFreq: cfg.ProofGenerationFreq,
|
||||
skippedOpcodes: cfg.SkippedOpcodes,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *batchProposer) tryProposeBatch() error {
|
||||
func (w *batchProposer) tryProposeBatch() {
|
||||
w.mutex.Lock()
|
||||
defer w.mutex.Unlock()
|
||||
|
||||
@@ -47,38 +49,53 @@ func (w *batchProposer) tryProposeBatch() error {
|
||||
fmt.Sprintf("order by number ASC LIMIT %d", w.batchBlocksLimit),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
log.Error("failed to get unbatched blocks", "err", err)
|
||||
return
|
||||
}
|
||||
if len(blocks) == 0 {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
if blocks[0].GasUsed > w.batchGasThreshold {
|
||||
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
|
||||
return w.createBatchForBlocks(blocks[:1])
|
||||
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
|
||||
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if blocks[0].TxNum > w.batchTxNumThreshold {
|
||||
log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
|
||||
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
|
||||
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
length = len(blocks)
|
||||
gasUsed uint64
|
||||
length = len(blocks)
|
||||
gasUsed, txNum uint64
|
||||
)
|
||||
// add blocks into batch until reach batchGasThreshold
|
||||
for i, block := range blocks {
|
||||
if gasUsed+block.GasUsed > w.batchGasThreshold {
|
||||
if (gasUsed+block.GasUsed > w.batchGasThreshold) || (txNum+block.TxNum > w.batchTxNumThreshold) {
|
||||
blocks = blocks[:i]
|
||||
break
|
||||
}
|
||||
gasUsed += block.GasUsed
|
||||
txNum += block.TxNum
|
||||
}
|
||||
|
||||
// if too few gas gathered, but we don't want to halt, we then check the first block in the batch:
|
||||
// if it's not old enough we will skip proposing the batch,
|
||||
// otherwise we will still propose a batch
|
||||
if length == len(blocks) && blocks[0].BlockTimestamp+w.batchTimeSec > uint64(time.Now().Unix()) {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
return w.createBatchForBlocks(blocks)
|
||||
if err = w.createBatchForBlocks(blocks); err != nil {
|
||||
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {
|
||||
|
||||
62
bridge/l2/batch_proposer_test.go
Normal file
62
bridge/l2/batch_proposer_test.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package l2
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/bridge/config"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
func testBatchProposer(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
trace2 := &types.BlockTrace{}
|
||||
trace3 := &types.BlockTrace{}
|
||||
|
||||
data, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
|
||||
assert.NoError(t, err)
|
||||
err = json.Unmarshal(data, trace2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
data, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
|
||||
assert.NoError(t, err)
|
||||
err = json.Unmarshal(data, trace3)
|
||||
assert.NoError(t, err)
|
||||
// Insert traces into db.
|
||||
assert.NoError(t, db.InsertBlockTraces([]*types.BlockTrace{trace2, trace3}))
|
||||
|
||||
id := utils.ComputeBatchID(trace3.Header.Hash(), trace2.Header.ParentHash, big.NewInt(1))
|
||||
|
||||
proposer := newBatchProposer(&config.BatchProposerConfig{
|
||||
ProofGenerationFreq: 1,
|
||||
BatchGasThreshold: 3000000,
|
||||
BatchTxNumThreshold: 135,
|
||||
BatchTimeSec: 1,
|
||||
BatchBlocksLimit: 100,
|
||||
}, db)
|
||||
proposer.tryProposeBatch()
|
||||
|
||||
infos, err := db.GetUnbatchedBlocks(map[string]interface{}{},
|
||||
fmt.Sprintf("order by number ASC LIMIT %d", 100))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, true, len(infos) == 0)
|
||||
|
||||
exist, err := db.BatchRecordExist(id)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, true, exist)
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package l2_test
|
||||
package l2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
@@ -77,6 +77,9 @@ func TestFunction(t *testing.T) {
|
||||
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
|
||||
t.Run("testL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
|
||||
t.Run("testL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
|
||||
t.Run("testL2RelayerSkipBatches", testL2RelayerSkipBatches)
|
||||
|
||||
t.Run("testBatchProposer", testBatchProposer)
|
||||
|
||||
t.Cleanup(func() {
|
||||
free(t)
|
||||
|
||||
@@ -2,24 +2,20 @@ package l2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
// not sure if this will make problems when relay with l1geth
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/orm"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/config"
|
||||
"scroll-tech/bridge/sender"
|
||||
"scroll-tech/bridge/utils"
|
||||
)
|
||||
|
||||
// Layer2Relayer is responsible for
|
||||
@@ -29,34 +25,34 @@ import (
|
||||
// Actions are triggered by new head from layer 1 geth node.
|
||||
// @todo It's better to be triggered by watcher.
|
||||
type Layer2Relayer struct {
|
||||
ctx context.Context
|
||||
client *ethclient.Client
|
||||
ctx context.Context
|
||||
|
||||
db database.OrmFactory
|
||||
cfg *config.RelayerConfig
|
||||
|
||||
messageSender *sender.Sender
|
||||
messageCh <-chan *sender.Confirmation
|
||||
l1MessengerABI *abi.ABI
|
||||
messageSender *sender.Sender
|
||||
messageCh <-chan *sender.Confirmation
|
||||
|
||||
rollupSender *sender.Sender
|
||||
rollupCh <-chan *sender.Confirmation
|
||||
l1RollupABI *abi.ABI
|
||||
|
||||
// a list of processing message, indexed by layer2 hash
|
||||
processingMessage map[string]string
|
||||
// A list of processing message.
|
||||
// key(string): confirmation ID, value(string): layer2 hash.
|
||||
processingMessage sync.Map
|
||||
|
||||
// a list of processing batch commitment, indexed by batch id
|
||||
processingCommitment map[string]string
|
||||
// A list of processing batch commitment.
|
||||
// key(string): confirmation ID, value(string): batch id.
|
||||
processingCommitment sync.Map
|
||||
|
||||
// a list of processing batch finalization, indexed by batch id
|
||||
processingFinalization map[string]string
|
||||
// A list of processing batch finalization.
|
||||
// key(string): confirmation ID, value(string): batch id.
|
||||
processingFinalization sync.Map
|
||||
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
|
||||
func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, l2ConfirmNum int64, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
|
||||
func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
|
||||
// @todo use different sender for relayer, block commit and proof finalize
|
||||
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
|
||||
if err != nil {
|
||||
@@ -70,314 +66,103 @@ func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, l2Confir
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Layer2Relayer{
|
||||
layer2 := &Layer2Relayer{
|
||||
ctx: ctx,
|
||||
client: ethClient,
|
||||
db: db,
|
||||
messageSender: messageSender,
|
||||
messageCh: messageSender.ConfirmChan(),
|
||||
l1MessengerABI: bridge_abi.L1MessengerMetaABI,
|
||||
rollupSender: rollupSender,
|
||||
rollupCh: rollupSender.ConfirmChan(),
|
||||
l1RollupABI: bridge_abi.RollupMetaABI,
|
||||
cfg: cfg,
|
||||
processingMessage: map[string]string{},
|
||||
processingCommitment: map[string]string{},
|
||||
processingFinalization: map[string]string{},
|
||||
processingMessage: sync.Map{},
|
||||
processingCommitment: sync.Map{},
|
||||
processingFinalization: sync.Map{},
|
||||
stopCh: make(chan struct{}),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Deal with broken transactions.
|
||||
if err = layer2.prepare(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return layer2, nil
|
||||
}
|
||||
|
||||
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
|
||||
func (r *Layer2Relayer) ProcessSavedEvents() {
|
||||
// msgs are sorted by nonce in increasing order
|
||||
msgs, err := r.db.GetL2MessagesByStatus(orm.MsgPending)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
|
||||
return
|
||||
}
|
||||
for _, msg := range msgs {
|
||||
if err := r.processSavedEvent(msg); err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("failed to process l2 saved event", "err", err)
|
||||
// prepare to run check logic and until it's finished.
|
||||
func (r *Layer2Relayer) prepare(ctx context.Context) error {
|
||||
go func(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case confirmation := <-r.messageCh:
|
||||
r.handleConfirmation(confirmation)
|
||||
case confirmation := <-r.rollupCh:
|
||||
r.handleConfirmation(confirmation)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}(ctx)
|
||||
|
||||
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
|
||||
// @todo add support to relay multiple messages
|
||||
batch, err := r.db.GetLatestFinalizedBatch()
|
||||
if err != nil {
|
||||
log.Error("GetLatestFinalizedBatch failed", "err", err)
|
||||
if err := r.checkSubmittedMessages(); err != nil {
|
||||
log.Error("failed to init layer2 submitted tx", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if batch.EndBlockNumber < msg.Height {
|
||||
// log.Warn("corresponding block not finalized", "status", status)
|
||||
return nil
|
||||
}
|
||||
|
||||
// @todo fetch merkle proof from l2geth
|
||||
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
|
||||
|
||||
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
|
||||
BlockHeight: big.NewInt(int64(msg.Height)),
|
||||
BatchIndex: big.NewInt(int64(batch.Index)),
|
||||
MerkleProof: make([]byte, 0),
|
||||
}
|
||||
from := common.HexToAddress(msg.Sender)
|
||||
target := common.HexToAddress(msg.Target)
|
||||
value, ok := big.NewInt(0).SetString(msg.Value, 10)
|
||||
if !ok {
|
||||
// @todo maybe panic?
|
||||
log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
|
||||
// TODO: need to skip this message by changing its status to MsgError
|
||||
}
|
||||
fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
|
||||
deadline := big.NewInt(int64(msg.Deadline))
|
||||
msgNonce := big.NewInt(int64(msg.Nonce))
|
||||
calldata := common.Hex2Bytes(msg.Calldata)
|
||||
data, err := r.l1MessengerABI.Pack("relayMessageWithProof", from, target, value, fee, deadline, msgNonce, calldata, proof)
|
||||
if err != nil {
|
||||
log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
|
||||
// TODO: need to skip this message by changing its status to MsgError
|
||||
if err := r.checkCommittingBatches(); err != nil {
|
||||
log.Error("failed to init layer2 committed tx", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
|
||||
}
|
||||
if err := r.checkFinalizingBatches(); err != nil {
|
||||
log.Error("failed to init layer2 finalized tx", "err", err)
|
||||
return err
|
||||
}
|
||||
log.Info("relayMessageWithProof to layer1", "msgHash", msg.MsgHash, "txhash", hash.String())
|
||||
|
||||
// save status in db
|
||||
// @todo handle db error
|
||||
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
|
||||
if err != nil {
|
||||
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
|
||||
return err
|
||||
}
|
||||
r.processingMessage[msg.MsgHash] = msg.MsgHash
|
||||
// Wait forever until message sender and roller sender are empty.
|
||||
utils.TryTimes(-1, func() bool {
|
||||
return r.messageSender.PendingCount() == 0 && r.rollupSender.PendingCount() == 0
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessPendingBatches submit batch data to layer 1 rollup contract
|
||||
func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
// batches are sorted by batch index in increasing order
|
||||
batchesInDB, err := r.db.GetPendingBatches()
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch pending L2 batches", "err", err)
|
||||
return
|
||||
}
|
||||
if len(batchesInDB) == 0 {
|
||||
return
|
||||
}
|
||||
id := batchesInDB[0]
|
||||
// @todo add support to relay multiple batches
|
||||
|
||||
batches, err := r.db.GetBlockBatches(map[string]interface{}{"id": id})
|
||||
if err != nil || len(batches) == 0 {
|
||||
log.Error("Failed to GetBlockBatches", "batch_id", id, "err", err)
|
||||
return
|
||||
}
|
||||
batch := batches[0]
|
||||
|
||||
traces, err := r.db.GetBlockTraces(map[string]interface{}{"batch_id": id}, "ORDER BY number ASC")
|
||||
if err != nil || len(traces) == 0 {
|
||||
log.Error("Failed to GetBlockTraces", "batch_id", id, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
layer2Batch := &bridge_abi.IZKRollupLayer2Batch{
|
||||
BatchIndex: batch.Index,
|
||||
ParentHash: common.HexToHash(batch.ParentHash),
|
||||
Blocks: make([]bridge_abi.IZKRollupLayer2BlockHeader, len(traces)),
|
||||
}
|
||||
|
||||
parentHash := common.HexToHash(batch.ParentHash)
|
||||
for i, trace := range traces {
|
||||
layer2Batch.Blocks[i] = bridge_abi.IZKRollupLayer2BlockHeader{
|
||||
BlockHash: trace.Header.Hash(),
|
||||
ParentHash: parentHash,
|
||||
BaseFee: trace.Header.BaseFee,
|
||||
StateRoot: trace.StorageTrace.RootAfter,
|
||||
BlockHeight: trace.Header.Number.Uint64(),
|
||||
GasUsed: 0,
|
||||
Timestamp: trace.Header.Time,
|
||||
ExtraData: make([]byte, 0),
|
||||
Txs: make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.Transactions)),
|
||||
}
|
||||
for j, tx := range trace.Transactions {
|
||||
layer2Batch.Blocks[i].Txs[j] = bridge_abi.IZKRollupLayer2Transaction{
|
||||
Caller: tx.From,
|
||||
Nonce: tx.Nonce,
|
||||
Gas: tx.Gas,
|
||||
GasPrice: tx.GasPrice.ToInt(),
|
||||
Value: tx.Value.ToInt(),
|
||||
Data: common.Hex2Bytes(tx.Data),
|
||||
R: tx.R.ToInt(),
|
||||
S: tx.S.ToInt(),
|
||||
V: tx.V.ToInt().Uint64(),
|
||||
}
|
||||
if tx.To != nil {
|
||||
layer2Batch.Blocks[i].Txs[j].Target = *tx.To
|
||||
}
|
||||
layer2Batch.Blocks[i].GasUsed += trace.ExecutionResults[j].Gas
|
||||
}
|
||||
|
||||
// for next iteration
|
||||
parentHash = layer2Batch.Blocks[i].BlockHash
|
||||
}
|
||||
|
||||
data, err := r.l1RollupABI.Pack("commitBatch", layer2Batch)
|
||||
if err != nil {
|
||||
log.Error("Failed to pack commitBatch", "id", id, "index", batch.Index, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
hash, err := r.rollupSender.SendTransaction(id, &r.cfg.RollupContractAddress, big.NewInt(0), data)
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
log.Info("commitBatch in layer1", "id", id, "index", batch.Index, "hash", hash)
|
||||
|
||||
// record and sync with db, @todo handle db error
|
||||
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
|
||||
if err != nil {
|
||||
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
|
||||
}
|
||||
r.processingCommitment[id] = id
|
||||
}
|
||||
|
||||
// ProcessCommittedBatches submit proof to layer 1 rollup contract
|
||||
func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
// batches are sorted by batch index in increasing order
|
||||
batches, err := r.db.GetCommittedBatches()
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch committed L2 batches", "err", err)
|
||||
return
|
||||
}
|
||||
if len(batches) == 0 {
|
||||
return
|
||||
}
|
||||
id := batches[0]
|
||||
// @todo add support to relay multiple batches
|
||||
|
||||
status, err := r.db.GetProvingStatusByID(id)
|
||||
if err != nil {
|
||||
log.Error("GetProvingStatusByID failed", "id", id, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
switch status {
|
||||
case orm.ProvingTaskUnassigned, orm.ProvingTaskAssigned:
|
||||
// The proof for this block is not ready yet.
|
||||
return
|
||||
|
||||
case orm.ProvingTaskProved:
|
||||
// It's an intermediate state. The roller manager received the proof but has not verified
|
||||
// the proof yet. We don't roll up the proof until it's verified.
|
||||
return
|
||||
|
||||
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
|
||||
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
|
||||
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
|
||||
}
|
||||
|
||||
case orm.ProvingTaskVerified:
|
||||
log.Info("Start to roll up zk proof", "id", id)
|
||||
success := false
|
||||
|
||||
defer func() {
|
||||
// TODO: need to revisit this and have a more fine-grained error handling
|
||||
if !success {
|
||||
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "id", id)
|
||||
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
|
||||
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByID(id)
|
||||
if err != nil {
|
||||
log.Warn("fetch get proof by id failed", "id", id, "err", err)
|
||||
return
|
||||
}
|
||||
if proofBuffer == nil || instanceBuffer == nil {
|
||||
log.Warn("proof or instance not ready", "id", id)
|
||||
return
|
||||
}
|
||||
if len(proofBuffer)%32 != 0 {
|
||||
log.Error("proof buffer has wrong length", "id", id, "length", len(proofBuffer))
|
||||
return
|
||||
}
|
||||
if len(instanceBuffer)%32 != 0 {
|
||||
log.Warn("instance buffer has wrong length", "id", id, "length", len(instanceBuffer))
|
||||
return
|
||||
}
|
||||
|
||||
proof := utils.BufferToUint256Le(proofBuffer)
|
||||
instance := utils.BufferToUint256Le(instanceBuffer)
|
||||
data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(id), proof, instance)
|
||||
if err != nil {
|
||||
log.Error("Pack finalizeBatchWithProof failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
txHash, err := r.rollupSender.SendTransaction(id, &r.cfg.RollupContractAddress, big.NewInt(0), data)
|
||||
hash := &txHash
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("finalizeBatchWithProof in layer1 failed", "id", id, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
log.Info("finalizeBatchWithProof in layer1", "id", id, "hash", hash)
|
||||
|
||||
// record and sync with db, @todo handle db error
|
||||
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
|
||||
if err != nil {
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "id", id, "err", err)
|
||||
}
|
||||
success = true
|
||||
r.processingFinalization[id] = id
|
||||
|
||||
default:
|
||||
log.Error("encounter unreachable case in ProcessCommittedBatches",
|
||||
"block_status", status,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Start the relayer process
|
||||
func (r *Layer2Relayer) Start() {
|
||||
go func() {
|
||||
// trigger by timer
|
||||
loop := func(ctx context.Context, f func()) {
|
||||
ticker := time.NewTicker(time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
r.ProcessSavedEvents()
|
||||
r.ProcessPendingBatches()
|
||||
r.ProcessCommittedBatches()
|
||||
case confirmation := <-r.messageCh:
|
||||
r.handleConfirmation(confirmation)
|
||||
case confirmation := <-r.rollupCh:
|
||||
r.handleConfirmation(confirmation)
|
||||
case <-r.stopCh:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
f()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
ctx, cancel := context.WithCancel(r.ctx)
|
||||
|
||||
go loop(ctx, r.ProcessSavedEvents)
|
||||
go loop(ctx, r.ProcessPendingBatches)
|
||||
go loop(ctx, r.ProcessCommittedBatches)
|
||||
|
||||
go func(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case confirmation := <-r.messageCh:
|
||||
r.handleConfirmation(confirmation)
|
||||
case confirmation := <-r.rollupCh:
|
||||
r.handleConfirmation(confirmation)
|
||||
}
|
||||
}
|
||||
}(ctx)
|
||||
|
||||
<-r.stopCh
|
||||
cancel()
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -394,36 +179,36 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
|
||||
|
||||
transactionType := "Unknown"
|
||||
// check whether it is message relay transaction
|
||||
if msgHash, ok := r.processingMessage[confirmation.ID]; ok {
|
||||
if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
|
||||
transactionType = "MessageRelay"
|
||||
// @todo handle db error
|
||||
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash, orm.MsgConfirmed, confirmation.TxHash.String())
|
||||
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), orm.MsgConfirmed, confirmation.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash, "err", err)
|
||||
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
|
||||
}
|
||||
delete(r.processingMessage, confirmation.ID)
|
||||
r.processingMessage.Delete(confirmation.ID)
|
||||
}
|
||||
|
||||
// check whether it is block commitment transaction
|
||||
if batch_id, ok := r.processingCommitment[confirmation.ID]; ok {
|
||||
if batchID, ok := r.processingCommitment.Load(confirmation.ID); ok {
|
||||
transactionType = "BatchCommitment"
|
||||
// @todo handle db error
|
||||
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batch_id, confirmation.TxHash.String(), orm.RollupCommitted)
|
||||
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupCommitted)
|
||||
if err != nil {
|
||||
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batch_id, "err", err)
|
||||
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
|
||||
}
|
||||
delete(r.processingCommitment, confirmation.ID)
|
||||
r.processingCommitment.Delete(confirmation.ID)
|
||||
}
|
||||
|
||||
// check whether it is proof finalization transaction
|
||||
if batch_id, ok := r.processingFinalization[confirmation.ID]; ok {
|
||||
if batchID, ok := r.processingFinalization.Load(confirmation.ID); ok {
|
||||
transactionType = "ProofFinalization"
|
||||
// @todo handle db error
|
||||
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch_id, confirmation.TxHash.String(), orm.RollupFinalized)
|
||||
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupFinalized)
|
||||
if err != nil {
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batch_id, "err", err)
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
|
||||
}
|
||||
delete(r.processingFinalization, confirmation.ID)
|
||||
r.processingFinalization.Delete(confirmation.ID)
|
||||
}
|
||||
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
|
||||
}
|
||||
|
||||
171
bridge/l2/relayer_commit.go
Normal file
171
bridge/l2/relayer_commit.go
Normal file
@@ -0,0 +1,171 @@
|
||||
package l2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"modernc.org/mathutil"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/sender"
|
||||
|
||||
"scroll-tech/database/orm"
|
||||
)
|
||||
|
||||
func (r *Layer2Relayer) checkCommittingBatches() error {
|
||||
var batchIndex uint64
|
||||
BEGIN:
|
||||
batches, err := r.db.GetBlockBatches(
|
||||
map[string]interface{}{"rollup_status": orm.RollupCommitting},
|
||||
fmt.Sprintf("AND index > %d", batchIndex),
|
||||
fmt.Sprintf("ORDER BY index ASC LIMIT %d", 10),
|
||||
)
|
||||
if err != nil || len(batches) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
for batch := batches[0]; len(batches) > 0; { //nolint:staticcheck
|
||||
// If pending txs pool is full, wait a while and retry.
|
||||
if r.rollupSender.IsFull() {
|
||||
log.Warn("layer2 rollup sender pending committed tx reaches pending limit")
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
continue
|
||||
}
|
||||
batch, batches = batches[0], batches[1:]
|
||||
|
||||
id := batch.ID
|
||||
batchIndex = mathutil.MaxUint64(batchIndex, batch.Index)
|
||||
|
||||
txStr, err := r.db.GetCommitTxHash(id)
|
||||
if err != nil {
|
||||
log.Error("failed to get commit_tx_hash from block_batch", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
_, data, err := r.packCommitBatch(id)
|
||||
if err != nil {
|
||||
log.Error("failed to load or send committed tx", "batch id", id, "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
txID := id + "-commit"
|
||||
err = r.rollupSender.LoadOrSendTx(
|
||||
common.HexToHash(txStr.String),
|
||||
txID,
|
||||
&r.cfg.RollupContractAddress,
|
||||
big.NewInt(0),
|
||||
data,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error("failed to load or send tx", "batch id", id, "err", err)
|
||||
} else {
|
||||
r.processingCommitment.Store(txID, id)
|
||||
}
|
||||
}
|
||||
goto BEGIN
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) packCommitBatch(id string) (*orm.BlockBatch, []byte, error) {
|
||||
batches, err := r.db.GetBlockBatches(map[string]interface{}{"id": id})
|
||||
if err != nil || len(batches) == 0 {
|
||||
log.Error("Failed to GetBlockBatches", "batch_id", id, "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
batch := batches[0]
|
||||
|
||||
traces, err := r.db.GetBlockTraces(map[string]interface{}{"batch_id": id}, "ORDER BY number ASC")
|
||||
if err != nil || len(traces) == 0 {
|
||||
log.Error("Failed to GetBlockTraces", "batch_id", id, "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
layer2Batch := &bridge_abi.IZKRollupLayer2Batch{
|
||||
BatchIndex: batch.Index,
|
||||
ParentHash: common.HexToHash(batch.ParentHash),
|
||||
Blocks: make([]bridge_abi.IZKRollupLayer2BlockHeader, len(traces)),
|
||||
}
|
||||
|
||||
parentHash := common.HexToHash(batch.ParentHash)
|
||||
for i, trace := range traces {
|
||||
layer2Batch.Blocks[i] = bridge_abi.IZKRollupLayer2BlockHeader{
|
||||
BlockHash: trace.Header.Hash(),
|
||||
ParentHash: parentHash,
|
||||
BaseFee: trace.Header.BaseFee,
|
||||
StateRoot: trace.StorageTrace.RootAfter,
|
||||
BlockHeight: trace.Header.Number.Uint64(),
|
||||
GasUsed: 0,
|
||||
Timestamp: trace.Header.Time,
|
||||
ExtraData: make([]byte, 0),
|
||||
Txs: make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.Transactions)),
|
||||
}
|
||||
for j, tx := range trace.Transactions {
|
||||
layer2Batch.Blocks[i].Txs[j] = bridge_abi.IZKRollupLayer2Transaction{
|
||||
Caller: tx.From,
|
||||
Nonce: tx.Nonce,
|
||||
Gas: tx.Gas,
|
||||
GasPrice: tx.GasPrice.ToInt(),
|
||||
Value: tx.Value.ToInt(),
|
||||
Data: common.Hex2Bytes(tx.Data),
|
||||
R: tx.R.ToInt(),
|
||||
S: tx.S.ToInt(),
|
||||
V: tx.V.ToInt().Uint64(),
|
||||
}
|
||||
if tx.To != nil {
|
||||
layer2Batch.Blocks[i].Txs[j].Target = *tx.To
|
||||
}
|
||||
layer2Batch.Blocks[i].GasUsed += trace.ExecutionResults[j].Gas
|
||||
}
|
||||
|
||||
// for next iteration
|
||||
parentHash = layer2Batch.Blocks[i].BlockHash
|
||||
}
|
||||
|
||||
data, err := bridge_abi.RollupMetaABI.Pack("commitBatch", layer2Batch)
|
||||
if err != nil {
|
||||
log.Error("Failed to pack commitBatch", "id", id, "index", batch.Index, "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
return batch, data, nil
|
||||
}
|
||||
|
||||
// ProcessPendingBatches submit batch data to layer 1 rollup contract
|
||||
func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
// batches are sorted by batch index in increasing order
|
||||
batchesInDB, err := r.db.GetPendingBatches(1)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch pending L2 batches", "err", err)
|
||||
return
|
||||
}
|
||||
if len(batchesInDB) == 0 {
|
||||
return
|
||||
}
|
||||
id := batchesInDB[0]
|
||||
// @todo add support to relay multiple batches
|
||||
|
||||
batch, data, err := r.packCommitBatch(id)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
txID := id + "-commit"
|
||||
// add suffix `-commit` to avoid duplication with finalize tx in unit tests
|
||||
hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
|
||||
|
||||
// record and sync with db, @todo handle db error
|
||||
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
|
||||
if err != nil {
|
||||
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
|
||||
}
|
||||
r.processingCommitment.Store(txID, id)
|
||||
}
|
||||
195
bridge/l2/relayer_finalize.go
Normal file
195
bridge/l2/relayer_finalize.go
Normal file
@@ -0,0 +1,195 @@
|
||||
package l2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"modernc.org/mathutil"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/sender"
|
||||
"scroll-tech/bridge/utils"
|
||||
|
||||
"scroll-tech/database/orm"
|
||||
)
|
||||
|
||||
func (r *Layer2Relayer) checkFinalizingBatches() error {
|
||||
var (
|
||||
batchLimit = 10
|
||||
batchIndex uint64
|
||||
)
|
||||
BEGIN:
|
||||
batches, err := r.db.GetBlockBatches(
|
||||
map[string]interface{}{"rollup_status": orm.RollupFinalizing},
|
||||
fmt.Sprintf("AND index > %d", batchIndex),
|
||||
fmt.Sprintf("ORDER BY index ASC LIMIT %d", batchLimit),
|
||||
)
|
||||
if err != nil || len(batches) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
for batch := batches[0]; len(batches) > 0; { //nolint:staticcheck
|
||||
// If pending txs pool is full, wait a while and retry.
|
||||
if r.rollupSender.IsFull() {
|
||||
log.Warn("layer2 rollup sender pending finalized tx reaches pending limit")
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
continue
|
||||
}
|
||||
batch, batches = batches[0], batches[1:]
|
||||
|
||||
id := batch.ID
|
||||
batchIndex = mathutil.MaxUint64(batchIndex, batch.Index)
|
||||
|
||||
txStr, err := r.db.GetFinalizeTxHash(id)
|
||||
if err != nil {
|
||||
log.Error("failed to get finalize_tx_hash from block_batch", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
data, err := r.packFinalizeBatch(id)
|
||||
if err != nil {
|
||||
log.Error("failed to pack finalize data", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
txID := id + "-finalize"
|
||||
err = r.rollupSender.LoadOrSendTx(
|
||||
common.HexToHash(txStr.String),
|
||||
txID,
|
||||
&r.cfg.RollupContractAddress,
|
||||
big.NewInt(0),
|
||||
data,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error("failed to load or send finalized tx", "batch id", id, "err", err)
|
||||
} else {
|
||||
r.processingFinalization.Store(txID, id)
|
||||
}
|
||||
}
|
||||
goto BEGIN
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) packFinalizeBatch(id string) ([]byte, error) {
|
||||
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByID(id)
|
||||
if err != nil {
|
||||
log.Warn("fetch get proof by id failed", "id", id, "err", err)
|
||||
return nil, err
|
||||
}
|
||||
if proofBuffer == nil || instanceBuffer == nil {
|
||||
log.Warn("proof or instance not ready", "id", id)
|
||||
return nil, err
|
||||
}
|
||||
if len(proofBuffer)%32 != 0 {
|
||||
log.Error("proof buffer has wrong length", "id", id, "length", len(proofBuffer))
|
||||
return nil, err
|
||||
}
|
||||
if len(instanceBuffer)%32 != 0 {
|
||||
log.Warn("instance buffer has wrong length", "id", id, "length", len(instanceBuffer))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
proof := utils.BufferToUint256Le(proofBuffer)
|
||||
instance := utils.BufferToUint256Le(instanceBuffer)
|
||||
data, err := bridge_abi.RollupMetaABI.Pack("finalizeBatchWithProof", common.HexToHash(id), proof, instance)
|
||||
if err != nil {
|
||||
log.Error("Pack finalizeBatchWithProof failed", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// ProcessCommittedBatches submit proof to layer 1 rollup contract
|
||||
func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
// set skipped batches in a single db operation
|
||||
if count, err := r.db.UpdateSkippedBatches(); err != nil {
|
||||
log.Error("UpdateSkippedBatches failed", "err", err)
|
||||
// continue anyway
|
||||
} else if count > 0 {
|
||||
log.Info("Skipping batches", "count", count)
|
||||
}
|
||||
|
||||
// batches are sorted by batch index in increasing order
|
||||
batches, err := r.db.GetCommittedBatches(1)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch committed L2 batches", "err", err)
|
||||
return
|
||||
}
|
||||
if len(batches) == 0 {
|
||||
return
|
||||
}
|
||||
id := batches[0]
|
||||
// @todo add support to relay multiple batches
|
||||
|
||||
status, err := r.db.GetProvingStatusByID(id)
|
||||
if err != nil {
|
||||
log.Error("GetProvingStatusByID failed", "id", id, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
switch status {
|
||||
case orm.ProvingTaskUnassigned, orm.ProvingTaskAssigned:
|
||||
// The proof for this block is not ready yet.
|
||||
return
|
||||
|
||||
case orm.ProvingTaskProved:
|
||||
// It's an intermediate state. The roller manager received the proof but has not verified
|
||||
// the proof yet. We don't roll up the proof until it's verified.
|
||||
return
|
||||
|
||||
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
|
||||
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
|
||||
|
||||
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
|
||||
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
|
||||
}
|
||||
|
||||
case orm.ProvingTaskVerified:
|
||||
log.Info("Start to roll up zk proof", "id", id)
|
||||
success := false
|
||||
|
||||
defer func() {
|
||||
// TODO: need to revisit this and have a more fine-grained error handling
|
||||
if !success {
|
||||
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "id", id)
|
||||
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
|
||||
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Pack finalize data.
|
||||
data, err := r.packFinalizeBatch(id)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
txID := id + "-finalize"
|
||||
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
|
||||
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
|
||||
hash := &txHash
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("finalizeBatchWithProof in layer1 failed", "id", id, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
|
||||
|
||||
// record and sync with db, @todo handle db error
|
||||
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
|
||||
if err != nil {
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
|
||||
}
|
||||
success = true
|
||||
r.processingFinalization.Store(txID, id)
|
||||
|
||||
default:
|
||||
log.Error("encounter unreachable case in ProcessCommittedBatches",
|
||||
"block_status", status,
|
||||
)
|
||||
}
|
||||
}
|
||||
183
bridge/l2/relayer_message.go
Normal file
183
bridge/l2/relayer_message.go
Normal file
@@ -0,0 +1,183 @@
|
||||
package l2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"modernc.org/mathutil"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/sender"
|
||||
|
||||
"scroll-tech/database/orm"
|
||||
)
|
||||
|
||||
const processMsgLimit = 100
|
||||
|
||||
func (r *Layer2Relayer) checkSubmittedMessages() error {
|
||||
var nonce uint64
|
||||
BEGIN:
|
||||
// msgs are sorted by nonce in increasing order
|
||||
msgs, err := r.db.GetL2Messages(
|
||||
map[string]interface{}{"status": orm.MsgSubmitted},
|
||||
fmt.Sprintf("AND nonce > %d", nonce),
|
||||
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
|
||||
)
|
||||
if err != nil || len(msgs) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
var batch *orm.BlockBatch
|
||||
for msg := msgs[0]; len(msgs) > 0; { //nolint:staticcheck
|
||||
// If pending pool is full, wait a while and retry.
|
||||
if r.messageSender.IsFull() {
|
||||
log.Warn("layer2 message tx sender is full")
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
continue
|
||||
}
|
||||
msg, msgs = msgs[0], msgs[1:]
|
||||
nonce = mathutil.MaxUint64(nonce, msg.Nonce)
|
||||
|
||||
// Get batch by block number.
|
||||
if batch == nil || msg.Height < batch.StartBlockNumber || msg.Height > batch.EndBlockNumber {
|
||||
batches, err := r.db.GetBlockBatches(
|
||||
map[string]interface{}{},
|
||||
fmt.Sprintf("AND start_block_number <= %d AND end_block_number >= %d", msg.Height, msg.Height),
|
||||
)
|
||||
// If get batch failed, stop and return immediately.
|
||||
if err != nil || len(batches) == 0 {
|
||||
return err
|
||||
}
|
||||
batch = batches[0]
|
||||
}
|
||||
|
||||
data, err := r.packRelayMessage(msg, batch.Index)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
err = r.messageSender.LoadOrSendTx(
|
||||
common.HexToHash(msg.Layer1Hash),
|
||||
msg.MsgHash,
|
||||
&r.cfg.MessengerContractAddress,
|
||||
big.NewInt(0),
|
||||
data,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error("failed to load or send l2 submitted tx", "batch id", batch.ID, "msg hash", msg.MsgHash, "err", err)
|
||||
} else {
|
||||
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
|
||||
}
|
||||
}
|
||||
goto BEGIN
|
||||
}
|
||||
|
||||
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
|
||||
func (r *Layer2Relayer) ProcessSavedEvents() {
|
||||
batch, err := r.db.GetLatestFinalizedBatch()
|
||||
if err != nil {
|
||||
log.Error("GetLatestFinalizedBatch failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
// msgs are sorted by nonce in increasing order
|
||||
msgs, err := r.db.GetL2Messages(
|
||||
map[string]interface{}{"status": orm.MsgPending},
|
||||
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
|
||||
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
// process messages in batches
|
||||
batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
|
||||
for size := 0; len(msgs) > 0; msgs = msgs[size:] {
|
||||
if size = len(msgs); size > batchSize {
|
||||
size = batchSize
|
||||
}
|
||||
var g errgroup.Group
|
||||
for _, msg := range msgs[:size] {
|
||||
msg := msg
|
||||
g.Go(func() error {
|
||||
return r.processSavedEvent(msg, batch.Index)
|
||||
})
|
||||
}
|
||||
if err := g.Wait(); err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("failed to process l2 saved event", "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
|
||||
data, err := r.packRelayMessage(msg, index)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
|
||||
if err != nil && err.Error() == "execution reverted: Message expired" {
|
||||
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgExpired)
|
||||
}
|
||||
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
|
||||
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
|
||||
}
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
log.Info("relayMessageWithProof to layer1", "msgHash", msg.MsgHash, "txhash", hash.String())
|
||||
|
||||
// save status in db
|
||||
// @todo handle db error
|
||||
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
|
||||
if err != nil {
|
||||
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
|
||||
return err
|
||||
}
|
||||
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) packRelayMessage(msg *orm.L2Message, index uint64) ([]byte, error) {
|
||||
// @todo fetch merkle proof from l2geth
|
||||
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
|
||||
|
||||
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
|
||||
BlockHeight: big.NewInt(int64(msg.Height)),
|
||||
BatchIndex: big.NewInt(0).SetUint64(index),
|
||||
MerkleProof: make([]byte, 0),
|
||||
}
|
||||
from := common.HexToAddress(msg.Sender)
|
||||
target := common.HexToAddress(msg.Target)
|
||||
value, ok := big.NewInt(0).SetString(msg.Value, 10)
|
||||
if !ok {
|
||||
// @todo maybe panic?
|
||||
log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
|
||||
// TODO: need to skip this message by changing its status to MsgError
|
||||
}
|
||||
fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
|
||||
deadline := big.NewInt(int64(msg.Deadline))
|
||||
msgNonce := big.NewInt(int64(msg.Nonce))
|
||||
calldata := common.Hex2Bytes(msg.Calldata)
|
||||
data, err := bridge_abi.L1MessengerMetaABI.Pack("relayMessageWithProof", from, target, value, fee, deadline, msgNonce, calldata, proof)
|
||||
if err != nil {
|
||||
log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
|
||||
// TODO: need to skip this message by changing its status to MsgError
|
||||
return nil, err
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package l2_test
|
||||
package l2
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -11,8 +11,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/bridge/l2"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/migrate"
|
||||
"scroll-tech/database/orm"
|
||||
@@ -42,7 +40,7 @@ func testCreateNewRelayer(t *testing.T) {
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
defer relayer.Stop()
|
||||
|
||||
@@ -57,7 +55,7 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
|
||||
defer db.Close()
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, int64(l2Cfg.Confirmations), db, l2Cfg.RelayerConfig)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
defer relayer.Stop()
|
||||
|
||||
@@ -76,7 +74,7 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
err = db.InsertBlockTraces(context.Background(), traces)
|
||||
err = db.InsertBlockTraces(traces)
|
||||
assert.NoError(t, err)
|
||||
|
||||
dbTx, err := db.Beginx()
|
||||
@@ -111,7 +109,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
defer db.Close()
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, int64(l2Cfg.Confirmations), db, l2Cfg.RelayerConfig)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
defer relayer.Stop()
|
||||
|
||||
@@ -132,7 +130,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
traces = append(traces, blockTrace)
|
||||
|
||||
err = db.InsertBlockTraces(context.Background(), traces)
|
||||
err = db.InsertBlockTraces(traces)
|
||||
assert.NoError(t, err)
|
||||
|
||||
dbTx, err := db.Beginx()
|
||||
@@ -168,7 +166,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
|
||||
defer db.Close()
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, int64(l2Cfg.Confirmations), db, l2Cfg.RelayerConfig)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
defer relayer.Stop()
|
||||
|
||||
@@ -195,3 +193,68 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.RollupFinalizing, status)
|
||||
}
|
||||
|
||||
func testL2RelayerSkipBatches(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
defer relayer.Stop()
|
||||
|
||||
createBatch := func(rollupStatus orm.RollupStatus, provingStatus orm.ProvingStatus) string {
|
||||
dbTx, err := db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
|
||||
assert.NoError(t, err)
|
||||
err = dbTx.Commit()
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.UpdateRollupStatus(context.Background(), batchID, rollupStatus)
|
||||
assert.NoError(t, err)
|
||||
|
||||
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
|
||||
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
|
||||
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
|
||||
assert.NoError(t, err)
|
||||
err = db.UpdateProvingStatus(batchID, provingStatus)
|
||||
assert.NoError(t, err)
|
||||
|
||||
return batchID
|
||||
}
|
||||
|
||||
skipped := []string{
|
||||
createBatch(orm.RollupCommitted, orm.ProvingTaskSkipped),
|
||||
createBatch(orm.RollupCommitted, orm.ProvingTaskFailed),
|
||||
}
|
||||
|
||||
notSkipped := []string{
|
||||
createBatch(orm.RollupPending, orm.ProvingTaskSkipped),
|
||||
createBatch(orm.RollupCommitting, orm.ProvingTaskSkipped),
|
||||
createBatch(orm.RollupFinalizing, orm.ProvingTaskSkipped),
|
||||
createBatch(orm.RollupFinalized, orm.ProvingTaskSkipped),
|
||||
createBatch(orm.RollupPending, orm.ProvingTaskFailed),
|
||||
createBatch(orm.RollupCommitting, orm.ProvingTaskFailed),
|
||||
createBatch(orm.RollupFinalizing, orm.ProvingTaskFailed),
|
||||
createBatch(orm.RollupFinalized, orm.ProvingTaskFailed),
|
||||
createBatch(orm.RollupCommitted, orm.ProvingTaskVerified),
|
||||
}
|
||||
|
||||
relayer.ProcessCommittedBatches()
|
||||
|
||||
for _, id := range skipped {
|
||||
status, err := db.GetRollupStatus(id)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.RollupFinalizationSkipped, status)
|
||||
}
|
||||
|
||||
for _, id := range notSkipped {
|
||||
status, err := db.GetRollupStatus(id)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, orm.RollupFinalizationSkipped, status)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,13 +7,15 @@ import (
    "reflect"
    "time"

    "github.com/scroll-tech/go-ethereum"
    geth "github.com/scroll-tech/go-ethereum"
    "github.com/scroll-tech/go-ethereum/accounts/abi"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/event"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/scroll-tech/go-ethereum/metrics"
    "github.com/scroll-tech/go-ethereum/rpc"

    bridge_abi "scroll-tech/bridge/abi"
    "scroll-tech/bridge/utils"
@@ -24,6 +26,11 @@ import (
    "scroll-tech/bridge/config"
)

// Metrics
var (
    bridgeL2MsgSyncHeightGauge = metrics.NewRegisteredGauge("bridge/l2/msg/sync/height", nil)
)

type relayedMessage struct {
    msgHash common.Hash
    txHash  common.Hash
@@ -39,7 +46,7 @@ type WatcherClient struct {

    orm database.OrmFactory

    confirmations uint64
    confirmations rpc.BlockNumber
    messengerAddress common.Address
    messengerABI     *abi.ABI

@@ -53,7 +60,7 @@ type WatcherClient struct {
}

// NewL2WatcherClient takes an l2geth instance and generates a WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
    savedHeight, err := orm.GetLayer2LatestWatchedHeight()
    if err != nil {
        log.Warn("fetch height from db failed", "err", err)
@@ -81,65 +88,70 @@ func (w *WatcherClient) Start() {
        panic("must run L2 watcher with DB")
    }

    lastFetchedBlock, err := w.orm.GetBlockTracesLatestHeight()
    if err != nil {
        panic(fmt.Sprintf("failed to GetBlockTracesLatestHeight in DB: %v", err))
    }
    ctx, cancel := context.WithCancel(w.ctx)

    if lastFetchedBlock < 0 {
        lastFetchedBlock = 0
    }
    lastBlockHeightChangeTime := time.Now()
    // trace fetcher loop
    go func(ctx context.Context) {
        ticker := time.NewTicker(3 * time.Second)
        defer ticker.Stop()

    // trigger by timer
    // TODO: make it configurable
    ticker := time.NewTicker(3 * time.Second)
    defer ticker.Stop()
        for {
            select {
            case <-ctx.Done():
                return

    for {
        select {
        case <-ticker.C:
            // get current height
            number, err := w.BlockNumber(w.ctx)
            if err != nil {
                log.Error("failed to get_BlockNumber", "err", err)
                continue
            }
            duration := time.Since(lastBlockHeightChangeTime)
            var blockToFetch uint64
            if number > uint64(lastFetchedBlock)+w.confirmations {
                // latest block height changed
                blockToFetch = number - w.confirmations
            } else if duration.Seconds() > 60 {
                // l2geth didn't produce any blocks for more than 1 minute.
                blockToFetch = number
            }
            // fetch at most `blockTracesFetchLimit=10` missing blocks
            if blockToFetch > uint64(lastFetchedBlock)+blockTracesFetchLimit {
                blockToFetch = uint64(lastFetchedBlock) + blockTracesFetchLimit
            }
            if lastFetchedBlock != int64(blockToFetch) {
                lastFetchedBlock = int64(blockToFetch)
                lastBlockHeightChangeTime = time.Now()
            }
            case <-ticker.C:
                number, err := utils.GetLatestConfirmedBlockNumber(ctx, w.Client, w.confirmations)
                if err != nil {
                    log.Error("failed to get block number", "err", err)
                    continue
                }

            if err := w.tryFetchRunningMissingBlocks(w.ctx, blockToFetch); err != nil {
                log.Error("failed to fetchRunningMissingBlocks", "err", err)
                w.tryFetchRunningMissingBlocks(ctx, number)
            }

            // @todo handle error
            if err := w.fetchContractEvent(number); err != nil {
                log.Error("failed to fetchContractEvent", "err", err)
            }

            if err := w.batchProposer.tryProposeBatch(); err != nil {
                log.Error("failed to tryProposeBatch", "err", err)
            }

        case <-w.stopCh:
            return
        }
    }
    }(ctx)

    // event fetcher loop
    go func(ctx context.Context) {
        ticker := time.NewTicker(3 * time.Second)
        defer ticker.Stop()

        for {
            select {
            case <-ctx.Done():
                return

            case <-ticker.C:
                number, err := utils.GetLatestConfirmedBlockNumber(ctx, w.Client, w.confirmations)
                if err != nil {
                    log.Error("failed to get block number", "err", err)
                    continue
                }

                w.FetchContractEvent(number)
            }
        }
    }(ctx)

    // batch proposer loop
    go func(ctx context.Context) {
        ticker := time.NewTicker(3 * time.Second)
        defer ticker.Stop()

        for {
            select {
            case <-ctx.Done():
                return

            case <-ticker.C:
                w.batchProposer.tryProposeBatch()
            }
        }
    }(ctx)

    <-w.stopCh
    cancel()
}()
}
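Editor's note: the refactor replaces manual "head minus confirmations" arithmetic with a shared helper, and turns `confirmations` from a raw uint64 into an `rpc.BlockNumber` so that sentinel values like `rpc.LatestBlockNumber` are expressible in config. The helper's body is not shown in this diff; the following is a minimal sketch of what it plausibly does under that convention (a non-negative value means "wait N blocks"), not the repository's actual implementation:

package utils

import (
	"context"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rpc"
)

// GetLatestConfirmedBlockNumber (sketch): resolve a confirmations setting,
// expressed as an rpc.BlockNumber, to a concrete confirmed block height.
func GetLatestConfirmedBlockNumber(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber) (uint64, error) {
	number, err := client.BlockNumber(ctx)
	if err != nil {
		return 0, err
	}
	// rpc.LatestBlockNumber (and zero confirmations): just use the head.
	if confirmations <= 0 {
		return number, nil
	}
	// Numeric confirmations: head minus N, floored at 0.
	if number < uint64(confirmations) {
		return 0, nil
	}
	return number - uint64(confirmations), nil
}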
@@ -151,109 +163,134 @@ func (w *WatcherClient) Stop() {
const blockTracesFetchLimit = uint64(10)

// try to fetch missing blocks if the DB is inconsistent with the chain
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, backTrackFrom uint64) error {
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
    // Get the newest block in the DB; there must be blocks at that time.
    // Don't use the "block_trace" table "trace" column's BlockTrace.Number,
    // because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
    heightInDB, err := w.orm.GetBlockTracesLatestHeight()
    if err != nil {
        return fmt.Errorf("failed to GetBlockTracesLatestHeight in DB: %v", err)
        log.Error("failed to GetBlockTracesLatestHeight", "err", err)
        return
    }
    backTrackTo := uint64(0)

    // Can't get trace from genesis block, so the default start number is 1.
    var from = uint64(1)
    if heightInDB > 0 {
        backTrackTo = uint64(heightInDB)
        from = uint64(heightInDB) + 1
    }

    // start backtracking
    for ; from <= blockHeight; from += blockTracesFetchLimit {
        to := from + blockTracesFetchLimit - 1

        if to > blockHeight {
            to = blockHeight
        }

        // Get block traces and insert into db.
        if err = w.getAndStoreBlockTraces(ctx, from, to); err != nil {
            log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
            return
        }
    }
}

func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
    var traces []*types.BlockTrace
    for number := backTrackFrom; number > backTrackTo; number-- {

    for number := from; number <= to; number++ {
        log.Debug("retrieving block trace", "height", number)
        trace, err2 := w.GetBlockTraceByNumber(ctx, big.NewInt(int64(number)))
        if err2 != nil {
            return fmt.Errorf("failed to GetBlockResultByHash: %v. number: %v", err2, number)
        }
        log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash)
        log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash().String())

        traces = append(traces, trace)

    }
    if len(traces) > 0 {
        if err = w.orm.InsertBlockTraces(ctx, traces); err != nil {
        if err := w.orm.InsertBlockTraces(traces); err != nil {
            return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
        }
    }

    return nil
}
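Editor's note: the backfill above walks the gap between the DB height and the confirmed chain height in fixed windows of `blockTracesFetchLimit` blocks, ascending instead of the old descending backtrack. The same windowing arithmetic in isolation, as a self-contained sketch:

package main

import "fmt"

// windows splits the inclusive range [from, to] into chunks of at most
// `limit` blocks, mirroring the loop in tryFetchRunningMissingBlocks.
func windows(from, to, limit uint64) [][2]uint64 {
	var out [][2]uint64
	for ; from <= to; from += limit {
		end := from + limit - 1
		if end > to {
			end = to
		}
		out = append(out, [2]uint64{from, end})
	}
	return out
}

func main() {
	// DB height 37, confirmed chain height 61 => fetch ranges of at most 10 blocks.
	fmt.Println(windows(38, 61, 10)) // [[38 47] [48 57] [58 61]]
}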
const contractEventsBlocksFetchLimit = int64(10)

// FetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
    defer func() {
        log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
    }()

    fromBlock := int64(w.processedMsgHeight) + 1
    toBlock := int64(blockHeight) - int64(w.confirmations)
    toBlock := int64(blockHeight)

    if toBlock < fromBlock {
        return nil
    }
    for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
        to := from + contractEventsBlocksFetchLimit - 1

    if toBlock > fromBlock+contractEventsBlocksFetchLimit {
        toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
    }

    // warning: uint int conversion...
    query := ethereum.FilterQuery{
        FromBlock: big.NewInt(fromBlock), // inclusive
        ToBlock:   big.NewInt(toBlock),   // inclusive
        Addresses: []common.Address{
            w.messengerAddress,
        },
        Topics: make([][]common.Hash, 1),
    }
    query.Topics[0] = make([]common.Hash, 3)
    query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
    query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
    query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)

    logs, err := w.FilterLogs(w.ctx, query)
    if err != nil {
        log.Error("failed to get event logs", "err", err)
        return err
    }
    if len(logs) == 0 {
        w.processedMsgHeight = uint64(toBlock)
        return nil
    }
    log.Info("received new L2 messages", "fromBlock", fromBlock, "toBlock", toBlock,
        "cnt", len(logs))

    sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
    if err != nil {
        log.Error("failed to parse emitted event log", "err", err)
        return err
    }

    // Update relayed messages first to make sure we don't forget to update submitted messages,
    // since we always start sync from the latest unprocessed message.
    for _, msg := range relayedMessageEvents {
        if msg.isSuccessful {
            // succeed
            err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
        } else {
            // failed
            err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
        if to > toBlock {
            to = toBlock
        }

        // warning: uint int conversion...
        query := geth.FilterQuery{
            FromBlock: big.NewInt(from), // inclusive
            ToBlock:   big.NewInt(to),   // inclusive
            Addresses: []common.Address{
                w.messengerAddress,
            },
            Topics: make([][]common.Hash, 1),
        }
        query.Topics[0] = make([]common.Hash, 3)
        query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
        query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
        query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)

        logs, err := w.FilterLogs(w.ctx, query)
        if err != nil {
            log.Error("Failed to update layer1 status and layer2 hash", "err", err)
            return err
            log.Error("failed to get event logs", "err", err)
            return
        }
    }
        if len(logs) == 0 {
            w.processedMsgHeight = uint64(to)
            bridgeL2MsgSyncHeightGauge.Update(to)
            continue
        }
        log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))

    err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents)
    if err == nil {
        w.processedMsgHeight = uint64(toBlock)
        sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
        if err != nil {
            log.Error("failed to parse emitted event log", "err", err)
            return
        }

        // Update relayed messages first to make sure we don't forget to update submitted messages,
        // since we always start sync from the latest unprocessed message.
        for _, msg := range relayedMessageEvents {
            if msg.isSuccessful {
                // succeed
                err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
            } else {
                // failed
                err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
            }
            if err != nil {
                log.Error("Failed to update layer1 status and layer2 hash", "err", err)
                return
            }
        }

        if err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
            log.Error("failed to save l2 messages", "err", err)
            return
        }

        w.processedMsgHeight = uint64(to)
        bridgeL2MsgSyncHeightGauge.Update(to)
    }
    return err
}
func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, error) {
@@ -264,7 +301,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
    var relayedMessages []relayedMessage
    for _, vLog := range logs {
        switch vLog.Topics[0] {
        case common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE):
        case common.HexToHash(bridge_abi.SentMessageEventSignature):
            event := struct {
                Target common.Address
                Sender common.Address
@@ -285,7 +322,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
            event.Target = common.HexToAddress(vLog.Topics[1].String())
            l2Messages = append(l2Messages, &orm.L2Message{
                Nonce:   event.MessageNonce.Uint64(),
                MsgHash: utils.ComputeMessageHash(event.Target, event.Sender, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
                MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
                Height:  vLog.BlockNumber,
                Sender:  event.Sender.String(),
                Value:   event.Value.String(),
@@ -296,7 +333,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
                Calldata:   common.Bytes2Hex(event.Message),
                Layer2Hash: vLog.TxHash.Hex(),
            })
        case common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE):
        case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
            event := struct {
                MsgHash common.Hash
            }{}
@@ -307,7 +344,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
                txHash:       vLog.TxHash,
                isSuccessful: true,
            })
        case common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE):
        case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
            event := struct {
                MsgHash common.Hash
            }{}
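Editor's note: the renamed constants (SentMessageEventSignature, etc.) are topic hashes — `topics[0]` of an emitted log is the keccak-256 of the canonical event signature, and listing several hashes in the same topic position of a FilterQuery matches any of them. A sketch of deriving such a constant, assuming the eight-field SentMessage layout declared in the mock contracts below:

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// Canonical signature: all parameter types in declaration order,
	// matching the SentMessage event in MockBridgeL1/MockBridgeL2 below.
	sig := "SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)"
	fmt.Println(crypto.Keccak256Hash([]byte(sig)).Hex())
}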
@@ -1,33 +1,5 @@
package l2

import (
    "errors"

    "github.com/scroll-tech/go-ethereum/rpc"
)

// WatcherAPI watcher api service
type WatcherAPI interface {
    ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error)
}

// ReplayBlockResultByHash temporary interface for easy testing.
func (r *WatcherClient) ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error) {
    orm := r.orm
    params := make(map[string]interface{})
    if number, ok := blockNrOrHash.Number(); ok {
        params["number"] = int64(number)
    }
    if hash, ok := blockNrOrHash.Hash(); ok {
        params["hash"] = hash.String()
    }
    if len(params) == 0 {
        return false, errors.New("empty params")
    }
    trace, err := orm.GetBlockTraces(params)
    if err != nil {
        return false, err
    }
    r.Send(&trace[0])
    return true, nil
}
@@ -1,4 +1,4 @@
package l2_test
package l2

import (
    "context"
@@ -12,10 +12,10 @@ import (
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/rpc"
    "github.com/stretchr/testify/assert"

    "scroll-tech/bridge/config"
    "scroll-tech/bridge/l2"
    "scroll-tech/bridge/mock_bridge"
    "scroll-tech/bridge/sender"

@@ -32,12 +32,12 @@ func testCreateNewWatcherAndStop(t *testing.T) {
    defer l2db.Close()

    l2cfg := cfg.L2Config
    rc := l2.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
    rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
    rc.Start()
    defer rc.Stop()

    l1cfg := cfg.L1Config
    l1cfg.RelayerConfig.SenderConfig.Confirmations = 0
    l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
    newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKeys)
    assert.NoError(t, err)

@@ -68,7 +68,7 @@ func testMonitorBridgeContract(t *testing.T) {
    auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])

    // deploy mock bridge
    _, tx, instance, err := mock_bridge.DeployMockBridge(auth, l2Cli)
    _, tx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
    assert.NoError(t, err)
    address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
    assert.NoError(t, err)
@@ -80,7 +80,10 @@ func testMonitorBridgeContract(t *testing.T) {
    // Call mock_bridge instance sendMessage to trigger emit events
    toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
    message := []byte("testbridgecontract")
    tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
    fee := big.NewInt(0)
    gasLimit := big.NewInt(1)

    tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
    assert.NoError(t, err)
    receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
    if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
@@ -90,7 +93,7 @@ func testMonitorBridgeContract(t *testing.T) {
    // extra block mined
    toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
    message = []byte("testbridgecontract")
    tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
    tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
    assert.NoError(t, err)
    receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
    if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
@@ -110,7 +113,7 @@ func testMonitorBridgeContract(t *testing.T) {
    assert.NoError(t, err)
    t.Log("Height in DB is", height)
    assert.Greater(t, height, int64(previousHeight))
    msgs, err := db.GetL2MessagesByStatus(orm.MsgPending)
    msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
    assert.NoError(t, err)
    assert.Equal(t, 2, len(msgs))
}
@@ -127,7 +130,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {

    auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])

    _, trx, instance, err := mock_bridge.DeployMockBridge(auth, l2Cli)
    _, trx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
    assert.NoError(t, err)
    address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
    assert.NoError(t, err)
@@ -147,7 +150,9 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
        auth.Nonce = big.NewInt(int64(nonce))
        toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
        message := []byte("testbridgecontract")
        tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
        fee := big.NewInt(0)
        gasLimit := big.NewInt(1)
        tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
        assert.NoError(t, err)
    }

@@ -163,7 +168,9 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
    auth.Nonce = big.NewInt(int64(nonce))
    toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
    message := []byte("testbridgecontract")
    tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
    fee := big.NewInt(0)
    gasLimit := big.NewInt(1)
    tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
    assert.NoError(t, err)
    receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
    if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
@@ -178,13 +185,14 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
    assert.NoError(t, err)
    t.Log("LatestHeight is", height)
    assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
    msgs, err := db.GetL2MessagesByStatus(orm.MsgPending)
    msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
    assert.NoError(t, err)
    assert.Equal(t, 5, len(msgs))
}

func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *l2.WatcherClient {
    return l2.NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, db)
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
    confirmations := rpc.LatestBlockNumber
    return NewL2WatcherClient(context.Background(), l2Cli, confirmations, bpCfg, contractAddr, db)
}

func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
bridge/mock_bridge/MockBridgeL1.sol (new file, 187 lines)
@@ -0,0 +1,187 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;

contract MockBridgeL1 {
  /*********************************
   * Events from L1ScrollMessenger *
   *********************************/

  event SentMessage(
    address indexed target,
    address sender,
    uint256 value,
    uint256 fee,
    uint256 deadline,
    bytes message,
    uint256 messageNonce,
    uint256 gasLimit
  );

  event MessageDropped(bytes32 indexed msgHash);

  event RelayedMessage(bytes32 indexed msgHash);

  event FailedRelayedMessage(bytes32 indexed msgHash);

  /************************
   * Events from ZKRollup *
   ************************/

  /// @notice Emitted when a new batch is committed.
  /// @param _batchHash The hash of the batch
  /// @param _batchIndex The index of the batch
  /// @param _parentHash The hash of the parent batch
  event CommitBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);

  /// @notice Emitted when a batch is reverted.
  /// @param _batchId The identification of the batch.
  event RevertBatch(bytes32 indexed _batchId);

  /// @notice Emitted when a batch is finalized.
  /// @param _batchHash The hash of the batch
  /// @param _batchIndex The index of the batch
  /// @param _parentHash The hash of the parent batch
  event FinalizeBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);

  /***********
   * Structs *
   ***********/

  struct L2MessageProof {
    uint256 batchIndex;
    uint256 blockHeight;
    bytes merkleProof;
  }

  /// @dev The transaction struct
  struct Layer2Transaction {
    address caller;
    uint64 nonce;
    address target;
    uint64 gas;
    uint256 gasPrice;
    uint256 value;
    bytes data;
    // signature
    uint256 r;
    uint256 s;
    uint64 v;
  }

  /// @dev The block header struct
  struct Layer2BlockHeader {
    bytes32 blockHash;
    bytes32 parentHash;
    uint256 baseFee;
    bytes32 stateRoot;
    uint64 blockHeight;
    uint64 gasUsed;
    uint64 timestamp;
    bytes extraData;
    Layer2Transaction[] txs;
  }

  /// @dev The batch struct, the batch hash is always the last block hash of `blocks`.
  struct Layer2Batch {
    uint64 batchIndex;
    // The hash of the last block in the parent batch
    bytes32 parentHash;
    Layer2BlockHeader[] blocks;
  }

  struct Layer2BatchStored {
    bytes32 batchHash;
    bytes32 parentHash;
    uint64 batchIndex;
    bool verified;
  }

  /*************
   * Variables *
   *************/

  /// @notice Message nonce, used to avoid relay attack.
  uint256 public messageNonce;

  /// @notice Mapping from batch id to batch struct.
  mapping(bytes32 => Layer2BatchStored) public batches;

  /************************************
   * Functions from L1ScrollMessenger *
   ************************************/

  function sendMessage(
    address _to,
    uint256 _fee,
    bytes memory _message,
    uint256 _gasLimit
  ) external payable {
    // solhint-disable-next-line not-rely-on-time
    uint256 _deadline = block.timestamp + 1 days;
    uint256 _value;
    unchecked {
      _value = msg.value - _fee;
    }
    uint256 _nonce = messageNonce;
    emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
    messageNonce += 1;
  }

  function relayMessageWithProof(
    address _from,
    address _to,
    uint256 _value,
    uint256 _fee,
    uint256 _deadline,
    uint256 _nonce,
    bytes memory _message,
    L2MessageProof memory
  ) external {
    bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
    emit RelayedMessage(_msghash);
  }

  /***************************
   * Functions from ZKRollup *
   ***************************/

  function commitBatch(Layer2Batch memory _batch) external {
    bytes32 _batchHash = _batch.blocks[_batch.blocks.length - 1].blockHash;
    bytes32 _batchId = _computeBatchId(_batchHash, _batch.parentHash, _batch.batchIndex);

    Layer2BatchStored storage _batchStored = batches[_batchId];
    _batchStored.batchHash = _batchHash;
    _batchStored.parentHash = _batch.parentHash;
    _batchStored.batchIndex = _batch.batchIndex;

    emit CommitBatch(_batchId, _batchHash, _batch.batchIndex, _batch.parentHash);
  }

  function revertBatch(bytes32 _batchId) external {
    emit RevertBatch(_batchId);
  }

  function finalizeBatchWithProof(
    bytes32 _batchId,
    uint256[] memory,
    uint256[] memory
  ) external {
    Layer2BatchStored storage _batch = batches[_batchId];
    uint256 _batchIndex = _batch.batchIndex;

    emit FinalizeBatch(_batchId, _batch.batchHash, _batchIndex, _batch.parentHash);
  }

  /// @dev Internal function to compute a unique batch id for mapping.
  /// @param _batchHash The hash of the batch.
  /// @param _parentHash The hash of the parent batch.
  /// @param _batchIndex The index of the batch.
  /// @return Return the computed batch id.
  function _computeBatchId(
    bytes32 _batchHash,
    bytes32 _parentHash,
    uint256 _batchIndex
  ) internal pure returns (bytes32) {
    return keccak256(abi.encode(_batchHash, _parentHash, _batchIndex));
  }
}
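Editor's note: the mock messengers compute the message hash as keccak256(abi.encodePacked(from, to, value, fee, deadline, nonce, message)), and the watcher diff above reorders the ComputeMessageHash call to pass the sender first, matching that layout. A minimal Go sketch of the same packed encoding — an assumption about what scroll-tech/bridge/utils.ComputeMessageHash does (its real signature takes the message before the nonce), not the repository's code:

package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// computeMessageHash mirrors the Solidity expression in the mock contracts:
//   keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message))
// abi.encodePacked concatenates: address = 20 raw bytes, uint256 = 32-byte
// big-endian value, bytes = raw payload.
func computeMessageHash(from, to common.Address, value, fee, deadline, nonce *big.Int, message []byte) common.Hash {
	pack := func(x *big.Int) []byte { return common.LeftPadBytes(x.Bytes(), 32) }
	var buf []byte
	buf = append(buf, from.Bytes()...)
	buf = append(buf, to.Bytes()...)
	buf = append(buf, pack(value)...)
	buf = append(buf, pack(fee)...)
	buf = append(buf, pack(deadline)...)
	buf = append(buf, pack(nonce)...)
	buf = append(buf, message...)
	return crypto.Keccak256Hash(buf)
}

func main() {
	from := common.HexToAddress("0x01")
	to := common.HexToAddress("0x02")
	h := computeMessageHash(from, to, big.NewInt(0), big.NewInt(0), big.NewInt(1), big.NewInt(0), []byte{0x00})
	fmt.Println(h.Hex())
}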
bridge/mock_bridge/MockBridgeL2.sol (new file, 67 lines)
@@ -0,0 +1,67 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;

contract MockBridgeL2 {
  /*********************************
   * Events from L2ScrollMessenger *
   *********************************/

  event SentMessage(
    address indexed target,
    address sender,
    uint256 value,
    uint256 fee,
    uint256 deadline,
    bytes message,
    uint256 messageNonce,
    uint256 gasLimit
  );

  event MessageDropped(bytes32 indexed msgHash);

  event RelayedMessage(bytes32 indexed msgHash);

  event FailedRelayedMessage(bytes32 indexed msgHash);

  /*************
   * Variables *
   *************/

  /// @notice Message nonce, used to avoid relay attack.
  uint256 public messageNonce;

  /************************************
   * Functions from L2ScrollMessenger *
   ************************************/

  function sendMessage(
    address _to,
    uint256 _fee,
    bytes memory _message,
    uint256 _gasLimit
  ) external payable {
    // solhint-disable-next-line not-rely-on-time
    uint256 _deadline = block.timestamp + 1 days;
    uint256 _nonce = messageNonce;
    uint256 _value;
    unchecked {
      _value = msg.value - _fee;
    }
    bytes32 _msghash = keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message));
    emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
    messageNonce = _nonce + 1;
  }

  function relayMessageWithProof(
    address _from,
    address _to,
    uint256 _value,
    uint256 _fee,
    uint256 _deadline,
    uint256 _nonce,
    bytes memory _message
  ) external {
    bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
    emit RelayedMessage(_msghash);
  }
}
@@ -1,42 +0,0 @@
|
||||
//SPDX-License-Identifier: UNLICENSED
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
contract Mock_Bridge {
|
||||
|
||||
event SentMessage(
|
||||
address indexed target,
|
||||
address sender,
|
||||
uint256 value,
|
||||
uint256 fee,
|
||||
uint256 deadline,
|
||||
bytes message,
|
||||
uint256 messageNonce,
|
||||
uint256 gasLimit
|
||||
);
|
||||
|
||||
/// @notice Message nonce, used to avoid relay attack.
|
||||
uint256 public messageNonce;
|
||||
|
||||
function sendMessage(
|
||||
address _to,
|
||||
bytes memory _message,
|
||||
uint256 _gasLimit
|
||||
) external payable {
|
||||
// solhint-disable-next-line not-rely-on-time
|
||||
uint256 _deadline = block.timestamp + 1 days;
|
||||
// @todo compute fee
|
||||
uint256 _fee = 0;
|
||||
uint256 _nonce = messageNonce;
|
||||
require(msg.value >= _fee, "cannot pay fee");
|
||||
uint256 _value;
|
||||
unchecked {
|
||||
_value = msg.value - _fee;
|
||||
}
|
||||
|
||||
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
|
||||
|
||||
unchecked {
|
||||
messageNonce = _nonce + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -12,7 +12,7 @@ import (
    "sync/atomic"
    "time"

    "github.com/scroll-tech/go-ethereum"
    geth "github.com/scroll-tech/go-ethereum"
    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/math"
@@ -20,6 +20,8 @@ import (
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"

    "scroll-tech/bridge/utils"

    "scroll-tech/bridge/config"
)

@@ -86,6 +88,7 @@ type Sender struct {

    blockNumber   uint64 // Current block number on chain.
    baseFeePerGas uint64 // Current base fee per gas on chain
    pendingNum    int64    // current pending tx count.
    pendingTxs    sync.Map // Mapping from nonce to pending transaction
    confirmCh     chan *Confirmation

@@ -120,6 +123,15 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
        return nil, err
    }

    var baseFeePerGas uint64
    if config.TxType == DynamicFeeTxType {
        if header.BaseFee != nil {
            baseFeePerGas = header.BaseFee.Uint64()
        } else {
            return nil, errors.New("DynamicFeeTxType not supported, header.BaseFee nil")
        }
    }

    sender := &Sender{
        ctx:    ctx,
        config: config,
@@ -128,7 +140,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
        auths:         auths,
        confirmCh:     make(chan *Confirmation, 128),
        blockNumber:   header.Number.Uint64(),
        baseFeePerGas: header.BaseFee.Uint64(),
        baseFeePerGas: baseFeePerGas,
        pendingTxs:    sync.Map{},
        stopCh:        make(chan struct{}),
    }
@@ -138,6 +150,16 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
    return sender, nil
}

// PendingCount returns the current number of pending txs.
func (s *Sender) PendingCount() int64 {
    return atomic.LoadInt64(&s.pendingNum)
}

// PendingLimit returns the maximum number of pending txs the sender can handle.
func (s *Sender) PendingLimit() int64 {
    return s.config.PendingLimit
}

// Stop stops the sender module.
func (s *Sender) Stop() {
    close(s.stopCh)
@@ -156,7 +178,7 @@ func (s *Sender) NumberOfAccounts() int {

func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte) (*FeeData, error) {
    // estimate gas limit
    gasLimit, err := s.client.EstimateGas(s.ctx, ethereum.CallMsg{From: auth.From, To: target, Value: value, Data: data})
    gasLimit, err := s.client.EstimateGas(s.ctx, geth.CallMsg{From: auth.From, To: target, Value: value, Data: data})
    if err != nil {
        return nil, err
    }
@@ -188,16 +210,27 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
    }, nil
}

// IsFull returns true if the pendingTxs pool is full.
func (s *Sender) IsFull() bool {
    return atomic.LoadInt64(&s.pendingNum) == s.config.PendingLimit
}

// SendTransaction sends a signed L2-to-L1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte) (hash common.Hash, err error) {
    if s.IsFull() {
        return common.Hash{}, fmt.Errorf("pending txs is full, pending size: %d", s.config.PendingLimit)
    }
    // We occupy the ID, in case some other threads call with the same ID at the same time
    if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
        return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
    }
    atomic.AddInt64(&s.pendingNum, 1)

    // get
    auth := s.auths.getAccount()
    if auth == nil {
        s.pendingTxs.Delete(ID) // release the ID on failure
        atomic.AddInt64(&s.pendingNum, -1)
        return common.Hash{}, ErrNoAvailableAccount
    }

@@ -205,6 +238,7 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
    defer func() {
        if err != nil {
            s.pendingTxs.Delete(ID) // release the ID on failure
            atomic.AddInt64(&s.pendingNum, -1)
        }
    }()

@@ -232,6 +266,61 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
    return
}

func (s *Sender) getTxAndAddr(txHash common.Hash) (*types.Transaction, uint64, common.Address, error) {
    tx, isPending, err := s.client.TransactionByHash(s.ctx, txHash)
    if err != nil {
        return nil, 0, common.Address{}, err
    }

    sender, err := types.Sender(types.LatestSignerForChainID(s.chainID), tx)
    if err != nil {
        return nil, 0, common.Address{}, err
    }

    if isPending {
        return tx, s.blockNumber, sender, nil
    }

    receipt, err := s.client.TransactionReceipt(s.ctx, txHash)
    if err != nil {
        return nil, 0, common.Address{}, err
    }
    return tx, receipt.BlockNumber.Uint64(), sender, nil
}

// LoadOrSendTx loads the tx into the pending pool if it already exists on chain, or resends it otherwise.
func (s *Sender) LoadOrSendTx(destTxHash common.Hash, ID string, target *common.Address, value *big.Int, data []byte) error {
    tx, blockNumber, from, err := s.getTxAndAddr(destTxHash)
    // If this tx already exists, load it into the pending pool.
    if err == nil && tx != nil {
        auth := s.auths.accounts[from]
        var feeData *FeeData
        feeData, err = s.getFeeData(auth, target, value, data)
        if err != nil {
            return err
        }

        // We occupy the ID, in case some other threads call with the same ID at the same time
        if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
            return fmt.Errorf("has the repeat tx ID, ID: %s", ID)
        }
        atomic.AddInt64(&s.pendingNum, 1)
        s.pendingTxs.Store(ID, &PendingTransaction{
            tx:     tx,
            id:     ID,
            signer: auth,
            // Record the transaction's block number.
            submitAt: blockNumber,
            feeData:  feeData,
        })
        return nil
    }

    // Tx is dropped from chain node, resend it.
    _, err = s.SendTransaction(ID, target, value, data)
    return err
}
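Editor's note: LoadOrSendTx is evidently aimed at restart recovery — instead of blindly resending after a crash, the caller first tries to re-adopt a previously submitted tx by its stored hash (the testLoadOrSendTx case further down exercises exactly this). A hedged usage sketch; the surrounding relayer wiring and bookkeeping table are assumptions, not code from this diff:

package relayer

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"

	"scroll-tech/bridge/sender"
)

// recoverPending re-adopts a tx recorded in the DB before the process died.
// storedHash would come from the relayer's own bookkeeping; if the node no
// longer knows the tx, LoadOrSendTx falls back to a fresh SendTransaction.
func recoverPending(s *sender.Sender, storedHash common.Hash, id string, target common.Address, calldata []byte) error {
	if err := s.LoadOrSendTx(storedHash, id, &target, big.NewInt(0), calldata); err != nil {
		return fmt.Errorf("recover tx %s: %w", id, err)
	}
	// The confirmation (or later resubmission) is then reported on
	// s.ConfirmChan(), exactly as for a fresh SendTransaction call.
	return nil
}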
func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (tx *types.Transaction, err error) {
    var (
        nonce = auth.Nonce.Uint64()
@@ -352,11 +441,20 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
    return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
}

// CheckPendingTransaction checks pending transactions given the number of blocks to wait before confirmation.
func (s *Sender) CheckPendingTransaction(header *types.Header) {
// checkPendingTransaction checks the confirmation status of pending transactions against the latest confirmed block number.
// If a transaction hasn't been confirmed after a certain number of blocks, it will be resubmitted with an increased gas price.
func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64) {
    number := header.Number.Uint64()
    atomic.StoreUint64(&s.blockNumber, number)
    atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())

    if s.config.TxType == DynamicFeeTxType {
        if header.BaseFee != nil {
            atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
        } else {
            log.Error("DynamicFeeTxType not supported, header.BaseFee nil")
        }
    }

    s.pendingTxs.Range(func(key, value interface{}) bool {
        // ignore empty id, since we use empty id to occupy pending task
        if value == nil || reflect.ValueOf(value).IsNil() {
@@ -366,8 +464,9 @@ func (s *Sender) CheckPendingTransaction(header *types.Header) {
        pending := value.(*PendingTransaction)
        receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
        if (err == nil) && (receipt != nil) {
            if number >= receipt.BlockNumber.Uint64()+s.config.Confirmations {
            if receipt.BlockNumber.Uint64() <= confirmed {
                s.pendingTxs.Delete(key)
                atomic.AddInt64(&s.pendingNum, -1)
                // send confirm message
                s.confirmCh <- &Confirmation{
                    ID: pending.id,
@@ -399,6 +498,7 @@ func (s *Sender) CheckPendingTransaction(header *types.Header) {
            if strings.Contains(err.Error(), "nonce") {
                // This key can be deleted
                s.pendingTxs.Delete(key)
                atomic.AddInt64(&s.pendingNum, -1)
                // Try to get the receipt by the latest replaced tx hash
                receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
                if (err == nil) && (receipt != nil) {
@@ -440,7 +540,14 @@ func (s *Sender) loop(ctx context.Context) {
                log.Error("failed to get latest head", "err", err)
                continue
            }
            s.CheckPendingTransaction(header)

            confirmed, err := utils.GetLatestConfirmedBlockNumber(s.ctx, s.client, s.config.Confirmations)
            if err != nil {
                log.Error("failed to get latest confirmed block number", "err", err)
                continue
            }

            s.checkPendingTransaction(header, confirmed)
        case <-checkBalanceTicker.C:
            // Check and set balance.
            _ = s.auths.checkAndSetBalances(ctx)
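Editor's note: the new pendingNum counter gives callers cheap backpressure — PendingCount, PendingLimit, and IsFull let a producer throttle instead of tripping the "pending txs is full" error inside SendTransaction. A small sketch of that pattern; the polling interval is an arbitrary choice here, not something this diff prescribes:

package relayer

import (
	"math/big"
	"time"

	"github.com/scroll-tech/go-ethereum/common"

	"scroll-tech/bridge/sender"
)

// submitWhenReady blocks until the sender has room, then submits.
func submitWhenReady(s *sender.Sender, id string, target common.Address, data []byte) (common.Hash, error) {
	for s.IsFull() {
		// PendingCount()/PendingLimit() could also be exported as a gauge here.
		time.Sleep(100 * time.Millisecond)
	}
	return s.SendTransaction(id, &target, big.NewInt(0), data)
}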
@@ -14,6 +14,7 @@ import (
    cmap "github.com/orcaman/concurrent-map"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/rpc"
    "github.com/stretchr/testify/assert"

    "scroll-tech/common/docker"
@@ -22,7 +23,7 @@ import (
    "scroll-tech/bridge/sender"
)

const TX_BATCH = 50
const TXBatch = 50

var (
    privateKeys []*ecdsa.PrivateKey
@@ -48,6 +49,7 @@ func TestSender(t *testing.T) {
    // Setup
    setupEnv(t)

    t.Run("testLoadOrSendTx", testLoadOrSendTx)
    t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
    t.Run("test 3 account sender", func(t *testing.T) { testBatchSender(t, 3) })
    t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
@@ -58,6 +60,38 @@ func TestSender(t *testing.T) {
    })
}

func testLoadOrSendTx(t *testing.T) {
    senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
    senderCfg.Confirmations = 0
    newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
    if err != nil {
        t.Fatal(err)
    }

    newSender2, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
    if err != nil {
        t.Fatal(err)
    }

    toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
    id := "aaa"

    hash, err := newSender.SendTransaction(id, &toAddr, big.NewInt(0), nil)
    assert.NoError(t, err)

    err = newSender2.LoadOrSendTx(hash, id, &toAddr, big.NewInt(0), nil)
    assert.NoError(t, err)

    select {
    case cfm := <-newSender2.ConfirmChan():
        assert.Equal(t, true, cfm.IsSuccessful)
        assert.Equal(t, hash, cfm.TxHash)
        assert.Equal(t, id, cfm.ID)
    case <-time.After(time.Second * 10):
        t.Error("testLoadOrSendTx test failed because of timeout")
    }
}

func testBatchSender(t *testing.T, batchSize int) {
    for len(privateKeys) < batchSize {
        priv, err := crypto.GenerateKey()
@@ -68,7 +102,7 @@ func testBatchSender(t *testing.T, batchSize int) {
    }

    senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
    senderCfg.Confirmations = 0
    senderCfg.Confirmations = rpc.LatestBlockNumber
    newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
    if err != nil {
        t.Fatal(err)
@@ -84,7 +118,7 @@ func testBatchSender(t *testing.T, batchSize int) {
    for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
        index := idx
        eg.Go(func() error {
            for i := 0; i < TX_BATCH; i++ {
            for i := 0; i < TXBatch; i++ {
                toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
                id := strconv.Itoa(i + index*1000)
                _, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
@@ -103,7 +137,7 @@ func testBatchSender(t *testing.T, batchSize int) {
    if err := eg.Wait(); err != nil {
        t.Error(err)
    }
    t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TX_BATCH*newSender.NumberOfAccounts())
    t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())

    // avoid the 10-minute timeout causing the testcase to panic
    after := time.After(80 * time.Second)
bridge/tests/bridge_test.go (new file, 206 lines)
@@ -0,0 +1,206 @@
package tests

import (
    "context"
    "crypto/ecdsa"
    "math/big"
    "testing"

    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/rpc"
    "github.com/stretchr/testify/assert"

    "scroll-tech/bridge/config"
    "scroll-tech/bridge/mock_bridge"

    "scroll-tech/common/docker"
)

var (
    // config
    cfg *config.Config

    // private key
    privateKey *ecdsa.PrivateKey

    // docker container handlers.
    l1gethImg docker.ImgInstance
    l2gethImg docker.ImgInstance
    dbImg     docker.ImgInstance

    // clients
    l1Client *ethclient.Client
    l2Client *ethclient.Client

    // auth
    l1Auth *bind.TransactOpts
    l2Auth *bind.TransactOpts

    // l1 messenger contract
    l1MessengerInstance *mock_bridge.MockBridgeL1
    l1MessengerAddress  common.Address

    // l1 rollup contract
    l1RollupInstance *mock_bridge.MockBridgeL1
    l1RollupAddress  common.Address

    // l2 messenger contract
    l2MessengerInstance *mock_bridge.MockBridgeL2
    l2MessengerAddress  common.Address
)

func setupEnv(t *testing.T) {
    var err error
    privateKey, err = crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
    assert.NoError(t, err)
    messagePrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121213"))
    assert.NoError(t, err)
    rollupPrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121214"))
    assert.NoError(t, err)

    // Load config.
    cfg, err = config.NewConfig("../config.json")
    assert.NoError(t, err)
    cfg.L1Config.Confirmations = rpc.LatestBlockNumber
    cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
    cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
    cfg.L2Config.Confirmations = rpc.LatestBlockNumber
    cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
    cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}

    // Create l1geth container.
    l1gethImg = docker.NewTestL1Docker(t)
    cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
    cfg.L1Config.Endpoint = l1gethImg.Endpoint()

    // Create l2geth container.
    l2gethImg = docker.NewTestL2Docker(t)
    cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
    cfg.L2Config.Endpoint = l2gethImg.Endpoint()

    // Create db container.
    dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
    cfg.DBConfig.DSN = dbImg.Endpoint()

    // Create l1geth and l2geth client.
    l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)
    assert.NoError(t, err)
    l2Client, err = ethclient.Dial(cfg.L2Config.Endpoint)
    assert.NoError(t, err)

    // Create l1 and l2 auth
    l1Auth = prepareAuth(t, l1Client, privateKey)
    l2Auth = prepareAuth(t, l2Client, privateKey)

    // send some balance to message and rollup sender
    transferEther(t, l1Auth, l1Client, messagePrivateKey)
    transferEther(t, l1Auth, l1Client, rollupPrivateKey)
    transferEther(t, l2Auth, l2Client, messagePrivateKey)
    transferEther(t, l2Auth, l2Client, rollupPrivateKey)
}

func transferEther(t *testing.T, auth *bind.TransactOpts, client *ethclient.Client, privateKey *ecdsa.PrivateKey) {
    targetAddress := crypto.PubkeyToAddress(privateKey.PublicKey)

    gasPrice, err := client.SuggestGasPrice(context.Background())
    assert.NoError(t, err)
    gasPrice.Mul(gasPrice, big.NewInt(2))

    // Get pending nonce
    nonce, err := client.PendingNonceAt(context.Background(), auth.From)
    assert.NoError(t, err)

    // 200 ether should be enough
    value, ok := big.NewInt(0).SetString("0xad78ebc5ac6200000", 0)
    assert.Equal(t, ok, true)

    tx := types.NewTx(&types.LegacyTx{
        Nonce:    nonce,
        To:       &targetAddress,
        Value:    value,
        Gas:      500000,
        GasPrice: gasPrice,
    })
    signedTx, err := auth.Signer(auth.From, tx)
    assert.NoError(t, err)

    err = client.SendTransaction(context.Background(), signedTx)
    assert.NoError(t, err)

    receipt, err := bind.WaitMined(context.Background(), client, signedTx)
    assert.NoError(t, err)
    if receipt.Status != types.ReceiptStatusSuccessful {
        t.Fatalf("Call failed")
    }
}
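Editor's note: the magic value 0xad78ebc5ac6200000 in transferEther is exactly 200 ether (200 × 10^18 wei), as the comment claims; a one-liner confirms it:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 200 * 10^18 wei == 0xad78ebc5ac6200000
	wei := new(big.Int).Mul(big.NewInt(200), new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil))
	fmt.Printf("%#x\n", wei) // prints 0xad78ebc5ac6200000
}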
func free(t *testing.T) {
    if dbImg != nil {
        assert.NoError(t, dbImg.Stop())
    }
    if l1gethImg != nil {
        assert.NoError(t, l1gethImg.Stop())
    }
    if l2gethImg != nil {
        assert.NoError(t, l2gethImg.Stop())
    }
}

func prepareContracts(t *testing.T) {
    var err error
    var tx *types.Transaction

    // L1 messenger contract
    _, tx, l1MessengerInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
    assert.NoError(t, err)
    l1MessengerAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
    assert.NoError(t, err)

    // L1 rollup contract
    _, tx, l1RollupInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
    assert.NoError(t, err)
    l1RollupAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
    assert.NoError(t, err)

    // L2 messenger contract
    _, tx, l2MessengerInstance, err = mock_bridge.DeployMockBridgeL2(l2Auth, l2Client)
    assert.NoError(t, err)
    l2MessengerAddress, err = bind.WaitDeployed(context.Background(), l2Client, tx)
    assert.NoError(t, err)

    cfg.L1Config.L1MessengerAddress = l1MessengerAddress
    cfg.L1Config.RollupContractAddress = l1RollupAddress
    cfg.L1Config.RelayerConfig.MessengerContractAddress = l2MessengerAddress

    cfg.L2Config.L2MessengerAddress = l2MessengerAddress
    cfg.L2Config.RelayerConfig.MessengerContractAddress = l1MessengerAddress
    cfg.L2Config.RelayerConfig.RollupContractAddress = l1RollupAddress
}

func prepareAuth(t *testing.T, client *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
    chainID, err := client.ChainID(context.Background())
    assert.NoError(t, err)
    auth, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID)
    assert.NoError(t, err)
    auth.Value = big.NewInt(0) // in wei
    assert.NoError(t, err)
    return auth
}

func TestFunction(t *testing.T) {
    setupEnv(t)

    // l1 rollup and watch rollup events
    t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)

    // l2 message
    t.Run("testRelayL2MessageSucceed", testRelayL2MessageSucceed)

    t.Cleanup(func() {
        free(t)
    })
}
bridge/tests/l2_message_relay_test.go (new file, 174 lines)
@@ -0,0 +1,174 @@
package tests

import (
    "context"
    "math/big"
    "testing"

    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/rpc"
    "github.com/stretchr/testify/assert"

    "scroll-tech/database"
    "scroll-tech/database/migrate"
    "scroll-tech/database/orm"

    "scroll-tech/bridge/l1"
    "scroll-tech/bridge/l2"
)

func testRelayL2MessageSucceed(t *testing.T) {
    // Create db handler and reset db.
    db, err := database.NewOrmFactory(cfg.DBConfig)
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
    defer db.Close()

    prepareContracts(t)

    // Create L2Relayer
    l2Cfg := cfg.L2Config
    l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)
    defer l2Relayer.Stop()

    // Create L2Watcher
    confirmations := rpc.LatestBlockNumber
    l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)

    // Create L1Watcher
    l1Cfg := cfg.L1Config
    l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)

    // send message through l2 messenger contract
    nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
    assert.NoError(t, err)
    sendTx, err := l2MessengerInstance.SendMessage(l2Auth, l1Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
    assert.NoError(t, err)
    sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
    assert.NoError(t, err)
    if sendReceipt.Status != types.ReceiptStatusSuccessful || err != nil {
        t.Fatalf("Call failed")
    }

    // l2 watcher processes events
    l2Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())

    // check db status
    msg, err := db.GetL2MessageByNonce(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, msg.Status, orm.MsgPending)
    assert.Equal(t, msg.Sender, l2Auth.From.String())
    assert.Equal(t, msg.Target, l1Auth.From.String())

    // add fake blocks
    traces := []*types.BlockTrace{
        {
            Header: &types.Header{
                Number:     sendReceipt.BlockNumber,
                ParentHash: common.Hash{},
                Difficulty: big.NewInt(0),
                BaseFee:    big.NewInt(0),
            },
            StorageTrace: &types.StorageTrace{},
        },
    }
    err = db.InsertBlockTraces(traces)
    assert.NoError(t, err)

    // add fake batch
    dbTx, err := db.Beginx()
    assert.NoError(t, err)
    batchID, err := db.NewBatchInDBTx(dbTx,
        &orm.BlockInfo{
            Number:     traces[0].Header.Number.Uint64(),
            Hash:       traces[0].Header.Hash().String(),
            ParentHash: traces[0].Header.ParentHash.String(),
        },
        &orm.BlockInfo{
            Number:     traces[0].Header.Number.Uint64(),
            Hash:       traces[0].Header.Hash().String(),
            ParentHash: traces[0].Header.ParentHash.String(),
        },
        traces[0].Header.ParentHash.String(), 1, 194676)
    assert.NoError(t, err)
    err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
        traces[0].Header.Number.Uint64(),
        traces[0].Header.Number.Uint64()}, batchID)
    assert.NoError(t, err)
    err = dbTx.Commit()
    assert.NoError(t, err)

    // add dummy proof
    tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
    assert.NoError(t, err)
    err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
    assert.NoError(t, err)

    // process pending batch and check status
    l2Relayer.ProcessPendingBatches()
    status, err := db.GetRollupStatus(batchID)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupCommitting, status)
    commitTxHash, err := db.GetCommitTxHash(batchID)
    assert.NoError(t, err)
    assert.Equal(t, true, commitTxHash.Valid)
    commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
    assert.NoError(t, err)
    commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
    assert.NoError(t, err)
    assert.Equal(t, len(commitTxReceipt.Logs), 1)

    // fetch CommitBatch rollup events
    err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
    assert.NoError(t, err)
    status, err = db.GetRollupStatus(batchID)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupCommitted, status)

    // process committed batch and check status
    l2Relayer.ProcessCommittedBatches()
    status, err = db.GetRollupStatus(batchID)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupFinalizing, status)
    finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
    assert.NoError(t, err)
    assert.Equal(t, true, finalizeTxHash.Valid)
    finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
    assert.NoError(t, err)
    finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
    assert.NoError(t, err)
    assert.Equal(t, len(finalizeTxReceipt.Logs), 1)

    // fetch FinalizeBatch events
    err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
    assert.NoError(t, err)
    status, err = db.GetRollupStatus(batchID)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupFinalized, status)

    // process l2 messages
    l2Relayer.ProcessSavedEvents()
    msg, err = db.GetL2MessageByNonce(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, msg.Status, orm.MsgSubmitted)
    relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, true, relayTxHash.Valid)
    relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
    assert.NoError(t, err)
    relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx)
    assert.NoError(t, err)
    assert.Equal(t, len(relayTxReceipt.Logs), 1)

    // fetch message relayed events
    err = l1Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
    assert.NoError(t, err)
    msg, err = db.GetL2MessageByNonce(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, msg.Status, orm.MsgConfirmed)
}
132
bridge/tests/rollup_test.go
Normal file
@@ -0,0 +1,132 @@
package tests

import (
	"context"
	"math/big"
	"scroll-tech/database"
	"scroll-tech/database/migrate"
	"scroll-tech/database/orm"
	"testing"

	"scroll-tech/bridge/l1"
	"scroll-tech/bridge/l2"

	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/stretchr/testify/assert"
)

func testCommitBatchAndFinalizeBatch(t *testing.T) {
	// Create db handler and reset db.
	db, err := database.NewOrmFactory(cfg.DBConfig)
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
	defer db.Close()

	prepareContracts(t)

	// Create L2Relayer
	l2Cfg := cfg.L2Config
	l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
	assert.NoError(t, err)
	defer l2Relayer.Stop()

	// Create L1Watcher
	l1Cfg := cfg.L1Config
	l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)

	// add some blocks to db
	var traces []*types.BlockTrace
	var parentHash common.Hash
	for i := 1; i <= 10; i++ {
		header := types.Header{
			Number:     big.NewInt(int64(i)),
			ParentHash: parentHash,
			Difficulty: big.NewInt(0),
			BaseFee:    big.NewInt(0),
		}
		traces = append(traces, &types.BlockTrace{
			Header:       &header,
			StorageTrace: &types.StorageTrace{},
		})
		parentHash = header.Hash()
	}
	err = db.InsertBlockTraces(traces)
	assert.NoError(t, err)

	// add one batch to db
	dbTx, err := db.Beginx()
	assert.NoError(t, err)
	batchID, err := db.NewBatchInDBTx(dbTx,
		&orm.BlockInfo{
			Number:     traces[0].Header.Number.Uint64(),
			Hash:       traces[0].Header.Hash().String(),
			ParentHash: traces[0].Header.ParentHash.String(),
		},
		&orm.BlockInfo{
			Number:     traces[1].Header.Number.Uint64(),
			Hash:       traces[1].Header.Hash().String(),
			ParentHash: traces[1].Header.ParentHash.String(),
		},
		traces[0].Header.ParentHash.String(), 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
	assert.NoError(t, err)
	err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
		traces[0].Header.Number.Uint64(),
		traces[1].Header.Number.Uint64()}, batchID)
	assert.NoError(t, err)
	err = dbTx.Commit()
	assert.NoError(t, err)

	// process pending batch and check status
	l2Relayer.ProcessPendingBatches()

	status, err := db.GetRollupStatus(batchID)
	assert.NoError(t, err)
	assert.Equal(t, orm.RollupCommitting, status)
	commitTxHash, err := db.GetCommitTxHash(batchID)
	assert.NoError(t, err)
	assert.Equal(t, true, commitTxHash.Valid)
	commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
	assert.NoError(t, err)
	commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
	assert.NoError(t, err)
	assert.Equal(t, len(commitTxReceipt.Logs), 1)

	// fetch rollup events
	err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
	assert.NoError(t, err)
	status, err = db.GetRollupStatus(batchID)
	assert.NoError(t, err)
	assert.Equal(t, orm.RollupCommitted, status)

	// add dummy proof
	tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
	tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
	err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
	assert.NoError(t, err)
	err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
	assert.NoError(t, err)

	// process committed batch and check status
	l2Relayer.ProcessCommittedBatches()

	status, err = db.GetRollupStatus(batchID)
	assert.NoError(t, err)
	assert.Equal(t, orm.RollupFinalizing, status)
	finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
	assert.NoError(t, err)
	assert.Equal(t, true, finalizeTxHash.Valid)
	finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
	assert.NoError(t, err)
	finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
	assert.NoError(t, err)
	assert.Equal(t, len(finalizeTxReceipt.Logs), 1)

	// fetch rollup events
	err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
	assert.NoError(t, err)
	status, err = db.GetRollupStatus(batchID)
	assert.NoError(t, err)
	assert.Equal(t, orm.RollupFinalized, status)
}
56
bridge/utils/confirmation.go
Normal file
@@ -0,0 +1,56 @@
package utils

import (
	"context"
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/rpc"
)

type ethClient interface {
	BlockNumber(ctx context.Context) (uint64, error)
	HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
}

// GetLatestConfirmedBlockNumber returns the latest confirmed block number for the given rpc.BlockNumber tag.
func GetLatestConfirmedBlockNumber(ctx context.Context, client ethClient, confirm rpc.BlockNumber) (uint64, error) {
	switch true {
	case confirm == rpc.SafeBlockNumber || confirm == rpc.FinalizedBlockNumber:
		var tag *big.Int
		if confirm == rpc.FinalizedBlockNumber {
			tag = big.NewInt(int64(rpc.FinalizedBlockNumber))
		} else {
			tag = big.NewInt(int64(rpc.SafeBlockNumber))
		}

		header, err := client.HeaderByNumber(ctx, tag)
		if err != nil {
			return 0, err
		}
		if !header.Number.IsInt64() {
			return 0, fmt.Errorf("received invalid block confirm: %v", header.Number)
		}
		return header.Number.Uint64(), nil
	case confirm == rpc.LatestBlockNumber:
		number, err := client.BlockNumber(ctx)
		if err != nil {
			return 0, err
		}
		return number, nil
	case confirm.Int64() >= 0: // A non-negative integer is treated as a confirmation count.
		number, err := client.BlockNumber(ctx)
		if err != nil {
			return 0, err
		}
		cfmNum := uint64(confirm.Int64())

		if number >= cfmNum {
			return number - cfmNum, nil
		}
		return 0, nil
	default:
		return 0, fmt.Errorf("unknown confirmation type: %v", confirm)
	}
}
bridge/utils/confirmation_test.go
Normal file
107
bridge/utils/confirmation_test.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package utils_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common/math"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/bridge/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
tests = []struct {
|
||||
input string
|
||||
mustFail bool
|
||||
expected rpc.BlockNumber
|
||||
}{
|
||||
{`"0x"`, true, rpc.BlockNumber(0)},
|
||||
{`"0x0"`, false, rpc.BlockNumber(0)},
|
||||
{`"0X1"`, false, rpc.BlockNumber(1)},
|
||||
{`"0x00"`, true, rpc.BlockNumber(0)},
|
||||
{`"0x01"`, true, rpc.BlockNumber(0)},
|
||||
{`"0x1"`, false, rpc.BlockNumber(1)},
|
||||
{`"0x12"`, false, rpc.BlockNumber(18)},
|
||||
{`"0x7fffffffffffffff"`, false, rpc.BlockNumber(math.MaxInt64)},
|
||||
{`"0x8000000000000000"`, true, rpc.BlockNumber(0)},
|
||||
{"0", true, rpc.BlockNumber(0)},
|
||||
{`"ff"`, true, rpc.BlockNumber(0)},
|
||||
{`"safe"`, false, rpc.SafeBlockNumber},
|
||||
{`"finalized"`, false, rpc.FinalizedBlockNumber},
|
||||
{`"pending"`, false, rpc.PendingBlockNumber},
|
||||
{`"latest"`, false, rpc.LatestBlockNumber},
|
||||
{`"earliest"`, false, rpc.EarliestBlockNumber},
|
||||
{`someString`, true, rpc.BlockNumber(0)},
|
||||
{`""`, true, rpc.BlockNumber(0)},
|
||||
{``, true, rpc.BlockNumber(0)},
|
||||
}
|
||||
)
|
||||
|
||||
func TestUnmarshalJSON(t *testing.T) {
|
||||
for i, test := range tests {
|
||||
var num rpc.BlockNumber
|
||||
err := json.Unmarshal([]byte(test.input), &num)
|
||||
if test.mustFail && err == nil {
|
||||
t.Errorf("Test %d should fail", i)
|
||||
continue
|
||||
}
|
||||
if !test.mustFail && err != nil {
|
||||
t.Errorf("Test %d should pass but got err: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if num != test.expected {
|
||||
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalJSON(t *testing.T) {
|
||||
for i, test := range tests {
|
||||
var num rpc.BlockNumber
|
||||
want, err := json.Marshal(test.expected)
|
||||
assert.Nil(t, err)
|
||||
if !test.mustFail {
|
||||
err = json.Unmarshal([]byte(test.input), &num)
|
||||
assert.Nil(t, err)
|
||||
got, err := json.Marshal(&num)
|
||||
assert.Nil(t, err)
|
||||
if string(want) != string(got) {
|
||||
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type MockEthClient struct {
|
||||
val uint64
|
||||
}
|
||||
|
||||
func (e MockEthClient) BlockNumber(ctx context.Context) (uint64, error) {
|
||||
return e.val, nil
|
||||
}
|
||||
|
||||
func (e MockEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
|
||||
return &types.Header{Number: new(big.Int).SetUint64(e.val)}, nil
|
||||
}
|
||||
|
||||
func TestGetLatestConfirmedBlockNumber(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := MockEthClient{}
|
||||
|
||||
client.val = 5
|
||||
confirmed, err := utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, uint64(0), confirmed)
|
||||
|
||||
client.val = 7
|
||||
confirmed, err = utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, uint64(1), confirmed)
|
||||
}
|
||||
@@ -20,8 +20,8 @@ func encodePacked(input ...[]byte) []byte {

// ComputeMessageHash compute the message hash
func ComputeMessageHash(
	target common.Address,
	sender common.Address,
	target common.Address,
	value *big.Int,
	fee *big.Int,
	deadline *big.Int,
@@ -29,8 +29,8 @@ func ComputeMessageHash(
	messageNonce *big.Int,
) common.Hash {
	packed := encodePacked(
		target.Bytes(),
		sender.Bytes(),
		target.Bytes(),
		math.U256Bytes(value),
		math.U256Bytes(fee),
		math.U256Bytes(deadline),
@@ -7,6 +7,7 @@ import (
	"scroll-tech/bridge/utils"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/stretchr/testify/assert"
)

func TestKeccak2(t *testing.T) {
@@ -28,15 +29,13 @@ func TestKeccak2(t *testing.T) {

func TestComputeMessageHash(t *testing.T) {
	hash := utils.ComputeMessageHash(
		common.HexToAddress("0xdafea492d9c6733ae3d56b7ed1adb60692c98bc5"),
		common.HexToAddress("0xeafea492d9c6733ae3d56b7ed1adb60692c98bf7"),
		big.NewInt(1),
		big.NewInt(2),
		big.NewInt(1234567),
		common.Hex2Bytes("0011223344"),
		big.NewInt(3),
		common.HexToAddress("0xd7227113b92e537aeda220d5a2f201b836e5879d"),
		common.HexToAddress("0x47c02b023b6787ef4e503df42bbb1a94f451a1c0"),
		big.NewInt(5000000000000000),
		big.NewInt(0),
		big.NewInt(1674204924),
		common.Hex2Bytes("8eaac8a30000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000000000000000000000000000000011c37937e0800000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000"),
		big.NewInt(30706),
	)
	if hash != common.HexToHash("0x58c9a5abfd2a558bb6a6fd5192b36fe9325d98763bafd3a51a1ea28a5d0b990b") {
		t.Fatalf("Invalid ComputeMessageHash, want %s, got %s", "0x58c9a5abfd2a558bb6a6fd5192b36fe9325d98763bafd3a51a1ea28a5d0b990b", hash.Hex())
	}
	assert.Equal(t, hash.String(), "0x920e59f62ca89a0f481d44961c55d299dd20c575693692d61fdf3ca579d8edf3")
}
@@ -179,6 +179,13 @@ linters:
    - depguard
    - gocyclo
    - unparam
    - exportloopref
    - sqlclosecheck
    - rowserrcheck
    - durationcheck
    - bidichk
    - typecheck
    - unused
  enable-all: false
  disable:

@@ -200,16 +207,7 @@ issues:
  # Exclude some linters from running on tests files.
  - path: _test\.go
    linters:
      - gocyclo
      - errcheck
      - dupl
      - gosec

  # Exclude known linters from partially hard-vendored code,
  # which is impossible to exclude via "nolint" comments.
  - path: internal/hmac/
    text: "weak cryptographic primitive"
    linters:
      - gosec

  # Exclude some staticcheck messages
@@ -217,18 +215,6 @@
      - staticcheck
    text: "SA9003:"

  - linters:
      - golint
    text: "package comment should be of the form"

  - linters:
      - golint
    text: "don't use ALL_CAPS in Go names;"

  - linters:
      - golint
    text: "don't use underscores in Go names;"

  # Exclude lll issues for long lines with go:generate
  - linters:
      - lll
@@ -8,6 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x

# Build bridge
@@ -1,5 +1,5 @@
assets/
docs/
integration-test/
l2geth/
rpc-gateway/
*target/*
@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.17-rust-nightly-2022-08-23 as chef
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-08-23 as chef
WORKDIR app

FROM chef as planner
@@ -24,6 +24,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x

@@ -31,12 +32,15 @@ RUN go mod download -x
FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.a ./coordinator/verifier/lib/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/verifier/lib/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv verifier/lib /bin/

# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04

RUN mkdir -p /src/coordinator/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/verifier/lib
COPY --from=builder /bin/coordinator /bin/

ENTRYPOINT ["/bin/coordinator"]
@@ -1,6 +1,6 @@
assets/
contracts/
docs/
integration-test/
l2geth/
rpc-gateway/
*target/*
@@ -8,6 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x

# Build db_cli
@@ -1,6 +1,6 @@
assets/
contracts/
docs/
integration-test/
l2geth/
rpc-gateway/
*target/*
@@ -1,6 +1,6 @@
GO_VERSION := 1.18
PYTHON_VERSION := 3.10
RUST_VERSION := nightly-2022-08-23
RUST_VERSION := nightly-2022-12-10

.PHONY: all go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder py-runner
@@ -4,4 +4,4 @@ FROM golang:1.18-alpine

# RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories

RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates openssl-dev
@@ -1,8 +1,8 @@
FROM golang:1.18-alpine
ARG CARGO_CHEF_TAG=0.1.41
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10

RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates openssl-dev

# RUN apk add --no-cache libc6-compat
# RUN apk add --no-cache gcompat

@@ -14,7 +14,7 @@ ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo

# Add Toolchain
RUN rustup toolchain install nightly-2022-08-23
RUN rustup toolchain install nightly-2022-12-10

# TODO: make this ARG
ENV CARGO_CHEF_TAG=0.1.41
@@ -1,10 +1,11 @@
ARG ALPINE_VERSION=3.15
FROM alpine:${ALPINE_VERSION}
ARG CARGO_CHEF_TAG=0.1.41
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10

RUN apk add --no-cache \
	ca-certificates \
	openssl-dev \
	gcc \
	git \
	musl-dev

@@ -13,4 +13,4 @@ RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"

# Add Toolchain
RUN rustup toolchain install nightly-2022-08-23
RUN rustup toolchain install nightly-2022-12-10
14
build/post-test-report-coverage.sh
Executable file
@@ -0,0 +1,14 @@
#!/bin/bash
set -uex
${GOROOT}/bin/bin/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.db.txt > coverage.db.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.common.txt > coverage.common.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml

npx cobertura-merge -o cobertura.xml \
  package1=coverage.bridge.xml \
  package2=coverage.db.xml \
  package3=coverage.common.xml \
  package4=coverage.coordinator.xml \
  package5=coverage.integration.xml
63
build/push-docker-tag.Jenkinsfile
Normal file
@@ -0,0 +1,63 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'

pipeline {
    agent any
    options {
        timeout (20)
    }
    tools {
        go 'go-1.18'
        nodejs "nodejs"
    }
    environment {
        GO111MODULE = 'on'
        PATH="/home/ubuntu/.cargo/bin:$PATH"
        // LOG_DOCKER = 'true'
    }
    stages {
        stage('Tag') {
            steps {
                script {
                    TAGNAME = sh(returnStdout: true, script: 'git tag -l --points-at HEAD')
                    sh "echo ${TAGNAME}"
                    // ...
                }
            }
        }
        stage('Build') {
            environment {
                // Extract the username and password of our credentials into "DOCKER_CREDENTIALS_USR" and "DOCKER_CREDENTIALS_PSW".
                // (NOTE 1: DOCKER_CREDENTIALS will be set to "your_username:your_password".)
                // The new variables will always be YOUR_VARIABLE_NAME + _USR and _PSW.
                // (NOTE 2: You can't print credentials in the pipeline for security reasons.)
                DOCKER_CREDENTIALS = credentials('dockerhub')
            }
            steps {
                withCredentials([usernamePassword(credentialsId: "${credentialDocker}", passwordVariable: 'dockerPassword', usernameVariable: 'dockerUser')]) {
                    // Use a scripted pipeline.
                    script {
                        stage('Push image') {
                            if (TAGNAME == ""){
                                return;
                            }
                            sh "docker login --username=${dockerUser} --password=${dockerPassword}"
                            sh "make -C bridge docker"
                            sh "make -C coordinator docker"
                            sh "docker tag scrolltech/bridge:latest scrolltech/bridge:${TAGNAME}"
                            sh "docker tag scrolltech/coordinator:latest scrolltech/coordinator:${TAGNAME}"
                            sh "docker push scrolltech/bridge:${TAGNAME}"
                            sh "docker push scrolltech/coordinator:${TAGNAME}"
                        }
                    }
                }
            }
        }
    }
    post {
        always {
            cleanWs()
            slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${TAGNAME} Tag build ${currentBuild.result}")
        }
    }
}
89
common/cmd/cmd.go
Normal file
@@ -0,0 +1,89 @@
package cmd

import (
	"os"
	"os/exec"
	"strings"
	"sync"
	"testing"

	cmap "github.com/orcaman/concurrent-map"
)

var verbose bool

func init() {
	v := os.Getenv("LOG_DOCKER")
	if v == "true" || v == "TRUE" {
		verbose = true
	}
}

type checkFunc func(buf string)

// Cmd struct
type Cmd struct {
	*testing.T

	name string
	args []string

	mu  sync.Mutex
	cmd *exec.Cmd

	checkFuncs cmap.ConcurrentMap //map[string]checkFunc

	//stdout bytes.Buffer
	Err error
}

// NewCmd creates a Cmd instance.
func NewCmd(t *testing.T, name string, args ...string) *Cmd {
	return &Cmd{
		T:          t,
		checkFuncs: cmap.New(),
		name:       name,
		args:       args,
	}
}

// RegistFunc registers a check func.
func (t *Cmd) RegistFunc(key string, check checkFunc) {
	t.checkFuncs.Set(key, check)
}

// UnRegistFunc unregisters a check func.
func (t *Cmd) UnRegistFunc(key string) {
	t.checkFuncs.Pop(key)
}

func (t *Cmd) runCmd() {
	cmd := exec.Command(t.args[0], t.args[1:]...) //nolint:gosec
	cmd.Stdout = t
	cmd.Stderr = t
	_ = cmd.Run()
}

// RunCmd runs the command, in a goroutine when parallel is true.
func (t *Cmd) RunCmd(parallel bool) {
	t.Log("cmd: ", t.args)
	if parallel {
		go t.runCmd()
	} else {
		t.runCmd()
	}
}

func (t *Cmd) Write(data []byte) (int, error) {
	out := string(data)
	if verbose {
		t.Logf("%s: %v", t.name, out)
	} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
		t.Logf("%s: %v", t.name, out)
	}
	go t.checkFuncs.IterCb(func(_ string, value interface{}) {
		check := value.(checkFunc)
		check(out)
	})
	return len(data), nil
}
114
common/cmd/cmd_app.go
Normal file
@@ -0,0 +1,114 @@
package cmd

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
	"time"

	"github.com/docker/docker/pkg/reexec"
	"github.com/stretchr/testify/assert"
)

// RunApp exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (t *Cmd) RunApp(waitResult func() bool) {
	t.Log("cmd: ", append([]string{t.name}, t.args...))
	cmd := &exec.Cmd{
		Path:   reexec.Self(),
		Args:   append([]string{t.name}, t.args...),
		Stderr: t,
		Stdout: t,
	}
	if waitResult != nil {
		go func() {
			_ = cmd.Run()
		}()
		waitResult()
	} else {
		_ = cmd.Run()
	}

	t.mu.Lock()
	t.cmd = cmd
	t.mu.Unlock()
}

// WaitExit waits until the process exits.
func (t *Cmd) WaitExit() {
	// Wait all the check funcs are finished or test status is failed.
	for !(t.Failed() || t.checkFuncs.IsEmpty()) {
		<-time.After(time.Millisecond * 500)
	}

	// Send interrupt signal.
	t.mu.Lock()
	_ = t.cmd.Process.Signal(os.Interrupt)
	t.mu.Unlock()
}

// Interrupt sends an interrupt signal.
func (t *Cmd) Interrupt() {
	t.mu.Lock()
	t.Err = t.cmd.Process.Signal(os.Interrupt)
	t.mu.Unlock()
}

// WaitResult returns true if the keyword is seen before the timeout.
func (t *Cmd) WaitResult(timeout time.Duration, keyword string) bool {
	if keyword == "" {
		return false
	}
	okCh := make(chan struct{}, 1)
	t.RegistFunc(keyword, func(buf string) {
		if strings.Contains(buf, keyword) {
			select {
			case okCh <- struct{}{}:
			default:
				return
			}
		}
	})
	defer t.UnRegistFunc(keyword)
	select {
	case <-okCh:
		return true
	case <-time.After(timeout):
		assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
	}
	return false
}

// ExpectWithTimeout waits for the keyword within the timeout, optionally in parallel.
func (t *Cmd) ExpectWithTimeout(parallel bool, timeout time.Duration, keyword string) {
	if keyword == "" {
		return
	}
	okCh := make(chan struct{}, 1)
	t.RegistFunc(keyword, func(buf string) {
		if strings.Contains(buf, keyword) {
			select {
			case okCh <- struct{}{}:
			default:
				return
			}
		}
	})

	waitResult := func() {
		defer t.UnRegistFunc(keyword)
		select {
		case <-okCh:
			return
		case <-time.After(timeout):
			assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
		}
	}

	if parallel {
		go waitResult()
	} else {
		waitResult()
	}
}
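A hedged sketch of how these pieces are meant to compose in an integration test. The binary name, flag, and log keyword below are illustrative assumptions, not taken from this diff, and the named binary must have been registered via reexec.Register in an init func:

func TestRunBridgeApp(t *testing.T) {
	// Hypothetical reexec name and flags.
	app := cmd.NewCmd(t, "bridge-test", "--config", "./config.json")
	// With a non-nil waitResult, RunApp starts the process in a goroutine and blocks
	// until the startup keyword appears (or WaitResult times out and fails the test).
	app.RunApp(func() bool { return app.WaitResult(time.Second*20, "Start bridge successfully") })
	// ... exercise the running process here ...
	app.WaitExit() // sends os.Interrupt once all registered check funcs have drained
}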
42
common/cmd/cmd_test.go
Normal file
@@ -0,0 +1,42 @@
package cmd_test

import (
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"scroll-tech/common/cmd"
)

func TestCmd(t *testing.T) {
	app := cmd.NewCmd(t, "curTime", "date", "+%Y-%m-%d")

	tm := time.Now()
	curTime := fmt.Sprintf("%d-%02d-%02d", tm.Year(), tm.Month(), tm.Day())

	okCh := make(chan struct{}, 1)
	app.RegistFunc(curTime, func(buf string) {
		if strings.Contains(buf, curTime) {
			select {
			case okCh <- struct{}{}:
			default:
				return
			}
		}
	})
	defer app.UnRegistFunc(curTime)

	// Run cmd.
	app.RunCmd(true)

	// Wait result.
	select {
	case <-okCh:
		return
	case <-time.After(time.Second):
		assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", curTime))
	}
}
@@ -1,98 +0,0 @@
package docker

import (
	"errors"
	"os"
	"os/exec"
	"strings"
	"sync"
	"testing"
)

var verbose bool

func init() {
	v := os.Getenv("LOG_DOCKER")
	if v == "true" || v == "TRUE" {
		verbose = true
	}
}

type checkFunc func(buf string)

// Cmd struct
type Cmd struct {
	*testing.T

	checkFuncs sync.Map //map[string]checkFunc

	//stdout bytes.Buffer
	errMsg chan error
}

// NewCmd create Cmd instance.
func NewCmd(t *testing.T) *Cmd {
	cmd := &Cmd{
		T: t,
		//stdout: bytes.Buffer{},
		errMsg: make(chan error, 2),
	}
	// Handle panic.
	cmd.RegistFunc("panic", func(buf string) {
		if strings.Contains(buf, "panic") {
			cmd.errMsg <- errors.New(buf)
		}
	})
	return cmd
}

// RegistFunc register check func
func (t *Cmd) RegistFunc(key string, check checkFunc) {
	t.checkFuncs.Store(key, check)
}

// UnRegistFunc unregister check func
func (t *Cmd) UnRegistFunc(key string) {
	if _, ok := t.checkFuncs.Load(key); ok {
		t.checkFuncs.Delete(key)
	}
}

// RunCmd parallel running when parallel is true.
func (t *Cmd) RunCmd(args []string, parallel bool) {
	t.Log("RunCmd cmd", args)
	if parallel {
		go t.runCmd(args)
	} else {
		t.runCmd(args)
	}
}

// ErrMsg return error output channel
func (t *Cmd) ErrMsg() <-chan error {
	return t.errMsg
}

func (t *Cmd) Write(data []byte) (int, error) {
	out := string(data)
	if verbose {
		t.Logf(out)
	} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
		t.Logf(out)
	}
	go func(content string) {
		t.checkFuncs.Range(func(key, value any) bool {
			check := value.(checkFunc)
			check(content)
			return true
		})
	}(out)
	return len(data), nil
}

func (t *Cmd) runCmd(args []string) {
	cmd := exec.Command(args[0], args[1:]...) //nolint:gosec
	cmd.Stdout = t
	cmd.Stderr = t
	_ = cmd.Run()
}
@@ -8,6 +8,9 @@ import (
	"time"

	"github.com/docker/docker/api/types"

	"scroll-tech/common/cmd"
	"scroll-tech/common/utils"
)

// ImgDB the postgres image manager.
@@ -21,19 +24,20 @@ type ImgDB struct {
	password string

	running bool
	*Cmd
	cmd *cmd.Cmd
}

// NewImgDB return postgres db img instance.
func NewImgDB(t *testing.T, image, password, dbName string, port int) ImgInstance {
	return &ImgDB{
	img := &ImgDB{
		image:    image,
		name:     fmt.Sprintf("%s-%s_%d", image, dbName, port),
		password: password,
		dbName:   dbName,
		port:     port,
		Cmd:      NewCmd(t),
	}
	img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
	return img
}

// Start postgres db container.
@@ -42,7 +46,7 @@ func (i *ImgDB) Start() error {
	if id != "" {
		return fmt.Errorf("container already exist, name: %s", i.name)
	}
	i.Cmd.RunCmd(i.prepare(), true)
	i.cmd.RunCmd(true)
	i.running = i.isOk()
	if !i.running {
		_ = i.Stop()
@@ -59,14 +63,13 @@ func (i *ImgDB) Stop() error {
	i.running = false

	ctx := context.Background()
	// check if container is running, stop the running container.
	id := GetContainerID(i.name)
	if id != "" {
		timeout := time.Second * 3
		if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
			return err
		}
		i.id = id
	// stop the running container.
	if i.id == "" {
		i.id = GetContainerID(i.name)
	}
	timeout := time.Second * 3
	if err := cli.ContainerStop(ctx, i.id, &timeout); err != nil {
		return err
	}
	// remove the stopped container.
	return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
@@ -94,7 +97,7 @@ func (i *ImgDB) prepare() []string {
func (i *ImgDB) isOk() bool {
	keyword := "database system is ready to accept connections"
	okCh := make(chan struct{}, 1)
	i.RegistFunc(keyword, func(buf string) {
	i.cmd.RegistFunc(keyword, func(buf string) {
		if strings.Contains(buf, keyword) {
			select {
			case okCh <- struct{}{}:
@@ -103,14 +106,16 @@ func (i *ImgDB) isOk() bool {
			}
		}
	})
	defer i.UnRegistFunc(keyword)
	defer i.cmd.UnRegistFunc(keyword)

	select {
	case <-okCh:
		time.Sleep(time.Millisecond * 1500)
		i.id = GetContainerID(i.name)
		utils.TryTimes(3, func() bool {
			i.id = GetContainerID(i.name)
			return i.id != ""
		})
		return i.id != ""
	case <-time.NewTimer(time.Second * 10).C:
	case <-time.After(time.Second * 20):
		return false
	}
}

@@ -9,6 +9,9 @@ import (
	"time"

	"github.com/docker/docker/api/types"

	"scroll-tech/common/cmd"
	"scroll-tech/common/utils"
)

// ImgGeth the geth image manager include l1geth and l2geth.
@@ -23,20 +26,21 @@ type ImgGeth struct {
	wsPort int

	running bool
	*Cmd
	cmd *cmd.Cmd
}

// NewImgGeth return geth img instance.
func NewImgGeth(t *testing.T, image, volume, ipc string, hPort, wPort int) ImgInstance {
	return &ImgGeth{
	img := &ImgGeth{
		image:    image,
		name:     fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
		volume:   volume,
		ipcPath:  ipc,
		httpPort: hPort,
		wsPort:   wPort,
		Cmd:      NewCmd(t),
	}
	img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
	return img
}

// Start run image and check if it is running healthily.
@@ -45,7 +49,7 @@ func (i *ImgGeth) Start() error {
	if id != "" {
		return fmt.Errorf("container already exist, name: %s", i.name)
	}
	i.Cmd.RunCmd(i.prepare(), true)
	i.cmd.RunCmd(true)
	i.running = i.isOk()
	if !i.running {
		_ = i.Stop()
@@ -72,7 +76,7 @@ func (i *ImgGeth) Endpoint() string {
func (i *ImgGeth) isOk() bool {
	keyword := "WebSocket enabled"
	okCh := make(chan struct{}, 1)
	i.RegistFunc(keyword, func(buf string) {
	i.cmd.RegistFunc(keyword, func(buf string) {
		if strings.Contains(buf, keyword) {
			select {
			case okCh <- struct{}{}:
@@ -81,13 +85,16 @@ func (i *ImgGeth) isOk() bool {
			}
		}
	})
	defer i.UnRegistFunc(keyword)
	defer i.cmd.UnRegistFunc(keyword)

	select {
	case <-okCh:
		i.id = GetContainerID(i.name)
		utils.TryTimes(3, func() bool {
			i.id = GetContainerID(i.name)
			return i.id != ""
		})
		return i.id != ""
	case <-time.NewTimer(time.Second * 10).C:
	case <-time.After(time.Second * 10):
		return false
	}
}

@@ -10,12 +10,19 @@ import (
	"github.com/stretchr/testify/assert"
)

func TestL1Geth(t *testing.T) {
func TestDocker(t *testing.T) {
	t.Parallel()

	t.Run("testL1Geth", testL1Geth)
	t.Run("testL2Geth", testL2Geth)
	t.Run("testDB", testDB)
}

func testL1Geth(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	img := NewImgGeth(t, "scroll_l1geth", "", "", 8535, 0)
	assert.NoError(t, img.Start())
	img := NewTestL1Docker(t)
	defer img.Stop()

	client, err := ethclient.Dial(img.Endpoint())
@@ -26,12 +33,11 @@ func TestL1Geth(t *testing.T) {
	t.Logf("chainId: %s", chainID.String())
}

func TestL2Geth(t *testing.T) {
func testL2Geth(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	img := NewImgGeth(t, "scroll_l2geth", "", "", 8535, 0)
	assert.NoError(t, img.Start())
	img := NewTestL2Docker(t)
	defer img.Stop()

	client, err := ethclient.Dial(img.Endpoint())
@@ -42,7 +48,7 @@ func TestL2Geth(t *testing.T) {
	t.Logf("chainId: %s", chainID.String())
}

func TestDB(t *testing.T) {
func testDB(t *testing.T) {
	driverName := "postgres"
	dbImg := NewTestDBDocker(t, driverName)
	defer dbImg.Stop()
@@ -36,7 +36,7 @@ func GetContainerID(name string) string {
		Filters: filter,
	})
	if len(lst) > 0 {
		return lst[0].Names[0]
		return lst[0].ID
	}
	return ""
}
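Why the change above matters, in a hedged sketch (the client setup is assumed): ContainerList returns types.Container values whose Names entries are "/"-prefixed display names, while ID is the hex identifier that ContainerStop and ContainerRemove expect.

lst, _ := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filter})
if len(lst) > 0 {
	fmt.Println(lst[0].Names[0]) // e.g. "/scroll_l1geth-123" — a display name
	fmt.Println(lst[0].ID)       // hex container ID — what the stop/remove APIs take
}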
@@ -11,6 +11,6 @@ if [ ! -n "${IPC_PATH}" ];then
	IPC_PATH="/tmp/l1geth_path.ipc"
fi

exec geth --mine --datadir "." --unlock 0 --password "./password" --allow-insecure-unlock --nodiscover \
exec geth --mine --datadir "." --unlock 0 --miner.etherbase "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63" --password "./password" --allow-insecure-unlock --nodiscover \
	--http --http.addr "0.0.0.0" --http.port 8545 --ws --ws.addr "0.0.0.0" --ws.port 8546 --ipcpath ${IPC_PATH}
@@ -1,4 +1,4 @@
FROM scrolltech/l2geth:prealpha-v4.2
FROM scrolltech/l2geth:prealpha-v5.1

RUN mkdir -p /l2geth/keystore
@@ -1,11 +1,16 @@
package docker

import (
	"context"
	"crypto/rand"
	"math/big"
	"testing"

	"github.com/jmoiron/sqlx"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/stretchr/testify/assert"

	"scroll-tech/common/utils"
)

var (
@@ -19,6 +24,18 @@ func NewTestL1Docker(t *testing.T) ImgInstance {
	id, _ := rand.Int(rand.Reader, big.NewInt(2000))
	imgL1geth := NewImgGeth(t, "scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
	assert.NoError(t, imgL1geth.Start())

	// try 3 times to get chainID until is ok.
	utils.TryTimes(3, func() bool {
		client, _ := ethclient.Dial(imgL1geth.Endpoint())
		if client != nil {
			if _, err := client.ChainID(context.Background()); err == nil {
				return true
			}
		}
		return false
	})

	return imgL1geth
}

@@ -27,6 +44,18 @@ func NewTestL2Docker(t *testing.T) ImgInstance {
	id, _ := rand.Int(rand.Reader, big.NewInt(2000))
	imgL2geth := NewImgGeth(t, "scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
	assert.NoError(t, imgL2geth.Start())

	// try 3 times to get chainID until is ok.
	utils.TryTimes(3, func() bool {
		client, _ := ethclient.Dial(imgL2geth.Endpoint())
		if client != nil {
			if _, err := client.ChainID(context.Background()); err == nil {
				return true
			}
		}
		return false
	})

	return imgL2geth
}

@@ -35,5 +64,15 @@ func NewTestDBDocker(t *testing.T, driverName string) ImgInstance {
	id, _ := rand.Int(rand.Reader, big.NewInt(2000))
	imgDB := NewImgDB(t, driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
	assert.NoError(t, imgDB.Start())

	// try 5 times until the db is ready.
	utils.TryTimes(5, func() bool {
		db, _ := sqlx.Open(driverName, imgDB.Endpoint())
		if db != nil {
			return db.Ping() == nil
		}
		return false
	})

	return imgDB
}

|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/docker/docker v20.10.17+incompatible
|
||||
github.com/docker/docker v20.10.21+incompatible
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/lib/pq v1.10.6
|
||||
github.com/mattn/go-colorable v0.1.8
|
||||
github.com/mattn/go-isatty v0.0.14
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/urfave/cli/v2 v2.10.2
|
||||
golang.org/x/sync v0.1.0
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -67,7 +67,7 @@ require (
|
||||
github.com/rogpeppe/go-internal v1.8.1 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.3.1 // indirect
|
||||
github.com/scroll-tech/zktrie v0.4.3 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
|
||||
@@ -77,11 +77,12 @@ require (
|
||||
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.4.0 // indirect
|
||||
golang.org/x/crypto v0.6.0 // indirect
|
||||
golang.org/x/mod v0.7.0 // indirect
|
||||
golang.org/x/net v0.3.0 // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/text v0.5.0 // indirect
|
||||
golang.org/x/net v0.6.0 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
golang.org/x/text v0.7.0 // indirect
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
|
||||
golang.org/x/tools v0.3.0 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
|
||||
@@ -112,8 +112,8 @@ github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwu
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -364,6 +364,8 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
@@ -402,11 +404,10 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea h1:KYlmCH4cDMGxQzaYoSK8+DF53POGpAmnzusAtBWzEjA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -480,8 +481,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -539,8 +540,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -604,8 +605,8 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -617,8 +618,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
12
common/libzkp/impl/Cargo.lock
generated
@@ -1380,7 +1380,7 @@ dependencies = [
[[package]]
name = "halo2-mpt-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=scroll-dev-0902#b0ffab97316f9cf4b9e65aba398a047a8d6424a1"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=scroll-dev-0920-fix#d6bd0f291d41c4585e783d2d94a77fd80e1ba47e"
dependencies = [
 "bitvec 0.22.3",
 "ff 0.11.1",
@@ -1451,7 +1451,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/halo2.git?branch=scroll-dev-0902#6f18f38e82d302cd8b6ce8809b59c32350b019a3"
source = "git+https://github.com/scroll-tech/halo2.git?branch=scroll-dev-0902#6b8c8a07da9fbf5d5878831e35360b8c5e6d89a3"
dependencies = [
 "blake2b_simd",
 "cfg-if 0.1.10",
@@ -2397,7 +2397,7 @@ dependencies = [
[[package]]
name = "poseidon"
version = "0.2.0"
source = "git+https://github.com/appliedzkp/poseidon.git#5d29df01a95e3df6334080d28e983407f56b5da3"
source = "git+https://github.com/appliedzkp/poseidon.git#0b9965fbcd9e03559088b8f68489592286bc55e0"
dependencies = [
 "group",
 "halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0)",
@@ -2407,7 +2407,7 @@ dependencies = [
[[package]]
name = "poseidon"
version = "0.2.0"
source = "git+https://github.com/privacy-scaling-explorations/poseidon.git#5d29df01a95e3df6334080d28e983407f56b5da3"
source = "git+https://github.com/privacy-scaling-explorations/poseidon.git#0b9965fbcd9e03559088b8f68489592286bc55e0"
dependencies = [
 "group",
 "halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0)",
@@ -3389,7 +3389,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/common-rs#4a299c70835179be7fcf007ebb122b428d063c56"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"
dependencies = [
 "base64 0.13.0",
 "blake2",
@@ -3816,7 +3816,7 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f"
[[package]]
name = "zkevm"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/common-rs#4a299c70835179be7fcf007ebb122b428d063c56"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"
dependencies = [
 "anyhow",
 "blake2",
common/libzkp/impl/Cargo.toml
@@ -5,11 +5,11 @@ edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
-crate-type = ["staticlib"]
+crate-type = ["dylib"]

[dependencies]
-zkevm = { git = "https://github.com/scroll-tech/common-rs" }
-types = { git = "https://github.com/scroll-tech/common-rs" }
+zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
+types = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }

log = "0.4"
env_logger = "0.9.0"
@@ -4,12 +4,12 @@ import (
	"crypto/ecdsa"
	"crypto/rand"
	"encoding/hex"
-	"encoding/json"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/crypto"
+	"github.com/scroll-tech/go-ethereum/rlp"
)

// RespStatus represents status code from roller to scroll
@@ -36,12 +36,12 @@ type AuthMsg struct {
type Identity struct {
	// Roller name
	Name string `json:"name"`
-	// Time of message creation
-	Timestamp int64 `json:"timestamp"`
+	// Unverified Unix timestamp of message creation
+	Timestamp uint32 `json:"timestamp"`
	// Roller public key
	PublicKey string `json:"publicKey"`
-	// Version is common.Version+ZK_VERSION. Use the following to check the latest ZK_VERSION version.
-	// curl -sL https://api.github.com/repos/scroll-tech/common-rs/commits | jq -r ".[0].sha"
+	// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version.
+	// curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha"
	Version string `json:"version"`
	// Random unique token generated by manager
	Token string `json:"token"`
@@ -115,12 +115,11 @@ func (a *AuthMsg) PublicKey() (string, error) {
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
-	bs, err := json.Marshal(i)
+	byt, err := rlp.EncodeToBytes(i)
	if err != nil {
		return nil, err
	}

-	hash := crypto.Keccak256Hash(bs)
+	hash := crypto.Keccak256Hash(byt)
	return hash[:], nil
}
@@ -204,12 +203,12 @@ type ProofDetail struct {

// Hash return proofMsg content hash.
func (z *ProofDetail) Hash() ([]byte, error) {
-	bs, err := json.Marshal(z)
+	byt, err := rlp.EncodeToBytes(z)
	if err != nil {
		return nil, err
	}

-	hash := crypto.Keccak256Hash(bs)
+	hash := crypto.Keccak256Hash(byt)
	return hash[:], nil
}
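Switching these Hash methods from json.Marshal to rlp.EncodeToBytes gives the signed payload a fixed, canonical byte encoding before Keccak hashing. One knock-on effect is visible above: go-ethereum's rlp package has no encoding for signed integers, which is consistent with Timestamp moving from int64 to uint32. A minimal sketch of the new hashing path (standalone program; the struct is an illustrative stand-in for Identity, and it assumes the scroll-tech/go-ethereum fork resolves on the module path):

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// identity mirrors the Identity fields above for illustration only.
// RLP cannot encode signed integers, so the timestamp must be unsigned.
type identity struct {
	Name      string
	Timestamp uint32
	PublicKey string
	Version   string
	Token     string
}

func main() {
	i := identity{Name: "testRoller", Timestamp: 1665000000}
	byt, err := rlp.EncodeToBytes(&i)
	if err != nil {
		panic(err)
	}
	// The Keccak256 of the RLP bytes is the message that gets signed.
	hash := crypto.Keccak256Hash(byt)
	fmt.Printf("identity hash: %x\n", hash[:])
}
```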
@@ -4,6 +4,7 @@ import (
	"testing"
	"time"

+	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/stretchr/testify/assert"
)
@@ -15,7 +16,7 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
	authMsg := &AuthMsg{
		Identity: &Identity{
			Name:      "testRoller",
-			Timestamp: time.Now().UnixNano(),
+			Timestamp: uint32(time.Now().Unix()),
		},
	}
	assert.NoError(t, authMsg.Sign(privkey))
@@ -23,4 +24,10 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
	ok, err := authMsg.Verify()
	assert.NoError(t, err)
	assert.Equal(t, true, ok)
+
+	// Check public key is ok.
+	pub, err := authMsg.PublicKey()
+	assert.NoError(t, err)
+	pubkey := crypto.CompressPubkey(&privkey.PublicKey)
+	assert.Equal(t, pub, common.Bytes2Hex(pubkey))
}
common/metrics/metrics.go (new file, 53 lines)
@@ -0,0 +1,53 @@
package metrics

import (
	"context"
	"net"
	"net/http"
	"strconv"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/metrics"
	"github.com/scroll-tech/go-ethereum/metrics/prometheus"
	"github.com/scroll-tech/go-ethereum/rpc"

	"github.com/urfave/cli/v2"

	"scroll-tech/common/utils"
)

// Serve starts the metrics server on the given address, will be closed when the given
// context is canceled.
func Serve(ctx context.Context, c *cli.Context) {
	if !c.Bool(utils.MetricsEnabled.Name) {
		return
	}

	address := net.JoinHostPort(
		c.String(utils.MetricsAddr.Name),
		strconv.Itoa(c.Int(utils.MetricsPort.Name)),
	)

	server := &http.Server{
		Addr:         address,
		Handler:      prometheus.Handler(metrics.DefaultRegistry),
		ReadTimeout:  rpc.DefaultHTTPTimeouts.ReadTimeout,
		WriteTimeout: rpc.DefaultHTTPTimeouts.WriteTimeout,
		IdleTimeout:  rpc.DefaultHTTPTimeouts.IdleTimeout,
	}

	go func() {
		<-ctx.Done()
		if err := server.Close(); err != nil {
			log.Error("Failed to close metrics server", "error", err)
		}
	}()

	log.Info("Starting metrics server", "address", address)

	go func() {
		if err := server.ListenAndServe(); err != nil {
			log.Error("start metrics server error", "error", err)
		}
	}()
}
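The new metrics package plugs into the METRICS flags added to common/utils further down. A minimal sketch of how a service binary might wire it up (assumed wiring for illustration, not part of this diff; import paths follow this repo's module layout):

```go
package main

import (
	"context"
	"os"

	"github.com/urfave/cli/v2"

	"scroll-tech/common/metrics"
	"scroll-tech/common/utils"
)

func main() {
	app := &cli.App{
		Name:  "demo",
		Flags: utils.CommonFlags, // includes --metrics, --metrics.addr, --metrics.port
		Action: func(c *cli.Context) error {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			// No-op unless --metrics is set; otherwise serves the Prometheus
			// handler on metrics.addr:metrics.port until ctx is canceled.
			metrics.Serve(ctx, c)
			// ... run the actual service here ...
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		os.Exit(1)
	}
}
```

Run as, e.g., `./demo --metrics --metrics.port 6060` to expose the default registry.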
@@ -1,12 +0,0 @@
-package utils
-
-import "os"
-
-// GetEnvWithDefault get value from env if is none use the default
-func GetEnvWithDefault(key string, defult string) string {
-	val := os.Getenv(key)
-	if len(val) == 0 {
-		val = defult
-	}
-	return val
-}
@@ -1,6 +1,8 @@
package utils

-import "github.com/urfave/cli/v2"
+import (
+	"github.com/urfave/cli/v2"
+)

var (
	// CommonFlags is used for app common flags in different modules
@@ -10,6 +12,9 @@ var (
		&LogFileFlag,
		&LogJSONFormat,
		&LogDebugFlag,
+		&MetricsEnabled,
+		&MetricsAddr,
+		&MetricsPort,
	}
	// ConfigFileFlag load json type config file.
	ConfigFileFlag = cli.StringFlag{
@@ -40,4 +45,25 @@ var (
		Name:  "log.debug",
		Usage: "Prepends log messages with call-site location (file and line number)",
	}
+	// MetricsEnabled enable metrics collection and reporting
+	MetricsEnabled = cli.BoolFlag{
+		Name:     "metrics",
+		Usage:    "Enable metrics collection and reporting",
+		Category: "METRICS",
+		Value:    false,
+	}
+	// MetricsAddr is listening address of Metrics reporting server
+	MetricsAddr = cli.StringFlag{
+		Name:     "metrics.addr",
+		Usage:    "Metrics reporting server listening address",
+		Category: "METRICS",
+		Value:    "0.0.0.0",
+	}
+	// MetricsPort is listening port of Metrics reporting server
+	MetricsPort = cli.IntFlag{
+		Name:     "metrics.port",
+		Usage:    "Metrics reporting server listening port",
+		Category: "METRICS",
+		Value:    6060,
+	}
)
@@ -38,10 +38,14 @@ func StartHTTPEndpoint(endpoint string, apis []rpc.API) (*http.Server, net.Addr,
}

// StartWSEndpoint starts the WS RPC endpoint.
-func StartWSEndpoint(endpoint string, apis []rpc.API) (*http.Server, net.Addr, error) {
+func StartWSEndpoint(endpoint string, apis []rpc.API, compressionLevel int) (*http.Server, net.Addr, error) {
	handler, addr, err := StartHTTPEndpoint(endpoint, apis)
	if err == nil {
		srv := (handler.Handler).(*rpc.Server)
+		err = srv.SetCompressionLevel(compressionLevel)
+		if err != nil {
+			log.Error("failed to set ws compression level", "compression level", compressionLevel, "err", err)
+		}
		handler.Handler = srv.WebsocketHandler(nil)
	}
	return handler, addr, err
common/utils/rpc_test.go (new file, 78 lines)
@@ -0,0 +1,78 @@
package utils

import (
	"compress/flate"
	"context"
	"testing"

	"github.com/scroll-tech/go-ethereum/rpc"
	"github.com/stretchr/testify/assert"
)

type testService struct{}

type echoArgs struct {
	S string
}

type echoResult struct {
	Name string
	ID   int
	Args *echoArgs
}

func (s *testService) NoArgsRets() {}

func (s *testService) Echo(str string, i int, args *echoArgs) echoResult {
	return echoResult{str, i, args}
}

func TestStartHTTPEndpoint(t *testing.T) {
	endpoint := "localhost:18080"
	handler, _, err := StartHTTPEndpoint(endpoint, []rpc.API{
		{
			Public:    true,
			Namespace: "test",
			Service:   new(testService),
		},
	})
	assert.NoError(t, err)
	defer handler.Shutdown(context.Background())

	client, err := rpc.Dial("http://" + endpoint)
	assert.NoError(t, err)

	assert.NoError(t, client.Call(nil, "test_noArgsRets"))

	result := echoResult{}
	assert.NoError(t, client.Call(&result, "test_echo", "test", 0, &echoArgs{S: "test"}))
	assert.Equal(t, 0, result.ID)
	assert.Equal(t, "test", result.Name)

	defer client.Close()
}

func TestStartWSEndpoint(t *testing.T) {
	endpoint := "localhost:18081"
	handler, _, err := StartWSEndpoint(endpoint, []rpc.API{
		{
			Public:    true,
			Namespace: "test",
			Service:   new(testService),
		},
	}, flate.NoCompression)
	assert.NoError(t, err)
	defer handler.Shutdown(context.Background())

	client, err := rpc.Dial("ws://" + endpoint)
	assert.NoError(t, err)

	assert.NoError(t, client.Call(nil, "test_noArgsRets"))

	result := echoResult{}
	assert.NoError(t, client.Call(&result, "test_echo", "test", 0, &echoArgs{S: "test"}))
	assert.Equal(t, 0, result.ID)
	assert.Equal(t, "test", result.Name)

	defer client.Close()
}
common/utils/simulation.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package utils

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
	"github.com/urfave/cli/v2"
)

// RegisterSimulation register initializer function for integration-test.
func RegisterSimulation(app *cli.App, name string) {
	// Run the app for integration-test
	reexec.Register(name, func() {
		if err := app.Run(os.Args); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		os.Exit(0)
	})
	reexec.Init()
}
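RegisterSimulation uses docker's reexec trick: on startup the process inspects argv[0] and, if it matches a registered name, runs the cli.App instead of the normal entry point. A hypothetical test-side counterpart (binary name and flag invented for illustration; it assumes the test binary already called RegisterSimulation with the same name):

```go
package main_test

import (
	"os"
	"testing"

	"github.com/docker/docker/pkg/reexec"
)

// Assumes an init or TestMain in this test binary already ran
// utils.RegisterSimulation(app, "bridge-test").
func TestRunSimulation(t *testing.T) {
	// reexec.Command re-runs the current binary with argv[0] set to
	// "bridge-test", so the registered initializer takes over in the child.
	cmd := reexec.Command("bridge-test", "--log.debug")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		t.Fatalf("simulation process failed: %v", err)
	}
}
```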
@@ -1,30 +1,10 @@
package utils

import (
-	"sync/atomic"
-
	"github.com/scroll-tech/go-ethereum/core/types"
-	"golang.org/x/sync/errgroup"
)

// ComputeTraceGasCost computes gascost based on ExecutionResults.StructLogs.GasCost
func ComputeTraceGasCost(trace *types.BlockTrace) uint64 {
-	var (
-		gasCost uint64
-		eg      errgroup.Group
-	)
-	for idx := range trace.ExecutionResults {
-		i := idx
-		eg.Go(func() error {
-			var sum uint64
-			for _, log := range trace.ExecutionResults[i].StructLogs {
-				sum += log.GasCost
-			}
-			atomic.AddUint64(&gasCost, sum)
-			return nil
-		})
-	}
-	_ = eg.Wait()
-
-	return gasCost
+	return trace.Header.GasUsed
}
@@ -19,13 +19,7 @@ func TestComputeTraceCost(t *testing.T) {
	blockTrace := &types.BlockTrace{}
	err = json.Unmarshal(templateBlockTrace, blockTrace)
	assert.NoError(t, err)
-	var sum uint64
-	for _, v := range blockTrace.ExecutionResults {
-		for _, sv := range v.StructLogs {
-			sum += sv.GasCost
-		}
-	}
-
-	res := utils.ComputeTraceGasCost(blockTrace)
-	assert.Equal(t, sum, res)
+	var expected = blockTrace.Header.GasUsed
+	got := utils.ComputeTraceGasCost(blockTrace)
+	assert.Equal(t, expected, got)
}
common/utils/utils.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package utils

import "time"

// TryTimes try run several times until the function return true.
func TryTimes(times int, run func() bool) {
	for i := 0; times == -1 || i < times; i++ {
		if run() {
			return
		}
		time.Sleep(time.Millisecond * 500)
	}
}
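TryTimes retries at a fixed half-second interval, with times == -1 meaning retry forever. A hypothetical caller (endpoint and attempt budget invented for illustration), waiting for a service to come up:

```go
package main

import (
	"net/http"

	"scroll-tech/common/utils"
)

func main() {
	// Poll a health endpoint up to 20 times (~10 seconds). TryTimes simply
	// returns once the budget is spent, so callers needing a hard failure
	// should re-check the condition themselves afterwards.
	utils.TryTimes(20, func() bool {
		resp, err := http.Get("http://localhost:8080/health")
		if err != nil {
			return false
		}
		defer resp.Body.Close()
		return resp.StatusCode == http.StatusOK
	})
}
```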
@@ -5,7 +5,7 @@ import (
	"runtime/debug"
)

-var tag = "prealpha-v8.2"
+var tag = "prealpha-v13.2"

var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
@@ -22,8 +22,8 @@ var commit = func() string {
	return ""
}()

-// ZK_VERSION is commit-id of common/libzkp/impl/cargo.lock/common-rs
-var ZK_VERSION string
+// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-zkevm
+var ZkVersion string

// Version denote the version of scroll protocol, including the l2geth, relayer, coordinator, roller, contracts and etc.
-var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZK_VERSION)
+var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)
@@ -232,6 +232,50 @@ function initialize(uint256 _chainId) external nonpayable
|---|---|---|
| _chainId | uint256 | undefined |

### isBlockFinalized

```solidity
function isBlockFinalized(bytes32 _blockHash) external view returns (bool)
```

Return whether the block is finalized by block hash.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _blockHash | bytes32 | undefined |

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |

### isBlockFinalized

```solidity
function isBlockFinalized(uint256 _blockHeight) external view returns (bool)
```

Return whether the block is finalized by block height.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _blockHeight | uint256 | undefined |

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |

### lastFinalizedBatchID

```solidity
Submodule contracts/lib/forge-std updated: cb69e9c07f...662ae0d693
@@ -13,17 +13,34 @@ import { L2ERC721Gateway } from "../../src/L2/gateways/L2ERC721Gateway.sol";
import { L2GatewayRouter } from "../../src/L2/gateways/L2GatewayRouter.sol";
import { L2ScrollMessenger } from "../../src/L2/L2ScrollMessenger.sol";
import { L2StandardERC20Gateway } from "../../src/L2/gateways/L2StandardERC20Gateway.sol";
+import { L2TxFeeVault } from "../../src/L2/predeploys/L2TxFeeVault.sol";
+import { Whitelist } from "../../src/L2/predeploys/Whitelist.sol";
import { ScrollStandardERC20 } from "../../src/libraries/token/ScrollStandardERC20.sol";
import { ScrollStandardERC20Factory } from "../../src/libraries/token/ScrollStandardERC20Factory.sol";

contract DeployL2BridgeContracts is Script {
    uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
+    address L1_TX_FEE_RECIPIENT_ADDR = vm.envAddress("L1_TX_FEE_RECIPIENT_ADDR");

+    L2ScrollMessenger messenger;
    ProxyAdmin proxyAdmin;

+    address L2_SCROLL_MESSENGER_PREDEPLOY_ADDR = vm.envOr("L2_SCROLL_MESSENGER_PREDEPLOY_ADDR", address(0));
+    address L2_TX_FEE_VAULT_PREDEPLOY_ADDR = vm.envOr("L2_TX_FEE_VAULT_PREDEPLOY_ADDR", address(0));
+    address L2_PROXY_ADMIN_PREDEPLOY_ADDR = vm.envOr("L2_PROXY_ADMIN_PREDEPLOY_ADDR", address(0));
+    address L2_STANDARD_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_STANDARD_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR", address(0));
+    address L2_GATEWAY_ROUTER_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_GATEWAY_ROUTER_PROXY_PREDEPLOY_ADDR", address(0));
+    address L2_SCROLL_STANDARD_ERC20_FACTORY_PREDEPLOY_ADDR = vm.envOr("L2_SCROLL_STANDARD_ERC20_FACTORY_PREDEPLOY_ADDR", address(0));
+    address L2_CUSTOM_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_CUSTOM_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR", address(0));
+    address L2_ERC721_GATEWAY_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_ERC721_GATEWAY_PROXY_PREDEPLOY_ADDR", address(0));
+    address L2_ERC1155_GATEWAY_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_ERC1155_GATEWAY_PROXY_PREDEPLOY_ADDR", address(0));
+    address L2_WHITELIST_PREDEPLOY_ADDR = vm.envOr("L2_WHITELIST_PREDEPLOY_ADDR", address(0));

    function run() external {
        vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);

        deployL2ScrollMessenger();
+        deployTxFeeVault();
+        deployProxyAdmin();
        deployL2StandardERC20Gateway();
        deployL2GatewayRouter();
@@ -31,24 +48,51 @@ contract DeployL2BridgeContracts is Script {
        deployL2CustomERC20Gateway();
        deployL2ERC721Gateway();
        deployL2ERC1155Gateway();
+        deployL2Whitelist();

        vm.stopBroadcast();
    }

    function deployL2ScrollMessenger() internal {
-        address owner = vm.addr(L2_DEPLOYER_PRIVATE_KEY);
-        L2ScrollMessenger l2ScrollMessenger = new L2ScrollMessenger(owner);
+        if (L2_SCROLL_MESSENGER_PREDEPLOY_ADDR != address(0)) {
+            logAddress("L2_SCROLL_MESSENGER_ADDR", address(L2_SCROLL_MESSENGER_PREDEPLOY_ADDR));
+            return;
+        }

-        logAddress("L2_SCROLL_MESSENGER_ADDR", address(l2ScrollMessenger));
+        address owner = vm.addr(L2_DEPLOYER_PRIVATE_KEY);
+        messenger = new L2ScrollMessenger(owner);
+
+        logAddress("L2_SCROLL_MESSENGER_ADDR", address(messenger));
    }

+    function deployTxFeeVault() internal {
+        if (L2_TX_FEE_VAULT_PREDEPLOY_ADDR != address(0)) {
+            logAddress("L2_TX_FEE_VAULT_ADDR", address(L2_TX_FEE_VAULT_PREDEPLOY_ADDR));
+            return;
+        }
+
+        L2TxFeeVault feeVault = new L2TxFeeVault(address(messenger), L1_TX_FEE_RECIPIENT_ADDR);
+
+        logAddress("L2_TX_FEE_VAULT_ADDR", address(feeVault));
+    }
+
    function deployProxyAdmin() internal {
+        if (L2_PROXY_ADMIN_PREDEPLOY_ADDR != address(0)) {
+            logAddress("L2_PROXY_ADMIN_ADDR", address(L2_PROXY_ADMIN_PREDEPLOY_ADDR));
+            return;
+        }
+
        proxyAdmin = new ProxyAdmin();

        logAddress("L2_PROXY_ADMIN_ADDR", address(proxyAdmin));
    }

    function deployL2StandardERC20Gateway() internal {
+        if (L2_STANDARD_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR != address(0)) {
+            logAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR", address(L2_STANDARD_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR));
+            return;
+        }
+
        L2StandardERC20Gateway impl = new L2StandardERC20Gateway();
        TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
@@ -57,6 +101,11 @@ contract DeployL2BridgeContracts is Script {
    }

    function deployL2GatewayRouter() internal {
+        if (L2_GATEWAY_ROUTER_PROXY_PREDEPLOY_ADDR != address(0)) {
+            logAddress("L2_GATEWAY_ROUTER_PROXY_ADDR", address(L2_GATEWAY_ROUTER_PROXY_PREDEPLOY_ADDR));
+            return;
+        }
+
        L2GatewayRouter impl = new L2GatewayRouter();
        TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
@@ -65,6 +114,11 @@ contract DeployL2BridgeContracts is Script {
    }

    function deployScrollStandardERC20Factory() internal {
+        if (L2_SCROLL_STANDARD_ERC20_FACTORY_PREDEPLOY_ADDR != address(0)) {
+            logAddress("L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR", address(L2_SCROLL_STANDARD_ERC20_FACTORY_PREDEPLOY_ADDR));
+            return;
+        }
+
        ScrollStandardERC20 tokenImpl = new ScrollStandardERC20();
        ScrollStandardERC20Factory scrollStandardERC20Factory = new ScrollStandardERC20Factory(address(tokenImpl));
@@ -73,6 +127,11 @@ contract DeployL2BridgeContracts is Script {
    }

    function deployL2CustomERC20Gateway() internal {
+        if (L2_CUSTOM_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR != address(0)) {
+            logAddress("L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR", address(L2_CUSTOM_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR));
+            return;
+        }
+
        L2CustomERC20Gateway impl = new L2CustomERC20Gateway();
        TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
@@ -81,6 +140,11 @@ contract DeployL2BridgeContracts is Script {
    }

    function deployL2ERC721Gateway() internal {
+        if (L2_ERC721_GATEWAY_PROXY_PREDEPLOY_ADDR != address(0)) {
+            logAddress("L2_ERC721_GATEWAY_PROXY_ADDR", address(L2_ERC721_GATEWAY_PROXY_PREDEPLOY_ADDR));
+            return;
+        }
+
        L2ERC721Gateway impl = new L2ERC721Gateway();
        TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
@@ -89,6 +153,11 @@ contract DeployL2BridgeContracts is Script {
    }

    function deployL2ERC1155Gateway() internal {
+        if (L2_ERC1155_GATEWAY_PROXY_PREDEPLOY_ADDR != address(0)) {
+            logAddress("L2_ERC1155_GATEWAY_PROXY_ADDR", address(L2_ERC1155_GATEWAY_PROXY_PREDEPLOY_ADDR));
+            return;
+        }
+
        L2ERC1155Gateway impl = new L2ERC1155Gateway();
        TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
@@ -96,6 +165,18 @@ contract DeployL2BridgeContracts is Script {
        logAddress("L2_ERC1155_GATEWAY_PROXY_ADDR", address(proxy));
    }

+    function deployL2Whitelist() internal {
+        if (L2_WHITELIST_PREDEPLOY_ADDR != address(0)) {
+            logAddress("L2_WHITELIST_ADDR", address(L2_WHITELIST_PREDEPLOY_ADDR));
+            return;
+        }
+
+        address owner = vm.addr(L2_DEPLOYER_PRIVATE_KEY);
+        Whitelist whitelist = new Whitelist(owner);
+
+        logAddress("L2_WHITELIST_ADDR", address(whitelist));
+    }
+
    function logAddress(string memory name, address addr) internal {
        console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
    }
@@ -3,11 +3,13 @@ pragma solidity ^0.8.10;

import { Script } from "forge-std/Script.sol";

+import { L2ScrollMessenger } from "../../src/L2/L2ScrollMessenger.sol";
import { L2CustomERC20Gateway } from "../../src/L2/gateways/L2CustomERC20Gateway.sol";
import { L2ERC1155Gateway } from "../../src/L2/gateways/L2ERC1155Gateway.sol";
import { L2ERC721Gateway } from "../../src/L2/gateways/L2ERC721Gateway.sol";
import { L2GatewayRouter } from "../../src/L2/gateways/L2GatewayRouter.sol";
import { L2StandardERC20Gateway } from "../../src/L2/gateways/L2StandardERC20Gateway.sol";
+import { Whitelist } from "../../src/L2/predeploys/Whitelist.sol";
import { ScrollStandardERC20Factory } from "../../src/libraries/token/ScrollStandardERC20Factory.sol";

contract InitializeL2BridgeContracts is Script {
@@ -20,12 +22,14 @@ contract InitializeL2BridgeContracts is Script {
    address L1_ERC1155_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ERC1155_GATEWAY_PROXY_ADDR");

    address L2_SCROLL_MESSENGER_ADDR = vm.envAddress("L2_SCROLL_MESSENGER_ADDR");
+    address L2_TX_FEE_VAULT_ADDR = vm.envAddress("L2_TX_FEE_VAULT_ADDR");
    address L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
    address L2_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L2_GATEWAY_ROUTER_PROXY_ADDR");
    address L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR = vm.envAddress("L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR");
    address L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR");
    address L2_ERC721_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ERC721_GATEWAY_PROXY_ADDR");
    address L2_ERC1155_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ERC1155_GATEWAY_PROXY_ADDR");
+    address L2_WHITELIST_ADDR = vm.envAddress("L2_WHITELIST_ADDR");

    function run() external {
        vm.startBroadcast(deployerPrivateKey);
@@ -69,6 +73,21 @@ contract InitializeL2BridgeContracts is Script {
            L2_SCROLL_MESSENGER_ADDR
        );

+        // whitelist contracts which can call sendMessage
+        {
+            address[] memory gateways = new address[](6);
+            gateways[0] = L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR;
+            gateways[1] = L2_GATEWAY_ROUTER_PROXY_ADDR;
+            gateways[2] = L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR;
+            gateways[3] = L2_ERC1155_GATEWAY_PROXY_ADDR;
+            gateways[4] = L2_ERC721_GATEWAY_PROXY_ADDR;
+            gateways[5] = L2_TX_FEE_VAULT_ADDR;
+            Whitelist(L2_WHITELIST_ADDR).updateWhitelistStatus(gateways, true);
+        }
+
+        // update whitelist contract for messenger
+        L2ScrollMessenger(payable(L2_SCROLL_MESSENGER_ADDR)).updateWhitelist(L2_WHITELIST_ADDR);
+
        vm.stopBroadcast();
    }
}
@@ -97,7 +97,7 @@ contract L1ScrollMessenger is OwnableUpgradeable, PausableUpgradeable, ScrollMes
    require(!isMessageExecuted[_msghash], "Message successfully executed");

    // @todo check proof
-    require(IZKRollup(rollup).verifyMessageStateProof(_proof.batchIndex, _proof.blockHeight), "invalid state proof");
+    require(IZKRollup(rollup).isBlockFinalized(_proof.blockHeight), "invalid state proof");
    require(ZkTrieVerifier.verifyMerkleProof(_proof.merkleProof), "invalid proof");

    // @todo check `_to` address to avoid attack.
@@ -59,6 +59,14 @@ interface IZKRollup {

    /**************************************** View Functions ****************************************/

+    /// @notice Return whether the block is finalized by block hash.
+    /// @param blockHash The hash of the block to query.
+    function isBlockFinalized(bytes32 blockHash) external view returns (bool);
+
+    /// @notice Return whether the block is finalized by block height.
+    /// @param blockHeight The height of the block to query.
+    function isBlockFinalized(uint256 blockHeight) external view returns (bool);
+
    /// @notice Return the message hash by index.
    /// @param _index The index to query.
    function getMessageHashByIndex(uint256 _index) external view returns (bytes32);
@@ -89,6 +89,24 @@ contract ZKRollup is OwnableUpgradeable, IZKRollup {

    /**************************************** View Functions ****************************************/

+    /// @inheritdoc IZKRollup
+    function isBlockFinalized(bytes32 _blockHash) external view returns (bool) {
+        // block not commited
+        if (blocks[_blockHash].transactionRoot == bytes32(0)) return false;
+
+        uint256 _batchIndex = blocks[_blockHash].batchIndex;
+        bytes32 _batchId = finalizedBatches[_batchIndex];
+        return _batchId != bytes32(0);
+    }
+
+    /// @inheritdoc IZKRollup
+    function isBlockFinalized(uint256 _blockHeight) external view returns (bool) {
+        bytes32 _batchID = lastFinalizedBatchID;
+        bytes32 _batchHash = batches[_batchID].batchHash;
+        uint256 _maxHeight = blocks[_batchHash].blockHeight;
+        return _blockHeight <= _maxHeight;
+    }
+
    /// @inheritdoc IZKRollup
    function getMessageHashByIndex(uint256 _index) external view returns (bytes32) {
        return messageQueue[_index];
|
||||
uint256 _deadline,
|
||||
uint256 _nonce,
|
||||
bytes memory _message
|
||||
) external override {
|
||||
) external override onlyWhitelistedSender(msg.sender) {
|
||||
// anti reentrance
|
||||
require(xDomainMessageSender == ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER, "already in execution");
|
||||
|
||||
// @todo only privileged accounts can call
|
||||
|
||||
// solhint-disable-next-line not-rely-on-time
|
||||
require(_deadline >= block.timestamp, "Message expired");
|
||||
|
||||
|
||||
contracts/src/L2/predeploys/L2TxFeeVault.sol (new file, 14 lines)
@@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import { FeeVault } from "../../libraries/FeeVault.sol";

/// @title L2TxFeeVault
/// @notice The `L2TxFeeVault` contract collects all L2 transaction fees and allows withdrawing these fees to a predefined L1 address.
/// The minimum withdrawal amount is 10 ether.
contract L2TxFeeVault is FeeVault {
    /// @param _messenger The address of L2ScrollMessenger.
    /// @param _recipient The fee recipient address on L1.
    constructor(address _messenger, address _recipient) FeeVault(_messenger, _recipient, 10 ether) {}
}
contracts/src/libraries/FeeVault.sol (new file, 108 lines)
@@ -0,0 +1,108 @@
// SPDX-License-Identifier: MIT

// MIT License

// Copyright (c) 2022 Optimism
// Copyright (c) 2022 Scroll

// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:

// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

pragma solidity ^0.8.0;

import { IL2ScrollMessenger } from "../L2/IL2ScrollMessenger.sol";

/**
 * @title FeeVault
 * @notice The FeeVault contract contains the basic logic for the various different vault contracts
 *         used to hold fee revenue generated by the L2 system.
 */
abstract contract FeeVault {
    /**
     * @notice Emits each time that a withdrawal occurs.
     *
     * @param value Amount that was withdrawn (in wei).
     * @param to    Address that the funds were sent to.
     * @param from  Address that triggered the withdrawal.
     */
    event Withdrawal(uint256 value, address to, address from);

    /**
     * @notice Minimum balance before a withdrawal can be triggered.
     */
    uint256 public MIN_WITHDRAWAL_AMOUNT;

    /**
     * @notice Scroll L2 messenger address.
     */
    address public MESSENGER;

    /**
     * @notice Wallet that will receive the fees on L1.
     */
    address public RECIPIENT;

    /**
     * @notice Total amount of wei processed by the contract.
     */
    uint256 public totalProcessed;

    /**
     * @param _recipient Wallet that will receive the fees on L1.
     * @param _minWithdrawalAmount Minimum balance before a withdrawal can be triggered.
     */
    constructor(
        address _messenger,
        address _recipient,
        uint256 _minWithdrawalAmount
    ) {
        MIN_WITHDRAWAL_AMOUNT = _minWithdrawalAmount;
        MESSENGER = _messenger;
        RECIPIENT = _recipient;
    }

    /**
     * @notice Allow the contract to receive ETH.
     */
    receive() external payable {}

    /**
     * @notice Triggers a withdrawal of funds to the L1 fee wallet.
     */
    function withdraw() external {
        uint256 value = address(this).balance;

        require(
            value >= MIN_WITHDRAWAL_AMOUNT,
            "FeeVault: withdrawal amount must be greater than minimum withdrawal amount"
        );

        unchecked {
            totalProcessed += value;
        }

        emit Withdrawal(value, RECIPIENT, msg.sender);

        IL2ScrollMessenger(MESSENGER).sendMessage{ value: value }(
            RECIPIENT,
            0, // no fee provided
            bytes(""), // no message (simple eth transfer)
            0 // _gasLimit is not used for eth transfers
        );
    }
}
Some files were not shown because too many files have changed in this diff.