Mirror of https://github.com/scroll-tech/scroll.git
Synced 2026-01-13 07:57:58 -05:00

Compare commits: 39 commits, prealpha-v ... prealpha-v
| Author | SHA1 | Date |
|---|---|---|
| | 9f9e23ff0e | |
| | fa93de97de | |
| | deedf7a5d0 | |
| | 73432127cd | |
| | a78160ddad | |
| | fff2517a76 | |
| | eba7647e21 | |
| | 51076d21c3 | |
| | 077ed9839a | |
| | bdcca55bd5 | |
| | 20b8e2bf6c | |
| | cc596c42b3 | |
| | 7da717b251 | |
| | bbdbf3995f | |
| | 7fb8bc6e29 | |
| | b8fae294e4 | |
| | 23bc381f5c | |
| | b4ade85a9c | |
| | d04522027c | |
| | 7422bea51f | |
| | abcc159390 | |
| | a545954dbc | |
| | dc6ef83fbd | |
| | e17647bc9f | |
| | feaa95aefe | |
| | 8ad8a1b6f0 | |
| | 22f6781c26 | |
| | b165402e81 | |
| | 9ee8d977cb | |
| | e1a6eb65f6 | |
| | 9096334eab | |
| | c360cf52b1 | |
| | 00dc075d9c | |
| | af77c9fa83 | |
| | 1c4ed0487a | |
| | e4761d9694 | |
| | fbc7e03c67 | |
| | 927011641d | |
| | 6eb71869e8 | |
.github/workflows/bridge.yml (vendored, 6 changes)
```diff
@@ -31,7 +31,11 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc
-        uses: pontem-network/get-solc@master
+        uses: supplypike/setup-bin@v3
+        with:
+          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
+          name: 'solc'
+          version: '0.8.16'
       - name: Install Geth Tools
         uses: gacts/install-geth-tools@v1
       - name: Lint
```
Jenkinsfile (vendored, 85 changes)
```diff
@@ -8,6 +8,7 @@ pipeline {
     }
     tools {
         go 'go-1.18'
+        nodejs "nodejs"
     }
     environment {
         GO111MODULE = 'on'
@@ -25,6 +26,7 @@ pipeline {
                     changeset "coordinator/**"
                     changeset "common/**"
+                    changeset "database/**"
                     changeset "tests/**"
                 }
             }
             parallel {
@@ -67,7 +69,7 @@ pipeline {
                 }
             }
         }
-        stage('Test') {
+        stage('Parallel Test') {
             when {
                 anyOf {
                     changeset "Jenkinsfile"
@@ -77,28 +79,77 @@ pipeline {
                     changeset "coordinator/**"
                     changeset "common/**"
                     changeset "database/**"
                     changeset "tests/**"
                 }
             }
+            parallel{
+                stage('Test bridge package') {
+                    steps {
+                        sh 'go test -v -race -coverprofile=coverage.bridge.txt -covermode=atomic -p 1 scroll-tech/bridge/...'
+                    }
+                }
+                stage('Test common package') {
+                    steps {
+                        sh 'go test -v -race -coverprofile=coverage.common.txt -covermode=atomic -p 1 scroll-tech/common/...'
+                    }
+                }
+                stage('Test coordinator package') {
+                    steps {
+                        sh 'go test -v -race -coverprofile=coverage.coordinator.txt -covermode=atomic -p 1 scroll-tech/coordinator/...'
+                    }
+                }
+                stage('Test database package') {
+                    steps {
+                        sh 'go test -v -race -coverprofile=coverage.db.txt -covermode=atomic -p 1 scroll-tech/database/...'
+                    }
+                }
+                stage('Integration test') {
+                    steps {
+                        sh 'go test -v -race -tags="mock_prover mock_verifier" -coverprofile=coverage.integration.txt -covermode=atomic -p 1 scroll-tech/integration-test/...'
+                    }
+                }
+                stage('Race test bridge package') {
+                    steps {
+                        sh "cd bridge && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
+                    }
+                }
+                stage('Race test coordinator package') {
+                    steps {
+                        sh "cd coordinator && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
+                    }
+                }
+                stage('Race test database package') {
+                    steps {
+                        sh "cd database && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
+                    }
+                }
+            }
+        }
+        stage('Compare Coverage') {
+            when {
+                anyOf {
+                    changeset "Jenkinsfile"
+                    changeset "build/**"
+                    changeset "go.work**"
+                    changeset "bridge/**"
+                    changeset "coordinator/**"
+                    changeset "common/**"
+                    changeset "database/**"
+                    changeset "tests/**"
+                }
+            }
             steps {
                 catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
                     sh '''
                         go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database/...
                         go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/...
                         go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/common/...
                         go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/coordinator/...
                         cd ..
                     '''
                     script {
                         for (i in ['bridge', 'coordinator', 'database']) {
                             sh "cd $i && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|l2\\|l1\\|common\\|coordinator')"
                         }
                     }
                 }
                 sh "./build/post-test-report-coverage.sh"
                 script {
                     currentBuild.result = 'SUCCESS'
                 }
                 step([$class: 'CompareCoverageAction', publishResultAs: 'statusCheck', scmVars: [GIT_URL: env.GIT_URL]])
             }
         }
     }
     post {
         always {
             publishCoverage adapters: [coberturaReportAdapter(path: 'cobertura.xml', thresholds: [[thresholdTarget: 'Aggregated Report', unhealthyThreshold: 40.0]])], checksName: '', sourceFileResolver: sourceFiles('NEVER_STORE')
             cleanWs()
             slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
         }
     }
```
```diff
@@ -5,7 +5,8 @@ IMAGE_VERSION=latest
 REPO_ROOT_DIR=./..
 
 mock_abi:
-	go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/Mock_Bridge.sol --pkg mock_bridge --out mock_bridge/Mock_Bridge.go
+	go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
+	go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go
 
 bridge: ## Builds the Bridge instance.
 	go build -o $(PWD)/build/bin/bridge ./cmd
```
````diff
@@ -22,16 +22,6 @@
 make clean
 make bridge
 ```
 
-## DB config
-
-* db settings in config
-
-```bash
-# DB_DSN: db data source name
-export DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
-# DB_DRIVER: db driver name
-export DB_DRIVER="postgres"
-```
 ## Start
 * use default ports and config.json
````
bridge/cmd/app/app.go (new file, 125 lines)

```go
package app

import (
	"fmt"
	"os"
	"os/signal"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/database"

	"scroll-tech/common/utils"
	"scroll-tech/common/version"

	"scroll-tech/bridge/config"
	"scroll-tech/bridge/l1"
	"scroll-tech/bridge/l2"
)

var (
	app *cli.App
)

func init() {
	// Set up Bridge app info.
	app = cli.NewApp()

	app.Action = action
	app.Name = "bridge"
	app.Usage = "The Scroll Bridge"
	app.Version = version.Version
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Flags = append(app.Flags, apiFlags...)

	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
	}

	// Register `bridge-test` app for integration-test.
	utils.RegisterSimulation(app, "bridge-test")
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}

	// init db connection
	var ormFactory database.OrmFactory
	if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
		log.Crit("failed to init db connection", "err", err)
	}

	var (
		l1Backend *l1.Backend
		l2Backend *l2.Backend
	)
	// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
	l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
	if err != nil {
		return err
	}
	l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
	if err != nil {
		return err
	}
	defer func() {
		l1Backend.Stop()
		l2Backend.Stop()
		err = ormFactory.Close()
		if err != nil {
			log.Error("can not close ormFactory", "error", err)
		}
	}()

	// Start all modules.
	if err = l1Backend.Start(); err != nil {
		log.Crit("couldn't start l1 backend", "error", err)
	}
	if err = l2Backend.Start(); err != nil {
		log.Crit("couldn't start l2 backend", "error", err)
	}

	// Register api and start rpc service.
	if ctx.Bool(httpEnabledFlag.Name) {
		handler, addr, err := utils.StartHTTPEndpoint(
			fmt.Sprintf(
				"%s:%d",
				ctx.String(httpListenAddrFlag.Name),
				ctx.Int(httpPortFlag.Name)),
			l2Backend.APIs())
		if err != nil {
			log.Crit("Could not start RPC api", "error", err)
		}
		defer func() {
			_ = handler.Shutdown(ctx.Context)
			log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
		}()
		log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
	}

	log.Info("Start bridge successfully")

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run runs the bridge cmd instance.
func Run() {
	// Run the bridge.
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```
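A reviewer-style note on the new `app.go` above: startup wires everything through deferred cleanups behind a blocking signal wait, so CTRL-C unwinds the backends and the DB in reverse setup order. A minimal, standard-library-only sketch of that shape (names are illustrative, not from the repo):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
)

func main() {
	// Stand-ins for the bridge's backend/DB teardown; defers run in
	// reverse order after the signal wait below returns.
	defer fmt.Println("db closed")
	defer fmt.Println("backends stopped")

	fmt.Println("modules started; press CTRL-C to exit")

	// Catch CTRL-C to ensure a graceful shutdown, as in action() above.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)
	<-interrupt
}
```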
bridge/cmd/app/app_test.go (new file, 19 lines)
```go
package app

import (
	"fmt"
	"testing"
	"time"

	"scroll-tech/common/cmd"
	"scroll-tech/common/version"
)

func TestRunBridge(t *testing.T) {
	bridge := cmd.NewCmd(t, "bridge-test", "--version")
	defer bridge.WaitExit()

	// wait result
	bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
	bridge.RunApp(false)
}
```
```diff
@@ -1,6 +1,8 @@
-package main
+package app
 
-import "github.com/urfave/cli/v2"
+import (
+	"github.com/urfave/cli/v2"
+)
 
 var (
 	apiFlags = []cli.Flag{
@@ -26,22 +28,4 @@ var (
 		Usage: "HTTP-RPC server listening port",
 		Value: 8290,
 	}
-
-	l1Flags = []cli.Flag{
-		&l1UrlFlag,
-	}
-	l1UrlFlag = cli.StringFlag{
-		Name:  "l1.endpoint",
-		Usage: "The endpoint connect to l1chain node",
-		Value: "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
-	}
-
-	l2Flags = []cli.Flag{
-		&l2UrlFlag,
-	}
-	l2UrlFlag = cli.StringFlag{
-		Name:  "l2.endpoint",
-		Usage: "The endpoint connect to l2chain node",
-		Value: "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
-	}
 )
```
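For readers unfamiliar with the flag wiring being trimmed here, this is the usual urfave/cli/v2 pattern: declare flags once, append them to `app.Flags`, and read them back in the action. A minimal sketch with illustrative names and an assumed default value:

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	// One string flag, analogous to the endpoint flags removed above.
	endpointFlag := &cli.StringFlag{
		Name:  "l2.endpoint",
		Usage: "The endpoint to connect to an l2chain node",
		Value: "ws://localhost:8546", // assumed default, not from the repo
	}

	app := cli.NewApp()
	app.Flags = []cli.Flag{endpointFlag}
	app.Action = func(ctx *cli.Context) error {
		// Flags are read back by name inside the action.
		fmt.Println("l2 endpoint:", ctx.String("l2.endpoint"))
		return nil
	}

	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```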
```diff
@@ -1,125 +1,7 @@
 package main
 
-import (
-	"fmt"
-	"os"
-	"os/signal"
-
-	"github.com/scroll-tech/go-ethereum/log"
-	"github.com/urfave/cli/v2"
-
-	"scroll-tech/database"
-
-	"scroll-tech/common/utils"
-	"scroll-tech/common/version"
-
-	"scroll-tech/bridge/config"
-	"scroll-tech/bridge/l1"
-	"scroll-tech/bridge/l2"
-)
+import "scroll-tech/bridge/cmd/app"
 
 func main() {
-	// Set up Bridge app info.
-	app := cli.NewApp()
-
-	app.Action = action
-	app.Name = "bridge"
-	app.Usage = "The Scroll Bridge"
-	app.Version = version.Version
-	app.Flags = append(app.Flags, utils.CommonFlags...)
-	app.Flags = append(app.Flags, apiFlags...)
-	app.Flags = append(app.Flags, l1Flags...)
-	app.Flags = append(app.Flags, l2Flags...)
-
-	app.Before = func(ctx *cli.Context) error {
-		return utils.LogSetup(ctx)
-	}
-	// Run the sequencer.
-	if err := app.Run(os.Args); err != nil {
-		_, _ = fmt.Fprintln(os.Stderr, err)
-		os.Exit(1)
-	}
-}
-
-func applyConfig(ctx *cli.Context, cfg *config.Config) {
-	if ctx.IsSet(l1UrlFlag.Name) {
-		cfg.L1Config.Endpoint = ctx.String(l1UrlFlag.Name)
-	}
-	if ctx.IsSet(l2UrlFlag.Name) {
-		cfg.L2Config.Endpoint = ctx.String(l2UrlFlag.Name)
-	}
-}
-
-func action(ctx *cli.Context) error {
-	// Load config file.
-	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
-	cfg, err := config.NewConfig(cfgFile)
-	if err != nil {
-		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
-	}
-	applyConfig(ctx, cfg)
-
-	// init db connection
-	var ormFactory database.OrmFactory
-	if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
-		log.Crit("failed to init db connection", "err", err)
-	}
-
-	var (
-		l1Backend *l1.Backend
-		l2Backend *l2.Backend
-	)
-	// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
-	l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
-	if err != nil {
-		return err
-	}
-	l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		l1Backend.Stop()
-		l2Backend.Stop()
-		err = ormFactory.Close()
-		if err != nil {
-			log.Error("can not close ormFactory", err)
-		}
-	}()
-
-	// Start all modules.
-	if err = l1Backend.Start(); err != nil {
-		log.Crit("couldn't start l1 backend", "error", err)
-	}
-	if err = l2Backend.Start(); err != nil {
-		log.Crit("couldn't start l2 backend", "error", err)
-	}
-
-	apis := l2Backend.APIs()
-	// Register api and start rpc service.
-	if ctx.Bool(httpEnabledFlag.Name) {
-		handler, addr, err := utils.StartHTTPEndpoint(
-			fmt.Sprintf(
-				"%s:%d",
-				ctx.String(httpListenAddrFlag.Name),
-				ctx.Int(httpPortFlag.Name)),
-			apis)
-		if err != nil {
-			log.Crit("Could not start HTTP api", "error", err)
-		}
-		defer func() {
-			_ = handler.Shutdown(ctx.Context)
-			log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
-		}()
-		log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
-	}
-
-	// Catch CTRL-C to ensure a graceful shutdown.
-	interrupt := make(chan os.Signal, 1)
-	signal.Notify(interrupt, os.Interrupt)
-
-	// Wait until the interrupt signal is received from an OS signal.
-	<-interrupt
-
-	return nil
+	app.Run()
 }
```
```diff
@@ -3,6 +3,7 @@
     "confirmations": 6,
     "endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
     "l1_messenger_address": "0x0000000000000000000000000000000000000000",
+    "rollup_contract_address": "0x0000000000000000000000000000000000000000",
     "start_height": 0,
     "relayer_config": {
       "messenger_contract_address": "0x0000000000000000000000000000000000000000",
@@ -50,6 +51,7 @@
     "batch_proposer_config": {
       "proof_generation_freq": 1,
       "batch_gas_threshold": 3000000,
+      "batch_tx_num_threshold": 135,
      "batch_time_sec": 300,
      "batch_blocks_limit": 100,
      "skipped_opcodes": [
```
```diff
@@ -5,8 +5,6 @@ import (
 	"os"
 	"path/filepath"
 
-	"scroll-tech/common/utils"
-
 	"scroll-tech/database"
 )
@@ -30,9 +28,5 @@ func NewConfig(file string) (*Config, error) {
 		return nil, err
 	}
 
-	// cover value by env fields
-	cfg.DBConfig.DSN = utils.GetEnvWithDefault("DB_DSN", cfg.DBConfig.DSN)
-	cfg.DBConfig.DriverName = utils.GetEnvWithDefault("DB_DRIVER", cfg.DBConfig.DriverName)
-
 	return cfg, nil
 }
```
```diff
@@ -11,7 +11,9 @@ type L1Config struct {
 	// The start height to sync event from layer 1
 	StartHeight uint64 `json:"start_height"`
 	// The messenger contract address deployed on layer 1 chain.
-	L1MessengerAddress common.Address `json:"l1_messenger_address,omitempty"`
+	L1MessengerAddress common.Address `json:"l1_messenger_address"`
+	// The rollup contract address deployed on layer 1 chain.
+	RollupContractAddress common.Address `json:"rollup_contract_address"`
 	// The relayer config
 	RelayerConfig *RelayerConfig `json:"relayer_config"`
 }
```
```diff
@@ -24,6 +24,8 @@ type L2Config struct {
 type BatchProposerConfig struct {
 	// Proof generation frequency, generating proof every k blocks
 	ProofGenerationFreq uint64 `json:"proof_generation_freq"`
+	// Txnum threshold in a batch
+	BatchTxNumThreshold uint64 `json:"batch_tx_num_threshold"`
 	// Gas threshold in a batch
 	BatchGasThreshold uint64 `json:"batch_gas_threshold"`
 	// Time waited to generate a batch even if gas_threshold not met
```
```diff
@@ -5,7 +5,7 @@ go 1.18
 require (
 	github.com/iden3/go-iden3-crypto v0.0.13
 	github.com/orcaman/concurrent-map v1.0.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
 	github.com/stretchr/testify v1.8.0
 	github.com/urfave/cli/v2 v2.10.2
 	golang.org/x/sync v0.1.0
```
```diff
@@ -348,8 +348,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea h1:KYlmCH4cDMGxQzaYoSK8+DF53POGpAmnzusAtBWzEjA=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
 github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
 github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
 github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
```
```diff
@@ -31,7 +31,7 @@ func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*B
 		return nil, err
 	}
 
-	watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RelayerConfig.RollupContractAddress, orm)
+	watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RollupContractAddress, orm)
 
 	return &Backend{
 		cfg: cfg,
```
bridge/l1/l1_test.go (new file, 65 lines)

```go
package l1

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"scroll-tech/common/docker"

	"scroll-tech/bridge/config"
)

var (
	// config
	cfg *config.Config

	// docker container handlers.
	l1gethImg docker.ImgInstance
	l2gethImg docker.ImgInstance
	dbImg     docker.ImgInstance
)

func setupEnv(t *testing.T) {
	// Load config.
	var err error
	cfg, err = config.NewConfig("../config.json")
	assert.NoError(t, err)

	// Create l1geth container.
	l1gethImg = docker.NewTestL1Docker(t)
	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
	cfg.L1Config.Endpoint = l1gethImg.Endpoint()

	// Create l2geth container.
	l2gethImg = docker.NewTestL2Docker(t)
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
	cfg.L2Config.Endpoint = l2gethImg.Endpoint()

	// Create db container.
	dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
	cfg.DBConfig.DSN = dbImg.Endpoint()
}

func free(t *testing.T) {
	if dbImg != nil {
		assert.NoError(t, dbImg.Stop())
	}
	if l1gethImg != nil {
		assert.NoError(t, l1gethImg.Stop())
	}
	if l2gethImg != nil {
		assert.NoError(t, l2gethImg.Stop())
	}
}

func TestL1(t *testing.T) {
	setupEnv(t)

	t.Run("testCreateNewL1Relayer", testCreateNewL1Relayer)
	t.Run("testStartWatcher", testStartWatcher)

	t.Cleanup(func() {
		free(t)
	})
}
```
```diff
@@ -10,6 +10,7 @@ import (
 
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
 
@@ -51,7 +52,8 @@ func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1Confir
 
 	sender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
 	if err != nil {
-		log.Error("new sender failed", "err", err)
+		addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
+		log.Error("new sender failed", "main address", addr.String(), "err", err)
 		return nil, err
 	}
```
```diff
@@ -1,4 +1,4 @@
-package l1_test
+package l1
 
 import (
 	"context"
@@ -9,37 +9,21 @@ import (
 
 	"scroll-tech/database/migrate"
 
 	"scroll-tech/bridge/config"
-	"scroll-tech/bridge/l1"
 
 	"scroll-tech/database"
 
 	"scroll-tech/common/docker"
 )
 
-// TestCreateNewRelayer test create new relayer instance and stop
-func TestCreateNewL1Relayer(t *testing.T) {
-	cfg, err := config.NewConfig("../config.json")
-	assert.NoError(t, err)
-	l1docker := docker.NewTestL1Docker(t)
-	defer l1docker.Stop()
-	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1docker.Endpoint()
-	cfg.L1Config.Endpoint = l1docker.Endpoint()
-
-	client, err := ethclient.Dial(l1docker.Endpoint())
-	assert.NoError(t, err)
-
-	dbImg := docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
-	defer dbImg.Stop()
-	cfg.DBConfig.DSN = dbImg.Endpoint()
-
+// testCreateNewRelayer test create new relayer instance and stop
+func testCreateNewL1Relayer(t *testing.T) {
 	// Create db handler and reset db.
 	db, err := database.NewOrmFactory(cfg.DBConfig)
 	assert.NoError(t, err)
 	assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
 	defer db.Close()
 
-	relayer, err := l1.NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
+	client, err := ethclient.Dial(l1gethImg.Endpoint())
 	assert.NoError(t, err)
 
+	relayer, err := NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
 	assert.NoError(t, err)
 	defer relayer.Stop()
```
```diff
@@ -5,7 +5,7 @@ import (
 	"math/big"
 	"time"
 
-	"github.com/scroll-tech/go-ethereum"
+	geth "github.com/scroll-tech/go-ethereum"
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/core/types"
```
```diff
@@ -82,22 +82,23 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
 // Start the Watcher module.
 func (w *Watcher) Start() {
 	go func() {
 		// trigger by timer
 		ticker := time.NewTicker(10 * time.Second)
 		defer ticker.Stop()
 
-		for {
+		for ; true; <-ticker.C {
 			select {
-			case <-ticker.C:
+			case <-w.stop:
+				return
+
+			default:
 				blockNumber, err := w.client.BlockNumber(w.ctx)
 				if err != nil {
 					log.Error("Failed to get block number", "err", err)
 					continue
 				}
-				if err := w.fetchContractEvent(blockNumber); err != nil {
+				if err := w.FetchContractEvent(blockNumber); err != nil {
 					log.Error("Failed to fetch bridge contract", "err", err)
 				}
-			case <-w.stop:
-				return
 			}
 		}
 	}()
```
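A note on the loop rewrite above: `for ; true; <-ticker.C` is a Go idiom that runs the body once immediately and then again after every tick, while the `select` with a `default` arm keeps the stop check non-blocking. A minimal, standard-library-only sketch of the idiom (names are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// pollLoop runs its work once right away, then on every tick, and exits
// promptly when stop is closed; this mirrors the watcher's new loop shape.
func pollLoop(stop <-chan struct{}) {
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()

	for ; true; <-ticker.C { // post statement waits for the next tick
		select {
		case <-stop:
			return
		default:
			fmt.Println("poll once") // stands in for the watcher's fetch work
		}
	}
}

func main() {
	stop := make(chan struct{})
	go pollLoop(stop)
	time.Sleep(50 * time.Millisecond)
	close(stop)
}
```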
```diff
@@ -111,105 +112,112 @@ func (w *Watcher) Stop() {
 const contractEventsBlocksFetchLimit = int64(10)
 
 // FetchContractEvent pull latest event logs from given contract address and save in DB
-func (w *Watcher) fetchContractEvent(blockHeight uint64) error {
+func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
 	defer func() {
 		log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
 	}()
 
 	fromBlock := int64(w.processedMsgHeight) + 1
 	toBlock := int64(blockHeight) - int64(w.confirmations)
 
 	if toBlock < fromBlock {
 		return nil
 	}
-	if toBlock > fromBlock+contractEventsBlocksFetchLimit {
-		toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
-	}
-
-	// warning: uint int conversion...
-	query := ethereum.FilterQuery{
-		FromBlock: big.NewInt(fromBlock), // inclusive
-		ToBlock:   big.NewInt(toBlock), // inclusive
-		Addresses: []common.Address{
-			w.messengerAddress,
-			w.rollupAddress,
-		},
-		Topics: make([][]common.Hash, 1),
-	}
-	query.Topics[0] = make([]common.Hash, 5)
-	query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
-	query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
-	query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
-	query.Topics[0][3] = common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE)
-	query.Topics[0][4] = common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE)
-
-	logs, err := w.client.FilterLogs(w.ctx, query)
-	if err != nil {
-		log.Warn("Failed to get event logs", "err", err)
-		return err
-	}
-	if len(logs) == 0 {
-		w.processedMsgHeight = uint64(toBlock)
-		return nil
-	}
-	log.Info("Received new L1 messages", "fromBlock", fromBlock, "toBlock", toBlock,
-		"cnt", len(logs))
-
-	sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
-	if err != nil {
-		log.Error("Failed to parse emitted events log", "err", err)
-		return err
-	}
-
-	// use rollup event to update rollup results db status
-	var batchIDs []string
-	for _, event := range rollupEvents {
-		batchIDs = append(batchIDs, event.batchID.String())
-	}
-	statuses, err := w.db.GetRollupStatusByIDList(batchIDs)
-	if err != nil {
-		log.Error("Failed to GetRollupStatusByIDList", "err", err)
-		return err
-	}
-	if len(statuses) != len(batchIDs) {
-		log.Error("RollupStatus.Length mismatch with BatchIDs.Length")
-		return nil
-	}
-
-	for index, event := range rollupEvents {
-		batchID := event.batchID.String()
-		status := statuses[index]
-		if event.status != status {
-			if event.status == orm.RollupFinalized {
-				err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
-			} else if event.status == orm.RollupCommitted {
-				err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
-			}
-			if err != nil {
-				log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
-				return err
-			}
-		}
-	}
-
-	// Update relayed message first to make sure we don't forget to update submitted message.
-	// Since, we always start sync from the latest unprocessed message.
-	for _, msg := range relayedMessageEvents {
-		if msg.isSuccessful {
-			// succeed
-			err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
-		} else {
-			// failed
-			err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
-		}
-		if err != nil {
-			log.Error("Failed to update layer1 status and layer2 hash", "err", err)
-			return err
-		}
-	}
-
-	err = w.db.SaveL1Messages(w.ctx, sentMessageEvents)
-	if err == nil {
-		w.processedMsgHeight = uint64(toBlock)
-	}
-	return err
+	for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
+		to := from + contractEventsBlocksFetchLimit - 1
+		if to > toBlock {
+			to = toBlock
+		}
+
+		// warning: uint int conversion...
+		query := geth.FilterQuery{
+			FromBlock: big.NewInt(from), // inclusive
+			ToBlock:   big.NewInt(to), // inclusive
+			Addresses: []common.Address{
+				w.messengerAddress,
+				w.rollupAddress,
+			},
+			Topics: make([][]common.Hash, 1),
+		}
+		query.Topics[0] = make([]common.Hash, 5)
+		query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
+		query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
+		query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
+		query.Topics[0][3] = common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE)
+		query.Topics[0][4] = common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE)
+
+		logs, err := w.client.FilterLogs(w.ctx, query)
+		if err != nil {
+			log.Warn("Failed to get event logs", "err", err)
+			return err
+		}
+		if len(logs) == 0 {
+			w.processedMsgHeight = uint64(to)
+			continue
+		}
+		log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
+
+		sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
+		if err != nil {
+			log.Error("Failed to parse emitted events log", "err", err)
+			return err
+		}
+		log.Info("L1 events types", "SentMessageCount", len(sentMessageEvents), "RelayedMessageCount", len(relayedMessageEvents), "RollupEventCount", len(rollupEvents))
+
+		// use rollup event to update rollup results db status
+		var batchIDs []string
+		for _, event := range rollupEvents {
+			batchIDs = append(batchIDs, event.batchID.String())
+		}
+		statuses, err := w.db.GetRollupStatusByIDList(batchIDs)
+		if err != nil {
+			log.Error("Failed to GetRollupStatusByIDList", "err", err)
+			return err
+		}
+		if len(statuses) != len(batchIDs) {
+			log.Error("RollupStatus.Length mismatch with BatchIDs.Length", "RollupStatus.Length", len(statuses), "BatchIDs.Length", len(batchIDs))
+			return nil
+		}
+
+		for index, event := range rollupEvents {
+			batchID := event.batchID.String()
+			status := statuses[index]
+			// only update when db status is before event status
+			if event.status > status {
+				if event.status == orm.RollupFinalized {
+					err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
+				} else if event.status == orm.RollupCommitted {
+					err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
+				}
+				if err != nil {
+					log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
+					return err
+				}
+			}
+		}
+
+		// Update relayed message first to make sure we don't forget to update submitted message.
+		// Since, we always start sync from the latest unprocessed message.
+		for _, msg := range relayedMessageEvents {
+			if msg.isSuccessful {
+				// succeed
+				err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
+			} else {
+				// failed
+				err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
+			}
+			if err != nil {
+				log.Error("Failed to update layer1 status and layer2 hash", "err", err)
+				return err
+			}
+		}
+
+		if err = w.db.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
+			return err
+		}
+
+		w.processedMsgHeight = uint64(to)
+	}
+
+	return nil
 }
 
 func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
```
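The restructured `FetchContractEvent` now walks the block range in fixed-size inclusive windows and checkpoints `processedMsgHeight` after each window, so a failure only re-fetches the current chunk instead of capping progress at one window per tick. A stripped-down sketch of that control flow (the fetch function is a stand-in for `FilterLogs`; names are illustrative):

```go
package main

import "fmt"

const fetchLimit = int64(10)

// fetchRange stands in for one eth_getLogs call over an inclusive
// [from, to] block range.
func fetchRange(from, to int64) error {
	fmt.Printf("filter logs in [%d, %d]\n", from, to)
	return nil
}

// fetchInChunks walks [fromBlock, toBlock] in windows of at most
// fetchLimit blocks, checkpointing progress after each window.
func fetchInChunks(fromBlock, toBlock int64) (processed int64, err error) {
	for from := fromBlock; from <= toBlock; from += fetchLimit {
		to := from + fetchLimit - 1
		if to > toBlock {
			to = toBlock // clamp the final, partial window
		}
		if err = fetchRange(from, to); err != nil {
			return processed, err // resume from the checkpoint next time
		}
		processed = to // mirrors w.processedMsgHeight = uint64(to)
	}
	return processed, nil
}

func main() {
	if _, err := fetchInChunks(1, 25); err != nil {
		fmt.Println(err)
	}
}
```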
bridge/l1/watcher_test.go (new file, 29 lines)

```go
package l1

import (
	"context"
	"testing"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/stretchr/testify/assert"

	"scroll-tech/database"
	"scroll-tech/database/migrate"
)

func testStartWatcher(t *testing.T) {
	// Create db handler and reset db.
	db, err := database.NewOrmFactory(cfg.DBConfig)
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
	defer db.Close()

	client, err := ethclient.Dial(l1gethImg.Endpoint())
	assert.NoError(t, err)

	l1Cfg := cfg.L1Config

	watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
	watcher.Start()
	defer watcher.Stop()
}
```
```diff
@@ -3,7 +3,6 @@ package l2
 import (
 	"context"
 
-	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/rpc"
 
@@ -28,7 +27,7 @@ func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*B
 		return nil, err
 	}
 
-	relayer, err := NewLayer2Relayer(ctx, client, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
+	relayer, err := NewLayer2Relayer(ctx, orm, cfg.RelayerConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -67,8 +66,3 @@ func (l2 *Backend) APIs() []rpc.API {
 		},
 	}
 }
-
-// MockBlockTrace for test case
-func (l2 *Backend) MockBlockTrace(blockTrace *types.BlockTrace) {
-	l2.l2Watcher.Send(blockTrace)
-}
```
```diff
@@ -18,9 +18,10 @@ type batchProposer struct {
 
 	orm database.OrmFactory
 
-	batchTimeSec      uint64
-	batchGasThreshold uint64
-	batchBlocksLimit  uint64
+	batchTimeSec        uint64
+	batchGasThreshold   uint64
+	batchTxNumThreshold uint64
+	batchBlocksLimit    uint64
 
 	proofGenerationFreq uint64
 	skippedOpcodes      map[string]struct{}
@@ -32,6 +33,7 @@ func newBatchProposer(cfg *config.BatchProposerConfig, orm database.OrmFactory)
 		orm:                 orm,
 		batchTimeSec:        cfg.BatchTimeSec,
 		batchGasThreshold:   cfg.BatchGasThreshold,
+		batchTxNumThreshold: cfg.BatchTxNumThreshold,
 		batchBlocksLimit:    cfg.BatchBlocksLimit,
 		proofGenerationFreq: cfg.ProofGenerationFreq,
 		skippedOpcodes:      cfg.SkippedOpcodes,
@@ -58,17 +60,23 @@ func (w *batchProposer) tryProposeBatch() error {
 		return w.createBatchForBlocks(blocks[:1])
 	}
 
+	if blocks[0].TxNum > w.batchTxNumThreshold {
+		log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
+		return w.createBatchForBlocks(blocks[:1])
+	}
+
 	var (
-		length  = len(blocks)
-		gasUsed uint64
+		length         = len(blocks)
+		gasUsed, txNum uint64
 	)
 	// add blocks into batch until reach batchGasThreshold
 	for i, block := range blocks {
-		if gasUsed+block.GasUsed > w.batchGasThreshold {
+		if (gasUsed+block.GasUsed > w.batchGasThreshold) || (txNum+block.TxNum > w.batchTxNumThreshold) {
 			blocks = blocks[:i]
 			break
 		}
 		gasUsed += block.GasUsed
+		txNum += block.TxNum
 	}
 
 	// if too few gas gathered, but we don't want to halt, we then check the first block in the batch:
```
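The proposer now packs a batch under two caps, gas and transaction count: adding the next block must not break either one. A minimal sketch of the greedy rule with an assumed `Block` type (fields are illustrative, not the repo's):

```go
package main

import "fmt"

// Block is a stand-in for the proposer's per-block info.
type Block struct {
	GasUsed uint64
	TxNum   uint64
}

// packBatch greedily appends blocks until adding the next one would
// exceed either the gas threshold or the tx-count threshold, mirroring
// the combined check introduced in tryProposeBatch.
func packBatch(blocks []Block, gasThreshold, txNumThreshold uint64) []Block {
	var gasUsed, txNum uint64
	for i, b := range blocks {
		if gasUsed+b.GasUsed > gasThreshold || txNum+b.TxNum > txNumThreshold {
			return blocks[:i]
		}
		gasUsed += b.GasUsed
		txNum += b.TxNum
	}
	return blocks
}

func main() {
	blocks := []Block{
		{GasUsed: 1_000_000, TxNum: 50},
		{GasUsed: 1_500_000, TxNum: 60},
		{GasUsed: 900_000, TxNum: 40},
	}
	batch := packBatch(blocks, 3_000_000, 135)
	fmt.Println("blocks in batch:", len(batch)) // 2: the third block would break both caps
}
```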
bridge/l2/batch_proposer_test.go (new file, 62 lines)
```go
package l2

import (
	"encoding/json"
	"fmt"
	"math/big"
	"os"
	"testing"

	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/stretchr/testify/assert"

	"scroll-tech/database"
	"scroll-tech/database/migrate"

	"scroll-tech/bridge/config"

	"scroll-tech/common/utils"
)

func testBatchProposer(t *testing.T) {
	// Create db handler and reset db.
	db, err := database.NewOrmFactory(cfg.DBConfig)
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
	defer db.Close()

	trace2 := &types.BlockTrace{}
	trace3 := &types.BlockTrace{}

	data, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
	assert.NoError(t, err)
	err = json.Unmarshal(data, trace2)
	assert.NoError(t, err)

	data, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
	assert.NoError(t, err)
	err = json.Unmarshal(data, trace3)
	assert.NoError(t, err)
	// Insert traces into db.
	assert.NoError(t, db.InsertBlockTraces([]*types.BlockTrace{trace2, trace3}))

	id := utils.ComputeBatchID(trace3.Header.Hash(), trace2.Header.ParentHash, big.NewInt(1))

	proposer := newBatchProposer(&config.BatchProposerConfig{
		ProofGenerationFreq: 1,
		BatchGasThreshold:   3000000,
		BatchTxNumThreshold: 135,
		BatchTimeSec:        1,
		BatchBlocksLimit:    100,
	}, db)
	assert.NoError(t, proposer.tryProposeBatch())

	infos, err := db.GetUnbatchedBlocks(map[string]interface{}{},
		fmt.Sprintf("order by number ASC LIMIT %d", 100))
	assert.NoError(t, err)
	assert.Equal(t, true, len(infos) == 0)

	exist, err := db.BatchRecordExist(id)
	assert.NoError(t, err)
	assert.Equal(t, true, exist)
}
```
```diff
@@ -1,4 +1,4 @@
-package l2_test
+package l2
 
 import (
 	"testing"
@@ -78,6 +78,8 @@ func TestFunction(t *testing.T) {
 	t.Run("testL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
 	t.Run("testL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
 
+	t.Run("testBatchProposer", testBatchProposer)
+
 	t.Cleanup(func() {
 		free(t)
 	})
```
```diff
@@ -4,14 +4,15 @@ import (
 	"context"
 	"errors"
 	"math/big"
+	"sync"
 	"time"
 
 	// not sure if this will make problems when relay with l1geth
 
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
-	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
+	"golang.org/x/sync/errgroup"
 
 	"scroll-tech/database"
 	"scroll-tech/database/orm"
@@ -29,8 +30,7 @@ import (
 // Actions are triggered by new head from layer 1 geth node.
 // @todo It's better to be triggered by watcher.
 type Layer2Relayer struct {
-	ctx    context.Context
-	client *ethclient.Client
+	ctx context.Context
 
 	db  database.OrmFactory
 	cfg *config.RelayerConfig
@@ -56,7 +56,7 @@ type Layer2Relayer struct {
 }
 
 // NewLayer2Relayer will return a new instance of Layer2RelayerClient
-func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, l2ConfirmNum int64, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
+func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
 	// @todo use different sender for relayer, block commit and proof finalize
 	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
 	if err != nil {
@@ -72,7 +72,6 @@ func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, l2Confir
 
 	return &Layer2Relayer{
 		ctx: ctx,
-		client: ethClient,
 		db: db,
 		messageSender: messageSender,
 		messageCh: messageSender.ConfirmChan(),
@@ -90,14 +89,41 @@ func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, l2Confir
 
 // ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
 func (r *Layer2Relayer) ProcessSavedEvents() {
+	batch, err := r.db.GetLatestFinalizedBatch()
+	if err != nil {
+		log.Error("GetLatestFinalizedBatch failed", "err", err)
+		return
+	}
+
 	// msgs are sorted by nonce in increasing order
-	msgs, err := r.db.GetL2MessagesByStatus(orm.MsgPending)
+	msgs, err := r.db.GetL2MessagesByStatusUpToHeight(orm.MsgPending, batch.EndBlockNumber)
 	if err != nil {
 		log.Error("Failed to fetch unprocessed L2 messages", "err", err)
 		return
 	}
-	for _, msg := range msgs {
-		if err := r.processSavedEvent(msg); err != nil {
+
+	// process messages in batches
+	batch_size := r.messageSender.NumberOfAccounts()
+	for from := 0; from < len(msgs); from += batch_size {
+		to := from + batch_size
+		if to > len(msgs) {
+			to = len(msgs)
+		}
+
+		var g errgroup.Group
+		for i := from; i < to; i++ {
+			msg := msgs[i]
+			g.Go(func() error {
+				return r.processSavedEvent(msg, batch)
+			})
+		}
+
+		if err := g.Wait(); err != nil {
 			if !errors.Is(err, sender.ErrNoAvailableAccount) {
 				log.Error("failed to process l2 saved event", "err", err)
 			}
@@ -106,19 +132,7 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
 	}
 }
 
-func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
-	// @todo add support to relay multiple messages
-	batch, err := r.db.GetLatestFinalizedBatch()
-	if err != nil {
-		log.Error("GetLatestFinalizedBatch failed", "err", err)
-		return err
-	}
-
-	if batch.EndBlockNumber < msg.Height {
-		// log.Warn("corresponding block not finalized", "status", status)
-		return nil
-	}
-
+func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, batch *orm.BlockBatch) error {
 	// @todo fetch merkle proof from l2geth
 	log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
@@ -240,21 +254,23 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
 		return
 	}
 
-	hash, err := r.rollupSender.SendTransaction(id, &r.cfg.RollupContractAddress, big.NewInt(0), data)
+	txID := id + "-commit"
+	// add suffix `-commit` to avoid duplication with finalize tx in unit tests
+	hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
 	if err != nil {
 		if !errors.Is(err, sender.ErrNoAvailableAccount) {
 			log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
 		}
 		return
 	}
-	log.Info("commitBatch in layer1", "id", id, "index", batch.Index, "hash", hash)
+	log.Info("commitBatch in layer1", "batchID", id, "index", batch.Index, "hash", hash)
 
 	// record and sync with db, @todo handle db error
 	err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
 	if err != nil {
 		log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
 	}
-	r.processingCommitment[id] = id
+	r.processingCommitment[txID] = id
 }
 
 // ProcessCommittedBatches submit proof to layer 1 rollup contract
@@ -332,7 +348,9 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
 			return
 		}
 
-		txHash, err := r.rollupSender.SendTransaction(id, &r.cfg.RollupContractAddress, big.NewInt(0), data)
+		txID := id + "-finalize"
+		// add suffix `-finalize` to avoid duplication with commit tx in unit tests
+		txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
 		hash := &txHash
 		if err != nil {
 			if !errors.Is(err, sender.ErrNoAvailableAccount) {
@@ -340,15 +358,15 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
 			}
 			return
 		}
-		log.Info("finalizeBatchWithProof in layer1", "id", id, "hash", hash)
+		log.Info("finalizeBatchWithProof in layer1", "batchID", id, "hash", hash)
 
 		// record and sync with db, @todo handle db error
 		err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
 		if err != nil {
-			log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "id", id, "err", err)
+			log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batchID", id, "err", err)
 		}
 		success = true
-		r.processingFinalization[id] = id
+		r.processingFinalization[txID] = id
 
 	default:
 		log.Error("encounter unreachable case in ProcessCommittedBatches",
@@ -367,9 +385,26 @@ func (r *Layer2Relayer) Start() {
 		for {
 			select {
 			case <-ticker.C:
-				r.ProcessSavedEvents()
-				r.ProcessPendingBatches()
-				r.ProcessCommittedBatches()
+				var wg sync.WaitGroup
+				wg.Add(3)
+
+				go func() {
+					defer wg.Done()
+					r.ProcessSavedEvents()
+				}()
+
+				go func() {
+					defer wg.Done()
+					r.ProcessPendingBatches()
+				}()
+
+				go func() {
+					defer wg.Done()
+					r.ProcessCommittedBatches()
+				}()
+
+				wg.Wait()
+
 			case confirmation := <-r.messageCh:
 				r.handleConfirmation(confirmation)
 			case confirmation := <-r.rollupCh:
```
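`ProcessSavedEvents` now fans work out in windows sized by the sender's `NumberOfAccounts()` and joins each window with `errgroup`, so at most one in-flight message per account. A self-contained sketch of that batching shape (illustrative names, not the repo's types):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// processInGroups walks the work list in windows of groupSize, runs each
// window concurrently, and stops at the first error any goroutine in the
// window returns; this mirrors the relayer's new loop.
func processInGroups(items []string, groupSize int, handle func(string) error) error {
	for from := 0; from < len(items); from += groupSize {
		to := from + groupSize
		if to > len(items) {
			to = len(items)
		}

		var g errgroup.Group
		for i := from; i < to; i++ {
			item := items[i] // per-iteration copy for the closure
			g.Go(func() error { return handle(item) })
		}
		if err := g.Wait(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	msgs := []string{"msg-0", "msg-1", "msg-2", "msg-3", "msg-4"}
	err := processInGroups(msgs, 2, func(m string) error {
		fmt.Println("relaying", m)
		return nil
	})
	fmt.Println("done, err =", err)
}
```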
```diff
@@ -1,4 +1,4 @@
-package l2_test
+package l2
 
 import (
 	"context"
@@ -11,8 +11,6 @@ import (
 	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/stretchr/testify/assert"
 
-	"scroll-tech/bridge/l2"
-
 	"scroll-tech/database"
 	"scroll-tech/database/migrate"
 	"scroll-tech/database/orm"
@@ -42,7 +40,7 @@ func testCreateNewRelayer(t *testing.T) {
 	assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
 	defer db.Close()
 
-	relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
+	relayer, err := NewLayer2Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
 	assert.NoError(t, err)
 	defer relayer.Stop()
@@ -57,7 +55,7 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
 	defer db.Close()
 
 	l2Cfg := cfg.L2Config
-	relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, int64(l2Cfg.Confirmations), db, l2Cfg.RelayerConfig)
+	relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
 	assert.NoError(t, err)
 	defer relayer.Stop()
@@ -76,7 +74,7 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
 		},
 	}
-	err = db.InsertBlockTraces(context.Background(), traces)
+	err = db.InsertBlockTraces(traces)
 	assert.NoError(t, err)
 
 	dbTx, err := db.Beginx()
@@ -111,7 +109,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
 	defer db.Close()
 
 	l2Cfg := cfg.L2Config
-	relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, int64(l2Cfg.Confirmations), db, l2Cfg.RelayerConfig)
+	relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
 	assert.NoError(t, err)
 	defer relayer.Stop()
@@ -132,7 +130,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
 	assert.NoError(t, err)
 	traces = append(traces, blockTrace)
 
-	err = db.InsertBlockTraces(context.Background(), traces)
+	err = db.InsertBlockTraces(traces)
 	assert.NoError(t, err)
 
 	dbTx, err := db.Beginx()
@@ -168,7 +166,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
 	defer db.Close()
 
 	l2Cfg := cfg.L2Config
-	relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, int64(l2Cfg.Confirmations), db, l2Cfg.RelayerConfig)
+	relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
 	assert.NoError(t, err)
 	defer relayer.Stop()
```
```diff
@@ -7,7 +7,7 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/scroll-tech/go-ethereum"
+	geth "github.com/scroll-tech/go-ethereum"
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/core/types"
```
```diff
@@ -81,49 +81,29 @@ func (w *WatcherClient) Start() {
 		panic("must run L2 watcher with DB")
 	}
 
-	lastFetchedBlock, err := w.orm.GetBlockTracesLatestHeight()
-	if err != nil {
-		panic(fmt.Sprintf("failed to GetBlockTracesLatestHeight in DB: %v", err))
-	}
-
-	if lastFetchedBlock < 0 {
-		lastFetchedBlock = 0
-	}
-	lastBlockHeightChangeTime := time.Now()
-
 	// trigger by timer
 	// TODO: make it configurable
 	ticker := time.NewTicker(3 * time.Second)
 	defer ticker.Stop()
 
-	for {
+	for ; true; <-ticker.C {
 		select {
-		case <-ticker.C:
+		case <-w.stopCh:
+			return
+
+		default:
 			// get current height
 			number, err := w.BlockNumber(w.ctx)
 			if err != nil {
 				log.Error("failed to get_BlockNumber", "err", err)
 				continue
 			}
-			duration := time.Since(lastBlockHeightChangeTime)
-			var blockToFetch uint64
-			if number > uint64(lastFetchedBlock)+w.confirmations {
-				// latest block height changed
-				blockToFetch = number - w.confirmations
-			} else if duration.Seconds() > 60 {
-				// l2geth didn't produce any blocks more than 1 minute.
-				blockToFetch = number
-			}
-			// fetch at most `blockTracesFetchLimit=10` missing blocks
-			if blockToFetch > uint64(lastFetchedBlock)+blockTracesFetchLimit {
-				blockToFetch = uint64(lastFetchedBlock) + blockTracesFetchLimit
-			}
-			if lastFetchedBlock != int64(blockToFetch) {
-				lastFetchedBlock = int64(blockToFetch)
-				lastBlockHeightChangeTime = time.Now()
-			}
 
-			if err := w.tryFetchRunningMissingBlocks(w.ctx, blockToFetch); err != nil {
+			if number >= w.confirmations {
+				number = number - w.confirmations
+			} else {
+				number = 0
+			}
+
+			if err := w.tryFetchRunningMissingBlocks(w.ctx, number); err != nil {
 				log.Error("failed to fetchRunningMissingBlocks", "err", err)
 			}
 
@@ -135,9 +115,6 @@ func (w *WatcherClient) Start() {
 			if err := w.batchProposer.tryProposeBatch(); err != nil {
 				log.Error("failed to tryProposeBatch", "err", err)
 			}
-
-		case <-w.stopCh:
-			return
 		}
 	}
 }()
```
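The clamp added before `tryFetchRunningMissingBlocks` matters because the values are `uint64`: a bare `number - confirmations` would wrap around on underflow instead of going negative. A tiny sketch of the guard:

```go
package main

import "fmt"

// safeBack returns number-confirmations, clamped at zero. With uint64 a
// bare subtraction wraps on underflow, so the guard is required; this
// mirrors the clamp added in the l2 watcher loop.
func safeBack(number, confirmations uint64) uint64 {
	if number >= confirmations {
		return number - confirmations
	}
	return 0
}

func main() {
	fmt.Println(safeBack(100, 6)) // 94
	fmt.Println(safeBack(3, 6))   // 0, not a huge wrapped value
}
```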
```diff
@@ -151,7 +128,7 @@ func (w *WatcherClient) Stop() {
 const blockTracesFetchLimit = uint64(10)
 
 // try fetch missing blocks if inconsistent
-func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, backTrackFrom uint64) error {
+func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) error {
 	// Get newest block in DB. must have blocks at that time.
 	// Don't use "block_trace" table "trace" column's BlockTrace.Number,
 	// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
@@ -159,30 +136,50 @@ func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, backTr
 	if err != nil {
 		return fmt.Errorf("failed to GetBlockTracesLatestHeight in DB: %v", err)
 	}
-	backTrackTo := uint64(0)
+
+	// Can't get trace from genesis block, so the default start number is 1.
+	var from = uint64(1)
 	if heightInDB > 0 {
-		backTrackTo = uint64(heightInDB)
+		from = uint64(heightInDB) + 1
 	}
 
+	// start backtracking
+	for ; from <= blockHeight; from += blockTracesFetchLimit {
+		to := from + blockTracesFetchLimit - 1
+		if to > blockHeight {
+			to = blockHeight
+		}
+
+		// Get block traces and insert into db.
+		if err = w.getAndStoreBlockTraces(ctx, from, to); err != nil {
+			log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to)
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
 	var traces []*types.BlockTrace
-	for number := backTrackFrom; number > backTrackTo; number-- {
+
+	for number := from; number <= to; number++ {
 		log.Debug("retrieving block trace", "height", number)
 		trace, err2 := w.GetBlockTraceByNumber(ctx, big.NewInt(int64(number)))
 		if err2 != nil {
 			return fmt.Errorf("failed to GetBlockResultByHash: %v. number: %v", err2, number)
 		}
-		log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash)
+		log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash().String())
 
 		traces = append(traces, trace)
 
 	}
 	if len(traces) > 0 {
-		if err = w.orm.InsertBlockTraces(ctx, traces); err != nil {
+		if err := w.orm.InsertBlockTraces(traces); err != nil {
 			return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
 		}
 	}
 
 	return nil
 }
```
|
||||
|
||||
@@ -190,70 +187,75 @@ const contractEventsBlocksFetchLimit = int64(10)

// FetchContractEvent pulls the latest event logs from the given contract address and saves them in DB
func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
    defer func() {
        log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
    }()

    fromBlock := int64(w.processedMsgHeight) + 1
    toBlock := int64(blockHeight) - int64(w.confirmations)
    toBlock := int64(blockHeight)

    if toBlock < fromBlock {
        return nil
    }
    for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
        to := from + contractEventsBlocksFetchLimit - 1

        if toBlock > fromBlock+contractEventsBlocksFetchLimit {
            toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
        }

        // warning: uint int conversion...
        query := ethereum.FilterQuery{
            FromBlock: big.NewInt(fromBlock), // inclusive
            ToBlock:   big.NewInt(toBlock),   // inclusive
            Addresses: []common.Address{
                w.messengerAddress,
            },
            Topics: make([][]common.Hash, 1),
        }
        query.Topics[0] = make([]common.Hash, 3)
        query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
        query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
        query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)

        logs, err := w.FilterLogs(w.ctx, query)
        if err != nil {
            log.Error("failed to get event logs", "err", err)
            return err
        }
        if len(logs) == 0 {
            w.processedMsgHeight = uint64(toBlock)
            return nil
        }
        log.Info("received new L2 messages", "fromBlock", fromBlock, "toBlock", toBlock,
            "cnt", len(logs))

        sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
        if err != nil {
            log.Error("failed to parse emitted event log", "err", err)
            return err
        }

        // Update relayed messages first to make sure we don't forget to update submitted messages,
        // since we always start syncing from the latest unprocessed message.
        for _, msg := range relayedMessageEvents {
            if msg.isSuccessful {
                // succeed
                err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
            } else {
                // failed
                err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
        if to > toBlock {
            to = toBlock
        }

        // warning: uint int conversion...
        query := geth.FilterQuery{
            FromBlock: big.NewInt(from), // inclusive
            ToBlock:   big.NewInt(to),   // inclusive
            Addresses: []common.Address{
                w.messengerAddress,
            },
            Topics: make([][]common.Hash, 1),
        }
        query.Topics[0] = make([]common.Hash, 3)
        query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
        query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
        query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)

        logs, err := w.FilterLogs(w.ctx, query)
        if err != nil {
            log.Error("Failed to update layer1 status and layer2 hash", "err", err)
            log.Error("failed to get event logs", "err", err)
            return err
        }
        if len(logs) == 0 {
            w.processedMsgHeight = uint64(to)
            continue
        }
        log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))

        sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
        if err != nil {
            log.Error("failed to parse emitted event log", "err", err)
            return err
        }

        // Update relayed messages first to make sure we don't forget to update submitted messages,
        // since we always start syncing from the latest unprocessed message.
        for _, msg := range relayedMessageEvents {
            if msg.isSuccessful {
                // succeed
                err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
            } else {
                // failed
                err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
            }
            if err != nil {
                log.Error("Failed to update layer1 status and layer2 hash", "err", err)
                return err
            }
        }

        if err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
            return err
        }

        w.processedMsgHeight = uint64(to)
    }

    err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents)
    if err == nil {
        w.processedMsgHeight = uint64(toBlock)
    }
    return err
    return nil
}
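For readers unfamiliar with go-ethereum log filters: a slice of hashes in Topics[0], as built above, is an OR over event signatures at topic position 0, so one FilterLogs call matches SentMessage, RelayedMessage, or FailedRelayedMessage from the messenger. A self-contained sketch of the same construction (the helper name is illustrative, not part of the bridge code):

package main

import (
    "fmt"
    "math/big"

    ethereum "github.com/scroll-tech/go-ethereum"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
)

// buildQuery builds a one-address filter that matches any of the given event
// signatures: hashes inside Topics[0] are OR'ed, while distinct topic
// positions would be AND'ed.
func buildQuery(addr common.Address, from, to int64, eventSigs ...string) ethereum.FilterQuery {
    topic0 := make([]common.Hash, 0, len(eventSigs))
    for _, sig := range eventSigs {
        // topic0 of a log is keccak256 of the canonical event signature.
        topic0 = append(topic0, crypto.Keccak256Hash([]byte(sig)))
    }
    return ethereum.FilterQuery{
        FromBlock: big.NewInt(from), // inclusive
        ToBlock:   big.NewInt(to),   // inclusive
        Addresses: []common.Address{addr},
        Topics:    [][]common.Hash{topic0},
    }
}

func main() {
    q := buildQuery(common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d"), 1, 10,
        "RelayedMessage(bytes32)", "FailedRelayedMessage(bytes32)")
    fmt.Println(q.Topics[0][0].Hex())
}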

func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, error) {

@@ -1,33 +1,5 @@
package l2

import (
    "errors"

    "github.com/scroll-tech/go-ethereum/rpc"
)

// WatcherAPI watcher api service
type WatcherAPI interface {
    ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error)
}

// ReplayBlockResultByHash temporary interface for easy testing.
func (r *WatcherClient) ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error) {
    orm := r.orm
    params := make(map[string]interface{})
    if number, ok := blockNrOrHash.Number(); ok {
        params["number"] = int64(number)
    }
    if hash, ok := blockNrOrHash.Hash(); ok {
        params["hash"] = hash.String()
    }
    if len(params) == 0 {
        return false, errors.New("empty params")
    }
    trace, err := orm.GetBlockTraces(params)
    if err != nil {
        return false, err
    }
    r.Send(&trace[0])
    return true, nil
}
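The removed handler above branches on whichever half of rpc.BlockNumberOrHash is populated. A small sketch of constructing both variants with go-ethereum's helpers (the values are arbitrary):

package main

import (
    "fmt"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/rpc"
)

func main() {
    byNumber := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(42))
    byHash := rpc.BlockNumberOrHashWithHash(common.HexToHash("0xabc123"), false)

    // Exactly one of Number()/Hash() reports ok for each value.
    if n, ok := byNumber.Number(); ok {
        fmt.Println("number:", int64(n)) // number: 42
    }
    if h, ok := byHash.Hash(); ok {
        fmt.Println("hash:", h.Hex())
    }
}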
@@ -1,4 +1,4 @@
package l2_test
package l2

import (
    "context"
@@ -15,7 +15,6 @@ import (
    "github.com/stretchr/testify/assert"

    "scroll-tech/bridge/config"
    "scroll-tech/bridge/l2"
    "scroll-tech/bridge/mock_bridge"
    "scroll-tech/bridge/sender"

@@ -32,7 +31,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
    defer l2db.Close()

    l2cfg := cfg.L2Config
    rc := l2.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
    rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
    rc.Start()
    defer rc.Stop()

@@ -68,7 +67,7 @@ func testMonitorBridgeContract(t *testing.T) {
    auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])

    // deploy mock bridge
    _, tx, instance, err := mock_bridge.DeployMockBridge(auth, l2Cli)
    _, tx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
    assert.NoError(t, err)
    address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
    assert.NoError(t, err)
@@ -80,7 +79,10 @@ func testMonitorBridgeContract(t *testing.T) {
    // Call mock_bridge instance sendMessage to trigger emit events
    toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
    message := []byte("testbridgecontract")
    tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
    fee := big.NewInt(0)
    gasLimit := big.NewInt(1)

    tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
    assert.NoError(t, err)
    receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
    if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
@@ -90,7 +92,7 @@ func testMonitorBridgeContract(t *testing.T) {
    // extra block mined
    toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
    message = []byte("testbridgecontract")
    tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
    tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
    assert.NoError(t, err)
    receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
    if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
@@ -127,7 +129,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {

    auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])

    _, trx, instance, err := mock_bridge.DeployMockBridge(auth, l2Cli)
    _, trx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
    assert.NoError(t, err)
    address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
    assert.NoError(t, err)
@@ -147,7 +149,9 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
    auth.Nonce = big.NewInt(int64(nonce))
    toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
    message := []byte("testbridgecontract")
    tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
    fee := big.NewInt(0)
    gasLimit := big.NewInt(1)
    tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
    assert.NoError(t, err)
}

@@ -163,7 +167,9 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
    auth.Nonce = big.NewInt(int64(nonce))
    toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
    message := []byte("testbridgecontract")
    tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
    fee := big.NewInt(0)
    gasLimit := big.NewInt(1)
    tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
    assert.NoError(t, err)
    receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
    if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
@@ -183,8 +189,8 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
    assert.Equal(t, 5, len(msgs))
}

func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *l2.WatcherClient {
    return l2.NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, db)
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
    return NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, db)
}

func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {

187  bridge/mock_bridge/MockBridgeL1.sol  Normal file
@@ -0,0 +1,187 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;

contract MockBridgeL1 {
  /*********************************
   * Events from L1ScrollMessenger *
   *********************************/

  event SentMessage(
    address indexed target,
    address sender,
    uint256 value,
    uint256 fee,
    uint256 deadline,
    bytes message,
    uint256 messageNonce,
    uint256 gasLimit
  );

  event MessageDropped(bytes32 indexed msgHash);

  event RelayedMessage(bytes32 indexed msgHash);

  event FailedRelayedMessage(bytes32 indexed msgHash);

  /************************
   * Events from ZKRollup *
   ************************/

  /// @notice Emitted when a new batch is committed.
  /// @param _batchHash The hash of the batch
  /// @param _batchIndex The index of the batch
  /// @param _parentHash The hash of parent batch
  event CommitBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);

  /// @notice Emitted when a batch is reverted.
  /// @param _batchId The identification of the batch.
  event RevertBatch(bytes32 indexed _batchId);

  /// @notice Emitted when a batch is finalized.
  /// @param _batchHash The hash of the batch
  /// @param _batchIndex The index of the batch
  /// @param _parentHash The hash of parent batch
  event FinalizeBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);

  /***********
   * Structs *
   ***********/

  struct L2MessageProof {
    uint256 batchIndex;
    uint256 blockHeight;
    bytes merkleProof;
  }

  /// @dev The transaction struct
  struct Layer2Transaction {
    address caller;
    uint64 nonce;
    address target;
    uint64 gas;
    uint256 gasPrice;
    uint256 value;
    bytes data;
    // signature
    uint256 r;
    uint256 s;
    uint64 v;
  }

  /// @dev The block header struct
  struct Layer2BlockHeader {
    bytes32 blockHash;
    bytes32 parentHash;
    uint256 baseFee;
    bytes32 stateRoot;
    uint64 blockHeight;
    uint64 gasUsed;
    uint64 timestamp;
    bytes extraData;
    Layer2Transaction[] txs;
  }

  /// @dev The batch struct, the batch hash is always the last block hash of `blocks`.
  struct Layer2Batch {
    uint64 batchIndex;
    // The hash of the last block in the parent batch
    bytes32 parentHash;
    Layer2BlockHeader[] blocks;
  }

  struct Layer2BatchStored {
    bytes32 batchHash;
    bytes32 parentHash;
    uint64 batchIndex;
    bool verified;
  }

  /*************
   * Variables *
   *************/

  /// @notice Message nonce, used to avoid relay attack.
  uint256 public messageNonce;

  /// @notice Mapping from batch id to batch struct.
  mapping(bytes32 => Layer2BatchStored) public batches;

  /************************************
   * Functions from L1ScrollMessenger *
   ************************************/

  function sendMessage(
    address _to,
    uint256 _fee,
    bytes memory _message,
    uint256 _gasLimit
  ) external payable {
    // solhint-disable-next-line not-rely-on-time
    uint256 _deadline = block.timestamp + 1 days;
    uint256 _value;
    unchecked {
      _value = msg.value - _fee;
    }
    uint256 _nonce = messageNonce;
    emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
    messageNonce += 1;
  }

  function relayMessageWithProof(
    address _from,
    address _to,
    uint256 _value,
    uint256 _fee,
    uint256 _deadline,
    uint256 _nonce,
    bytes memory _message,
    L2MessageProof memory
  ) external {
    bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
    emit RelayedMessage(_msghash);
  }

  /***************************
   * Functions from ZKRollup *
   ***************************/

  function commitBatch(Layer2Batch memory _batch) external {
    bytes32 _batchHash = _batch.blocks[_batch.blocks.length - 1].blockHash;
    bytes32 _batchId = _computeBatchId(_batchHash, _batch.parentHash, _batch.batchIndex);

    Layer2BatchStored storage _batchStored = batches[_batchId];
    _batchStored.batchHash = _batchHash;
    _batchStored.parentHash = _batch.parentHash;
    _batchStored.batchIndex = _batch.batchIndex;

    emit CommitBatch(_batchId, _batchHash, _batch.batchIndex, _batch.parentHash);
  }

  function revertBatch(bytes32 _batchId) external {
    emit RevertBatch(_batchId);
  }

  function finalizeBatchWithProof(
    bytes32 _batchId,
    uint256[] memory,
    uint256[] memory
  ) external {
    Layer2BatchStored storage _batch = batches[_batchId];
    uint256 _batchIndex = _batch.batchIndex;

    emit FinalizeBatch(_batchId, _batch.batchHash, _batchIndex, _batch.parentHash);
  }

  /// @dev Internal function to compute a unique batch id for mapping.
  /// @param _batchHash The hash of the batch.
  /// @param _parentHash The hash of the parent batch.
  /// @param _batchIndex The index of the batch.
  /// @return Return the computed batch id.
  function _computeBatchId(
    bytes32 _batchHash,
    bytes32 _parentHash,
    uint256 _batchIndex
  ) internal pure returns (bytes32) {
    return keccak256(abi.encode(_batchHash, _parentHash, _batchIndex));
  }
}
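Since _computeBatchId hashes abi.encode output, and for three static types that encoding is simply the concatenation of three 32-byte words, the same id can be recomputed off-chain. A hypothetical Go helper (not part of the bridge code) that mirrors it:

package main

import (
    "fmt"
    "math/big"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
)

// computeBatchID mirrors MockBridgeL1._computeBatchId: abi.encode of
// (bytes32, bytes32, uint256) is three 32-byte words back to back, so
// keccak256 over their concatenation yields the same id.
func computeBatchID(batchHash, parentHash common.Hash, batchIndex *big.Int) common.Hash {
    indexWord := common.BigToHash(batchIndex) // left-pads the uint256 to 32 bytes
    return crypto.Keccak256Hash(batchHash.Bytes(), parentHash.Bytes(), indexWord.Bytes())
}

func main() {
    id := computeBatchID(common.HexToHash("0x01"), common.HexToHash("0x02"), big.NewInt(7))
    fmt.Println(id.Hex())
}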
67  bridge/mock_bridge/MockBridgeL2.sol  Normal file
@@ -0,0 +1,67 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;

contract MockBridgeL2 {
  /*********************************
   * Events from L2ScrollMessenger *
   *********************************/

  event SentMessage(
    address indexed target,
    address sender,
    uint256 value,
    uint256 fee,
    uint256 deadline,
    bytes message,
    uint256 messageNonce,
    uint256 gasLimit
  );

  event MessageDropped(bytes32 indexed msgHash);

  event RelayedMessage(bytes32 indexed msgHash);

  event FailedRelayedMessage(bytes32 indexed msgHash);

  /*************
   * Variables *
   *************/

  /// @notice Message nonce, used to avoid relay attack.
  uint256 public messageNonce;

  /************************************
   * Functions from L2ScrollMessenger *
   ************************************/

  function sendMessage(
    address _to,
    uint256 _fee,
    bytes memory _message,
    uint256 _gasLimit
  ) external payable {
    // solhint-disable-next-line not-rely-on-time
    uint256 _deadline = block.timestamp + 1 days;
    uint256 _nonce = messageNonce;
    uint256 _value;
    unchecked {
      _value = msg.value - _fee;
    }
    bytes32 _msghash = keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message));
    emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
    messageNonce = _nonce + 1;
  }

  function relayMessageWithProof(
    address _from,
    address _to,
    uint256 _value,
    uint256 _fee,
    uint256 _deadline,
    uint256 _nonce,
    bytes memory _message
  ) external {
    bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
    emit RelayedMessage(_msghash);
  }
}
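In contrast to the abi.encode used for batch ids, the message hash here uses abi.encodePacked, which concatenates values at their natural widths: 20 bytes per address, 32 big-endian bytes per uint256, and the raw payload bytes. A hypothetical Go equivalent (not part of the bridge code) for recomputing a message hash off-chain:

package main

import (
    "fmt"
    "math/big"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
)

// messageHash mirrors keccak256(abi.encodePacked(from, to, value, fee, deadline, nonce, message)):
// addresses contribute 20 bytes, uint256 values 32 big-endian bytes, and the payload is appended raw.
func messageHash(from, to common.Address, value, fee, deadline, nonce *big.Int, message []byte) common.Hash {
    return crypto.Keccak256Hash(
        from.Bytes(),
        to.Bytes(),
        common.BigToHash(value).Bytes(),
        common.BigToHash(fee).Bytes(),
        common.BigToHash(deadline).Bytes(),
        common.BigToHash(nonce).Bytes(),
        message,
    )
}

func main() {
    h := messageHash(common.Address{}, common.Address{},
        big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), []byte("testbridgecontract"))
    fmt.Println(h.Hex())
}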
@@ -1,42 +0,0 @@
//SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;

contract Mock_Bridge {

  event SentMessage(
    address indexed target,
    address sender,
    uint256 value,
    uint256 fee,
    uint256 deadline,
    bytes message,
    uint256 messageNonce,
    uint256 gasLimit
  );

  /// @notice Message nonce, used to avoid relay attack.
  uint256 public messageNonce;

  function sendMessage(
    address _to,
    bytes memory _message,
    uint256 _gasLimit
  ) external payable {
    // solhint-disable-next-line not-rely-on-time
    uint256 _deadline = block.timestamp + 1 days;
    // @todo compute fee
    uint256 _fee = 0;
    uint256 _nonce = messageNonce;
    require(msg.value >= _fee, "cannot pay fee");
    uint256 _value;
    unchecked {
      _value = msg.value - _fee;
    }

    emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);

    unchecked {
      messageNonce = _nonce + 1;
    }
  }
}
@@ -16,9 +16,10 @@ import (
type accountPool struct {
    client *ethclient.Client

    minBalance *big.Int
    accounts   map[common.Address]*bind.TransactOpts
    accsCh     chan *bind.TransactOpts
    minBalance  *big.Int
    accounts    map[common.Address]*bind.TransactOpts
    numAccounts int
    accsCh      chan *bind.TransactOpts
}

// newAccountPool creates an accountPool instance.
@@ -28,10 +29,11 @@ func newAccountPool(ctx context.Context, minBalance *big.Int, client *ethclient.
    minBalance.SetString("100000000000000000000", 10)
}
accs := &accountPool{
    client:     client,
    minBalance: minBalance,
    accounts:   make(map[common.Address]*bind.TransactOpts, len(privs)),
    accsCh:     make(chan *bind.TransactOpts, len(privs)+2),
    client:      client,
    minBalance:  minBalance,
    accounts:    make(map[common.Address]*bind.TransactOpts, len(privs)),
    numAccounts: len(privs),
    accsCh:      make(chan *bind.TransactOpts, len(privs)+2),
}

// get chainID from client

@@ -12,7 +12,7 @@ import (
    "sync/atomic"
    "time"

    "github.com/scroll-tech/go-ethereum"
    geth "github.com/scroll-tech/go-ethereum"
    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/math"
@@ -151,12 +151,12 @@ func (s *Sender) ConfirmChan() <-chan *Confirmation {

// NumberOfAccounts returns the count of accounts.
func (s *Sender) NumberOfAccounts() int {
    return len(s.auths.accounts)
    return s.auths.numAccounts
}

func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte) (*FeeData, error) {
    // estimate gas limit
    gasLimit, err := s.client.EstimateGas(s.ctx, ethereum.CallMsg{From: auth.From, To: target, Value: value, Data: data})
    gasLimit, err := s.client.EstimateGas(s.ctx, geth.CallMsg{From: auth.From, To: target, Value: value, Data: data})
    if err != nil {
        return nil, err
    }

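Caching numAccounts at construction, as the two hunks above do, sidesteps a data race: len() on a map is a map read, so calling it while worker goroutines mutate accounts trips the race detector. Since the key set is fixed once the pool is built, the count can be snapshotted. A minimal sketch of the pattern (the names are illustrative, not the bridge's types):

package main

import "fmt"

type pool struct {
    accounts    map[string]int // mutated by worker goroutines after construction
    numAccounts int            // immutable after construction: safe to read concurrently
}

func newPool(keys []string) *pool {
    p := &pool{accounts: make(map[string]int, len(keys)), numAccounts: len(keys)}
    for _, k := range keys {
        p.accounts[k] = 0
    }
    return p
}

// Count is race-free even while accounts is being written elsewhere,
// because it never touches the map itself.
func (p *pool) Count() int { return p.numAccounts }

func main() {
    fmt.Println(newPool([]string{"a", "b"}).Count()) // 2
}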
156  bridge/tests/bridge_test.go  Normal file
@@ -0,0 +1,156 @@
package tests

import (
    "context"
    "crypto/ecdsa"
    "math/big"
    "scroll-tech/common/docker"
    "testing"

    "scroll-tech/bridge/config"
    "scroll-tech/bridge/mock_bridge"

    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/stretchr/testify/assert"
)

var (
    // config
    cfg *config.Config

    // private key
    privateKey *ecdsa.PrivateKey

    // docker container handlers.
    l1gethImg docker.ImgInstance
    l2gethImg docker.ImgInstance
    dbImg     docker.ImgInstance

    // clients
    l1Client *ethclient.Client
    l2Client *ethclient.Client

    // auth
    l1Auth *bind.TransactOpts
    l2Auth *bind.TransactOpts

    // l1 messenger contract
    l1MessengerInstance *mock_bridge.MockBridgeL1
    l1MessengerAddress  common.Address

    // l1 rollup contract
    l1RollupInstance *mock_bridge.MockBridgeL1
    l1RollupAddress  common.Address

    // l2 messenger contract
    l2MessengerInstance *mock_bridge.MockBridgeL2
    l2MessengerAddress  common.Address
)

func setupEnv(t *testing.T) {
    var err error
    privateKey, err = crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
    assert.NoError(t, err)

    // Load config.
    cfg, err = config.NewConfig("../config.json")
    assert.NoError(t, err)
    cfg.L1Config.Confirmations = 0
    cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
    cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
    cfg.L2Config.Confirmations = 0
    cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
    cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}

    // Create l1geth container.
    l1gethImg = docker.NewTestL1Docker(t)
    cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
    cfg.L1Config.Endpoint = l1gethImg.Endpoint()

    // Create l2geth container.
    l2gethImg = docker.NewTestL2Docker(t)
    cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
    cfg.L2Config.Endpoint = l2gethImg.Endpoint()

    // Create db container.
    dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
    cfg.DBConfig.DSN = dbImg.Endpoint()

    // Create l1geth and l2geth client.
    l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)
    assert.NoError(t, err)
    l2Client, err = ethclient.Dial(cfg.L2Config.Endpoint)
    assert.NoError(t, err)

    // Create l1 and l2 auth
    l1Auth = prepareAuth(t, l1Client, privateKey)
    l2Auth = prepareAuth(t, l2Client, privateKey)
}

func free(t *testing.T) {
    if dbImg != nil {
        assert.NoError(t, dbImg.Stop())
    }
    if l1gethImg != nil {
        assert.NoError(t, l1gethImg.Stop())
    }
    if l2gethImg != nil {
        assert.NoError(t, l2gethImg.Stop())
    }
}

func prepareContracts(t *testing.T) {
    var err error
    var tx *types.Transaction

    // L1 messenger contract
    _, tx, l1MessengerInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
    assert.NoError(t, err)
    l1MessengerAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
    assert.NoError(t, err)

    // L1 rollup contract
    _, tx, l1RollupInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
    assert.NoError(t, err)
    l1RollupAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
    assert.NoError(t, err)

    // L2 messenger contract
    _, tx, l2MessengerInstance, err = mock_bridge.DeployMockBridgeL2(l2Auth, l2Client)
    assert.NoError(t, err)
    l2MessengerAddress, err = bind.WaitDeployed(context.Background(), l2Client, tx)
    assert.NoError(t, err)

    cfg.L1Config.L1MessengerAddress = l1MessengerAddress
    cfg.L1Config.RollupContractAddress = l1RollupAddress
    cfg.L1Config.RelayerConfig.MessengerContractAddress = l2MessengerAddress

    cfg.L2Config.L2MessengerAddress = l2MessengerAddress
    cfg.L2Config.RelayerConfig.MessengerContractAddress = l1MessengerAddress
    cfg.L2Config.RelayerConfig.RollupContractAddress = l1RollupAddress
}

func prepareAuth(t *testing.T, client *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
    chainID, err := client.ChainID(context.Background())
    assert.NoError(t, err)
    auth, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID)
    assert.NoError(t, err)
    auth.Value = big.NewInt(0) // in wei
    assert.NoError(t, err)
    return auth
}

func TestFunction(t *testing.T) {
    setupEnv(t)

    // l1 rollup and watch rollup events
    t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)

    t.Cleanup(func() {
        free(t)
    })
}
132  bridge/tests/rollup_test.go  Normal file
@@ -0,0 +1,132 @@
package tests

import (
    "context"
    "math/big"
    "scroll-tech/database"
    "scroll-tech/database/migrate"
    "scroll-tech/database/orm"
    "testing"

    "scroll-tech/bridge/l1"
    "scroll-tech/bridge/l2"

    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/stretchr/testify/assert"
)

func testCommitBatchAndFinalizeBatch(t *testing.T) {
    // Create db handler and reset db.
    db, err := database.NewOrmFactory(cfg.DBConfig)
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
    defer db.Close()

    prepareContracts(t)

    // Create L2Relayer
    l2Cfg := cfg.L2Config
    l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)
    defer l2Relayer.Stop()

    // Create L1Watcher
    l1Cfg := cfg.L1Config
    l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)

    // add some blocks to db
    var traces []*types.BlockTrace
    var parentHash common.Hash
    for i := 1; i <= 10; i++ {
        header := types.Header{
            Number:     big.NewInt(int64(i)),
            ParentHash: parentHash,
            Difficulty: big.NewInt(0),
            BaseFee:    big.NewInt(0),
        }
        traces = append(traces, &types.BlockTrace{
            Header:       &header,
            StorageTrace: &types.StorageTrace{},
        })
        parentHash = header.Hash()
    }
    err = db.InsertBlockTraces(traces)
    assert.NoError(t, err)

    // add one batch to db
    dbTx, err := db.Beginx()
    assert.NoError(t, err)
    batchID, err := db.NewBatchInDBTx(dbTx,
        &orm.BlockInfo{
            Number:     traces[0].Header.Number.Uint64(),
            Hash:       traces[0].Header.Hash().String(),
            ParentHash: traces[0].Header.ParentHash.String(),
        },
        &orm.BlockInfo{
            Number:     traces[1].Header.Number.Uint64(),
            Hash:       traces[1].Header.Hash().String(),
            ParentHash: traces[1].Header.ParentHash.String(),
        },
        traces[0].Header.ParentHash.String(), 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
    assert.NoError(t, err)
    err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
        traces[0].Header.Number.Uint64(),
        traces[1].Header.Number.Uint64()}, batchID)
    assert.NoError(t, err)
    err = dbTx.Commit()
    assert.NoError(t, err)

    // process pending batch and check status
    l2Relayer.ProcessPendingBatches()
    status, err := db.GetRollupStatus(batchID)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupCommitting, status)
    commitTxHash, err := db.GetCommitTxHash(batchID)
    assert.NoError(t, err)
    assert.Equal(t, true, commitTxHash.Valid)
    commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
    assert.NoError(t, err)
    commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
    assert.NoError(t, err)
    assert.Equal(t, len(commitTxReceipt.Logs), 1)

    // fetch rollup events
    assert.NoError(t, err)
    err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
    assert.NoError(t, err)
    status, err = db.GetRollupStatus(batchID)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupCommitted, status)

    // add dummy proof
    tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
    assert.NoError(t, err)
    err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
    assert.NoError(t, err)

    // process committed batch and check status
    l2Relayer.ProcessCommittedBatches()
    status, err = db.GetRollupStatus(batchID)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupFinalizing, status)
    finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
    assert.NoError(t, err)
    assert.Equal(t, true, finalizeTxHash.Valid)
    finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
    assert.NoError(t, err)
    finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
    assert.NoError(t, err)
    assert.Equal(t, len(finalizeTxReceipt.Logs), 1)

    // fetch rollup events
    assert.NoError(t, err)
    err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
    assert.NoError(t, err)
    status, err = db.GetRollupStatus(batchID)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupFinalized, status)
}
@@ -8,6 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x

# Build bridge

@@ -1,5 +1,5 @@
assets/
docs/
integration-test/
l2geth/
rpc-gateway/
*target/*

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.17-rust-nightly-2022-08-23 as chef
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-08-23 as chef
WORKDIR app

FROM chef as planner
@@ -24,6 +24,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x

@@ -1,6 +1,6 @@
assets/
contracts/
docs/
integration-test/
l2geth/
rpc-gateway/
*target/*

@@ -8,6 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x

# Build db_cli

@@ -1,6 +1,6 @@
assets/
contracts/
docs/
integration-test/
l2geth/
rpc-gateway/
*target/*

14  build/post-test-report-coverage.sh  Executable file
@@ -0,0 +1,14 @@
#!/bin/bash
set -uex
${GOROOT}/bin/bin/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.db.txt > coverage.db.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.common.txt > coverage.common.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml

npx cobertura-merge -o cobertura.xml \
    package1=coverage.bridge.xml \
    package2=coverage.db.xml \
    package3=coverage.common.xml \
    package4=coverage.coordinator.xml \
    package5=coverage.integration.xml
168  common/cmd/cmd.go  Normal file
@@ -0,0 +1,168 @@
package cmd

import (
    "fmt"
    "os"
    "os/exec"
    "strings"
    "sync"
    "testing"
    "time"

    "github.com/docker/docker/pkg/reexec"
    cmap "github.com/orcaman/concurrent-map"
    "github.com/stretchr/testify/assert"
)

var verbose bool

func init() {
    v := os.Getenv("LOG_DOCKER")
    if v == "true" || v == "TRUE" {
        verbose = true
    }
}

type checkFunc func(buf string)

// Cmd struct
type Cmd struct {
    *testing.T

    name string
    args []string

    mu  sync.Mutex
    cmd *exec.Cmd

    checkFuncs cmap.ConcurrentMap //map[string]checkFunc

    //stdout bytes.Buffer
    Err error
}

// NewCmd creates a Cmd instance.
func NewCmd(t *testing.T, name string, args ...string) *Cmd {
    return &Cmd{
        T:          t,
        checkFuncs: cmap.New(),
        name:       name,
        args:       args,
    }
}

// RunApp exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (t *Cmd) RunApp(parallel bool) {
    t.Log("cmd: ", append([]string{t.name}, t.args...))
    cmd := &exec.Cmd{
        Path:   reexec.Self(),
        Args:   append([]string{t.name}, t.args...),
        Stderr: t,
        Stdout: t,
    }
    if parallel {
        go func() {
            _ = cmd.Run()
        }()
    } else {
        _ = cmd.Run()
    }
    t.mu.Lock()
    t.cmd = cmd
    t.mu.Unlock()
}

// WaitExit waits until the process exits.
func (t *Cmd) WaitExit() {
    // Wait until all the check funcs are finished or the test has failed.
    for !(t.Failed() || t.checkFuncs.IsEmpty()) {
        <-time.After(time.Millisecond * 500)
    }

    // Send interrupt signal.
    t.mu.Lock()
    _ = t.cmd.Process.Signal(os.Interrupt)
    t.mu.Unlock()
}

// Interrupt sends an interrupt signal.
func (t *Cmd) Interrupt() {
    t.mu.Lock()
    t.Err = t.cmd.Process.Signal(os.Interrupt)
    t.mu.Unlock()
}

// RegistFunc registers a check func.
func (t *Cmd) RegistFunc(key string, check checkFunc) {
    t.checkFuncs.Set(key, check)
}

// UnRegistFunc unregisters a check func.
func (t *Cmd) UnRegistFunc(key string) {
    t.checkFuncs.Pop(key)
}

// ExpectWithTimeout waits for the keyword to appear within the timeout.
func (t *Cmd) ExpectWithTimeout(parallel bool, timeout time.Duration, keyword string) {
    if keyword == "" {
        return
    }
    okCh := make(chan struct{}, 1)
    t.RegistFunc(keyword, func(buf string) {
        if strings.Contains(buf, keyword) {
            select {
            case okCh <- struct{}{}:
            default:
                return
            }
        }
    })

    waitResult := func() {
        defer t.UnRegistFunc(keyword)
        select {
        case <-okCh:
            return
        case <-time.After(timeout):
            assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
        }
    }

    if parallel {
        go waitResult()
    } else {
        waitResult()
    }
}

func (t *Cmd) runCmd() {
    cmd := exec.Command(t.args[0], t.args[1:]...) //nolint:gosec
    cmd.Stdout = t
    cmd.Stderr = t
    _ = cmd.Run()
}

// RunCmd runs the command, in a goroutine when parallel is true.
func (t *Cmd) RunCmd(parallel bool) {
    t.Log("cmd: ", t.args)
    if parallel {
        go t.runCmd()
    } else {
        t.runCmd()
    }
}

func (t *Cmd) Write(data []byte) (int, error) {
    out := string(data)
    if verbose {
        t.Logf("%s: %v", t.name, out)
    } else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
        t.Logf("%s: %v", t.name, out)
    }
    go t.checkFuncs.IterCb(func(_ string, value interface{}) {
        check := value.(checkFunc)
        check(out)
    })
    return len(data), nil
}
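A sketch of how the new Cmd helper is typically driven from a test: start the re-exec'ed binary in the background, wait for a log keyword, then interrupt it on exit. The binary name and keyword below are hypothetical; the name must have been registered via reexec.Register in the test package's init:

package mytest

import (
    "testing"
    "time"

    "scroll-tech/common/cmd"
)

func TestRunBridge(t *testing.T) {
    // "bridge-test" is a hypothetical reexec-registered name.
    app := cmd.NewCmd(t, "bridge-test", "--log.debug")

    // Start the process in the background (argv[0] = "bridge-test" triggers reexec).
    app.RunApp(true)
    defer app.WaitExit()

    // Fail the test if the startup keyword never shows up in stdout/stderr.
    app.ExpectWithTimeout(false, 10*time.Second, "Start bridge successfully")
}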
42  common/cmd/cmd_test.go  Normal file
@@ -0,0 +1,42 @@
package cmd_test

import (
    "fmt"
    "strings"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"

    "scroll-tech/common/cmd"
)

func TestCmd(t *testing.T) {
    app := cmd.NewCmd(t, "curTime", "date", "+%Y-%m-%d")

    tm := time.Now()
    curTime := fmt.Sprintf("%d-%02d-%02d", tm.Year(), tm.Month(), tm.Day())

    okCh := make(chan struct{}, 1)
    app.RegistFunc(curTime, func(buf string) {
        if strings.Contains(buf, curTime) {
            select {
            case okCh <- struct{}{}:
            default:
                return
            }
        }
    })
    defer app.UnRegistFunc(curTime)

    // Run cmd.
    app.RunCmd(true)

    // Wait result.
    select {
    case <-okCh:
        return
    case <-time.After(time.Second):
        assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", curTime))
    }
}
@@ -1,98 +0,0 @@
package docker

import (
    "errors"
    "os"
    "os/exec"
    "strings"
    "sync"
    "testing"
)

var verbose bool

func init() {
    v := os.Getenv("LOG_DOCKER")
    if v == "true" || v == "TRUE" {
        verbose = true
    }
}

type checkFunc func(buf string)

// Cmd struct
type Cmd struct {
    *testing.T

    checkFuncs sync.Map //map[string]checkFunc

    //stdout bytes.Buffer
    errMsg chan error
}

// NewCmd create Cmd instance.
func NewCmd(t *testing.T) *Cmd {
    cmd := &Cmd{
        T: t,
        //stdout: bytes.Buffer{},
        errMsg: make(chan error, 2),
    }
    // Handle panic.
    cmd.RegistFunc("panic", func(buf string) {
        if strings.Contains(buf, "panic") {
            cmd.errMsg <- errors.New(buf)
        }
    })
    return cmd
}

// RegistFunc register check func
func (t *Cmd) RegistFunc(key string, check checkFunc) {
    t.checkFuncs.Store(key, check)
}

// UnRegistFunc unregister check func
func (t *Cmd) UnRegistFunc(key string) {
    if _, ok := t.checkFuncs.Load(key); ok {
        t.checkFuncs.Delete(key)
    }
}

// RunCmd parallel running when parallel is true.
func (t *Cmd) RunCmd(args []string, parallel bool) {
    t.Log("RunCmd cmd", args)
    if parallel {
        go t.runCmd(args)
    } else {
        t.runCmd(args)
    }
}

// ErrMsg return error output channel
func (t *Cmd) ErrMsg() <-chan error {
    return t.errMsg
}

func (t *Cmd) Write(data []byte) (int, error) {
    out := string(data)
    if verbose {
        t.Logf(out)
    } else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
        t.Logf(out)
    }
    go func(content string) {
        t.checkFuncs.Range(func(key, value any) bool {
            check := value.(checkFunc)
            check(content)
            return true
        })
    }(out)
    return len(data), nil
}

func (t *Cmd) runCmd(args []string) {
    cmd := exec.Command(args[0], args[1:]...) //nolint:gosec
    cmd.Stdout = t
    cmd.Stderr = t
    _ = cmd.Run()
}
@@ -8,6 +8,9 @@ import (
    "time"

    "github.com/docker/docker/api/types"

    "scroll-tech/common/cmd"
    "scroll-tech/common/utils"
)

// ImgDB the postgres image manager.
@@ -21,19 +24,20 @@ type ImgDB struct {
    password string

    running bool
    *Cmd
    cmd *cmd.Cmd
}

// NewImgDB return postgres db img instance.
func NewImgDB(t *testing.T, image, password, dbName string, port int) ImgInstance {
    return &ImgDB{
    img := &ImgDB{
        image:    image,
        name:     fmt.Sprintf("%s-%s_%d", image, dbName, port),
        password: password,
        dbName:   dbName,
        port:     port,
        Cmd:      NewCmd(t),
    }
    img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
    return img
}

// Start postgres db container.
@@ -42,7 +46,7 @@ func (i *ImgDB) Start() error {
    if id != "" {
        return fmt.Errorf("container already exist, name: %s", i.name)
    }
    i.Cmd.RunCmd(i.prepare(), true)
    i.cmd.RunCmd(true)
    i.running = i.isOk()
    if !i.running {
        _ = i.Stop()
@@ -59,14 +63,13 @@ func (i *ImgDB) Stop() error {
    i.running = false

    ctx := context.Background()
    // check if container is running, stop the running container.
    id := GetContainerID(i.name)
    if id != "" {
        timeout := time.Second * 3
        if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
            return err
        }
        i.id = id
    // stop the running container.
    if i.id == "" {
        i.id = GetContainerID(i.name)
    }
    timeout := time.Second * 3
    if err := cli.ContainerStop(ctx, i.id, &timeout); err != nil {
        return err
    }
    // remove the stopped container.
    return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
@@ -94,7 +97,7 @@ func (i *ImgDB) prepare() []string {
func (i *ImgDB) isOk() bool {
    keyword := "database system is ready to accept connections"
    okCh := make(chan struct{}, 1)
    i.RegistFunc(keyword, func(buf string) {
    i.cmd.RegistFunc(keyword, func(buf string) {
        if strings.Contains(buf, keyword) {
            select {
            case okCh <- struct{}{}:
@@ -103,14 +106,16 @@ func (i *ImgDB) isOk() bool {
        }
    })
    defer i.UnRegistFunc(keyword)
    defer i.cmd.UnRegistFunc(keyword)

    select {
    case <-okCh:
        time.Sleep(time.Millisecond * 1500)
        i.id = GetContainerID(i.name)
        utils.TryTimes(3, func() bool {
            i.id = GetContainerID(i.name)
            return i.id != ""
        })
        return i.id != ""
    case <-time.NewTimer(time.Second * 10).C:
    case <-time.After(time.Second * 20):
        return false
    }
}

@@ -9,6 +9,9 @@ import (
    "time"

    "github.com/docker/docker/api/types"

    "scroll-tech/common/cmd"
    "scroll-tech/common/utils"
)

// ImgGeth the geth image manager include l1geth and l2geth.
@@ -23,20 +26,21 @@ type ImgGeth struct {
    wsPort int

    running bool
    *Cmd
    cmd *cmd.Cmd
}

// NewImgGeth return geth img instance.
func NewImgGeth(t *testing.T, image, volume, ipc string, hPort, wPort int) ImgInstance {
    return &ImgGeth{
    img := &ImgGeth{
        image:    image,
        name:     fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
        volume:   volume,
        ipcPath:  ipc,
        httpPort: hPort,
        wsPort:   wPort,
        Cmd:      NewCmd(t),
    }
    img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
    return img
}

// Start run image and check if it is running healthily.
@@ -45,7 +49,7 @@ func (i *ImgGeth) Start() error {
    if id != "" {
        return fmt.Errorf("container already exist, name: %s", i.name)
    }
    i.Cmd.RunCmd(i.prepare(), true)
    i.cmd.RunCmd(true)
    i.running = i.isOk()
    if !i.running {
        _ = i.Stop()
@@ -72,7 +76,7 @@ func (i *ImgGeth) Endpoint() string {
func (i *ImgGeth) isOk() bool {
    keyword := "WebSocket enabled"
    okCh := make(chan struct{}, 1)
    i.RegistFunc(keyword, func(buf string) {
    i.cmd.RegistFunc(keyword, func(buf string) {
        if strings.Contains(buf, keyword) {
            select {
            case okCh <- struct{}{}:
@@ -81,13 +85,16 @@ func (i *ImgGeth) isOk() bool {
        }
    })
    defer i.UnRegistFunc(keyword)
    defer i.cmd.UnRegistFunc(keyword)

    select {
    case <-okCh:
        i.id = GetContainerID(i.name)
        utils.TryTimes(3, func() bool {
            i.id = GetContainerID(i.name)
            return i.id != ""
        })
        return i.id != ""
    case <-time.NewTimer(time.Second * 10).C:
    case <-time.After(time.Second * 10):
        return false
    }
}

@@ -10,12 +10,19 @@ import (
    "github.com/stretchr/testify/assert"
)

func TestL1Geth(t *testing.T) {
func TestDocker(t *testing.T) {
    t.Parallel()

    t.Run("testL1Geth", testL1Geth)
    t.Run("testL2Geth", testL2Geth)
    t.Run("testDB", testDB)
}

func testL1Geth(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    img := NewImgGeth(t, "scroll_l1geth", "", "", 8535, 0)
    assert.NoError(t, img.Start())
    img := NewTestL1Docker(t)
    defer img.Stop()

    client, err := ethclient.Dial(img.Endpoint())
@@ -26,12 +33,11 @@ func TestL1Geth(t *testing.T) {
    t.Logf("chainId: %s", chainID.String())
}

func TestL2Geth(t *testing.T) {
func testL2Geth(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    img := NewImgGeth(t, "scroll_l2geth", "", "", 8535, 0)
    assert.NoError(t, img.Start())
    img := NewTestL2Docker(t)
    defer img.Stop()

    client, err := ethclient.Dial(img.Endpoint())
@@ -42,7 +48,7 @@ func TestL2Geth(t *testing.T) {
    t.Logf("chainId: %s", chainID.String())
}

func TestDB(t *testing.T) {
func testDB(t *testing.T) {
    driverName := "postgres"
    dbImg := NewTestDBDocker(t, driverName)
    defer dbImg.Stop()

@@ -36,7 +36,7 @@ func GetContainerID(name string) string {
        Filters: filter,
    })
    if len(lst) > 0 {
        return lst[0].Names[0]
        return lst[0].ID
    }
    return ""
}

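The one-line change to GetContainerID matters because entries in Names from the Docker list API carry a leading slash (e.g. "/scroll_l1geth-123"), while ID is the canonical identifier that ContainerStop and ContainerRemove expect. A sketch of the same lookup against the Docker SDK, with the error handling spelled out:

package main

import (
    "context"
    "fmt"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/client"
)

// containerID resolves a container name to its canonical ID.
// lst[0].Names[0] would include a leading slash ("/name"), which is why
// the diff switches to lst[0].ID.
func containerID(cli *client.Client, name string) (string, error) {
    args := filters.NewArgs(filters.Arg("name", name))
    lst, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: args})
    if err != nil || len(lst) == 0 {
        return "", err
    }
    return lst[0].ID, nil
}

func main() {
    cli, err := client.NewClientWithOpts(client.FromEnv)
    if err != nil {
        panic(err)
    }
    id, _ := containerID(cli, "scroll_l1geth")
    fmt.Println(id)
}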
@@ -1,11 +1,16 @@
package docker

import (
    "context"
    "crypto/rand"
    "math/big"
    "testing"

    "github.com/jmoiron/sqlx"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/stretchr/testify/assert"

    "scroll-tech/common/utils"
)

var (
@@ -19,6 +24,18 @@ func NewTestL1Docker(t *testing.T) ImgInstance {
    id, _ := rand.Int(rand.Reader, big.NewInt(2000))
    imgL1geth := NewImgGeth(t, "scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
    assert.NoError(t, imgL1geth.Start())

    // try 3 times to get the chainID until it is ok.
    utils.TryTimes(3, func() bool {
        client, _ := ethclient.Dial(imgL1geth.Endpoint())
        if client != nil {
            if _, err := client.ChainID(context.Background()); err == nil {
                return true
            }
        }
        return false
    })

    return imgL1geth
}

@@ -27,6 +44,18 @@ func NewTestL2Docker(t *testing.T) ImgInstance {
    id, _ := rand.Int(rand.Reader, big.NewInt(2000))
    imgL2geth := NewImgGeth(t, "scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
    assert.NoError(t, imgL2geth.Start())

    // try 3 times to get the chainID until it is ok.
    utils.TryTimes(3, func() bool {
        client, _ := ethclient.Dial(imgL2geth.Endpoint())
        if client != nil {
            if _, err := client.ChainID(context.Background()); err == nil {
                return true
            }
        }
        return false
    })

    return imgL2geth
}

@@ -35,5 +64,15 @@ func NewTestDBDocker(t *testing.T, driverName string) ImgInstance {
    id, _ := rand.Int(rand.Reader, big.NewInt(2000))
    imgDB := NewImgDB(t, driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
    assert.NoError(t, imgDB.Start())

    // try 5 times until the db is ready.
    utils.TryTimes(5, func() bool {
        db, _ := sqlx.Open(driverName, imgDB.Endpoint())
        if db != nil {
            return db.Ping() == nil
        }
        return false
    })

    return imgDB
}

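utils.TryTimes is used throughout these helpers but its body never appears in the diff. From the call sites it retries a predicate up to n times; the real implementation lives in scroll-tech/common/utils and may differ (for example in its sleep interval), but a minimal sketch consistent with this usage is:

package main

import (
    "fmt"
    "time"
)

// TryTimes calls f up to n times, sleeping briefly between attempts,
// and stops early once f reports success.
func TryTimes(n int, f func() bool) bool {
    for i := 0; i < n; i++ {
        if f() {
            return true
        }
        time.Sleep(500 * time.Millisecond)
    }
    return false
}

func main() {
    attempts := 0
    ok := TryTimes(3, func() bool {
        attempts++
        return attempts == 2 // succeeds on the second try
    })
    fmt.Println(ok, attempts) // true 2
}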
@@ -3,12 +3,13 @@ module scroll-tech/common
go 1.18

require (
    github.com/docker/docker v20.10.17+incompatible
    github.com/docker/docker v20.10.21+incompatible
    github.com/jmoiron/sqlx v1.3.5
    github.com/lib/pq v1.10.6
    github.com/mattn/go-colorable v0.1.8
    github.com/mattn/go-isatty v0.0.14
    github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea
    github.com/orcaman/concurrent-map v1.0.0
    github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
    github.com/stretchr/testify v1.8.0
    github.com/urfave/cli/v2 v2.10.2
    golang.org/x/sync v0.1.0

@@ -112,8 +112,8 @@ github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwu
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -364,6 +364,8 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
@@ -402,8 +404,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea h1:KYlmCH4cDMGxQzaYoSK8+DF53POGpAmnzusAtBWzEjA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=

4  common/libzkp/impl/Cargo.lock  generated
@@ -3389,7 +3389,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/common-rs#4a299c70835179be7fcf007ebb122b428d063c56"
source = "git+https://github.com/scroll-tech/scroll-zkevm#51b9b022fcdb45c505ea351f9dca8f2cfbec4d86"
dependencies = [
 "base64 0.13.0",
 "blake2",
@@ -3816,7 +3816,7 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f"
[[package]]
name = "zkevm"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/common-rs#4a299c70835179be7fcf007ebb122b428d063c56"
source = "git+https://github.com/scroll-tech/scroll-zkevm#51b9b022fcdb45c505ea351f9dca8f2cfbec4d86"
dependencies = [
 "anyhow",
 "blake2",

@@ -8,8 +8,8 @@ edition = "2021"
crate-type = ["staticlib"]

[dependencies]
zkevm = { git = "https://github.com/scroll-tech/common-rs" }
types = { git = "https://github.com/scroll-tech/common-rs" }
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm" }
types = { git = "https://github.com/scroll-tech/scroll-zkevm" }

log = "0.4"
env_logger = "0.9.0"

@@ -41,7 +41,7 @@ type Identity struct {
|
||||
// Roller public key
|
||||
PublicKey string `json:"publicKey"`
|
||||
// Version is common.Version+ZK_VERSION. Use the following to check the latest ZK_VERSION version.
|
||||
// curl -sL https://api.github.com/repos/scroll-tech/common-rs/commits | jq -r ".[0].sha"
|
||||
// curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha"
|
||||
Version string `json:"version"`
|
||||
// Random unique token generated by manager
|
||||
Token string `json:"token"`
|
||||
|
||||
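As a side note, the curl/jq one-liner in the comment above can also be reproduced in Go. The sketch below is not part of this changeset; it assumes plain network access to the GitHub API and the unauthenticated rate limit.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Same query as: curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits
	resp, err := http.Get("https://api.github.com/repos/scroll-tech/scroll-zkevm/commits")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the field we need; jq -r ".[0].sha" does the same selection.
	var commits []struct {
		SHA string `json:"sha"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&commits); err != nil {
		panic(err)
	}
	if len(commits) > 0 {
		fmt.Println(commits[0].SHA)
	}
}
```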
@@ -4,6 +4,7 @@ import (
    "testing"
    "time"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/stretchr/testify/assert"
)
@@ -23,4 +24,10 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
    ok, err := authMsg.Verify()
    assert.NoError(t, err)
    assert.Equal(t, true, ok)

    // Check public key is ok.
    pub, err := authMsg.PublicKey()
    assert.NoError(t, err)
    pubkey := crypto.CompressPubkey(&privkey.PublicKey)
    assert.Equal(t, pub, common.Bytes2Hex(pubkey))
}

@@ -1,12 +0,0 @@
package utils

import "os"

// GetEnvWithDefault gets the value from env; if it is empty, the default is used.
func GetEnvWithDefault(key string, defult string) string {
    val := os.Getenv(key)
    if len(val) == 0 {
        val = defult
    }
    return val
}

@@ -1,6 +1,8 @@
package utils

import "github.com/urfave/cli/v2"
import (
    "github.com/urfave/cli/v2"
)

var (
    // CommonFlags is used for app common flags in different modules

77
common/utils/rpc_test.go
Normal file
@@ -0,0 +1,77 @@
package utils

import (
    "context"
    "testing"

    "github.com/scroll-tech/go-ethereum/rpc"
    "github.com/stretchr/testify/assert"
)

type testService struct{}

type echoArgs struct {
    S string
}

type echoResult struct {
    Name string
    ID   int
    Args *echoArgs
}

func (s *testService) NoArgsRets() {}

func (s *testService) Echo(str string, i int, args *echoArgs) echoResult {
    return echoResult{str, i, args}
}

func TestStartHTTPEndpoint(t *testing.T) {
    endpoint := "localhost:18080"
    handler, _, err := StartHTTPEndpoint(endpoint, []rpc.API{
        {
            Public:    true,
            Namespace: "test",
            Service:   new(testService),
        },
    })
    assert.NoError(t, err)
    defer handler.Shutdown(context.Background())

    client, err := rpc.Dial("http://" + endpoint)
    assert.NoError(t, err)

    assert.NoError(t, client.Call(nil, "test_noArgsRets"))

    result := echoResult{}
    assert.NoError(t, client.Call(&result, "test_echo", "test", 0, &echoArgs{S: "test"}))
    assert.Equal(t, 0, result.ID)
    assert.Equal(t, "test", result.Name)

    defer client.Close()
}

func TestStartWSEndpoint(t *testing.T) {
    endpoint := "localhost:18081"
    handler, _, err := StartWSEndpoint(endpoint, []rpc.API{
        {
            Public:    true,
            Namespace: "test",
            Service:   new(testService),
        },
    })
    assert.NoError(t, err)
    defer handler.Shutdown(context.Background())

    client, err := rpc.Dial("ws://" + endpoint)
    assert.NoError(t, err)

    assert.NoError(t, client.Call(nil, "test_noArgsRets"))

    result := echoResult{}
    assert.NoError(t, client.Call(&result, "test_echo", "test", 0, &echoArgs{S: "test"}))
    assert.Equal(t, 0, result.ID)
    assert.Equal(t, "test", result.Name)

    defer client.Close()
}

22
common/utils/simulation.go
Normal file
@@ -0,0 +1,22 @@
package utils

import (
    "fmt"
    "os"

    "github.com/docker/docker/pkg/reexec"
    "github.com/urfave/cli/v2"
)

// RegisterSimulation registers an initializer function for the integration test.
func RegisterSimulation(app *cli.App, name string) {
    // Run the app for the integration test.
    reexec.Register(name, func() {
        if err := app.Run(os.Args); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        os.Exit(0)
    })
    reexec.Init()
}
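A usage sketch (not part of this changeset): once RegisterSimulation has registered an app under a name, a test can fork the current binary under that name with reexec.Command and drive it like an external process. The blank import pulls in the coordinator app package introduced later in this changeset, whose init() performs the registration; the test name itself is hypothetical.

```go
package utils_test

import (
	"fmt"
	"testing"

	"github.com/docker/docker/pkg/reexec"

	_ "scroll-tech/coordinator/cmd/app" // registers "coordinator-test" in init()
)

// A minimal sketch, assuming RegisterSimulation(app, "coordinator-test")
// ran from the imported package's init().
func TestSimulationSketch(t *testing.T) {
	// reexec.Command re-invokes the current test binary with Args[0] set to
	// the registered name; reexec.Init() in the child dispatches to the
	// registered initializer instead of the normal entry point.
	cmd := reexec.Command("coordinator-test", "--version")
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(string(out))
}
```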
13
common/utils/utils.go
Normal file
@@ -0,0 +1,13 @@
package utils

import "time"

// TryTimes runs the given function up to `times` times, stopping early once it returns true.
func TryTimes(times int, run func() bool) {
    for i := 0; i < times; i++ {
        if run() {
            return
        }
        time.Sleep(time.Millisecond * 500)
    }
}
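A usage sketch (hypothetical caller, not part of this changeset): TryTimes polls a condition with a fixed 500 ms backoff, which suits waiting for a service to come up. The address below is a placeholder.

```go
package main

import (
	"fmt"
	"net"

	"scroll-tech/common/utils"
)

func main() {
	// Poll up to 20 times (about 10s at 500ms per attempt) for a port to open.
	var conn net.Conn
	utils.TryTimes(20, func() bool {
		c, err := net.Dial("tcp", "localhost:8545") // placeholder endpoint
		if err != nil {
			return false
		}
		conn = c
		return true
	})
	if conn != nil {
		fmt.Println("endpoint is up")
		conn.Close()
	}
}
```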
@@ -5,7 +5,7 @@ import (
    "runtime/debug"
)

var tag = "prealpha-v8.2"
var tag = "prealpha-v9.2"

var commit = func() string {
    if info, ok := debug.ReadBuildInfo(); ok {
@@ -22,7 +22,7 @@ var commit = func() string {
    return ""
}()

// ZK_VERSION is the commit id of common/libzkp/impl/cargo.lock/common-rs
// ZK_VERSION is the commit id of common/libzkp/impl/cargo.lock/scroll-zkevm
var ZK_VERSION string

// Version denotes the version of the scroll protocol, including the l2geth, relayer, coordinator, roller, contracts, etc.
@@ -232,6 +232,50 @@ function initialize(uint256 _chainId) external nonpayable
|---|---|---|
| _chainId | uint256 | undefined |

### isBlockFinalized

```solidity
function isBlockFinalized(bytes32 _blockHash) external view returns (bool)
```

Return whether the block is finalized by block hash.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _blockHash | bytes32 | undefined |

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |

### isBlockFinalized

```solidity
function isBlockFinalized(uint256 _blockHeight) external view returns (bool)
```

Return whether the block is finalized by block height.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _blockHeight | uint256 | undefined |

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |

### lastFinalizedBatchID

```solidity
@@ -97,7 +97,7 @@ contract L1ScrollMessenger is OwnableUpgradeable, PausableUpgradeable, ScrollMes
    require(!isMessageExecuted[_msghash], "Message successfully executed");

    // @todo check proof
    require(IZKRollup(rollup).verifyMessageStateProof(_proof.batchIndex, _proof.blockHeight), "invalid state proof");
    require(IZKRollup(rollup).isBlockFinalized(_proof.blockHeight), "invalid state proof");
    require(ZkTrieVerifier.verifyMerkleProof(_proof.merkleProof), "invalid proof");

    // @todo check `_to` address to avoid attack.

@@ -59,6 +59,14 @@ interface IZKRollup {

  /**************************************** View Functions ****************************************/

  /// @notice Return whether the block is finalized by block hash.
  /// @param blockHash The hash of the block to query.
  function isBlockFinalized(bytes32 blockHash) external view returns (bool);

  /// @notice Return whether the block is finalized by block height.
  /// @param blockHeight The height of the block to query.
  function isBlockFinalized(uint256 blockHeight) external view returns (bool);

  /// @notice Return the message hash by index.
  /// @param _index The index to query.
  function getMessageHashByIndex(uint256 _index) external view returns (bytes32);

@@ -89,6 +89,24 @@ contract ZKRollup is OwnableUpgradeable, IZKRollup {

  /**************************************** View Functions ****************************************/

  /// @inheritdoc IZKRollup
  function isBlockFinalized(bytes32 _blockHash) external view returns (bool) {
    // block not committed
    if (blocks[_blockHash].transactionRoot == bytes32(0)) return false;

    uint256 _batchIndex = blocks[_blockHash].batchIndex;
    bytes32 _batchId = finalizedBatches[_batchIndex];
    return _batchId != bytes32(0);
  }

  /// @inheritdoc IZKRollup
  function isBlockFinalized(uint256 _blockHeight) external view returns (bool) {
    bytes32 _batchID = lastFinalizedBatchID;
    bytes32 _batchHash = batches[_batchID].batchHash;
    uint256 _maxHeight = blocks[_batchHash].blockHeight;
    return _blockHeight <= _maxHeight;
  }

  /// @inheritdoc IZKRollup
  function getMessageHashByIndex(uint256 _index) external view returns (bytes32) {
    return messageQueue[_index];
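To illustrate how a client could query the new view function, here is a hedged Go sketch that issues an eth_call with a hand-packed ABI payload. The endpoint and rollup address are placeholders, and in practice a generated contract binding would normally be used instead of manual packing.

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}

	// 4-byte selector of isBlockFinalized(uint256) plus one ABI-encoded word.
	selector := crypto.Keccak256([]byte("isBlockFinalized(uint256)"))[:4]
	arg := common.BigToHash(big.NewInt(1024)) // hypothetical block height
	data := append(selector, arg.Bytes()...)

	rollup := common.HexToAddress("0x0000000000000000000000000000000000000000") // placeholder ZKRollup address
	ret, err := client.CallContract(context.Background(), ethereum.CallMsg{To: &rollup, Data: data}, nil)
	if err != nil {
		panic(err)
	}
	// A Solidity bool comes back as a 32-byte word; the last byte is 0 or 1.
	fmt.Println("finalized:", len(ret) == 32 && ret[31] == 1)
}
```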
@@ -5,9 +5,9 @@ IMAGE_VERSION=latest
REPO_ROOT_DIR=./..

ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
	ZK_VERSION=$(shell grep -m 1 "common-rs" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
	ZK_VERSION=$(shell grep -m 1 "scroll-zkevm" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
	ZK_VERSION=$(shell grep -m 1 "common-rs" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
	ZK_VERSION=$(shell grep -m 1 "scroll-zkevm" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif

test:
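For clarity, the Makefile's ZK_VERSION extraction (first "scroll-zkevm" line, fragment after `#`, first 7 characters) can be mirrored in Go. This sketch is illustrative only; the Cargo.lock path is relative and assumed.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("../common/libzkp/impl/Cargo.lock") // assumed path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.Contains(line, "scroll-zkevm") {
			continue
		}
		// The fragment after '#' is the pinned commit; keep the first 7 chars,
		// matching: cut -d "#" -f2 | cut -c-7
		if _, frag, ok := strings.Cut(line, "#"); ok && len(frag) >= 7 {
			fmt.Println(frag[:7])
		}
		return // grep -m 1: stop at the first match
	}
}
```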
@@ -17,15 +17,13 @@ libzkp:
	cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
	cp -r ../common/libzkp/interface ./verifier/lib

coordinator: ## Builds the Coordinator instance.
	cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
	cp -r ../common/libzkp/interface ./verifier/lib
coordinator: libzkp ## Builds the Coordinator instance.
	go build -ldflags "-X scroll-tech/common/version.ZK_VERSION=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd

test-verifier:
test-verifier: libzkp
	go test -tags ffi -timeout 0 -v ./verifier

test-gpu-verifier:
test-gpu-verifier: libzkp
	go test -tags="gpu ffi" -timeout 0 -v ./verifier

lint: ## Lint the files - used for CI

@@ -9,17 +9,6 @@ make clean
make coordinator
```

## db config

* db settings in config

```bash
# DB_DSN: db data source name
export DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
# DB_DRIVER: db driver name
export DB_DRIVER="postgres"
```

## Start

* use default ports and config.json
@@ -74,7 +74,7 @@ func (m *Manager) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.
    go func() {
        defer func() {
            m.freeRoller(pubkey)
            log.Info("roller unregister", "name", authMsg.Identity.Name)
            log.Info("roller unregister", "name", authMsg.Identity.Name, "pubkey", pubkey)
        }()

        for {
@@ -82,14 +82,14 @@ func (m *Manager) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.
            case task := <-taskCh:
                notifier.Notify(rpcSub.ID, task) //nolint
            case err := <-rpcSub.Err():
                log.Warn("client stopped the ws connection", "err", err)
                log.Warn("client stopped the ws connection", "name", authMsg.Identity.Name, "pubkey", pubkey, "err", err)
                return
            case <-notifier.Closed():
                return
            }
        }
    }()
    log.Info("roller register", "name", authMsg.Identity.Name, "version", authMsg.Identity.Version)
    log.Info("roller register", "name", authMsg.Identity.Name, "pubkey", pubkey, "version", authMsg.Identity.Version)

    return rpcSub, nil
}
@@ -116,6 +116,5 @@ func (m *Manager) SubmitProof(proof *message.ProofMsg) (bool, error) {
    }
    defer m.freeTaskIDForRoller(pubkey, proof.ID)

    log.Info("Received zk proof", "proof id", proof.ID, "result", true)
    return true, nil
}

@@ -1,27 +0,0 @@
package client

import (
    "context"

    "github.com/scroll-tech/go-ethereum"

    "scroll-tech/common/message"
)

// RequestToken generates token for roller
func (c *Client) RequestToken(ctx context.Context, authMsg *message.AuthMsg) (string, error) {
    var token string
    err := c.client.CallContext(ctx, &token, "roller_requestToken", authMsg)
    return token, err
}

// RegisterAndSubscribe subscribe roller and register, verified by sign data.
func (c *Client) RegisterAndSubscribe(ctx context.Context, taskCh chan *message.TaskMsg, authMsg *message.AuthMsg) (ethereum.Subscription, error) {
    return c.client.Subscribe(ctx, "roller", taskCh, "register", authMsg)
}

// SubmitProof get proof from roller.
func (c *Client) SubmitProof(ctx context.Context, proof *message.ProofMsg) (bool, error) {
    var ok bool
    return ok, c.client.CallContext(ctx, &ok, "roller_submitProof", proof)
}

@@ -3,7 +3,10 @@ package client
import (
    "context"

    "github.com/scroll-tech/go-ethereum"
    "github.com/scroll-tech/go-ethereum/rpc"

    "scroll-tech/common/message"
)

// Client defines typed wrappers for the Ethereum RPC API.
@@ -29,3 +32,21 @@ func DialContext(ctx context.Context, rawurl string) (*Client, error) {
func NewClient(c *rpc.Client) *Client {
    return &Client{client: c}
}

// RequestToken requests a registration token for the roller.
func (c *Client) RequestToken(ctx context.Context, authMsg *message.AuthMsg) (string, error) {
    var token string
    err := c.client.CallContext(ctx, &token, "roller_requestToken", authMsg)
    return token, err
}

// RegisterAndSubscribe registers the roller and subscribes to task messages, verified by signed data.
func (c *Client) RegisterAndSubscribe(ctx context.Context, taskCh chan *message.TaskMsg, authMsg *message.AuthMsg) (ethereum.Subscription, error) {
    return c.client.Subscribe(ctx, "roller", taskCh, "register", authMsg)
}

// SubmitProof submits a proof generated by the roller.
func (c *Client) SubmitProof(ctx context.Context, proof *message.ProofMsg) (bool, error) {
    var ok bool
    return ok, c.client.CallContext(ctx, &ok, "roller_submitProof", proof)
}
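Putting the client methods together, the roller handshake is: sign an AuthMsg, request a token, re-sign with the token, subscribe for tasks, then submit proofs. The condensed sketch below mirrors the mock roller in the tests later in this changeset; the coordinator URL is a placeholder and error handling is abbreviated.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/scroll-tech/go-ethereum/crypto"

	"scroll-tech/common/message"
	"scroll-tech/coordinator/client"
)

func main() {
	c, err := client.Dial("ws://localhost:8132") // placeholder coordinator endpoint
	if err != nil {
		panic(err)
	}

	privKey, _ := crypto.GenerateKey()
	authMsg := &message.AuthMsg{
		Identity: &message.Identity{
			Name:      "roller_example", // hypothetical roller name
			Timestamp: time.Now().UnixNano(),
		},
	}
	_ = authMsg.Sign(privKey)

	// Step 1: obtain a registration token.
	token, err := c.RequestToken(context.Background(), authMsg)
	if err != nil {
		panic(err)
	}

	// Step 2: re-sign with the token and subscribe for tasks.
	authMsg.Identity.Token = token
	_ = authMsg.Sign(privKey)

	taskCh := make(chan *message.TaskMsg, 4)
	sub, err := c.RegisterAndSubscribe(context.Background(), taskCh, authMsg)
	if err != nil {
		panic(err)
	}
	defer sub.Unsubscribe()

	// Step 3: answer one task with a (placeholder) proof.
	task := <-taskCh
	proof := &message.ProofMsg{
		ProofDetail: &message.ProofDetail{
			ID:     task.ID,
			Status: message.StatusOk,
			Proof:  &message.AggProof{},
		},
	}
	_ = proof.Sign(privKey)
	ok, err := c.SubmitProof(context.Background(), proof)
	fmt.Println(ok, err)
}
```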
134
coordinator/cmd/app/app.go
Normal file
@@ -0,0 +1,134 @@
package app

import (
    "fmt"
    "os"
    "os/signal"

    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/urfave/cli/v2"

    "scroll-tech/database"

    "scroll-tech/common/utils"
    "scroll-tech/common/version"

    "scroll-tech/coordinator"
    "scroll-tech/coordinator/config"
)

var (
    // Set up Coordinator app info.
    app *cli.App
)

func init() {
    app = cli.NewApp()
    app.Action = action
    app.Name = "coordinator"
    app.Usage = "The Scroll L2 Coordinator"
    app.Version = version.Version
    app.Flags = append(app.Flags, utils.CommonFlags...)
    app.Flags = append(app.Flags, apiFlags...)

    app.Before = func(ctx *cli.Context) error {
        return utils.LogSetup(ctx)
    }

    // Register `coordinator-test` app for integration-test.
    utils.RegisterSimulation(app, "coordinator-test")
}

func action(ctx *cli.Context) error {
    // Load config file.
    cfgFile := ctx.String(utils.ConfigFileFlag.Name)
    cfg, err := config.NewConfig(cfgFile)
    if err != nil {
        log.Crit("failed to load config file", "config file", cfgFile, "error", err)
    }

    // init db connection
    var ormFactory database.OrmFactory
    if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
        log.Crit("failed to init db connection", "err", err)
    }

    client, err := ethclient.Dial(cfg.L2Config.Endpoint)
    if err != nil {
        return err
    }

    // Initialize all coordinator modules.
    rollerManager, err := coordinator.New(ctx.Context, cfg.RollerManagerConfig, ormFactory, client)
    if err != nil {
        return err
    }
    defer func() {
        rollerManager.Stop()
        err = ormFactory.Close()
        if err != nil {
            log.Error("can not close ormFactory", "error", err)
        }
    }()

    // Start all modules.
    if err = rollerManager.Start(); err != nil {
        log.Crit("couldn't start roller manager", "error", err)
    }

    apis := rollerManager.APIs()
    // Register api and start rpc service.
    if ctx.Bool(httpEnabledFlag.Name) {
        handler, addr, err := utils.StartHTTPEndpoint(
            fmt.Sprintf(
                "%s:%d",
                ctx.String(httpListenAddrFlag.Name),
                ctx.Int(httpPortFlag.Name)),
            apis)
        if err != nil {
            log.Crit("Could not start RPC api", "error", err)
        }
        defer func() {
            _ = handler.Shutdown(ctx.Context)
            log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
        }()
        log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
    }
    // Register api and start ws service.
    if ctx.Bool(wsEnabledFlag.Name) {
        handler, addr, err := utils.StartWSEndpoint(
            fmt.Sprintf(
                "%s:%d",
                ctx.String(wsListenAddrFlag.Name),
                ctx.Int(wsPortFlag.Name)),
            apis)
        if err != nil {
            log.Crit("Could not start WS api", "error", err)
        }
        defer func() {
            _ = handler.Shutdown(ctx.Context)
            log.Info("WS endpoint closed", "url", fmt.Sprintf("ws://%v/", addr))
        }()
        log.Info("WS endpoint opened", "url", fmt.Sprintf("ws://%v/", addr))
    }

    // Catch CTRL-C to ensure a graceful shutdown.
    interrupt := make(chan os.Signal, 1)
    signal.Notify(interrupt, os.Interrupt)

    // Wait until the interrupt signal is received from an OS signal.
    <-interrupt

    return nil
}

// Run runs the coordinator.
func Run() {
    // Run the coordinator app.
    if err := app.Run(os.Args); err != nil {
        _, _ = fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}
19
coordinator/cmd/app/app_test.go
Normal file
@@ -0,0 +1,19 @@
package app

import (
    "fmt"
    "testing"
    "time"

    "scroll-tech/common/cmd"
    "scroll-tech/common/version"
)

func TestRunCoordinator(t *testing.T) {
    coordinator := cmd.NewCmd(t, "coordinator-test", "--version")
    defer coordinator.WaitExit()

    // wait result
    coordinator.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("coordinator version %s", version.Version))
    coordinator.RunApp(false)
}

@@ -1,13 +1,8 @@
package main
package app

import "github.com/urfave/cli/v2"

var (
    verifierMockFlag = cli.BoolFlag{
        Name:  "verifier.mock",
        Usage: "Mock the verifier",
        Value: false,
    }
    apiFlags = []cli.Flag{
        // http flags
        &httpEnabledFlag,
@@ -1,132 +1,7 @@
package main

import (
    "fmt"
    "os"
    "os/signal"

    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/urfave/cli/v2"

    "scroll-tech/common/utils"
    "scroll-tech/common/version"

    "scroll-tech/database"

    "scroll-tech/coordinator"
    "scroll-tech/coordinator/config"
)
import "scroll-tech/coordinator/cmd/app"

func main() {
    // Set up Coordinator app info.
    app := cli.NewApp()

    app.Action = action
    app.Name = "Coordinator"
    app.Usage = "The Scroll L2 Coordinator"
    app.Version = version.Version
    app.Flags = append(app.Flags, utils.CommonFlags...)
    app.Flags = append(app.Flags, apiFlags...)
    app.Flags = append(app.Flags, &verifierMockFlag)

    app.Before = func(ctx *cli.Context) error {
        return utils.LogSetup(ctx)
    }

    // Run the coordinator.
    if err := app.Run(os.Args); err != nil {
        _, _ = fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}

func applyConfig(ctx *cli.Context, cfg *config.Config) {
    if ctx.IsSet(verifierMockFlag.Name) {
        cfg.RollerManagerConfig.Verifier = &config.VerifierConfig{MockMode: ctx.Bool(verifierMockFlag.Name)}
    }
}

func action(ctx *cli.Context) error {
    // Load config file.
    cfgFile := ctx.String(utils.ConfigFileFlag.Name)
    cfg, err := config.NewConfig(cfgFile)
    if err != nil {
        log.Crit("failed to load config file", "config file", cfgFile, "error", err)
    }
    applyConfig(ctx, cfg)

    // init db connection
    var ormFactory database.OrmFactory
    if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
        log.Crit("failed to init db connection", "err", err)
    }

    // init l2geth connection
    client, err := ethclient.Dial(cfg.L2Config.Endpoint)
    if err != nil {
        log.Crit("failed to init l2geth connection", "err", err)
    }

    // Initialize all coordinator modules.
    rollerManager, err := coordinator.New(ctx.Context, cfg.RollerManagerConfig, ormFactory, client)
    if err != nil {
        return err
    }
    defer func() {
        rollerManager.Stop()
        err = ormFactory.Close()
        if err != nil {
            log.Error("can not close ormFactory", err)
        }
    }()

    // Start all modules.
    if err = rollerManager.Start(); err != nil {
        log.Crit("couldn't start coordinator", "error", err)
    }

    apis := rollerManager.APIs()
    // Register api and start rpc service.
    if ctx.Bool(httpEnabledFlag.Name) {
        handler, addr, err := utils.StartHTTPEndpoint(
            fmt.Sprintf(
                "%s:%d",
                ctx.String(httpListenAddrFlag.Name),
                ctx.Int(httpPortFlag.Name)),
            apis)
        if err != nil {
            log.Crit("Could not start HTTP api", "error", err)
        }
        defer func() {
            _ = handler.Shutdown(ctx.Context)
            log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
        }()
        log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
    }
    if ctx.Bool(wsEnabledFlag.Name) {
        handler, addr, err := utils.StartWSEndpoint(
            fmt.Sprintf(
                "%s:%d",
                ctx.String(wsListenAddrFlag.Name),
                ctx.Int(wsPortFlag.Name)),
            apis)
        if err != nil {
            log.Crit("Could not start WS api", "error", err)
        }
        defer func() {
            _ = handler.Shutdown(ctx.Context)
            log.Info("WS endpoint closed", "url", fmt.Sprintf("ws://%v/", addr))
        }()
        log.Info("WS endpoint opened", "url", fmt.Sprintf("ws://%v/", addr))
    }

    // Catch CTRL-C to ensure a graceful shutdown.
    interrupt := make(chan os.Signal, 1)
    signal.Notify(interrupt, os.Interrupt)

    // Wait until the interrupt signal is received from an OS signal.
    <-interrupt

    return nil
    app.Run()
}

@@ -7,8 +7,6 @@ import (
    "path/filepath"
    "strings"

    "scroll-tech/common/utils"

    db_config "scroll-tech/database"
)

@@ -59,10 +57,6 @@ func NewConfig(file string) (*Config, error) {
        return nil, err
    }

    // cover value by env fields
    cfg.DBConfig.DSN = utils.GetEnvWithDefault("DB_DSN", cfg.DBConfig.DSN)
    cfg.DBConfig.DriverName = utils.GetEnvWithDefault("DB_DRIVER", cfg.DBConfig.DriverName)

    // Check roller's order session
    order := strings.ToUpper(cfg.RollerManagerConfig.OrderSession)
    if len(order) > 0 && !(order == "ASC" || order == "DESC") {
@@ -5,7 +5,7 @@ go 1.18
require (
    github.com/orcaman/concurrent-map v1.0.0
    github.com/patrickmn/go-cache v2.1.0+incompatible
    github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea
    github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
    github.com/stretchr/testify v1.8.0
    github.com/urfave/cli/v2 v2.10.2
    golang.org/x/sync v0.1.0

@@ -349,8 +349,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea h1:KYlmCH4cDMGxQzaYoSK8+DF53POGpAmnzusAtBWzEjA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -195,8 +195,13 @@ func (m *Manager) restorePrevSessions() {
                finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
            }
            m.sessions[sess.info.ID] = sess
            log.Info("Coordinator restart reload sessions", "ID", sess.info.ID, "sess", sess.info)
            go m.CollectProofs(sess.info.ID, sess)

            log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
            for _, roller := range sess.info.Rollers {
                log.Info("restore roller info for session", "session id", sess.info.ID, "roller name", roller.Name, "public key", roller.PublicKey, "proof status", roller.Status)
            }

            go m.CollectProofs(sess)
        }
    }
}
@@ -220,17 +225,29 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
    proofTimeSec := uint64(time.Since(time.Unix(sess.info.StartTimestamp, 0)).Seconds())

    // Ensure this roller is eligible to participate in the session.
    if roller, ok := sess.info.Rollers[pk]; !ok {
        return fmt.Errorf("roller %s is not eligible to partake in proof session %v", pk, msg.ID)
    } else if roller.Status == orm.RollerProofValid {
    roller, ok := sess.info.Rollers[pk]
    if !ok {
        return fmt.Errorf("roller %s is not eligible to partake in proof session %v", pk, msg.ID)
    }
    if roller.Status == orm.RollerProofValid {
        // In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
        // TODO: Defend invalid proof resubmissions by one of the following two methods:
        // (i) slash the roller for each submission of invalid proof
        // (ii) set the maximum failure retry times
        log.Warn("roller has already submitted valid proof in proof session", "roller", pk, "proof id", msg.ID)
        log.Warn(
            "roller has already submitted valid proof in proof session",
            "roller name", roller.Name,
            "roller pk", roller.PublicKey,
            "proof id", msg.ID,
        )
        return nil
    }
    log.Info("Received zk proof", "proof id", msg.ID)
    log.Info(
        "handling zk proof",
        "proof id", msg.ID,
        "roller name", roller.Name,
        "roller pk", roller.PublicKey,
    )

    defer func() {
        // TODO: maybe we should use db tx for the whole process?
@@ -250,7 +267,13 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
    }()

    if msg.Status != message.StatusOk {
        log.Error("Roller failed to generate proof", "msg.ID", msg.ID, "error", msg.Error)
        log.Error(
            "Roller failed to generate proof",
            "msg.ID", msg.ID,
            "roller name", roller.Name,
            "roller pk", roller.PublicKey,
            "error", msg.Error,
        )
        if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskFailed); dbErr != nil {
            log.Error("failed to update task status as failed", "error", dbErr)
        }
@@ -308,7 +331,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
}

// CollectProofs collects proofs corresponding to a proof generation session.
func (m *Manager) CollectProofs(id string, sess *session) {
func (m *Manager) CollectProofs(sess *session) {
    timer := time.NewTimer(time.Duration(m.cfg.CollectionTime) * time.Minute)

    for {
@@ -318,7 +341,7 @@ func (m *Manager) CollectProofs(id string, sess *session) {

            // Ensure proper clean-up of resources.
            defer func() {
                delete(m.sessions, id)
                delete(m.sessions, sess.info.ID)
                m.mu.Unlock()
            }()
@@ -335,13 +358,13 @@ func (m *Manager) CollectProofs(id string, sess *session) {
                // record failed session.
                errMsg := "proof generation session ended without receiving any valid proofs"
                m.addFailedSession(sess, errMsg)
                log.Warn(errMsg, "session id", id)
                log.Warn(errMsg, "session id", sess.info.ID)
                // Set status as skipped.
                // Note that this is only a workaround for testnet here.
                // TODO: In real cases we should reset to orm.ProvingTaskUnassigned
                // so as to re-distribute the task in the future
                if err := m.orm.UpdateProvingStatus(id, orm.ProvingTaskFailed); err != nil {
                    log.Error("fail to reset task_status as Unassigned", "id", id, "err", err)
                if err := m.orm.UpdateProvingStatus(sess.info.ID, orm.ProvingTaskFailed); err != nil {
                    log.Error("fail to reset task_status as Unassigned", "id", sess.info.ID, "err", err)
                }
                return
            }
@@ -379,23 +402,25 @@ func (m *Manager) APIs() []rpc.API {
}

// StartProofGenerationSession starts a proof generation session
func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) bool {
func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success bool) {
    roller := m.selectRoller()
    if roller == nil {
        return false
    }

    log.Info("start proof generation session", "id", task.ID)

    var dbErr error
    defer func() {
        if dbErr != nil {
            log.Error("StartProofGenerationSession", "dbErr", dbErr)
        if !success {
            if err := m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskUnassigned); err != nil {
                log.Error("fail to reset task_status as Unassigned", "id", task.ID, "dbErr", dbErr, "err", err)
                log.Error("fail to reset task_status as Unassigned", "id", task.ID, "err", err)
            }
        }
    }()
    if err := m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned); err != nil {
        log.Error("failed to update task status", "id", task.ID, "err", err)
        return false
    }

    blockInfos, err := m.orm.GetBlockInfos(map[string]interface{}{"batch_id": task.ID})
    if err != nil {
        log.Error(
@@ -420,37 +445,41 @@ func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) bool {
        }
    }

    log.Info("roller is picked", "name", roller.Name, "public_key", roller.PublicKey)
    log.Info("roller is picked", "session id", task.ID, "name", roller.Name, "public_key", roller.PublicKey)

    // send trace to roller
    roller.sendTask(task.ID, traces)
    if !roller.sendTask(task.ID, traces) {
        log.Error("send task failed", "roller name", roller.Name, "public_key", roller.PublicKey, "id", task.ID)
        return false
    }

    pk := roller.PublicKey
    sessionInfo := &orm.SessionInfo{
        ID: task.ID,
        Rollers: map[string]*orm.RollerStatus{
            pk: {
                PublicKey: pk,
                Name:      roller.Name,
                Status:    orm.RollerAssigned,
            },
        },
        StartTimestamp: time.Now().Unix(),
    }
    if err := m.orm.SetSessionInfo(sessionInfo); err != nil {
        log.Error("db set session info fail", "pk", pk, "error", err)
    }

    // Create a proof generation session.
    s := &session{
        info: sessionInfo,
        info: &orm.SessionInfo{
            ID: task.ID,
            Rollers: map[string]*orm.RollerStatus{
                pk: {
                    PublicKey: pk,
                    Name:      roller.Name,
                    Status:    orm.RollerAssigned,
                },
            },
            StartTimestamp: time.Now().Unix(),
        },
        finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
    }

    // Store session info.
    if err = m.orm.SetSessionInfo(s.info); err != nil {
        log.Error("db set session info fail", "roller name", roller.Name, "public_key", pk, "error", err)
        return false
    }

    m.mu.Lock()
    m.sessions[task.ID] = s
    m.mu.Unlock()

    dbErr = m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned)
    go m.CollectProofs(task.ID, s)
    go m.CollectProofs(s)

    return true
}
@@ -3,20 +3,22 @@ package coordinator_test
import (
    "context"
    "crypto/ecdsa"
    "crypto/rand"
    "fmt"
    "math/big"
    "net/http"
    "strconv"
    "strings"
    "sync"
    "testing"
    "time"

    "github.com/scroll-tech/go-ethereum"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/stretchr/testify/assert"
    "golang.org/x/sync/errgroup"

    "scroll-tech/common/docker"
    "scroll-tech/common/message"
    "scroll-tech/common/utils"

    "scroll-tech/database"
    "scroll-tech/database/migrate"
    "scroll-tech/database/orm"
@@ -24,23 +26,26 @@ import (
    "scroll-tech/coordinator"
    client2 "scroll-tech/coordinator/client"

    "scroll-tech/common/docker"
    "scroll-tech/common/message"
    "scroll-tech/common/utils"

    bridge_config "scroll-tech/bridge/config"

    coordinator_config "scroll-tech/coordinator/config"
)

const managerURL = "localhost:8132"
const newManagerURL = "localhost:8133"

var (
    cfg           *bridge_config.Config
    dbImg         docker.ImgInstance
    rollerManager *coordinator.Manager
    handle        *http.Server
    cfg   *bridge_config.Config
    dbImg docker.ImgInstance
)

func setEnv(t *testing.T) error {
    var err error
func randomURL() string {
    id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
    return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}

func setEnv(t *testing.T) (err error) {
    // Load config.
    cfg, err = bridge_config.NewConfig("../bridge/config.json")
    assert.NoError(t, err)
@@ -48,13 +53,8 @@ func setEnv(t *testing.T) error {
    // Create db container.
    dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
    cfg.DBConfig.DSN = dbImg.Endpoint()
    // start roller manager
    rollerManager = setupRollerManager(t, cfg.DBConfig)

    // start ws service
    handle, _, err = utils.StartWSEndpoint(managerURL, rollerManager.APIs())
    assert.NoError(t, err)
    return err
    return
}

func TestApis(t *testing.T) {
@@ -65,13 +65,12 @@ func TestApis(t *testing.T) {
    t.Run("TestFailedHandshake", testFailedHandshake)
    t.Run("TestSeveralConnections", testSeveralConnections)
    t.Run("TestIdleRollerSelection", testIdleRollerSelection)
    t.Run("TestRollerReconnect", testRollerReconnect)
    // TODO: Restart roller alone when received task, can add this test case in integration-test.
    //t.Run("TestRollerReconnect", testRollerReconnect)
    t.Run("TestGracefulRestart", testGracefulRestart)

    // Teardown
    t.Cleanup(func() {
        handle.Shutdown(context.Background())
        rollerManager.Stop()
        dbImg.Stop()
    })
}
@@ -83,9 +82,16 @@ func testHandshake(t *testing.T) {
    assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
    defer l2db.Close()

    roller := newMockRoller(t, "roller_test")
    // Setup coordinator and ws server.
    wsURL := "ws://" + randomURL()
    rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
    defer func() {
        handler.Shutdown(context.Background())
        rollerManager.Stop()
    }()

    roller := newMockRoller(t, "roller_test", wsURL)
    defer roller.close()
    roller.connectToCoordinator(t, managerURL)

    assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers())
}
@@ -97,11 +103,16 @@ func testFailedHandshake(t *testing.T) {
    assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
    defer l2db.Close()

    stopCh := make(chan struct{})
    // Setup coordinator and ws server.
    wsURL := "ws://" + randomURL()
    rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
    defer func() {
        handler.Shutdown(context.Background())
        rollerManager.Stop()
    }()

    // prepare
    name := "roller_test"
    wsURL := "ws://" + managerURL
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

@@ -120,8 +131,7 @@ func testFailedHandshake(t *testing.T) {
        },
    }
    assert.NoError(t, authMsg.Sign(privkey))
    taskCh := make(chan *message.TaskMsg, 4)
    _, err = client.RegisterAndSubscribe(ctx, taskCh, authMsg)
    _, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
    assert.Error(t, err)

    // Try to perform handshake with timeouted token
@@ -145,16 +155,11 @@ func testFailedHandshake(t *testing.T) {
    authMsg.Identity.Token = token
    assert.NoError(t, authMsg.Sign(privkey))

    tick := time.Tick(6 * time.Second)

    <-tick
    taskCh = make(chan *message.TaskMsg, 4)
    _, err = client.RegisterAndSubscribe(ctx, taskCh, authMsg)
    <-time.After(6 * time.Second)
    _, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
    assert.Error(t, err)

    assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers())

    close(stopCh)
}

func testSeveralConnections(t *testing.T) {
@@ -164,6 +169,14 @@ func testSeveralConnections(t *testing.T) {
    assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
    defer l2db.Close()

    // Setup coordinator and ws server.
    wsURL := "ws://" + randomURL()
    rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
    defer func() {
        handler.Shutdown(context.Background())
        rollerManager.Stop()
    }()

    var (
        batch = 100
        eg    = errgroup.Group{}
@@ -172,8 +185,7 @@ func testSeveralConnections(t *testing.T) {
    for i := 0; i < batch; i++ {
        idx := i
        eg.Go(func() error {
            rollers[idx] = newMockRoller(t, "roller_test"+strconv.Itoa(idx))
            rollers[idx].connectToCoordinator(t, managerURL)
            rollers[idx] = newMockRoller(t, "roller_test_"+strconv.Itoa(idx), wsURL)
            return nil
        })
    }
@@ -183,8 +195,8 @@ func testSeveralConnections(t *testing.T) {
    assert.Equal(t, batch, rollerManager.GetNumberOfIdleRollers())

    // close connection
    for i := 0; i < batch; i++ {
        rollers[i].close()
    for _, roller := range rollers {
        roller.close()
    }

    var (
@@ -211,16 +223,28 @@ func testIdleRollerSelection(t *testing.T) {
    assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
    defer l2db.Close()

    // Setup coordinator and ws server.
    wsURL := "ws://" + randomURL()
    rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
    defer func() {
        handler.Shutdown(context.Background())
        rollerManager.Stop()
    }()

    // create mock rollers.
    batch := 20
    rollers := make([]*mockRoller, batch)
    for i := 0; i < batch; i++ {
        rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i))
        defer rollers[i].close()
        rollers[i].connectToCoordinator(t, managerURL)
        go rollers[i].waitTaskAndSendProof(t, 1, false)
    rollers := make([]*mockRoller, 20)
    for i := 0; i < len(rollers); i++ {
        rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
        rollers[i].waitTaskAndSendProof(t, time.Second, false)
    }
    assert.Equal(t, batch, rollerManager.GetNumberOfIdleRollers())
    defer func() {
        // close connection
        for _, roller := range rollers {
            roller.close()
        }
    }()

    assert.Equal(t, len(rollers), rollerManager.GetNumberOfIdleRollers())

    var ids = make([]string, 2)
    dbTx, err := l2db.Beginx()
@@ -252,49 +276,6 @@ func testIdleRollerSelection(t *testing.T) {
    }
}

func testRollerReconnect(t *testing.T) {
    // Create db handler and reset db.
    l2db, err := database.NewOrmFactory(cfg.DBConfig)
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
    defer l2db.Close()

    var ids = make([]string, 1)
    dbTx, err := l2db.Beginx()
    assert.NoError(t, err)
    for i := range ids {
        ID, err := l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
        assert.NoError(t, err)
        ids[i] = ID
    }
    assert.NoError(t, dbTx.Commit())

    // create mock roller
    roller := newMockRoller(t, "roller_test")
    defer roller.close()
    roller.connectToCoordinator(t, managerURL)
    go roller.waitTaskAndSendProof(t, 1, true)

    // verify proof status
    var (
        tick     = time.Tick(500 * time.Millisecond)
        tickStop = time.Tick(15 * time.Second)
    )
    for len(ids) > 0 {
        select {
        case <-tick:
            status, err := l2db.GetProvingStatusByID(ids[0])
            assert.NoError(t, err)
            if status == orm.ProvingTaskVerified {
                ids = ids[1:]
            }
        case <-tickStop:
            t.Error("failed to check proof status")
            return
        }
    }
}

func testGracefulRestart(t *testing.T) {
    // Create db handler and reset db.
    l2db, err := database.NewOrmFactory(cfg.DBConfig)
@@ -311,42 +292,46 @@ func testGracefulRestart(t *testing.T) {
    }
    assert.NoError(t, dbTx.Commit())

    // create mock roller
    roller := newMockRoller(t, "roller_test")
    roller.connectToCoordinator(t, managerURL)
    // Setup coordinator and ws server.
    wsURL := "ws://" + randomURL()
    rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)

    // wait 5 seconds, coordinator restarts before roller submits proof
    go roller.waitTaskAndSendProof(t, 5, true)
    // create mock roller
    roller := newMockRoller(t, "roller_test", wsURL)
    // wait 10 seconds, coordinator restarts before roller submits proof
    roller.waitTaskAndSendProof(t, 10*time.Second, false)

    // wait for coordinator to dispatch task
    <-time.After(3 * time.Second)

    <-time.After(5 * time.Second)
    // the coordinator will delete the roller if the subscription is closed.
    roller.close()

    // start new roller manager && ws service
    newRollerManager := setupRollerManager(t, cfg.DBConfig)
    handle, _, err = utils.StartWSEndpoint(newManagerURL, newRollerManager.APIs())
    assert.NoError(t, err)
    // Close rollerManager and ws handler.
    handler.Shutdown(context.Background())
    rollerManager.Stop()

    // Setup new coordinator and ws server.
    newRollerManager, newHandler := setupCoordinator(t, cfg.DBConfig, wsURL)
    defer func() {
        newHandler.Shutdown(context.Background())
        newRollerManager.Stop()
        handle.Shutdown(context.Background())
    }()

    for i := range ids {
        _, err = newRollerManager.GetSessionInfo(ids[i])
        info, err := newRollerManager.GetSessionInfo(ids[i])
        assert.Equal(t, orm.ProvingTaskAssigned.String(), info.Status)
        assert.NoError(t, err)

        // at this point, the roller hasn't submitted a proof yet
        status, err := l2db.GetProvingStatusByID(ids[i])
        assert.NoError(t, err)
        assert.Equal(t, orm.ProvingTaskAssigned, status)
    }

    // will overwrite the roller client for `SubmitProof`
    roller.connectToCoordinator(t, newManagerURL)
    roller.waitTaskAndSendProof(t, time.Millisecond*500, true)
    defer roller.close()

    // at this point, the roller hasn't submitted a proof yet
    status, err := l2db.GetProvingStatusByID(ids[0])
    assert.NoError(t, err)
    assert.Equal(t, orm.ProvingTaskAssigned, status)

    // verify proof status
    var (
        tick = time.Tick(500 * time.Millisecond)
@@ -370,12 +355,12 @@ func testGracefulRestart(t *testing.T) {
    }
}

func setupRollerManager(t *testing.T, dbCfg *database.DBConfig) *coordinator.Manager {
func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, wsURL string) (rollerManager *coordinator.Manager, handler *http.Server) {
    // Get db handler.
    db, err := database.NewOrmFactory(dbCfg)
    assert.True(t, assert.NoError(t, err), "failed to get db handler.")

    rollerManager, err := coordinator.New(context.Background(), &coordinator_config.RollerManagerConfig{
    rollerManager, err = coordinator.New(context.Background(), &coordinator_config.RollerManagerConfig{
        RollersPerSession: 1,
        Verifier:          &coordinator_config.VerifierConfig{MockMode: true},
        CollectionTime:    1,
@@ -384,101 +369,133 @@ func setupRollerManager(t *testing.T, dbCfg *database.DBConfig) *coordinator.Man
    assert.NoError(t, err)
    assert.NoError(t, rollerManager.Start())

    return rollerManager
    // start ws service
    handler, _, err = utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs())
    assert.NoError(t, err)

    return rollerManager, handler
}

type mockRoller struct {
    rollerName string
    privKey    *ecdsa.PrivateKey
    taskCh     chan *message.TaskMsg
    sub        ethereum.Subscription
    client     *client2.Client
    stopCh     chan struct{}

    wsURL  string
    client *client2.Client

    taskCh    chan *message.TaskMsg
    taskCache sync.Map

    sub    ethereum.Subscription
    stopCh chan struct{}
}

func newMockRoller(t *testing.T, rollerName string) *mockRoller {
func newMockRoller(t *testing.T, rollerName string, wsURL string) *mockRoller {
    privKey, err := crypto.GenerateKey()
    assert.NoError(t, err)
    return &mockRoller{

    roller := &mockRoller{
        rollerName: rollerName,
        privKey:    privKey,
        taskCh:     make(chan *message.TaskMsg, 4)}
        wsURL:      wsURL,
        taskCh:     make(chan *message.TaskMsg, 4),
        stopCh:     make(chan struct{}),
    }
    roller.client, roller.sub, err = roller.connectToCoordinator()
    assert.NoError(t, err)

    return roller
}

// connectToCoordinator sets up a websocket client to connect to the roller manager.
func (r *mockRoller) connectToCoordinator(t *testing.T, wsURL string) {
    // create a new ws connection
    var err error
    r.client, err = client2.Dial("ws://" + wsURL)
    assert.NoError(t, err)
func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscription, error) {
    // Create connection.
    client, err := client2.Dial(r.wsURL)
    if err != nil {
        return nil, nil, err
    }

    // create a new ws connection
    authMsg := &message.AuthMsg{
        Identity: &message.Identity{
            Name:      r.rollerName,
            Timestamp: time.Now().UnixNano(),
        },
    }
    assert.NoError(t, authMsg.Sign(r.privKey))
    _ = authMsg.Sign(r.privKey)

    token, err := r.client.RequestToken(context.Background(), authMsg)
    assert.NoError(t, err)
    token, err := client.RequestToken(context.Background(), authMsg)
    if err != nil {
        return nil, nil, err
    }
    authMsg.Identity.Token = token
    _ = authMsg.Sign(r.privKey)

    assert.NoError(t, authMsg.Sign(r.privKey))
    r.sub, err = r.client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
    assert.NoError(t, err)
    sub, err := client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
    if err != nil {
        return nil, nil, err
    }

    r.stopCh = make(chan struct{})
    return client, sub, nil
}

    go func() {
        <-r.stopCh
        r.sub.Unsubscribe()
    }()
func (r *mockRoller) releaseTasks() {
    r.taskCache.Range(func(key, value any) bool {
        r.taskCh <- value.(*message.TaskMsg)
        r.taskCache.Delete(key)
        return true
    })
}

// Wait for the proof task, after receiving the proof task, roller submits proof after proofTime secs.
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnectBeforeSendProof bool) {
    for {
        task := <-r.taskCh
        // simulate proof time
        <-time.After(proofTime * time.Second)
        if reconnectBeforeSendProof {
            // simulating the case that the roller first disconnects and then reconnects to the coordinator
            // the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
            r.reconnetToCoordinator(t)
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool) {
    // simulating the case that the roller first disconnects and then reconnects to the coordinator
    // the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
    if reconnect {
        var err error
        r.client, r.sub, err = r.connectToCoordinator()
        if err != nil {
            t.Fatal(err)
            return
        }
        proof := &message.ProofMsg{
            ProofDetail: &message.ProofDetail{
                ID:     task.ID,
                Status: message.StatusOk,
                Proof:  &message.AggProof{},
            },
        }
        assert.NoError(t, proof.Sign(r.privKey))
        ok, err := r.client.SubmitProof(context.Background(), proof)
        assert.NoError(t, err)
        assert.Equal(t, true, ok)
    }

    // Release cached tasks.
    r.releaseTasks()

    r.stopCh = make(chan struct{})
    go r.loop(t, r.client, proofTime, r.stopCh)
}

func (r *mockRoller) reconnetToCoordinator(t *testing.T) {
    authMsg := &message.AuthMsg{
        Identity: &message.Identity{
            Name:      r.rollerName,
            Timestamp: time.Now().UnixNano(),
        },
    }
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, stopCh chan struct{}) {
    for {
        select {
        case task := <-r.taskCh:
            r.taskCache.Store(task.ID, task)
            // simulate proof time
            select {
            case <-time.After(proofTime):
            case <-stopCh:
                return
            }
            proof := &message.ProofMsg{
                ProofDetail: &message.ProofDetail{
                    ID:     task.ID,
                    Status: message.StatusOk,
                    Proof:  &message.AggProof{},
                },
            }
            assert.NoError(t, proof.Sign(r.privKey))
            ok, err := client.SubmitProof(context.Background(), proof)
            assert.NoError(t, err)
            assert.Equal(t, true, ok)
        case <-stopCh:
            return
        }
    }
    assert.NoError(t, authMsg.Sign(r.privKey))

    token, err := r.client.RequestToken(context.Background(), authMsg)
    assert.NoError(t, err)
    authMsg.Identity.Token = token

    assert.NoError(t, authMsg.Sign(r.privKey))
    r.sub, err = r.client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
    assert.NoError(t, err)
}

func (r *mockRoller) close() {
    close(r.stopCh)
    r.sub.Unsubscribe()
}
@@ -41,7 +41,7 @@ func (r *rollerNode) sendTask(id string, traces []*types.BlockTrace) bool {
    }:
        r.TaskIDs.Set(id, struct{}{})
    default:
        log.Warn("roller channel is full")
        log.Warn("roller channel is full", "roller name", r.Name, "public_key", r.PublicKey)
        return false
    }
    return true
@@ -77,6 +77,7 @@ func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *m
        roller := node.(*rollerNode)
        // avoid reconnecting too frequently.
        if time.Since(roller.registerTime) < 60*time.Second {
            log.Warn("roller reconnect too frequently", "roller_name", identity.Name, "public_key", pubkey)
            return nil, fmt.Errorf("roller reconnect too frequently")
        }
        // update register time and status
23
coordinator/verifier/mock.go
Normal file
@@ -0,0 +1,23 @@
//go:build mock_verifier

package verifier

import (
    "scroll-tech/common/message"

    "scroll-tech/coordinator/config"
)

// Verifier represents a mock halo2 verifier.
type Verifier struct {
}

// NewVerifier sets up a mock verifier.
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
    return &Verifier{}, nil
}

// VerifyProof always returns true.
func (v *Verifier) VerifyProof(proof *message.AggProof) (bool, error) {
    return true, nil
}
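Since the mock is selected by the mock_verifier build tag, a quick sanity check only compiles under `go test -tags mock_verifier`. This test is a sketch, not part of the diff; it assumes only the types shown in this changeset.

```go
//go:build mock_verifier

package verifier

import (
	"testing"

	"scroll-tech/common/message"

	"scroll-tech/coordinator/config"
)

// A minimal check that the mock verifier accepts any proof.
func TestMockVerifier(t *testing.T) {
	v, err := NewVerifier(&config.VerifierConfig{MockMode: true})
	if err != nil {
		t.Fatal(err)
	}
	ok, err := v.VerifyProof(&message.AggProof{})
	if err != nil || !ok {
		t.Fatalf("mock verifier should accept any proof: ok=%v err=%v", ok, err)
	}
}
```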
@@ -1,3 +1,5 @@
//go:build !mock_verifier

package verifier

/*

@@ -29,14 +29,3 @@ db_cli rollback
```bash
make test
```

## db config

* db settings in config

```bash
# DB_DSN: db data source name
export DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
# DB_DRIVER: db driver name
export DB_DRIVER="postgres"
```
80
database/cmd/app/app.go
Normal file
80
database/cmd/app/app.go
Normal file
@@ -0,0 +1,80 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
"scroll-tech/common/version"
|
||||
)
|
||||
|
||||
var (
|
||||
// Set up database app info.
|
||||
app *cli.App
|
||||
)
|
||||
|
||||
func init() {
|
||||
app = cli.NewApp()
|
||||
// Set up database app info.
|
||||
app.Name = "db_cli"
|
||||
app.Usage = "The Scroll Database CLI"
|
||||
app.Version = version.Version
|
||||
app.Flags = append(app.Flags, utils.CommonFlags...)
|
||||
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
return utils.LogSetup(ctx)
|
||||
}
|
||||
|
||||
app.Commands = []*cli.Command{
|
||||
{
|
||||
Name: "reset",
|
||||
Usage: "Clean and reset database.",
|
||||
Action: resetDB,
|
||||
Flags: []cli.Flag{&utils.ConfigFileFlag},
|
||||
},
|
||||
{
|
||||
Name: "status",
|
||||
Usage: "Check migration status.",
|
||||
Action: checkDBStatus,
|
||||
Flags: []cli.Flag{&utils.ConfigFileFlag},
|
||||
},
|
||||
{
|
||||
Name: "version",
|
||||
Usage: "Display the current database version.",
|
||||
Action: dbVersion,
|
||||
Flags: []cli.Flag{&utils.ConfigFileFlag},
|
||||
},
|
||||
{
|
||||
Name: "migrate",
|
||||
Usage: "Migrate the database to the latest version.",
|
||||
Action: migrateDB,
|
||||
Flags: []cli.Flag{&utils.ConfigFileFlag},
|
||||
},
|
||||
{
|
||||
Name: "rollback",
|
||||
Usage: "Roll back the database to a previous <version>. Rolls back a single migration if no version specified.",
|
||||
Action: rollbackDB,
|
||||
Flags: []cli.Flag{
|
||||
&utils.ConfigFileFlag,
|
||||
&cli.IntFlag{
|
||||
Name: "version",
|
||||
Usage: "Rollback to the specified version.",
|
||||
Value: 0,
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
// Register `db_cli-test` app for integration-test.
|
||||
utils.RegisterSimulation(app, "db_cli-test")
|
||||
}
|
||||
|
||||
// Run run database cmd instance.
|
||||
func Run() {
|
||||
// RunApp the db_cli.
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
_, _ = fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
19
database/cmd/app/app_test.go
Normal file
19
database/cmd/app/app_test.go
Normal file
@@ -0,0 +1,19 @@
+package app
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"scroll-tech/common/cmd"
+	"scroll-tech/common/version"
+)
+
+func TestRunDatabase(t *testing.T) {
+	bridge := cmd.NewCmd(t, "db_cli-test", "--version")
+	defer bridge.WaitExit()
+
+	// wait result
+	bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("db_cli version %s", version.Version))
+	bridge.RunApp(false)
+}
@@ -1,4 +1,4 @@
-package main
+package app

 import (
 	"github.com/jmoiron/sqlx"
@@ -11,11 +11,16 @@ import (
 	"scroll-tech/database/migrate"
 )

-func initDB(file string) (*sqlx.DB, error) {
+func getConfig(ctx *cli.Context) (*database.DBConfig, error) {
+	file := ctx.String(utils.ConfigFileFlag.Name)
 	dbCfg, err := database.NewConfig(file)
 	if err != nil {
 		return nil, err
 	}
+	return dbCfg, nil
+}
+
+func initDB(dbCfg *database.DBConfig) (*sqlx.DB, error) {
 	factory, err := database.NewOrmFactory(dbCfg)
 	if err != nil {
 		return nil, err
@@ -27,7 +32,11 @@ func initDB(file string) (*sqlx.DB, error) {

 // resetDB clean or reset database.
 func resetDB(ctx *cli.Context) error {
-	db, err := initDB(ctx.String(utils.ConfigFileFlag.Name))
+	cfg, err := getConfig(ctx)
+	if err != nil {
+		return err
+	}
+	db, err := initDB(cfg)
 	if err != nil {
 		return err
 	}
@@ -44,7 +53,11 @@ func resetDB(ctx *cli.Context) error {

 // checkDBStatus check db status
 func checkDBStatus(ctx *cli.Context) error {
-	db, err := initDB(ctx.String(utils.ConfigFileFlag.Name))
+	cfg, err := getConfig(ctx)
+	if err != nil {
+		return err
+	}
+	db, err := initDB(cfg)
 	if err != nil {
 		return err
 	}
@@ -54,7 +67,11 @@ func checkDBStatus(ctx *cli.Context) error {

 // dbVersion return the latest version
 func dbVersion(ctx *cli.Context) error {
-	db, err := initDB(ctx.String(utils.ConfigFileFlag.Name))
+	cfg, err := getConfig(ctx)
+	if err != nil {
+		return err
+	}
+	db, err := initDB(cfg)
 	if err != nil {
 		return err
 	}
@@ -67,7 +84,11 @@ func dbVersion(ctx *cli.Context) error {

 // migrateDB migrate db
 func migrateDB(ctx *cli.Context) error {
-	db, err := initDB(ctx.String(utils.ConfigFileFlag.Name))
+	cfg, err := getConfig(ctx)
+	if err != nil {
+		return err
+	}
+	db, err := initDB(cfg)
 	if err != nil {
 		return err
 	}
@@ -77,7 +98,11 @@ func migrateDB(ctx *cli.Context) error {

 // rollbackDB rollback db by version
 func rollbackDB(ctx *cli.Context) error {
-	db, err := initDB(ctx.String(utils.ConfigFileFlag.Name))
+	cfg, err := getConfig(ctx)
+	if err != nil {
+		return err
+	}
+	db, err := initDB(cfg)
 	if err != nil {
 		return err
 	}
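With getConfig split out of initDB, every subcommand now repeats the same config-then-connect preamble. A possible further factoring, hypothetical and not part of the diff (it reuses the getConfig and initDB helpers above):

```go
// runWithDB would centralize the boilerplate each cli.Action repeats above.
func runWithDB(ctx *cli.Context, fn func(db *sqlx.DB) error) error {
	cfg, err := getConfig(ctx) // load database.DBConfig from --config
	if err != nil {
		return err
	}
	db, err := initDB(cfg) // open the connection via database.NewOrmFactory
	if err != nil {
		return err
	}
	return fn(db)
}
```

Each action body would then shrink to a single runWithDB call around its migrate-package invocation.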
@@ -1,78 +1,9 @@
 package main

 import (
-	"fmt"
-	"os"
-
-	"github.com/urfave/cli/v2"
-
-	"scroll-tech/common/utils"
-	"scroll-tech/common/version"
+	"scroll-tech/database/cmd/app"
 )

 func main() {
-	// Set up database app info.
-	app := cli.NewApp()
-	app.Name = "db_cli"
-	app.Usage = "The Scroll Database CLI"
-	app.Version = version.Version
-	app.Flags = append(app.Flags, utils.CommonFlags...)
-
-	app.Before = func(ctx *cli.Context) error {
-		return utils.LogSetup(ctx)
-	}
-
-	app.Commands = []*cli.Command{
-		{
-			Name:   "reset",
-			Usage:  "Clean and reset database.",
-			Action: resetDB,
-			Flags: []cli.Flag{
-				&utils.ConfigFileFlag,
-			},
-		},
-		{
-			Name:   "status",
-			Usage:  "Check migration status.",
-			Action: checkDBStatus,
-			Flags: []cli.Flag{
-				&utils.ConfigFileFlag,
-			},
-		},
-		{
-			Name:   "version",
-			Usage:  "Display the current database version.",
-			Action: dbVersion,
-			Flags: []cli.Flag{
-				&utils.ConfigFileFlag,
-			},
-		},
-		{
-			Name:   "migrate",
-			Usage:  "Migrate the database to the latest version.",
-			Action: migrateDB,
-			Flags: []cli.Flag{
-				&utils.ConfigFileFlag,
-			},
-		},
-		{
-			Name:   "rollback",
-			Usage:  "Roll back the database to a previous <version>. Rolls back a single migration if no version specified.",
-			Action: rollbackDB,
-			Flags: []cli.Flag{
-				&utils.ConfigFileFlag,
-				&cli.IntFlag{
-					Name:  "version",
-					Usage: "Rollback to the specified version.",
-					Value: 0,
-				},
-			},
-		},
-	}
-
-	// Run the database.
-	if err := app.Run(os.Args); err != nil {
-		_, _ = fmt.Fprintln(os.Stderr, err)
-		os.Exit(1)
-	}
+	app.Run()
 }
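Assembled from the kept and added lines, the surviving 9-line entry point is just a shim over the new package:

```go
package main

import (
	"scroll-tech/database/cmd/app"
)

func main() {
	app.Run()
}
```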
@@ -4,8 +4,6 @@ import (
 	"encoding/json"
-	"os"
-	"path/filepath"

 	"scroll-tech/common/utils"
 )

 // DBConfig db config
@@ -31,9 +29,5 @@ func NewConfig(file string) (*DBConfig, error) {
 		return nil, err
 	}

-	// cover value by env fields
-	cfg.DSN = utils.GetEnvWithDefault("DB_DSN", cfg.DSN)
-	cfg.DriverName = utils.GetEnvWithDefault("DB_DRIVER", cfg.DriverName)
-
 	return cfg, nil
 }
@@ -7,7 +7,7 @@ require (
 	github.com/lib/pq v1.10.6
 	github.com/mattn/go-sqlite3 v1.14.14
 	github.com/pressly/goose/v3 v3.7.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
 	github.com/stretchr/testify v1.8.0
 	github.com/urfave/cli/v2 v2.10.2
 )
@@ -339,8 +339,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea h1:KYlmCH4cDMGxQzaYoSK8+DF53POGpAmnzusAtBWzEjA=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
 github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
 github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
 github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -5,7 +5,6 @@ import (

 	"github.com/jmoiron/sqlx"
 	_ "github.com/lib/pq"
-	"github.com/pressly/goose/v3"
 	"github.com/stretchr/testify/assert"

 	"scroll-tech/common/docker"
@@ -34,30 +33,57 @@ func initEnv(t *testing.T) error {
 	return nil
 }

-func TestMigration(t *testing.T) {
-	defer func() {
-		if dbImg != nil {
-			assert.NoError(t, dbImg.Stop())
-		}
-	}()
+func TestMigrate(t *testing.T) {
 	if err := initEnv(t); err != nil {
 		t.Fatal(err)
 	}

-	err := Migrate(pgDB.DB)
-	assert.NoError(t, err)
-
-	db := pgDB.DB
-	version0, err := goose.GetDBVersion(db)
-	assert.NoError(t, err)
-	t.Log("current version is ", version0)
-
-	// rollback one version
-	assert.NoError(t, Rollback(db, nil))
-
-	version1, err := Current(db)
-	assert.NoError(t, err)
-
-	// check version expect less than 1
-	assert.Equal(t, version0-1, version1)
+	t.Run("testCurrent", testCurrent)
+	t.Run("testStatus", testStatus)
+	t.Run("testResetDB", testResetDB)
+	t.Run("testMigrate", testMigrate)
+	t.Run("testRollback", testRollback)
+
+	t.Cleanup(func() {
+		if dbImg != nil {
+			assert.NoError(t, dbImg.Stop())
+		}
+	})
 }
+
+func testCurrent(t *testing.T) {
+	cur, err := Current(pgDB.DB)
+	assert.NoError(t, err)
+	assert.Equal(t, 0, int(cur))
+}
+
+func testStatus(t *testing.T) {
+	status := Status(pgDB.DB)
+	assert.NoError(t, status)
+}
+
+func testResetDB(t *testing.T) {
+	assert.NoError(t, ResetDB(pgDB.DB))
+	cur, err := Current(pgDB.DB)
+	assert.NoError(t, err)
+	assert.Equal(t, 5, int(cur))
+}
+
+func testMigrate(t *testing.T) {
+	assert.NoError(t, Migrate(pgDB.DB))
+	cur, err := Current(pgDB.DB)
+	assert.NoError(t, err)
+	assert.Equal(t, true, cur > 0)
+}
+
+func testRollback(t *testing.T) {
+	version, err := Current(pgDB.DB)
+	assert.NoError(t, err)
+	assert.Equal(t, true, version > 0)
+
+	assert.NoError(t, Rollback(pgDB.DB, nil))
+
+	cur, err := Current(pgDB.DB)
+	assert.NoError(t, err)
+	assert.Equal(t, true, cur+1 == version)
+}
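Note that the subtests are order-dependent: they share the one migrated Postgres container, so each t.Run call relies on the state the previous one left behind. Summarizing what the assertions above expect at each step:

```go
t.Run("testCurrent", testCurrent)   // fresh database: goose version 0
t.Run("testStatus", testStatus)     // migration status query succeeds
t.Run("testResetDB", testResetDB)   // reset, then apply all migrations: version 5
t.Run("testMigrate", testMigrate)   // migrating again keeps version > 0
t.Run("testRollback", testRollback) // one rollback step: version drops by exactly 1
```

Reordering the calls would break testCurrent, which assumes no migration has run yet.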
@@ -328,6 +328,24 @@ func (o *blockBatchOrm) GetRollupStatusByIDList(ids []string) ([]RollupStatus, e
 	return statuses, nil
 }

+func (o *blockBatchOrm) GetCommitTxHash(id string) (sql.NullString, error) {
+	row := o.db.QueryRow(`SELECT commit_tx_hash FROM block_batch WHERE id = $1`, id)
+	var hash sql.NullString
+	if err := row.Scan(&hash); err != nil {
+		return sql.NullString{}, err
+	}
+	return hash, nil
+}
+
+func (o *blockBatchOrm) GetFinalizeTxHash(id string) (sql.NullString, error) {
+	row := o.db.QueryRow(`SELECT finalize_tx_hash FROM block_batch WHERE id = $1`, id)
+	var hash sql.NullString
+	if err := row.Scan(&hash); err != nil {
+		return sql.NullString{}, err
+	}
+	return hash, nil
+}
+
 func (o *blockBatchOrm) UpdateRollupStatus(ctx context.Context, id string, status RollupStatus) error {
 	switch status {
 	case RollupCommitted:
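Both getters return sql.NullString rather than string, presumably because the hash columns stay NULL until the corresponding L1 transaction exists. A minimal caller sketch (hypothetical helper; only GetCommitTxHash comes from the diff):

```go
// commitTxOf resolves a batch's L1 commit transaction hash, if it has one.
func commitTxOf(orm BlockBatchOrm, batchID string) (string, bool, error) {
	hash, err := orm.GetCommitTxHash(batchID)
	if err != nil {
		return "", false, err
	}
	if !hash.Valid { // batch not committed on L1 yet
		return "", false, nil
	}
	return hash.String, true, nil
}
```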
@@ -1,7 +1,6 @@
 package orm

 import (
-	"context"
 	"database/sql"
 	"encoding/json"
 	"errors"
@@ -152,7 +151,7 @@ func (o *blockTraceOrm) GetHashByNumber(number uint64) (*common.Hash, error) {
 	return &hash, nil
 }

-func (o *blockTraceOrm) InsertBlockTraces(ctx context.Context, blockTraces []*types.BlockTrace) error {
+func (o *blockTraceOrm) InsertBlockTraces(blockTraces []*types.BlockTrace) error {
 	traceMaps := make([]map[string]interface{}, len(blockTraces))
 	for i, trace := range blockTraces {
 		number, hash, tx_num, mtime := trace.Header.Number.Int64(),
@@ -3,6 +3,7 @@ package orm

 import (
 	"context"
+	"database/sql"
 	"fmt"

 	"github.com/jmoiron/sqlx"
 	"github.com/scroll-tech/go-ethereum/common"
@@ -84,6 +85,19 @@ const (
 	RollerProofInvalid
 )

+func (s RollerProveStatus) String() string {
+	switch s {
+	case RollerAssigned:
+		return "RollerAssigned"
+	case RollerProofValid:
+		return "RollerProofValid"
+	case RollerProofInvalid:
+		return "RollerProofInvalid"
+	default:
+		return fmt.Sprintf("Bad Value: %d", int32(s))
+	}
+}
+
 // RollerStatus is the roller name and roller prove status
 type RollerStatus struct {
 	PublicKey string `json:"public_key"`
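With String() defined, RollerProveStatus satisfies fmt.Stringer and prints readably in logs and error messages. A self-contained sketch; the constant values are assumed to follow the usual iota pattern, and only the method body comes from the diff:

```go
package main

import "fmt"

type RollerProveStatus int32

const (
	RollerAssigned RollerProveStatus = iota // assumed numbering for this sketch
	RollerProofValid
	RollerProofInvalid
)

func (s RollerProveStatus) String() string {
	switch s {
	case RollerAssigned:
		return "RollerAssigned"
	case RollerProofValid:
		return "RollerProofValid"
	case RollerProofInvalid:
		return "RollerProofInvalid"
	default:
		return fmt.Sprintf("Bad Value: %d", int32(s))
	}
}

func main() {
	fmt.Println(RollerAssigned)        // RollerAssigned
	fmt.Println(RollerProveStatus(42)) // Bad Value: 42
}
```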
@@ -108,7 +122,7 @@ type BlockTraceOrm interface {
 	GetUnbatchedBlocks(fields map[string]interface{}, args ...string) ([]*BlockInfo, error)
 	GetHashByNumber(number uint64) (*common.Hash, error)
 	DeleteTracesByBatchID(batchID string) error
-	InsertBlockTraces(ctx context.Context, blockTraces []*types.BlockTrace) error
+	InsertBlockTraces(blockTraces []*types.BlockTrace) error
 	SetBatchIDForBlocksInDBTx(dbTx *sqlx.Tx, numbers []uint64, batchID string) error
 }
@@ -132,6 +146,8 @@ type BlockBatchOrm interface {
 	GetCommittedBatches() ([]string, error)
 	GetRollupStatus(id string) (RollupStatus, error)
 	GetRollupStatusByIDList(ids []string) ([]RollupStatus, error)
+	GetCommitTxHash(id string) (sql.NullString, error)
+	GetFinalizeTxHash(id string) (sql.NullString, error)
 	GetLatestFinalizedBatch() (*BlockBatch, error)
 	UpdateRollupStatus(ctx context.Context, id string, status RollupStatus) error
 	UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commit_tx_hash string, status RollupStatus) error
@@ -159,6 +175,7 @@ type L2MessageOrm interface {
 	MessageProofExist(nonce uint64) (bool, error)
 	GetMessageProofByNonce(nonce uint64) (string, error)
 	GetL2MessagesByStatus(status MsgStatus) ([]*L2Message, error)
+	GetL2MessagesByStatusUpToHeight(status MsgStatus, height uint64) ([]*L2Message, error)
 	GetL2ProcessedNonce() (int64, error)
 	SaveL2Messages(ctx context.Context, messages []*L2Message) error
 	UpdateLayer1Hash(ctx context.Context, msgHash string, layer1Hash string) error
@@ -111,6 +111,30 @@ func (m *layer2MessageOrm) GetL2MessagesByStatus(status MsgStatus) ([]*L2Message
 	return msgs, rows.Close()
 }

+// GetL2MessagesByStatusUpToHeight fetches the list of messages with a given msg status and an upper limit on height
+func (m *layer2MessageOrm) GetL2MessagesByStatusUpToHeight(status MsgStatus, height uint64) ([]*L2Message, error) {
+	rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash FROM l2_message WHERE status = $1 AND height <= $2 ORDER BY nonce ASC;`, status, height)
+	if err != nil {
+		return nil, err
+	}
+
+	var msgs []*L2Message
+	for rows.Next() {
+		msg := &L2Message{}
+		if err = rows.StructScan(&msg); err != nil {
+			break
+		}
+		msgs = append(msgs, msg)
+	}
+	if len(msgs) == 0 || errors.Is(err, sql.ErrNoRows) {
+		// log.Warn("no unprocessed layer2 messages in db", "err", err)
+	} else if err != nil {
+		return nil, err
+	}
+
+	return msgs, rows.Close()
+}
+
 // SaveL2Messages batch save a list of layer2 messages
 func (m *layer2MessageOrm) SaveL2Messages(ctx context.Context, messages []*L2Message) error {
 	if len(messages) == 0 {
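A sketch of the intended consumer, hypothetical apart from the ORM call itself: a relayer can now ask only for messages at or below an already-proven block height instead of filtering GetL2MessagesByStatus results by hand. MsgPending is assumed to be an existing MsgStatus value:

```go
// pendingMessagesUpTo fetches relayable L2 messages no higher than provenHeight.
func pendingMessagesUpTo(orm L2MessageOrm, provenHeight uint64) ([]*L2Message, error) {
	return orm.GetL2MessagesByStatusUpToHeight(MsgPending, provenHeight)
}
```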
@@ -152,7 +152,7 @@ func testOrmBlockTraces(t *testing.T) {
 	assert.Equal(t, false, exist)

 	// Insert into db
-	err = ormBlock.InsertBlockTraces(context.Background(), []*types.BlockTrace{blockTrace})
+	err = ormBlock.InsertBlockTraces([]*types.BlockTrace{blockTrace})
 	assert.NoError(t, err)

 	res2, err := ormBlock.GetUnbatchedBlocks(map[string]interface{}{})
1
go.work
@@ -6,4 +6,5 @@ use (
 	./coordinator
 	./database
 	./roller
+	./tests/integration-test
 )
113
go.work.sum
@@ -79,7 +79,6 @@ github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c h1:/ovYnF02fwL0kvspmy9AuyKg1JhdTRUgPw4nUxd9oZM=
github.com/dave/jennifer v1.2.0 h1:S15ZkFMRoJ36mGAQgWL1tnr0NQJh9rZ8qatseX/VbBc=
github.com/dchest/blake512 v1.0.0 h1:oDFEQFIqFSeuA34xLtXZ/rWxCXdSjirjzPhey5EUvmA=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
@@ -183,19 +182,113 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef h1:2jNeR4YUziVtswNP9sEFAI913cVrzH85T+8Q6LpYbT0=
github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE=
github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTjegPdCiOxVOJtRIy7/8RldvMTsyPYH10=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/labstack/echo/v4 v4.2.1 h1:LF5Iq7t/jrtUuSutNuiEWtB5eiHfZ5gSe2pcu5exjQw=
github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd h1:HvFwW+cm9bCbZ/+vuGNq7CRWXql8c0y8nGeYpqmpvmk=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEHAFYJcPTf0wY5q0exFNJZVWa1U=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=
github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY=
github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs=
github.com/paulmach/orb v0.7.1 h1:Zha++Z5OX/l168sqHK3k4z18LDvr+YAO/VjK0ReQ9rU=
github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5 h1:tFwafIEMf0B7NlcxV/zJ6leBIa81D3hgGSgsE5hCkOQ=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221202061207-804e7edc23ba/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344 h1:m+8fKfQwCAy1QjzINvKe/pYtLjo2dl59x2w9YSEJxuY=
github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6 h1:YdYsPAZ2pC6Tow/nPZOPQ96O3hm/ToAkGsPLzedXERk=
github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw=
go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
gonum.org/v1/gonum v0.6.0 h1:DJy6UzXbahnGUf1ujUNkh/NEtK14qMo2nvlBPs4U5yw=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b h1:Qh4dB5D/WpoUUp3lSod7qgoyEHbDGPUWjIbnqdqqe1k=
google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f h1:2wh8dWY8959cBGQvk1RD+/eQBgRYYDaZ+hT0/zsARoA=
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o=
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
Some files were not shown because too many files have changed in this diff.