Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 07:28:08 -05:00)

Compare commits: 38 commits
| SHA1 |
|---|
| ff30622314 |
| 4bca06a4c1 |
| e086c2739f |
| 4dfa81d890 |
| 4549439dab |
| b4d8e5ffd7 |
| 5b49c81692 |
| de735694fb |
| 166fd1c33d |
| c4a67c7444 |
| 168eaf0fc2 |
| 85a1d5967f |
| 46f8b0e36c |
| b7c39f64a7 |
| 7fea3f5c22 |
| 7afddae276 |
| 10ac638a51 |
| 2fafb64e0a |
| ab1cda6568 |
| 905961d0ad |
| cf9f0b921f |
| 0f26e0d67b |
| c774462d1d |
| 7eb6d9696a |
| 401ea68332 |
| c13e8aafc4 |
| 2b2cc62efe |
| 807b7c7f33 |
| 454f032c9f |
| d1c4fa716d |
| de1e40d19c |
| 76b5a6c751 |
| 071a7772dd |
| 9d61149311 |
| 0b607507b7 |
| 462e85e591 |
| f46391c92b |
| 10cececb60 |
Jenkinsfile (vendored): 5 changed lines
@@ -24,6 +24,7 @@ pipeline {
      steps {
        sh 'make dev_docker'
        sh 'make -C bridge mock_abi'
        sh 'make -C common/bytecode all'
      }
    }
    stage('Check Bridge Compilation') {
@@ -58,12 +59,12 @@ pipeline {
    }
    stage('Race test bridge package') {
      steps {
        sh 'go test -v -race -coverprofile=coverage.bridge.txt -covermode=atomic scroll-tech/bridge/...'
        sh "cd ./bridge && ../build/run_tests.sh bridge"
      }
    }
    stage('Race test coordinator package') {
      steps {
        sh 'go test -v -race -coverprofile=coverage.coordinator.txt -covermode=atomic scroll-tech/coordinator/...'
        sh "cd ./coordinator && ../build/run_tests.sh coordinator"
      }
    }
    stage('Race test database package') {
@@ -1,3 +1,12 @@
# Scroll Monorepo

[](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml) [](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/database.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/common.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml)

## Prerequisites

+ go1.18
+ rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ hardhat / foundry

---

For a more comprehensive doc, see [`docs/`](./docs).
@@ -104,7 +104,7 @@ var L1MessageQueueMetaData = &bind.MetaData{

// L2GasPriceOracleMetaData contains all meta data concerning the L2GasPriceOracle contract.
var L2GasPriceOracleMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"L2BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l2BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL2BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_overhead\",\"type\":\"uint256\"}],\"name\":\"setOverhead\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_scalar\",\"type\":\"uint256\"}],\"name\":\"setScalar\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"L2BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldWhitelist\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"UpdateWhitelist\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l2BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL2BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"updateWhitelist\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"whitelist\",\"outputs\":[{\"internalType\":\"contract IWhitelist\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}

// L2ScrollMessengerMetaData contains all meta data concerning the L2ScrollMessenger contract.
@@ -42,7 +42,7 @@ func init() {
	}

	// Register `event-watcher-test` app for integration-test.
	cutils.RegisterSimulation(app, "event-watcher-test")
	cutils.RegisterSimulation(app, cutils.EventWatcherApp)
}

func action(ctx *cli.Context) error {
@@ -45,7 +45,7 @@ func init() {
	}

	// Register `gas-oracle-test` app for integration-test.
	cutils.RegisterSimulation(app, "gas-oracle-test")
	cutils.RegisterSimulation(app, cutils.GasOracleApp)
}

func action(ctx *cli.Context) error {
bridge/cmd/mock_app.go (new file): 106 lines
@@ -0,0 +1,106 @@
package app

import (
	"encoding/json"
	"fmt"
	"os"
	"testing"
	"time"

	"scroll-tech/common/cmd"
	"scroll-tech/common/docker"
	"scroll-tech/common/utils"

	"scroll-tech/bridge/config"
)

// MockApp is the mockApp-test client manager.
type MockApp struct {
	Config *config.Config
	base   *docker.App

	mockApps map[utils.MockAppName]docker.AppAPI

	originFile string
	bridgeFile string

	args []string
}

// NewBridgeApp returns a new bridgeApp manager; name must be one of the supported apps.
func NewBridgeApp(base *docker.App, file string) *MockApp {
	bridgeFile := fmt.Sprintf("/tmp/%d_bridge-config.json", base.Timestamp)
	bridgeApp := &MockApp{
		base:       base,
		mockApps:   make(map[utils.MockAppName]docker.AppAPI),
		originFile: file,
		bridgeFile: bridgeFile,
		args:       []string{"--log.debug", "--config", bridgeFile},
	}
	if err := bridgeApp.MockConfig(true); err != nil {
		panic(err)
	}
	return bridgeApp
}

// RunApp runs a bridge-test child process with the given parameters.
func (b *MockApp) RunApp(t *testing.T, name utils.MockAppName, args ...string) {
	if !(name == utils.EventWatcherApp ||
		name == utils.GasOracleApp ||
		name == utils.MessageRelayerApp ||
		name == utils.RollupRelayerApp) {
		t.Errorf(fmt.Sprintf("Don't support the mock app, name: %s", name))
		return
	}

	if app, ok := b.mockApps[name]; ok {
		t.Logf(fmt.Sprintf("%s already exist, free the current and recreate again", string(name)))
		app.WaitExit()
	}
	appAPI := cmd.NewCmd(string(name), append(b.args, args...)...)
	keyword := fmt.Sprintf("Start %s successfully", string(name)[:len(string(name))-len("-test")])
	appAPI.RunApp(func() bool { return appAPI.WaitResult(t, time.Second*20, keyword) })
	b.mockApps[name] = appAPI
}

// WaitExit waits until all processes exit.
func (b *MockApp) WaitExit() {
	for _, app := range b.mockApps {
		app.WaitExit()
	}
	b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
}

// Free stops and releases the bridge mocked apps.
func (b *MockApp) Free() {
	b.WaitExit()
	_ = os.Remove(b.bridgeFile)
}

// MockConfig creates a new bridge config.
func (b *MockApp) MockConfig(store bool) error {
	base := b.base
	// Load origin bridge config file.
	cfg, err := config.NewConfig(b.originFile)
	if err != nil {
		return err
	}

	cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
	cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
	cfg.DBConfig.DSN = base.DBImg.Endpoint()
	b.Config = cfg

	if !store {
		return nil
	}
	// Store changed bridge config into a temp file.
	data, err := json.Marshal(b.Config)
	if err != nil {
		return err
	}
	return os.WriteFile(b.bridgeFile, data, 0600)
}
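A rough sketch of how a bridge integration test might drive the new MockApp. How the shared `docker.App` (`base`) is obtained is assumed here; in the real tests it comes from the common docker test harness, which is not part of this diff.

```go
package app

import (
	"testing"

	"scroll-tech/common/docker"
	"scroll-tech/common/utils"
)

func TestRunEventWatcher(t *testing.T) {
	// Assumption: `base` is the shared docker.App provided by the test harness.
	var base *docker.App
	bridgeApp := NewBridgeApp(base, "../config.json")
	defer bridgeApp.Free()

	// Start the mocked event-watcher binary and wait for it to exit.
	bridgeApp.RunApp(t, utils.EventWatcherApp)
	bridgeApp.WaitExit()
}
```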
@@ -43,7 +43,7 @@ func init() {
	}

	// Register `message-relayer-test` app for integration-test.
	cutils.RegisterSimulation(app, "message-relayer-test")
	cutils.RegisterSimulation(app, cutils.MessageRelayerApp)
}

func action(ctx *cli.Context) error {
@@ -43,7 +43,7 @@ func init() {
		return cutils.LogSetup(ctx)
	}
	// Register `rollup-relayer-test` app for integration-test.
	cutils.RegisterSimulation(app, "rollup-relayer-test")
	cutils.RegisterSimulation(app, cutils.RollupRelayerApp)
}

func action(ctx *cli.Context) error {
@@ -1,4 +1,4 @@
package config_test
package config

import (
	"encoding/json"
@@ -8,30 +8,48 @@ import (
	"time"

	"github.com/stretchr/testify/assert"

	"scroll-tech/bridge/config"
)

func TestConfig(t *testing.T) {
	cfg, err := config.NewConfig("../config.json")
	assert.True(t, assert.NoError(t, err), "failed to load config")
	t.Run("Success Case", func(t *testing.T) {
		cfg, err := NewConfig("../config.json")
		assert.NoError(t, err, "failed to load config")

		assert.Equal(t, 1, len(cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys))
		assert.Equal(t, 1, len(cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys))
		assert.Equal(t, 1, len(cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys))
		assert.Len(t, cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys, 1)
		assert.Len(t, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys, 1)
		assert.Len(t, cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys, 1)

		data, err := json.Marshal(cfg)
		assert.NoError(t, err)
		data, err := json.Marshal(cfg)
		assert.NoError(t, err)

		tmpJosn := fmt.Sprintf("/tmp/%d_bridge_config.json", time.Now().Nanosecond())
		defer func() { _ = os.Remove(tmpJosn) }()
		tmpJSON := fmt.Sprintf("/tmp/%d_bridge_config.json", time.Now().Nanosecond())
		defer func() { _ = os.Remove(tmpJSON) }()

		assert.NoError(t, os.WriteFile(tmpJosn, data, 0644))
		assert.NoError(t, os.WriteFile(tmpJSON, data, 0644))

		cfg2, err := config.NewConfig(tmpJosn)
		assert.NoError(t, err)
		cfg2, err := NewConfig(tmpJSON)
		assert.NoError(t, err)

		assert.Equal(t, cfg.L1Config, cfg2.L1Config)
		assert.Equal(t, cfg.L2Config, cfg2.L2Config)
		assert.Equal(t, cfg.DBConfig, cfg2.DBConfig)
		assert.Equal(t, cfg.L1Config, cfg2.L1Config)
		assert.Equal(t, cfg.L2Config, cfg2.L2Config)
		assert.Equal(t, cfg.DBConfig, cfg2.DBConfig)
	})

	t.Run("File Not Found", func(t *testing.T) {
		_, err := NewConfig("non_existent_file.json")
		assert.ErrorIs(t, err, os.ErrNotExist)
	})

	t.Run("Invalid JSON Content", func(t *testing.T) {
		// Create a temporary file with invalid JSON content
		tempFile, err := os.CreateTemp("", "invalid_json_config.json")
		assert.NoError(t, err)
		defer os.Remove(tempFile.Name())

		_, err = tempFile.WriteString("{ invalid_json: ")
		assert.NoError(t, err)

		_, err = NewConfig(tempFile.Name())
		assert.Error(t, err)
	})
}
@@ -30,7 +30,7 @@ type SenderConfig struct {
	// The transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
	TxType string `json:"tx_type"`
	// The min balance set for check and set balance for sender's accounts.
	MinBalance *big.Int `json:"min_balance,omitempty"`
	MinBalance *big.Int `json:"min_balance"`
	// The interval (in seconds) to check balance and top up sender's accounts
	CheckBalanceTime uint64 `json:"check_balance_time"`
	// The sender's pending count limit.
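Dropping `omitempty` from the `min_balance` tag changes how an unset balance survives a JSON round trip. A minimal, self-contained sketch of the difference (standard library only; the struct names here are illustrative, not the bridge's types):

```go
package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

// withOmit mirrors the old tag, withoutOmit the new one.
type withOmit struct {
	MinBalance *big.Int `json:"min_balance,omitempty"`
}

type withoutOmit struct {
	MinBalance *big.Int `json:"min_balance"`
}

func main() {
	a, _ := json.Marshal(withOmit{})    // nil pointer is dropped entirely
	b, _ := json.Marshal(withoutOmit{}) // nil pointer is kept as an explicit null
	fmt.Println(string(a)) // {}
	fmt.Println(string(b)) // {"min_balance":null}
}
```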
@@ -3,9 +3,11 @@ module scroll-tech/bridge
go 1.18

require (
	github.com/agiledragon/gomonkey/v2 v2.9.0
	github.com/orcaman/concurrent-map v1.0.0
	github.com/orcaman/concurrent-map/v2 v2.0.1
	github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04
	github.com/smartystreets/goconvey v1.8.0
	github.com/stretchr/testify v1.8.2
	github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
	golang.org/x/sync v0.1.0
@@ -21,22 +23,30 @@ require (
	github.com/go-ole/go-ole v1.2.6 // indirect
	github.com/go-stack/stack v1.8.1 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/gopherjs/gopherjs v1.17.2 // indirect
	github.com/gorilla/websocket v1.5.0 // indirect
	github.com/holiman/uint256 v1.2.0 // indirect
	github.com/iden3/go-iden3-crypto v0.0.14 // indirect
	github.com/jtolds/gls v4.20.0+incompatible // indirect
	github.com/mattn/go-isatty v0.0.18 // indirect
	github.com/mattn/go-runewidth v0.0.14 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
	github.com/rivo/uniseg v0.4.4 // indirect
	github.com/rjeczalik/notify v0.9.1 // indirect
	github.com/rogpeppe/go-internal v1.10.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/scroll-tech/zktrie v0.5.2 // indirect
	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
	github.com/tklauser/go-sysconf v0.3.10 // indirect
	github.com/tklauser/numcpus v0.4.0 // indirect
	github.com/smartystreets/assertions v1.13.1 // indirect
	github.com/tklauser/go-sysconf v0.3.11 // indirect
	github.com/tklauser/numcpus v0.6.0 // indirect
	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
	github.com/yusufpapurcu/wmi v1.2.2 // indirect
	golang.org/x/crypto v0.7.0 // indirect
	golang.org/x/sys v0.6.0 // indirect
	golang.org/x/sys v0.7.0 // indirect
	golang.org/x/text v0.9.0 // indirect
	golang.org/x/time v0.1.0 // indirect
	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
@@ -1,5 +1,7 @@
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0 h1:PDiKKybR596O6FHW+RVSG0Z7uGCBNbmbUXh3uCNQ7Hc=
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
@@ -33,13 +35,15 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
|
||||
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
|
||||
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
|
||||
github.com/iden3/go-iden3-crypto v0.0.14 h1:HQnFchY735JRNQxof6n/Vbyon4owj4+Ku+LNAamWV6c=
|
||||
@@ -47,6 +51,8 @@ github.com/iden3/go-iden3-crypto v0.0.14/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBe
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
@@ -55,8 +61,10 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
|
||||
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
|
||||
@@ -74,9 +82,13 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -86,6 +98,12 @@ github.com/scroll-tech/zktrie v0.5.2 h1:U34jPXMLGOlRHfdvYp5VVgOcC0RuPeJmcS3bWotC
|
||||
github.com/scroll-tech/zktrie v0.5.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU=
|
||||
github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
|
||||
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
|
||||
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
@@ -95,10 +113,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
|
||||
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
|
||||
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
|
||||
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
|
||||
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
|
||||
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
|
||||
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
|
||||
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
|
||||
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
|
||||
@@ -107,20 +125,27 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
|
||||
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
|
||||
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
|
||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
|
||||
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package relayer_test
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -6,13 +6,42 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/utils"
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/bridge/relayer"
|
||||
"scroll-tech/bridge/sender"
|
||||
|
||||
"scroll-tech/database"
|
||||
)
|
||||
|
||||
var (
|
||||
templateL1Message = []*types.L1Message{
|
||||
{
|
||||
QueueIndex: 1,
|
||||
MsgHash: "msg_hash1",
|
||||
Height: 1,
|
||||
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
|
||||
Value: "0x19ece",
|
||||
GasLimit: 11529940,
|
||||
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
|
||||
Calldata: "testdata",
|
||||
Layer1Hash: "hash0",
|
||||
},
|
||||
{
|
||||
QueueIndex: 2,
|
||||
MsgHash: "msg_hash2",
|
||||
Height: 2,
|
||||
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
|
||||
Value: "0x19ece",
|
||||
GasLimit: 11529940,
|
||||
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
|
||||
Calldata: "testdata",
|
||||
Layer1Hash: "hash1",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// testCreateNewL1Relayer tests creating a new L1 relayer instance and stopping it.
|
||||
func testCreateNewL1Relayer(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
@@ -21,7 +50,106 @@ func testCreateNewL1Relayer(t *testing.T) {
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
relayer, err := relayer.NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
}
|
||||
|
||||
func testL1RelayerProcessSaveEvents(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
l1Cfg := cfg.L1Config
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, db.SaveL1Messages(context.Background(), templateL1Message))
|
||||
relayer.ProcessSavedEvents()
|
||||
msg1, err := db.GetL1MessageByQueueIndex(1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, msg1.Status, types.MsgSubmitted)
|
||||
msg2, err := db.GetL1MessageByQueueIndex(2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, msg2.Status, types.MsgSubmitted)
|
||||
}
|
||||
|
||||
func testL1RelayerMsgConfirm(t *testing.T) {
|
||||
// Set up the database and defer closing it.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
// Insert test data.
|
||||
assert.NoError(t, db.SaveL1Messages(context.Background(),
|
||||
[]*types.L1Message{
|
||||
{MsgHash: "msg-1", QueueIndex: 0},
|
||||
{MsgHash: "msg-2", QueueIndex: 1},
|
||||
}))
|
||||
|
||||
// Create and set up the Layer1 Relayer.
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: "msg-1",
|
||||
IsSuccessful: true,
|
||||
})
|
||||
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: "msg-2",
|
||||
IsSuccessful: false,
|
||||
})
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
utils.TryTimes(5, func() bool {
|
||||
msg1, err1 := db.GetL1MessageByMsgHash("msg-1")
|
||||
msg2, err2 := db.GetL1MessageByMsgHash("msg-2")
|
||||
return err1 == nil && msg1.Status == types.MsgConfirmed &&
|
||||
err2 == nil && msg2.Status == types.MsgRelayFailed
|
||||
})
|
||||
}
|
||||
|
||||
func testL1RelayerGasOracleConfirm(t *testing.T) {
|
||||
// Set up the database and defer closing it.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
// Insert test data.
|
||||
assert.NoError(t, db.InsertL1Blocks(context.Background(),
|
||||
[]*types.L1BlockInfo{
|
||||
{Hash: "gas-oracle-1", Number: 0},
|
||||
{Hash: "gas-oracle-2", Number: 1},
|
||||
}))
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: "gas-oracle-1",
|
||||
IsSuccessful: true,
|
||||
})
|
||||
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: "gas-oracle-2",
|
||||
IsSuccessful: false,
|
||||
})
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
utils.TryTimes(5, func() bool {
|
||||
msg1, err1 := db.GetL1BlockInfos(map[string]interface{}{"hash": "gas-oracle-1"})
|
||||
msg2, err2 := db.GetL1BlockInfos(map[string]interface{}{"hash": "gas-oracle-2"})
|
||||
return err1 == nil && len(msg1) == 1 && msg1[0].GasOracleStatus == types.GasOracleImported &&
|
||||
err2 == nil && len(msg2) == 1 && msg2[0].GasOracleStatus == types.GasOracleFailed
|
||||
})
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/database"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
@@ -444,12 +445,12 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
}
|
||||
}()
|
||||
|
||||
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByHash(hash)
|
||||
proofBuffer, icBuffer, err := r.db.GetVerifiedProofAndInstanceCommitmentsByHash(hash)
|
||||
if err != nil {
|
||||
log.Warn("fetch get proof by hash failed", "hash", hash, "err", err)
|
||||
return
|
||||
}
|
||||
if proofBuffer == nil || instanceBuffer == nil {
|
||||
if proofBuffer == nil || icBuffer == nil {
|
||||
log.Warn("proof or instance not ready", "hash", hash)
|
||||
return
|
||||
}
|
||||
@@ -457,13 +458,13 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
log.Error("proof buffer has wrong length", "hash", hash, "length", len(proofBuffer))
|
||||
return
|
||||
}
|
||||
if len(instanceBuffer)%32 != 0 {
|
||||
log.Warn("instance buffer has wrong length", "hash", hash, "length", len(instanceBuffer))
|
||||
if len(icBuffer)%32 != 0 {
|
||||
log.Warn("instance buffer has wrong length", "hash", hash, "length", len(icBuffer))
|
||||
return
|
||||
}
|
||||
|
||||
proof := utils.BufferToUint256Le(proofBuffer)
|
||||
instance := utils.BufferToUint256Le(instanceBuffer)
|
||||
instance := utils.BufferToUint256Le(icBuffer)
|
||||
data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, instance)
|
||||
if err != nil {
|
||||
log.Error("Pack finalizeBatchWithProof failed", "err", err)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package relayer_test
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -13,8 +13,9 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/bridge/relayer"
|
||||
"scroll-tech/bridge/sender"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/migrate"
|
||||
@@ -41,7 +42,7 @@ func testCreateNewRelayer(t *testing.T) {
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
}
|
||||
@@ -54,7 +55,7 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
|
||||
defer db.Close()
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.SaveL2Messages(context.Background(), templateL2Message)
|
||||
@@ -80,8 +81,8 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
|
||||
|
||||
parentBatch1 := &types.BlockBatch{
|
||||
Index: 0,
|
||||
Hash: common.Hash{}.String(),
|
||||
StateRoot: common.Hash{}.String(),
|
||||
Hash: common.Hash{}.Hex(),
|
||||
StateRoot: common.Hash{}.Hex(),
|
||||
}
|
||||
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
|
||||
dbTx, err := db.Beginx()
|
||||
@@ -109,13 +110,13 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
|
||||
defer db.Close()
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
parentBatch1 := &types.BlockBatch{
|
||||
Index: 0,
|
||||
Hash: common.Hash{}.String(),
|
||||
StateRoot: common.Hash{}.String(),
|
||||
Hash: common.Hash{}.Hex(),
|
||||
StateRoot: common.Hash{}.Hex(),
|
||||
}
|
||||
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
|
||||
dbTx, err := db.Beginx()
|
||||
@@ -150,7 +151,7 @@ func testL2RelayerSkipBatches(t *testing.T) {
|
||||
defer db.Close()
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string {
|
||||
@@ -207,6 +208,166 @@ func testL2RelayerSkipBatches(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testL2RelayerMsgConfirm(t *testing.T) {
|
||||
// Set up the database and defer closing it.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
// Insert test data.
|
||||
assert.NoError(t, db.SaveL2Messages(context.Background(), []*types.L2Message{
|
||||
{MsgHash: "msg-1", Nonce: 0}, {MsgHash: "msg-2", Nonce: 1},
|
||||
}))
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
l2Relayer.processingMessage.Store("msg-1", "msg-1")
|
||||
l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: "msg-1",
|
||||
IsSuccessful: true,
|
||||
})
|
||||
l2Relayer.processingMessage.Store("msg-2", "msg-2")
|
||||
l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: "msg-2",
|
||||
IsSuccessful: false,
|
||||
})
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
utils.TryTimes(5, func() bool {
|
||||
msg1, err1 := db.GetL2MessageByMsgHash("msg-1")
|
||||
msg2, err2 := db.GetL2MessageByMsgHash("msg-2")
|
||||
return err1 == nil && msg1.Status == types.MsgConfirmed &&
|
||||
err2 == nil && msg2.Status == types.MsgRelayFailed
|
||||
})
|
||||
}
|
||||
|
||||
func testL2RelayerRollupConfirm(t *testing.T) {
|
||||
// Set up the database and defer closing it.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
// Insert test data.
|
||||
batches := make([]*types.BatchData, 6)
|
||||
for i := 0; i < 6; i++ {
|
||||
batches[i] = genBatchData(t, uint64(i))
|
||||
}
|
||||
|
||||
dbTx, err := db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
for _, batch := range batches {
|
||||
assert.NoError(t, db.NewBatchInDBTx(dbTx, batch))
|
||||
}
|
||||
assert.NoError(t, dbTx.Commit())
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
processingKeys := []string{"committed-1", "committed-2", "finalized-1", "finalized-2"}
|
||||
isSuccessful := []bool{true, false, true, false}
|
||||
|
||||
for i, key := range processingKeys[:2] {
|
||||
batchHashes := []string{batches[i*2].Hash().Hex(), batches[i*2+1].Hash().Hex()}
|
||||
l2Relayer.processingBatchesCommitment.Store(key, batchHashes)
|
||||
l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: key,
|
||||
IsSuccessful: isSuccessful[i],
|
||||
})
|
||||
}
|
||||
|
||||
for i, key := range processingKeys[2:] {
|
||||
batchHash := batches[i+4].Hash().Hex()
|
||||
l2Relayer.processingFinalization.Store(key, batchHash)
|
||||
l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: key,
|
||||
IsSuccessful: isSuccessful[i+2],
|
||||
TxHash: common.HexToHash("0x56789abcdef1234"),
|
||||
})
|
||||
}
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
utils.TryTimes(5, func() bool {
|
||||
expectedStatuses := []types.RollupStatus{
|
||||
types.RollupCommitted,
|
||||
types.RollupCommitted,
|
||||
types.RollupCommitFailed,
|
||||
types.RollupCommitFailed,
|
||||
types.RollupFinalized,
|
||||
types.RollupFinalizeFailed,
|
||||
}
|
||||
|
||||
for i, batch := range batches[:6] {
|
||||
batchInDB, err := db.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()})
|
||||
if err != nil || len(batchInDB) != 1 || batchInDB[0].RollupStatus != expectedStatuses[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func testL2RelayerGasOracleConfirm(t *testing.T) {
|
||||
// Set up the database and defer closing it.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
// Insert test data.
|
||||
batches := make([]*types.BatchData, 2)
|
||||
for i := 0; i < 2; i++ {
|
||||
batches[i] = genBatchData(t, uint64(i))
|
||||
}
|
||||
|
||||
dbTx, err := db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
for _, batch := range batches {
|
||||
assert.NoError(t, db.NewBatchInDBTx(dbTx, batch))
|
||||
}
|
||||
assert.NoError(t, dbTx.Commit())
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
isSuccessful := []bool{true, false}
|
||||
for i, batch := range batches {
|
||||
l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: batch.Hash().Hex(),
|
||||
IsSuccessful: isSuccessful[i],
|
||||
})
|
||||
}
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
utils.TryTimes(5, func() bool {
|
||||
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleFailed}
|
||||
for i, batch := range batches {
|
||||
gasOracle, err := db.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()})
|
||||
if err != nil || len(gasOracle) != 1 || gasOracle[0].OracleStatus != expectedStatuses[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func genBatchData(t *testing.T, index uint64) *types.BatchData {
|
||||
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package relayer_test
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
@@ -40,9 +40,9 @@ func setupEnv(t *testing.T) (err error) {
|
||||
|
||||
base.RunImages(t)
|
||||
|
||||
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1GethEndpoint()
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
|
||||
cfg.DBConfig.DSN = base.DBEndpoint()
|
||||
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
|
||||
cfg.DBConfig = base.DBConfig
|
||||
|
||||
// Create l2geth client.
|
||||
l2Cli, err = base.L2Client()
|
||||
@@ -99,10 +99,15 @@ func TestFunctions(t *testing.T) {
|
||||
}
|
||||
// Run l1 relayer test cases.
|
||||
t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer)
|
||||
t.Run("TestL1RelayerProcessSaveEvents", testL1RelayerProcessSaveEvents)
|
||||
t.Run("TestL1RelayerMsgConfirm", testL1RelayerMsgConfirm)
|
||||
t.Run("TestL1RelayerGasOracleConfirm", testL1RelayerGasOracleConfirm)
|
||||
// Run l2 relayer test cases.
|
||||
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
|
||||
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
|
||||
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
|
||||
t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)
|
||||
|
||||
t.Run("TestL2RelayerMsgConfirm", testL2RelayerMsgConfirm)
|
||||
t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm)
|
||||
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
|
||||
}
|
||||
|
||||
@@ -23,10 +23,6 @@ type accountPool struct {

// newAccounts creates an accountPool instance.
func newAccountPool(ctx context.Context, minBalance *big.Int, client *ethclient.Client, privs []*ecdsa.PrivateKey) (*accountPool, error) {
	if minBalance == nil {
		minBalance = big.NewInt(0)
		minBalance.SetString("100000000000000000000", 10)
	}
	accs := &accountPool{
		client:     client,
		minBalance: minBalance,

@@ -169,6 +169,12 @@ func (s *Sender) ConfirmChan() <-chan *Confirmation {
	return s.confirmCh
}

// SendConfirmation sends a confirmation to the confirmation channel.
// Note: This function is only used in tests.
func (s *Sender) SendConfirmation(cfm *Confirmation) {
	s.confirmCh <- cfm
}

// NumberOfAccounts return the count of accounts.
func (s *Sender) NumberOfAccounts() int {
	return len(s.auths.accounts)
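SendConfirmation gives tests a way to push a Confirmation into the same channel that ConfirmChan exposes, which is how the relayer confirmation tests above simulate transaction results. The stand-alone sketch below mirrors that pattern with local types, since constructing the real Sender needs a full config and private keys:

```go
package main

import (
	"fmt"
	"time"
)

// Confirmation mirrors the shape used by the bridge sender: an ID plus a success flag.
type Confirmation struct {
	ID           string
	IsSuccessful bool
}

type fakeSender struct{ confirmCh chan *Confirmation }

func (s *fakeSender) ConfirmChan() <-chan *Confirmation { return s.confirmCh }
func (s *fakeSender) SendConfirmation(c *Confirmation)  { s.confirmCh <- c }

func main() {
	s := &fakeSender{confirmCh: make(chan *Confirmation, 1)}
	s.SendConfirmation(&Confirmation{ID: "msg-1", IsSuccessful: true})

	select {
	case got := <-s.ConfirmChan():
		fmt.Println(got.ID, got.IsSuccessful)
	case <-time.After(time.Second):
		fmt.Println("timed out waiting for confirmation")
	}
}
```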
|
||||
@@ -1,4 +1,4 @@
|
||||
package sender_test
|
||||
package sender
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
cmap "github.com/orcaman/concurrent-map"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
@@ -21,7 +22,6 @@ import (
|
||||
"scroll-tech/common/docker"
|
||||
|
||||
"scroll-tech/bridge/config"
|
||||
"scroll-tech/bridge/sender"
|
||||
)
|
||||
|
||||
const TXBatch = 50
|
||||
@@ -30,6 +30,7 @@ var (
|
||||
privateKeys []*ecdsa.PrivateKey
|
||||
cfg *config.Config
|
||||
base *docker.App
|
||||
txTypes = []string{"LegacyTx", "AccessListTx", "DynamicFeeTx"}
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
@@ -50,125 +51,166 @@ func setupEnv(t *testing.T) {
|
||||
// Load default private key.
|
||||
privateKeys = []*ecdsa.PrivateKey{priv}
|
||||
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.CheckBalanceTime = 1
|
||||
}
|
||||
|
||||
func TestSender(t *testing.T) {
|
||||
// Setup
|
||||
setupEnv(t)
|
||||
|
||||
t.Run("test pending limit", func(t *testing.T) { testPendLimit(t) })
|
||||
t.Run("test new sender", testNewSender)
|
||||
t.Run("test pending limit", testPendLimit)
|
||||
|
||||
t.Run("test min gas limit", func(t *testing.T) { testMinGasLimit(t) })
|
||||
t.Run("test min gas limit", testMinGasLimit)
|
||||
t.Run("test resubmit transaction", func(t *testing.T) { testResubmitTransaction(t) })
|
||||
|
||||
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
|
||||
t.Run("test 3 account sender", func(t *testing.T) { testBatchSender(t, 3) })
|
||||
t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
|
||||
}
|
||||
|
||||
func testPendLimit(t *testing.T) {
|
||||
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
|
||||
senderCfg.Confirmations = rpc.LatestBlockNumber
|
||||
senderCfg.PendingLimit = 2
|
||||
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
|
||||
assert.NoError(t, err)
|
||||
defer newSender.Stop()
|
||||
|
||||
for i := 0; i < newSender.PendingLimit(); i++ {
|
||||
_, err = newSender.SendTransaction(strconv.Itoa(i), &common.Address{}, big.NewInt(1), nil, 0)
|
||||
func testNewSender(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
// exit by Stop()
|
||||
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy1.TxType = txType
|
||||
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKeys)
|
||||
assert.NoError(t, err)
|
||||
newSender1.Stop()
|
||||
|
||||
// exit by ctx.Done()
|
||||
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy2.TxType = txType
|
||||
subCtx, cancel := context.WithCancel(context.Background())
|
||||
_, err = NewSender(subCtx, &cfgCopy2, privateKeys)
|
||||
assert.NoError(t, err)
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
|
||||
func testPendLimit(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
cfgCopy.Confirmations = rpc.LatestBlockNumber
|
||||
cfgCopy.PendingLimit = 2
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 0; i < 2*newSender.PendingLimit(); i++ {
|
||||
_, err = newSender.SendTransaction(strconv.Itoa(i), &common.Address{}, big.NewInt(1), nil, 0)
|
||||
assert.True(t, err == nil || (err != nil && err.Error() == "sender's pending pool is full"))
|
||||
}
|
||||
assert.True(t, newSender.PendingCount() <= newSender.PendingLimit())
|
||||
newSender.Stop()
|
||||
}
|
||||
assert.True(t, newSender.PendingCount() <= newSender.PendingLimit())
|
||||
}
|
||||
|
||||
func testMinGasLimit(t *testing.T) {
|
||||
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
|
||||
senderCfg.Confirmations = rpc.LatestBlockNumber
|
||||
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
|
||||
assert.NoError(t, err)
|
||||
defer newSender.Stop()
|
||||
for _, txType := range txTypes {
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
cfgCopy.Confirmations = rpc.LatestBlockNumber
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
|
||||
assert.NoError(t, err)
|
||||
|
||||
client, err := ethclient.Dial(senderCfg.Endpoint)
|
||||
assert.NoError(t, err)
|
||||
client, err := ethclient.Dial(cfgCopy.Endpoint)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// MinGasLimit = 0
|
||||
txHash0, err := newSender.SendTransaction("0", &common.Address{}, big.NewInt(1), nil, 0)
|
||||
assert.NoError(t, err)
|
||||
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
|
||||
assert.NoError(t, err)
|
||||
assert.Greater(t, tx0.Gas(), uint64(0))
|
||||
// MinGasLimit = 0
|
||||
txHash0, err := newSender.SendTransaction("0", &common.Address{}, big.NewInt(1), nil, 0)
|
||||
assert.NoError(t, err)
|
||||
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
|
||||
assert.NoError(t, err)
|
||||
assert.Greater(t, tx0.Gas(), uint64(0))
|
||||
|
||||
// MinGasLimit = 100000
|
||||
txHash1, err := newSender.SendTransaction("1", &common.Address{}, big.NewInt(1), nil, 100000)
|
||||
assert.NoError(t, err)
|
||||
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tx1.Gas(), uint64(150000))
|
||||
// MinGasLimit = 100000
|
||||
txHash1, err := newSender.SendTransaction("1", &common.Address{}, big.NewInt(1), nil, 100000)
|
||||
assert.NoError(t, err)
|
||||
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tx1.Gas(), uint64(150000))
|
||||
|
||||
newSender.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func testResubmitTransaction(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
|
||||
assert.NoError(t, err)
|
||||
auth := s.auths.getAccount()
|
||||
tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
|
||||
feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
|
||||
assert.NoError(t, err)
|
||||
_, err = s.resubmitTransaction(feeData, auth, tx)
|
||||
assert.NoError(t, err)
|
||||
s.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func testBatchSender(t *testing.T, batchSize int) {
|
||||
for len(privateKeys) < batchSize {
|
||||
priv, err := crypto.GenerateKey()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
for _, txType := range txTypes {
|
||||
for len(privateKeys) < batchSize {
|
||||
priv, err := crypto.GenerateKey()
|
||||
assert.NoError(t, err)
|
||||
privateKeys = append(privateKeys, priv)
|
||||
}
|
||||
privateKeys = append(privateKeys, priv)
|
||||
}
|
||||
|
||||
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
|
||||
senderCfg.Confirmations = rpc.LatestBlockNumber
|
||||
senderCfg.PendingLimit = batchSize * TXBatch
|
||||
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer newSender.Stop()
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.Confirmations = rpc.LatestBlockNumber
|
||||
cfgCopy.PendingLimit = batchSize * TXBatch
|
||||
cfgCopy.TxType = txType
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// send transactions
|
||||
var (
|
||||
eg errgroup.Group
|
||||
idCache = cmap.New()
|
||||
confirmCh = newSender.ConfirmChan()
|
||||
)
|
||||
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
|
||||
index := idx
|
||||
eg.Go(func() error {
|
||||
for i := 0; i < TXBatch; i++ {
|
||||
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
|
||||
id := strconv.Itoa(i + index*1000)
|
||||
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
|
||||
if errors.Is(err, sender.ErrNoAvailableAccount) || errors.Is(err, sender.ErrFullPending) {
|
||||
<-time.After(time.Second)
|
||||
continue
|
||||
// send transactions
|
||||
var (
|
||||
eg errgroup.Group
|
||||
idCache = cmap.New()
|
||||
confirmCh = newSender.ConfirmChan()
|
||||
)
|
||||
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
|
||||
index := idx
|
||||
eg.Go(func() error {
|
||||
for i := 0; i < TXBatch; i++ {
|
||||
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
|
||||
id := strconv.Itoa(i + index*1000)
|
||||
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
|
||||
if errors.Is(err, ErrNoAvailableAccount) || errors.Is(err, ErrFullPending) {
|
||||
<-time.After(time.Second)
|
||||
continue
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
idCache.Set(id, struct{}{})
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
idCache.Set(id, struct{}{})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := eg.Wait(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
|
||||
|
||||
// avoid the testcase panicking after the 10-minute timeout
|
||||
after := time.After(80 * time.Second)
|
||||
for {
|
||||
select {
|
||||
case cmsg := <-confirmCh:
|
||||
assert.Equal(t, true, cmsg.IsSuccessful)
|
||||
_, exist := idCache.Pop(cmsg.ID)
|
||||
assert.Equal(t, true, exist)
|
||||
// Receive all confirmed txs.
|
||||
if idCache.Count() == 0 {
|
||||
return
|
||||
}
|
||||
case <-after:
|
||||
t.Error("newSender test failed because of timeout")
|
||||
return
|
||||
return nil
|
||||
})
|
||||
}
|
||||
assert.NoError(t, eg.Wait())
|
||||
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
|
||||
|
||||
// avoid the testcase panicking after the 10-minute timeout
|
||||
after := time.After(80 * time.Second)
|
||||
isDone := false
|
||||
for !isDone {
|
||||
select {
|
||||
case cmsg := <-confirmCh:
|
||||
assert.Equal(t, true, cmsg.IsSuccessful)
|
||||
_, exist := idCache.Pop(cmsg.ID)
|
||||
assert.Equal(t, true, exist)
|
||||
// Receive all confirmed txs.
|
||||
if idCache.Count() == 0 {
|
||||
isDone = true
|
||||
}
|
||||
case <-after:
|
||||
t.Error("newSender test failed because of timeout")
|
||||
isDone = true
|
||||
}
|
||||
}
|
||||
newSender.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -82,15 +82,15 @@ func setupEnv(t *testing.T) {
|
||||
base.RunImages(t)
|
||||
|
||||
// Create l1geth container.
|
||||
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1GethEndpoint()
|
||||
cfg.L1Config.Endpoint = base.L1GethEndpoint()
|
||||
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
|
||||
cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
|
||||
|
||||
// Create l2geth container.
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
|
||||
cfg.L2Config.Endpoint = base.L2GethEndpoint()
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
|
||||
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
|
||||
|
||||
// Create db container.
|
||||
cfg.DBConfig.DSN = base.DBEndpoint()
|
||||
cfg.DBConfig = base.DBConfig
|
||||
|
||||
// Create l1geth and l2geth client.
|
||||
l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)
|
||||
|
||||
@@ -1,99 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/core/vm"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
)
|
||||
|
||||
//nolint:unused
|
||||
func blockTraceIsValid(trace *types.BlockTrace) bool {
|
||||
if trace == nil {
|
||||
log.Warn("block trace is empty")
|
||||
return false
|
||||
}
|
||||
flag := true
|
||||
for _, tx := range trace.ExecutionResults {
|
||||
flag = structLogResIsValid(tx.StructLogs) && flag
|
||||
}
|
||||
return flag
|
||||
}
|
||||
|
||||
//nolint:unused
|
||||
func structLogResIsValid(txLogs []*types.StructLogRes) bool {
|
||||
res := true
|
||||
for i := 0; i < len(txLogs); i++ {
|
||||
txLog := txLogs[i]
|
||||
flag := true
|
||||
switch vm.StringToOp(txLog.Op) {
|
||||
case vm.CALL, vm.CALLCODE:
|
||||
flag = codeIsValid(txLog, 2) && flag
|
||||
flag = stateIsValid(txLog, 2) && flag
|
||||
case vm.DELEGATECALL, vm.STATICCALL:
|
||||
flag = codeIsValid(txLog, 2) && flag
|
||||
case vm.CREATE, vm.CREATE2:
|
||||
flag = stateIsValid(txLog, 1) && flag
|
||||
case vm.SLOAD, vm.SSTORE, vm.SELFBALANCE:
|
||||
flag = stateIsValid(txLog, 1) && flag
|
||||
case vm.SELFDESTRUCT:
|
||||
flag = stateIsValid(txLog, 2) && flag
|
||||
case vm.EXTCODEHASH, vm.BALANCE:
|
||||
flag = stateIsValid(txLog, 1) && flag
|
||||
}
|
||||
res = res && flag
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
//nolint:unused
|
||||
func codeIsValid(txLog *types.StructLogRes, n int) bool {
|
||||
extraData := txLog.ExtraData
|
||||
if extraData == nil {
|
||||
log.Warn("extraData is empty", "pc", txLog.Pc, "opcode", txLog.Op)
|
||||
return false
|
||||
} else if len(extraData.CodeList) < n {
|
||||
log.Warn("code list is too short", "opcode", txLog.Op, "expect length", n, "actual length", len(extraData.CodeList))
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
//nolint:unused
|
||||
func stateIsValid(txLog *types.StructLogRes, n int) bool {
|
||||
extraData := txLog.ExtraData
|
||||
if extraData == nil {
|
||||
log.Warn("extraData is empty", "pc", txLog.Pc, "opcode", txLog.Op)
|
||||
return false
|
||||
} else if len(extraData.StateList) < n {
|
||||
log.Warn("stateList list is too short", "opcode", txLog.Op, "expect length", n, "actual length", len(extraData.StateList))
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// TraceHasUnsupportedOpcodes checks whether the trace contains any unsupported opcodes
|
||||
func TraceHasUnsupportedOpcodes(opcodes map[string]struct{}, trace *types.BlockTrace) bool {
|
||||
if trace == nil {
|
||||
return false
|
||||
}
|
||||
eg := errgroup.Group{}
|
||||
for _, res := range trace.ExecutionResults {
|
||||
res := res
|
||||
eg.Go(func() error {
|
||||
for _, lg := range res.StructLogs {
|
||||
if _, ok := opcodes[lg.Op]; ok {
|
||||
return fmt.Errorf("unsupported opcde: %s", lg.Op)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
err := eg.Wait()
|
||||
return err != nil
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package utils_test
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -11,8 +11,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/common/math"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/bridge/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -65,15 +63,14 @@ func TestMarshalJSON(t *testing.T) {
|
||||
for i, test := range tests {
|
||||
var num rpc.BlockNumber
|
||||
want, err := json.Marshal(test.expected)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
if !test.mustFail {
|
||||
err = json.Unmarshal([]byte(test.input), &num)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
got, err := json.Marshal(&num)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
if string(want) != string(got) {
|
||||
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -88,20 +85,50 @@ func (e MockEthClient) BlockNumber(ctx context.Context) (uint64, error) {
|
||||
}
|
||||
|
||||
func (e MockEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
|
||||
return &types.Header{Number: new(big.Int).SetUint64(e.val)}, nil
|
||||
var blockNumber int64
|
||||
switch number.Int64() {
|
||||
case int64(rpc.LatestBlockNumber):
|
||||
blockNumber = int64(e.val)
|
||||
case int64(rpc.SafeBlockNumber):
|
||||
blockNumber = int64(e.val) - 6
|
||||
case int64(rpc.FinalizedBlockNumber):
|
||||
blockNumber = int64(e.val) - 12
|
||||
default:
|
||||
blockNumber = number.Int64()
|
||||
}
|
||||
if blockNumber < 0 {
|
||||
blockNumber = 0
|
||||
}
|
||||
|
||||
return &types.Header{Number: new(big.Int).SetInt64(blockNumber)}, nil
|
||||
}
|
||||
|
||||
func TestGetLatestConfirmedBlockNumber(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := MockEthClient{}
|
||||
|
||||
client.val = 5
|
||||
confirmed, err := utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, uint64(0), confirmed)
|
||||
testCases := []struct {
|
||||
blockNumber uint64
|
||||
confirmation rpc.BlockNumber
|
||||
expectedResult uint64
|
||||
}{
|
||||
{5, 6, 0},
|
||||
{7, 6, 1},
|
||||
{10, 2, 8},
|
||||
{0, 1, 0},
|
||||
{3, 0, 3},
|
||||
{15, 15, 0},
|
||||
{16, rpc.SafeBlockNumber, 10},
|
||||
{22, rpc.FinalizedBlockNumber, 10},
|
||||
{10, rpc.LatestBlockNumber, 10},
|
||||
{5, rpc.SafeBlockNumber, 0},
|
||||
{11, rpc.FinalizedBlockNumber, 0},
|
||||
}
|
||||
|
||||
client.val = 7
|
||||
confirmed, err = utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, uint64(1), confirmed)
|
||||
for _, testCase := range testCases {
|
||||
client.val = testCase.blockNumber
|
||||
confirmed, err := GetLatestConfirmedBlockNumber(ctx, &client, testCase.confirmation)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testCase.expectedResult, confirmed)
|
||||
}
|
||||
}
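
The mock client above serves latest-6 for the safe tag and latest-12 for the finalized tag, so each table row pairs a latest height and a confirmation setting with the confirmed height the test expects. The helper below is only a sketch of the arithmetic those rows encode under the mock's offsets, not the actual GetLatestConfirmedBlockNumber implementation.

// expectedConfirmed mirrors the test table above: numeric confirmations are
// subtracted from the latest height, the safe/finalized tags follow the mock's
// offsets (-6 / -12), and the result is clamped at zero.
// Sketch only; illustrates the expected values, not the production code.
func expectedConfirmed(latest uint64, confirmation rpc.BlockNumber) uint64 {
	var confirmed int64
	switch confirmation {
	case rpc.LatestBlockNumber:
		confirmed = int64(latest)
	case rpc.SafeBlockNumber:
		confirmed = int64(latest) - 6
	case rpc.FinalizedBlockNumber:
		confirmed = int64(latest) - 12
	default:
		confirmed = int64(latest) - int64(confirmation)
	}
	if confirmed < 0 {
		return 0
	}
	return uint64(confirmed)
}

For instance, expectedConfirmed(16, rpc.SafeBlockNumber) gives 10, matching the {16, rpc.SafeBlockNumber, 10} row.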
|
||||
|
||||
@@ -29,19 +29,6 @@ func ComputeMessageHash(
|
||||
return common.BytesToHash(crypto.Keccak256(data))
|
||||
}
|
||||
|
||||
// BufferToUint256Be converts a byte slice to a uint256 array, assuming big-endian encoding
|
||||
func BufferToUint256Be(buffer []byte) []*big.Int {
|
||||
buffer256 := make([]*big.Int, len(buffer)/32)
|
||||
for i := 0; i < len(buffer)/32; i++ {
|
||||
buffer256[i] = big.NewInt(0)
|
||||
for j := 0; j < 32; j++ {
|
||||
buffer256[i] = buffer256[i].Lsh(buffer256[i], 8)
|
||||
buffer256[i] = buffer256[i].Add(buffer256[i], big.NewInt(int64(buffer[i*32+j])))
|
||||
}
|
||||
}
|
||||
return buffer256
|
||||
}
|
||||
|
||||
// BufferToUint256Le converts a byte slice to a uint256 array, assuming little-endian encoding
|
||||
func BufferToUint256Le(buffer []byte) []*big.Int {
|
||||
buffer256 := make([]*big.Int, len(buffer)/32)
|
||||
@@ -76,23 +63,3 @@ func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
}
|
||||
return abi.ParseTopics(out, indexed, log.Topics[1:])
|
||||
}
|
||||
|
||||
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
// @todo: add unit test.
func UnpackLogIntoMap(c *abi.ABI, out map[string]interface{}, event string, log types.Log) error {
	if log.Topics[0] != c.Events[event].ID {
		return fmt.Errorf("event signature mismatch")
	}
	if len(log.Data) > 0 {
		if err := c.UnpackIntoMap(out, event, log.Data); err != nil {
			return err
		}
	}
	var indexed abi.Arguments
	for _, arg := range c.Events[event].Inputs {
		if arg.Indexed {
			indexed = append(indexed, arg)
		}
	}
	return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:])
}
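
The @todo above asks for a unit test. Below is a minimal sketch of what such a test could look like; the Transfer event ABI, addresses, and value are illustrative assumptions, not part of this change, and the comment lists the imports the sketch would need.

// Sketch only. Assumes imports: "math/big", "strings", "testing",
// "github.com/scroll-tech/go-ethereum/accounts/abi",
// "github.com/scroll-tech/go-ethereum/common",
// "github.com/scroll-tech/go-ethereum/core/types",
// "github.com/stretchr/testify/assert".
func TestUnpackLogIntoMap(t *testing.T) {
	// Hypothetical ERC20-style Transfer event, used only for this sketch.
	const transferABI = `[{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"}]`
	parsedABI, err := abi.JSON(strings.NewReader(transferABI))
	assert.NoError(t, err)

	from := common.HexToAddress("0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63")
	to := common.HexToAddress("0x4592D8f8D7B001e72Cb26A73e4Fa1806a51aC79d")
	value := big.NewInt(1000)

	// Non-indexed arguments are ABI-encoded into the log data,
	// indexed arguments become topics after the event signature.
	data, err := parsedABI.Events["Transfer"].Inputs.NonIndexed().Pack(value)
	assert.NoError(t, err)
	vLog := types.Log{
		Topics: []common.Hash{
			parsedABI.Events["Transfer"].ID,
			common.BytesToHash(from.Bytes()),
			common.BytesToHash(to.Bytes()),
		},
		Data: data,
	}

	out := make(map[string]interface{})
	assert.NoError(t, UnpackLogIntoMap(&parsedABI, out, "Transfer", vLog))
	assert.Equal(t, from, out["from"])
	assert.Equal(t, to, out["to"])
	assert.Equal(t, value, out["value"])
}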
|
||||
|
||||
@@ -1,34 +1,32 @@
|
||||
package utils_test
|
||||
package utils
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"scroll-tech/bridge/utils"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestKeccak2(t *testing.T) {
|
||||
hash := utils.Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"))
|
||||
hash := Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"))
|
||||
if hash != common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") {
|
||||
t.Fatalf("Invalid keccak, want %s, got %s", "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", hash.Hex())
|
||||
}
|
||||
|
||||
hash = utils.Keccak2(common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"), common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"))
|
||||
hash = Keccak2(common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"), common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"))
|
||||
if hash != common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30") {
|
||||
t.Fatalf("Invalid keccak, want %s, got %s", "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", hash.Hex())
|
||||
}
|
||||
|
||||
hash = utils.Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
hash = Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
if hash != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
|
||||
t.Fatalf("Invalid keccak, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", hash.Hex())
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeMessageHash(t *testing.T) {
|
||||
hash := utils.ComputeMessageHash(
|
||||
hash := ComputeMessageHash(
|
||||
common.HexToAddress("0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63"),
|
||||
common.HexToAddress("0x4592D8f8D7B001e72Cb26A73e4Fa1806a51aC79d"),
|
||||
big.NewInt(0),
|
||||
@@ -37,3 +35,15 @@ func TestComputeMessageHash(t *testing.T) {
|
||||
)
|
||||
assert.Equal(t, "0xda253c04595a49017bb54b1b46088c69752b5ad2f0c47971ac76b8b25abec202", hash.String())
|
||||
}
|
||||
|
||||
func TestBufferToUint256Le(t *testing.T) {
|
||||
input := []byte{
|
||||
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
}
|
||||
expectedOutput := []*big.Int{big.NewInt(1)}
|
||||
result := BufferToUint256Le(input)
|
||||
assert.Equal(t, expectedOutput, result)
|
||||
}
|
||||
|
||||
@@ -267,6 +267,40 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
approximatePayloadSize := func(hash string) (uint64, error) {
|
||||
traces, err := p.orm.GetL2WrappedBlocks(map[string]interface{}{"hash": hash})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(traces) != 1 {
|
||||
return 0, fmt.Errorf("unexpected traces length, expected = 1, actual = %d", len(traces))
|
||||
}
|
||||
size := 0
|
||||
for _, tx := range traces[0].Transactions {
|
||||
size += len(tx.Data)
|
||||
}
|
||||
return uint64(size), nil
|
||||
}
|
||||
|
||||
firstSize, err := approximatePayloadSize(blocks[0].Hash)
|
||||
if err != nil {
|
||||
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
if firstSize > p.commitCalldataSizeLimit {
|
||||
log.Warn("oversized payload even for only 1 block", "height", blocks[0].Number, "size", firstSize)
|
||||
// note: we should probably fail here once we can ensure this will not happen
|
||||
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
|
||||
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
|
||||
return false
|
||||
}
|
||||
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
|
||||
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
|
||||
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
|
||||
return true
|
||||
}
|
||||
|
||||
if blocks[0].GasUsed > p.batchGasThreshold {
|
||||
bridgeL2BatchesGasOverThresholdTotalCounter.Inc(1)
|
||||
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
|
||||
@@ -293,17 +327,24 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
var gasUsed, txNum uint64
|
||||
var gasUsed, txNum, payloadSize uint64
|
||||
reachThreshold := false
|
||||
// add blocks into the batch until one of the thresholds (gas, tx count, or calldata size) is reached
|
||||
for i, block := range blocks {
|
||||
if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) {
|
||||
size, err := approximatePayloadSize(block.Hash)
|
||||
if err != nil {
|
||||
log.Error("failed to create batch", "number", block.Number, "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) || (payloadSize+size > p.commitCalldataSizeLimit) {
|
||||
blocks = blocks[:i]
|
||||
reachThreshold = true
|
||||
break
|
||||
}
|
||||
gasUsed += block.GasUsed
|
||||
txNum += block.TxNum
|
||||
payloadSize += size
|
||||
}
|
||||
|
||||
// if too little gas has been gathered, but we don't want to halt, we then check the first block in the batch:
|
||||
|
||||
@@ -1,12 +1,16 @@
|
||||
package watcher_test
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
geth_types "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/database"
|
||||
@@ -14,12 +18,74 @@ import (
|
||||
|
||||
"scroll-tech/bridge/config"
|
||||
"scroll-tech/bridge/relayer"
|
||||
"scroll-tech/bridge/watcher"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
)
|
||||
|
||||
func testBatchProposerProposeBatch(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
p := &BatchProposer{
|
||||
batchGasThreshold: 1000,
|
||||
batchTxNumThreshold: 10,
|
||||
batchTimeSec: 300,
|
||||
commitCalldataSizeLimit: 500,
|
||||
orm: db,
|
||||
}
|
||||
patchGuard := gomonkey.ApplyMethodFunc(p.orm, "GetL2WrappedBlocks", func(fields map[string]interface{}, args ...string) ([]*types.WrappedBlock, error) {
|
||||
hash, _ := fields["hash"].(string)
|
||||
if hash == "blockWithLongData" {
|
||||
longData := strings.Repeat("0", 1000)
|
||||
return []*types.WrappedBlock{{
|
||||
Transactions: []*geth_types.TransactionData{{
|
||||
Data: longData,
|
||||
}},
|
||||
}}, nil
|
||||
}
|
||||
return []*types.WrappedBlock{{
|
||||
Transactions: []*geth_types.TransactionData{{
|
||||
Data: "short",
|
||||
}},
|
||||
}}, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
patchGuard.ApplyPrivateMethod(p, "createBatchForBlocks", func(*BatchProposer, []*types.BlockInfo) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
block1 := &types.BlockInfo{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix()) - 200}
|
||||
block2 := &types.BlockInfo{Number: 2, GasUsed: 200, TxNum: 2, BlockTimestamp: uint64(time.Now().Unix())}
|
||||
block3 := &types.BlockInfo{Number: 3, GasUsed: 300, TxNum: 11, BlockTimestamp: uint64(time.Now().Unix())}
|
||||
block4 := &types.BlockInfo{Number: 4, GasUsed: 1001, TxNum: 3, BlockTimestamp: uint64(time.Now().Unix())}
|
||||
blockOutdated := &types.BlockInfo{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Add(-400 * time.Second).Unix())}
|
||||
blockWithLongData := &types.BlockInfo{Hash: "blockWithLongData", Number: 5, GasUsed: 500, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix())}
|
||||
|
||||
testCases := []struct {
|
||||
description string
|
||||
blocks []*types.BlockInfo
|
||||
expectedRes bool
|
||||
}{
|
||||
{"Empty block list", []*types.BlockInfo{}, false},
|
||||
{"Single block exceeding gas threshold", []*types.BlockInfo{block4}, true},
|
||||
{"Single block exceeding transaction number threshold", []*types.BlockInfo{block3}, true},
|
||||
{"Multiple blocks meeting thresholds", []*types.BlockInfo{block1, block2, block3}, true},
|
||||
{"Multiple blocks not meeting thresholds", []*types.BlockInfo{block1, block2}, false},
|
||||
{"Outdated and valid block", []*types.BlockInfo{blockOutdated, block2}, true},
|
||||
{"Single block with long data", []*types.BlockInfo{blockWithLongData}, true},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
assert.Equal(t, tc.expectedRes, p.proposeBatch(tc.blocks), "Failed on: %s", tc.description)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testBatchProposerBatchGeneration(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
@@ -36,7 +102,7 @@ func testBatchProposerProposeBatch(t *testing.T) {
|
||||
assert.NoError(t, db.InsertWrappedBlocks([]*types.WrappedBlock{wrappedBlock1}))
|
||||
|
||||
l2cfg := cfg.L2Config
|
||||
wc := watcher.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
|
||||
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
|
||||
loopToFetchEvent(subCtx, wc)
|
||||
|
||||
batch, err := db.GetLatestBatch()
|
||||
@@ -52,7 +118,7 @@ func testBatchProposerProposeBatch(t *testing.T) {
|
||||
relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
proposer := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
ProofGenerationFreq: 1,
|
||||
BatchGasThreshold: 3000000,
|
||||
BatchTxNumThreshold: 135,
|
||||
@@ -115,7 +181,7 @@ func testBatchProposerGracefulRestart(t *testing.T) {
|
||||
assert.Equal(t, 1, len(batchHashes))
|
||||
assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])
|
||||
// test p.recoverBatchDataBuffer().
|
||||
_ = watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
_ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
ProofGenerationFreq: 1,
|
||||
BatchGasThreshold: 3000000,
|
||||
BatchTxNumThreshold: 135,
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/database"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
@@ -100,6 +101,24 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessedBlockHeight returns processedBlockHeight.
// Currently only used in unit tests.
|
||||
func (w *L1WatcherClient) ProcessedBlockHeight() uint64 {
|
||||
return w.processedBlockHeight
|
||||
}
|
||||
|
||||
// Confirmations returns the confirmations setting.
// Currently only used in unit tests.
|
||||
func (w *L1WatcherClient) Confirmations() rpc.BlockNumber {
|
||||
return w.confirmations
|
||||
}
|
||||
|
||||
// SetConfirmations sets the confirmations for L1WatcherClient.
// Currently only used in unit tests.
|
||||
func (w *L1WatcherClient) SetConfirmations(confirmations rpc.BlockNumber) {
|
||||
w.confirmations = confirmations
|
||||
}
|
||||
|
||||
// FetchBlockHeader pulls the latest L1 blocks and saves them in the DB
|
||||
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
|
||||
fromBlock := int64(w.processedBlockHeight) + 1
|
||||
|
||||
@@ -1,30 +1,514 @@
|
||||
package watcher_test
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
geth_types "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/utils"
|
||||
|
||||
commonTypes "scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/bridge/watcher"
|
||||
)
|
||||
|
||||
func testStartWatcher(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
func setupL1Watcher(t *testing.T) (*L1WatcherClient, database.OrmFactory) {
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
client, err := ethclient.Dial(base.L1GethEndpoint())
|
||||
client, err := ethclient.Dial(base.L1gethImg.Endpoint())
|
||||
assert.NoError(t, err)
|
||||
|
||||
l1Cfg := cfg.L1Config
|
||||
|
||||
watcher := watcher.NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
|
||||
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
|
||||
assert.NoError(t, watcher.FetchContractEvent())
|
||||
return watcher, db
|
||||
}
|
||||
|
||||
func testFetchContractEvent(t *testing.T) {
|
||||
watcher, db := setupL1Watcher(t)
|
||||
defer db.Close()
|
||||
assert.NoError(t, watcher.FetchContractEvent())
|
||||
}
|
||||
|
||||
func testL1WatcherClientFetchBlockHeader(t *testing.T) {
|
||||
watcher, db := setupL1Watcher(t)
|
||||
defer db.Close()
|
||||
convey.Convey("test toBlock < fromBlock", t, func() {
|
||||
var blockHeight uint64
|
||||
if watcher.ProcessedBlockHeight() <= 0 {
|
||||
blockHeight = 0
|
||||
} else {
|
||||
blockHeight = watcher.ProcessedBlockHeight() - 1
|
||||
}
|
||||
err := watcher.FetchBlockHeader(blockHeight)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
convey.Convey("test get header from client error", t, func() {
|
||||
var c *ethclient.Client
|
||||
patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
|
||||
return nil, ethereum.NotFound
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
var blockHeight uint64 = 10
|
||||
err := watcher.FetchBlockHeader(blockHeight)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
convey.Convey("insert l1 block error", t, func() {
|
||||
var c *ethclient.Client
|
||||
patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
|
||||
if height == nil {
|
||||
height = big.NewInt(100)
|
||||
}
|
||||
t.Log(height)
|
||||
return &types.Header{
|
||||
BaseFee: big.NewInt(100),
|
||||
}, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
patchGuard.ApplyMethodFunc(db, "InsertL1Blocks", func(ctx context.Context, blocks []*commonTypes.L1BlockInfo) error {
|
||||
return errors.New("insert failed")
|
||||
})
|
||||
|
||||
var blockHeight uint64 = 10
|
||||
err := watcher.FetchBlockHeader(blockHeight)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
convey.Convey("fetch block header success", t, func() {
|
||||
var c *ethclient.Client
|
||||
patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
|
||||
if height == nil {
|
||||
height = big.NewInt(100)
|
||||
}
|
||||
t.Log(height)
|
||||
return &types.Header{
|
||||
BaseFee: big.NewInt(100),
|
||||
}, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
patchGuard.ApplyMethodFunc(db, "InsertL1Blocks", func(ctx context.Context, blocks []*commonTypes.L1BlockInfo) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
var blockHeight uint64 = 10
|
||||
err := watcher.FetchBlockHeader(blockHeight)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func testL1WatcherClientFetchContractEvent(t *testing.T) {
|
||||
watcher, db := setupL1Watcher(t)
|
||||
defer db.Close()
|
||||
|
||||
watcher.SetConfirmations(rpc.SafeBlockNumber)
|
||||
convey.Convey("get latest confirmed block number failure", t, func() {
|
||||
var c *ethclient.Client
|
||||
patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
|
||||
return nil, ethereum.NotFound
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
err := watcher.FetchContractEvent()
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
var c *ethclient.Client
|
||||
patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
|
||||
if height == nil {
|
||||
height = big.NewInt(100)
|
||||
}
|
||||
t.Log(height)
|
||||
return &types.Header{
|
||||
Number: big.NewInt(100),
|
||||
BaseFee: big.NewInt(100),
|
||||
}, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
convey.Convey("filter logs failure", t, func() {
|
||||
targetErr := errors.New("call filter failure")
|
||||
patchGuard.ApplyMethodFunc(c, "FilterLogs", func(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) {
|
||||
return nil, targetErr
|
||||
})
|
||||
err := watcher.FetchContractEvent()
|
||||
assert.EqualError(t, err, targetErr.Error())
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(c, "FilterLogs", func(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) {
|
||||
return []types.Log{
|
||||
{
|
||||
Address: common.HexToAddress("0x0000000000000000000000000000000000000000"),
|
||||
},
|
||||
}, nil
|
||||
})
|
||||
|
||||
convey.Convey("parse bridge event logs failure", t, func() {
|
||||
targetErr := errors.New("parse log failure")
|
||||
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []geth_types.Log) ([]*commonTypes.L1Message, []relayedMessage, []rollupEvent, error) {
|
||||
return nil, nil, nil, targetErr
|
||||
})
|
||||
err := watcher.FetchContractEvent()
|
||||
assert.Equal(t, err.Error(), targetErr.Error())
|
||||
})
|
||||
|
||||
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []geth_types.Log) ([]*commonTypes.L1Message, []relayedMessage, []rollupEvent, error) {
|
||||
rollupEvents := []rollupEvent{
|
||||
{
|
||||
batchHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
|
||||
txHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
|
||||
status: commonTypes.RollupFinalized,
|
||||
},
|
||||
{
|
||||
batchHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
|
||||
txHash: common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"),
|
||||
status: commonTypes.RollupCommitted,
|
||||
},
|
||||
}
|
||||
|
||||
relayedMessageEvents := []relayedMessage{
|
||||
{
|
||||
msgHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
|
||||
txHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
|
||||
isSuccessful: true,
|
||||
},
|
||||
{
|
||||
msgHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
|
||||
txHash: common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"),
|
||||
isSuccessful: false,
|
||||
},
|
||||
}
|
||||
return nil, relayedMessageEvents, rollupEvents, nil
|
||||
})
|
||||
|
||||
convey.Convey("db get rollup status by hash list failure", t, func() {
|
||||
targetErr := errors.New("get db failure")
|
||||
patchGuard.ApplyMethodFunc(db, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
|
||||
return nil, targetErr
|
||||
})
|
||||
err := watcher.FetchContractEvent()
|
||||
assert.Equal(t, err.Error(), targetErr.Error())
|
||||
})
|
||||
|
||||
convey.Convey("rollup status mismatch batch hashes length", t, func() {
|
||||
patchGuard.ApplyMethodFunc(db, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
|
||||
s := []commonTypes.RollupStatus{
|
||||
commonTypes.RollupFinalized,
|
||||
}
|
||||
return s, nil
|
||||
})
|
||||
err := watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(db, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
|
||||
s := []commonTypes.RollupStatus{
|
||||
commonTypes.RollupPending,
|
||||
commonTypes.RollupCommitting,
|
||||
}
|
||||
return s, nil
|
||||
})
|
||||
|
||||
convey.Convey("db update RollupFinalized status failure", t, func() {
|
||||
targetErr := errors.New("UpdateFinalizeTxHashAndRollupStatus RollupFinalized failure")
|
||||
patchGuard.ApplyMethodFunc(db, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
|
||||
return targetErr
|
||||
})
|
||||
err := watcher.FetchContractEvent()
|
||||
assert.Equal(t, targetErr.Error(), err.Error())
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(db, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
convey.Convey("db update RollupCommitted status failure", t, func() {
|
||||
targetErr := errors.New("UpdateCommitTxHashAndRollupStatus RollupCommitted failure")
|
||||
patchGuard.ApplyMethodFunc(db, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
|
||||
return targetErr
|
||||
})
|
||||
err := watcher.FetchContractEvent()
|
||||
assert.Equal(t, targetErr.Error(), err.Error())
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(db, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
convey.Convey("db update layer2 status and layer1 hash failure", t, func() {
|
||||
targetErr := errors.New("UpdateLayer2StatusAndLayer1Hash failure")
|
||||
patchGuard.ApplyMethodFunc(db, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
|
||||
return targetErr
|
||||
})
|
||||
err := watcher.FetchContractEvent()
|
||||
assert.Equal(t, targetErr.Error(), err.Error())
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(db, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
convey.Convey("db save l1 message failure", t, func() {
|
||||
targetErr := errors.New("SaveL1Messages failure")
|
||||
patchGuard.ApplyMethodFunc(db, "SaveL1Messages", func(context.Context, []*commonTypes.L1Message) error {
|
||||
return targetErr
|
||||
})
|
||||
err := watcher.FetchContractEvent()
|
||||
assert.Equal(t, targetErr.Error(), err.Error())
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(db, "SaveL1Messages", func(context.Context, []*commonTypes.L1Message) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
convey.Convey("FetchContractEvent success", t, func() {
|
||||
err := watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
|
||||
watcher, db := setupL1Watcher(t)
|
||||
defer db.Close()
|
||||
|
||||
logs := []geth_types.Log{
|
||||
{
|
||||
Topics: []common.Hash{bridge_abi.L1QueueTransactionEventSignature},
|
||||
BlockNumber: 100,
|
||||
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
|
||||
},
|
||||
}
|
||||
|
||||
convey.Convey("unpack QueueTransaction log failure", t, func() {
|
||||
targetErr := errors.New("UnpackLog QueueTransaction failure")
|
||||
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
return targetErr
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
|
||||
assert.EqualError(t, err, targetErr.Error())
|
||||
assert.Empty(t, l2Messages)
|
||||
assert.Empty(t, relayedMessages)
|
||||
assert.Empty(t, rollupEvents)
|
||||
})
|
||||
|
||||
convey.Convey("L1QueueTransactionEventSignature success", t, func() {
|
||||
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
tmpOut := out.(*bridge_abi.L1QueueTransactionEvent)
|
||||
tmpOut.QueueIndex = big.NewInt(100)
|
||||
tmpOut.Data = []byte("test data")
|
||||
tmpOut.Sender = common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
|
||||
tmpOut.Value = big.NewInt(1000)
|
||||
tmpOut.Target = common.HexToAddress("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
|
||||
tmpOut.GasLimit = big.NewInt(10)
|
||||
return nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, relayedMessages)
|
||||
assert.Empty(t, rollupEvents)
|
||||
assert.Len(t, l2Messages, 1)
|
||||
assert.Equal(t, l2Messages[0].Value, big.NewInt(1000).String())
|
||||
})
|
||||
}
|
||||
|
||||
func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
|
||||
watcher, db := setupL1Watcher(t)
|
||||
defer db.Close()
|
||||
|
||||
logs := []geth_types.Log{
|
||||
{
|
||||
Topics: []common.Hash{bridge_abi.L1RelayedMessageEventSignature},
|
||||
BlockNumber: 100,
|
||||
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
|
||||
},
|
||||
}
|
||||
|
||||
convey.Convey("unpack RelayedMessage log failure", t, func() {
|
||||
targetErr := errors.New("UnpackLog RelayedMessage failure")
|
||||
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
return targetErr
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
|
||||
assert.EqualError(t, err, targetErr.Error())
|
||||
assert.Empty(t, l2Messages)
|
||||
assert.Empty(t, relayedMessages)
|
||||
assert.Empty(t, rollupEvents)
|
||||
})
|
||||
|
||||
convey.Convey("L1RelayedMessageEventSignature success", t, func() {
|
||||
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
|
||||
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
tmpOut := out.(*bridge_abi.L1RelayedMessageEvent)
|
||||
tmpOut.MessageHash = msgHash
|
||||
return nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, l2Messages)
|
||||
assert.Empty(t, rollupEvents)
|
||||
assert.Len(t, relayedMessages, 1)
|
||||
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
|
||||
})
|
||||
}
|
||||
|
||||
func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T) {
|
||||
watcher, db := setupL1Watcher(t)
|
||||
defer db.Close()
|
||||
|
||||
logs := []geth_types.Log{
|
||||
{
|
||||
Topics: []common.Hash{bridge_abi.L1FailedRelayedMessageEventSignature},
|
||||
BlockNumber: 100,
|
||||
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
|
||||
},
|
||||
}
|
||||
|
||||
convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
|
||||
targetErr := errors.New("UnpackLog FailedRelayedMessage failure")
|
||||
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
return targetErr
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
|
||||
assert.EqualError(t, err, targetErr.Error())
|
||||
assert.Empty(t, l2Messages)
|
||||
assert.Empty(t, relayedMessages)
|
||||
assert.Empty(t, rollupEvents)
|
||||
})
|
||||
|
||||
convey.Convey("L1FailedRelayedMessageEventSignature success", t, func() {
|
||||
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
|
||||
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
tmpOut := out.(*bridge_abi.L1FailedRelayedMessageEvent)
|
||||
tmpOut.MessageHash = msgHash
|
||||
return nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, l2Messages)
|
||||
assert.Empty(t, rollupEvents)
|
||||
assert.Len(t, relayedMessages, 1)
|
||||
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
|
||||
})
|
||||
}
|
||||
|
||||
func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
|
||||
watcher, db := setupL1Watcher(t)
|
||||
defer db.Close()
|
||||
|
||||
logs := []geth_types.Log{
|
||||
{
|
||||
Topics: []common.Hash{bridge_abi.L1CommitBatchEventSignature},
|
||||
BlockNumber: 100,
|
||||
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
|
||||
},
|
||||
}
|
||||
|
||||
convey.Convey("unpack CommitBatch log failure", t, func() {
|
||||
targetErr := errors.New("UnpackLog CommitBatch failure")
|
||||
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
return targetErr
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
|
||||
assert.EqualError(t, err, targetErr.Error())
|
||||
assert.Empty(t, l2Messages)
|
||||
assert.Empty(t, relayedMessages)
|
||||
assert.Empty(t, rollupEvents)
|
||||
})
|
||||
|
||||
convey.Convey("L1CommitBatchEventSignature success", t, func() {
|
||||
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
|
||||
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
tmpOut := out.(*bridge_abi.L1CommitBatchEvent)
|
||||
tmpOut.BatchHash = msgHash
|
||||
return nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, l2Messages)
|
||||
assert.Empty(t, relayedMessages)
|
||||
assert.Len(t, rollupEvents, 1)
|
||||
assert.Equal(t, rollupEvents[0].batchHash, msgHash)
|
||||
assert.Equal(t, rollupEvents[0].status, commonTypes.RollupCommitted)
|
||||
})
|
||||
}
|
||||
|
||||
func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
|
||||
watcher, db := setupL1Watcher(t)
|
||||
defer db.Close()
|
||||
|
||||
logs := []geth_types.Log{
|
||||
{
|
||||
Topics: []common.Hash{bridge_abi.L1FinalizeBatchEventSignature},
|
||||
BlockNumber: 100,
|
||||
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
|
||||
},
|
||||
}
|
||||
|
||||
convey.Convey("unpack FinalizeBatch log failure", t, func() {
|
||||
targetErr := errors.New("UnpackLog FinalizeBatch failure")
|
||||
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
return targetErr
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
|
||||
assert.EqualError(t, err, targetErr.Error())
|
||||
assert.Empty(t, l2Messages)
|
||||
assert.Empty(t, relayedMessages)
|
||||
assert.Empty(t, rollupEvents)
|
||||
})
|
||||
|
||||
convey.Convey("L1FinalizeBatchEventSignature success", t, func() {
|
||||
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
|
||||
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
tmpOut := out.(*bridge_abi.L1FinalizeBatchEvent)
|
||||
tmpOut.BatchHash = msgHash
|
||||
return nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, l2Messages)
|
||||
assert.Empty(t, relayedMessages)
|
||||
assert.Len(t, rollupEvents, 1)
|
||||
assert.Equal(t, rollupEvents[0].batchHash, msgHash)
|
||||
assert.Equal(t, rollupEvents[0].status, commonTypes.RollupFinalized)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/database"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
|
||||
@@ -1,31 +1,48 @@
|
||||
package watcher_test
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"errors"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
geth_types "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/mock_bridge"
|
||||
"scroll-tech/bridge/sender"
|
||||
"scroll-tech/bridge/watcher"
|
||||
"scroll-tech/bridge/utils"
|
||||
|
||||
cutils "scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/migrate"
|
||||
)
|
||||
|
||||
func setupL2Watcher(t *testing.T) *L2WatcherClient {
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
l2cfg := cfg.L2Config
|
||||
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
|
||||
return watcher
|
||||
}
|
||||
|
||||
func testCreateNewWatcherAndStop(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
l2db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
@@ -39,8 +56,8 @@ func testCreateNewWatcherAndStop(t *testing.T) {
|
||||
}()
|
||||
|
||||
l2cfg := cfg.L2Config
|
||||
rc := watcher.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, l2db)
|
||||
loopToFetchEvent(subCtx, rc)
|
||||
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, l2db)
|
||||
loopToFetchEvent(subCtx, wc)
|
||||
|
||||
l1cfg := cfg.L1Config
|
||||
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
|
||||
@@ -75,7 +92,7 @@ func testMonitorBridgeContract(t *testing.T) {
|
||||
}()
|
||||
|
||||
l2cfg := cfg.L2Config
|
||||
wc := watcher.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
|
||||
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
|
||||
loopToFetchEvent(subCtx, wc)
|
||||
|
||||
previousHeight, err := l2Cli.BlockNumber(context.Background())
|
||||
@@ -155,8 +172,8 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
|
||||
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
rc := prepareWatcherClient(l2Cli, db, address)
|
||||
loopToFetchEvent(subCtx, rc)
|
||||
wc := prepareWatcherClient(l2Cli, db, address)
|
||||
loopToFetchEvent(subCtx, wc)
|
||||
|
||||
// Call the mock_bridge instance's sendMessage to emit events multiple times
|
||||
numTransactions := 4
|
||||
@@ -209,9 +226,36 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
|
||||
assert.Equal(t, 5, len(msgs))
|
||||
}
|
||||
|
||||
func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *watcher.L2WatcherClient {
|
||||
func testFetchRunningMissingBlocks(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
|
||||
|
||||
// deploy mock bridge
|
||||
_, tx, _, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
|
||||
assert.NoError(t, err)
|
||||
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// wait for the node to process the deployment
|
||||
<-time.After(3 * time.Second)
|
||||
|
||||
latestHeight, err := l2Cli.BlockNumber(context.Background())
|
||||
assert.NoError(t, err)
|
||||
wc := prepareWatcherClient(l2Cli, db, address)
|
||||
wc.TryFetchRunningMissingBlocks(context.Background(), latestHeight)
|
||||
fetchedHeight, err := db.GetL2BlocksLatestHeight()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(fetchedHeight), latestHeight)
|
||||
}
|
||||
|
||||
func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *L2WatcherClient {
|
||||
confirmations := rpc.LatestBlockNumber
|
||||
return watcher.NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
|
||||
}
|
||||
|
||||
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
|
||||
@@ -224,6 +268,175 @@ func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.Privat
|
||||
return auth
|
||||
}
|
||||
|
||||
func loopToFetchEvent(subCtx context.Context, watcher *watcher.L2WatcherClient) {
|
||||
func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
|
||||
go cutils.Loop(subCtx, 2*time.Second, watcher.FetchContractEvent)
|
||||
}
|
||||
|
||||
func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
|
||||
watcher := setupL2Watcher(t)
|
||||
logs := []geth_types.Log{
|
||||
{
|
||||
Topics: []common.Hash{
|
||||
bridge_abi.L2SentMessageEventSignature,
|
||||
},
|
||||
BlockNumber: 100,
|
||||
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
|
||||
},
|
||||
}
|
||||
|
||||
convey.Convey("unpack SentMessage log failure", t, func() {
|
||||
targetErr := errors.New("UnpackLog SentMessage failure")
|
||||
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
|
||||
return targetErr
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
|
||||
assert.EqualError(t, err, targetErr.Error())
|
||||
assert.Empty(t, l2Messages)
|
||||
assert.Empty(t, relayedMessages)
|
||||
})
|
||||
|
||||
convey.Convey("L2SentMessageEventSignature success", t, func() {
|
||||
tmpSendAddr := common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
|
||||
tmpTargetAddr := common.HexToAddress("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
|
||||
tmpValue := big.NewInt(1000)
|
||||
tmpMessageNonce := big.NewInt(100)
|
||||
tmpMessage := []byte("test for L2SentMessageEventSignature")
|
||||
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
|
||||
tmpOut := out.(*bridge_abi.L2SentMessageEvent)
|
||||
tmpOut.Sender = tmpSendAddr
|
||||
tmpOut.Value = tmpValue
|
||||
tmpOut.Target = tmpTargetAddr
|
||||
tmpOut.MessageNonce = tmpMessageNonce
|
||||
tmpOut.Message = tmpMessage
|
||||
return nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, relayedMessages)
|
||||
assert.Empty(t, l2Messages)
|
||||
})
|
||||
}
|
||||
|
||||
func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
	watcher := setupL2Watcher(t)
	logs := []geth_types.Log{
		{
			Topics:      []common.Hash{bridge_abi.L2RelayedMessageEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}

	convey.Convey("unpack RelayedMessage log failure", t, func() {
		targetErr := errors.New("UnpackLog RelayedMessage failure")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			return targetErr
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, targetErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

	convey.Convey("L2RelayedMessageEventSignature success", t, func() {
		msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			tmpOut := out.(*bridge_abi.L2RelayedMessageEvent)
			tmpOut.MessageHash = msgHash
			return nil
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Len(t, relayedMessages, 1)
		assert.Equal(t, relayedMessages[0].msgHash, msgHash)
	})
}

func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) {
	watcher := setupL2Watcher(t)
	logs := []geth_types.Log{
		{
			Topics:      []common.Hash{bridge_abi.L2FailedRelayedMessageEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}

	convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
		targetErr := errors.New("UnpackLog FailedRelayedMessage failure")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			return targetErr
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, targetErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

	convey.Convey("L2FailedRelayedMessageEventSignature success", t, func() {
		msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			tmpOut := out.(*bridge_abi.L2FailedRelayedMessageEvent)
			tmpOut.MessageHash = msgHash
			return nil
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Len(t, relayedMessages, 1)
		assert.Equal(t, relayedMessages[0].msgHash, msgHash)
	})
}

func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
	watcher := setupL2Watcher(t)
	logs := []geth_types.Log{
		{
			Topics:      []common.Hash{bridge_abi.L2AppendMessageEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}

	convey.Convey("unpack AppendMessage log failure", t, func() {
		targetErr := errors.New("UnpackLog AppendMessage failure")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			return targetErr
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, targetErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

	convey.Convey("L2AppendMessageEventSignature success", t, func() {
		msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			tmpOut := out.(*bridge_abi.L2AppendMessageEvent)
			tmpOut.MessageHash = msgHash
			tmpOut.Index = big.NewInt(100)
			return nil
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})
}

@@ -1,4 +1,4 @@
package watcher_test
package watcher

import (
	"encoding/json"

@@ -35,9 +35,9 @@ func setupEnv(t *testing.T) (err error) {

	base.RunImages(t)

	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1GethEndpoint()
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2GethEndpoint()
	cfg.DBConfig.DSN = base.DBEndpoint()
	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
	cfg.DBConfig = base.DBConfig

	// Create l2geth client.
	l2Cli, err = base.L2Client()

@@ -77,15 +77,29 @@ func TestFunction(t *testing.T) {
	if err := setupEnv(t); err != nil {
		t.Fatal(err)
	}

	// Run l1 watcher test cases.
	t.Run("TestStartWatcher", testStartWatcher)
	t.Run("TestStartWatcher", testFetchContractEvent)
	t.Run("TestL1WatcherClientFetchBlockHeader", testL1WatcherClientFetchBlockHeader)
	t.Run("TestL1WatcherClientFetchContractEvent", testL1WatcherClientFetchContractEvent)
	t.Run("TestParseBridgeEventLogsL1QueueTransactionEventSignature", testParseBridgeEventLogsL1QueueTransactionEventSignature)
	t.Run("TestParseBridgeEventLogsL1RelayedMessageEventSignature", testParseBridgeEventLogsL1RelayedMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL1FailedRelayedMessageEventSignature", testParseBridgeEventLogsL1FailedRelayedMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL1CommitBatchEventSignature", testParseBridgeEventLogsL1CommitBatchEventSignature)
	t.Run("TestParseBridgeEventLogsL1FinalizeBatchEventSignature", testParseBridgeEventLogsL1FinalizeBatchEventSignature)

	// Run l2 watcher test cases.
	t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
	t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
	t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
	t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)
	t.Run("TestParseBridgeEventLogsL2SentMessageEventSignature", testParseBridgeEventLogsL2SentMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL2RelayedMessageEventSignature", testParseBridgeEventLogsL2RelayedMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL2FailedRelayedMessageEventSignature", testParseBridgeEventLogsL2FailedRelayedMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL2AppendMessageEventSignature", testParseBridgeEventLogsL2AppendMessageEventSignature)

	// Run batch proposer test cases.
	t.Run("TestBatchProposerProposeBatch", testBatchProposerProposeBatch)
	t.Run("TestBatchProposerBatchGeneration", testBatchProposerBatchGeneration)
	t.Run("TestBatchProposerGracefulRestart", testBatchProposerGracefulRestart)

}

@@ -13,7 +13,7 @@ RUN cargo chef cook --release --recipe-path recipe.json

COPY ./common/libzkp/impl .
RUN cargo build --release
RUN find ./ | grep libzktrie.so | xargs -i cp {} /app/target/release/
RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/

# Download Go dependencies

build/run_tests.sh (new executable file, 26 lines)
@@ -0,0 +1,26 @@
#!/bin/bash
set -uex

profile_name=$1

exclude_dirs=("scroll-tech/bridge/cmd" "scroll-tech/bridge/tests" "scroll-tech/bridge/mock_bridge" "scroll-tech/coordinator/cmd")

all_packages=$(go list ./... | grep -v "^scroll-tech/${profile_name}$")
coverpkg="scroll-tech/${profile_name}"

for pkg in $all_packages; do
    exclude_pkg=false
    for exclude_dir in "${exclude_dirs[@]}"; do
        if [[ $pkg == $exclude_dir* ]]; then
            exclude_pkg=true
            break
        fi
    done

    if [ "$exclude_pkg" = false ]; then
        coverpkg="$coverpkg,$pkg/..."
    fi
done

echo "coverage.${profile_name}.txt"
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="$coverpkg" -coverprofile=../coverage.${profile_name}.txt -covermode=atomic ./...
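How this script is invoked is not stated in the script itself, but it can be inferred from it: the coverage profile is written one directory up (`../coverage.${profile_name}.txt`) and `go test ./...` runs against the current module, so it has to be started from inside the module being profiled, with the module name passed as the profile name. The `-gcflags="-l"` flag disables inlining, which the gomonkey-based patches in the watcher tests above generally require, and `-ldflags="-s=false"` keeps the symbol table. A hypothetical invocation (module name chosen only for illustration) might look like this:

```bash
# Hypothetical usage sketch: run from inside the Go module to be profiled.
cd database
../build/run_tests.sh database
# -> writes coverage.database.txt into the repository root
```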
common/bytecode/.gitignore (new vendored file, 2 lines)
@@ -0,0 +1,2 @@
*.go
*.sol

common/bytecode/Makefile (new file, 9 lines)
@@ -0,0 +1,9 @@
.PHONY: all erc20 greeter

all: erc20 greeter

erc20:
	go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./erc20/ERC20Mock.json --pkg erc20 --out ./erc20/ERC20Mock.go

greeter:
	go run github.com/scroll-tech/go-ethereum/cmd/abigen --combined-json ./greeter/Greeter.json --pkg greeter --out ./greeter/Greeter.go
common/bytecode/README.md (new file, 40 lines)
@@ -0,0 +1,40 @@
## How to pre-deploy contracts?
* See https://github.com/scroll-tech/genesis-creator for reference.
1. Set up the environment.
```bash
git clone git@github.com:scroll-tech/genesis-creator.git
cd genesis-creator
go get -v github.com/scroll-tech/go-ethereum@develop && go mod tidy
make abi && make genesis-creator
make l2geth-docker
```

2. Start docker and write the pre-deployed contracts into the genesis file.
```bash
make start-docker
./bin/genesis-creator -genesis ${SCROLLPATH}/common/docker/l2geth/genesis.json -contract [erc20|greeter]
```

3. Rebuild the l2geth docker image.
```bash
cd ${SCROLLPATH}
make dev_docker
```

## How to get a contract ABI?
* The steps below use the ERC20 contract as an example; other contracts follow the same procedure.
1. Install solc.

   *See https://docs.soliditylang.org/en/latest/installing-solidity.html*

2. Generate the ABI file.
```bash
cd genesis-creator
solc --combined-json "abi" --optimize ${SCROLLPATH}/common/bytecode/erc20/ERC20Mock.sol | jq > ${SCROLLPATH}/common/bytecode/erc20/ERC20Mock.json
```

3. Translate the ABI to Go bindings.
```bash
cd ${SCROLLPATH}
make -C common/bytecode all
```
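For illustration only (not part of this change set), a minimal sketch of how the abigen-generated `erc20` bindings might be used once the contract has been pre-deployed through the genesis file. The RPC endpoint, the contract and account addresses, and the `scroll-tech/common/bytecode/erc20` import path are assumptions, not values taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"

	"scroll-tech/common/bytecode/erc20" // assumed import path of the abigen output
)

func main() {
	// Dial a local l2geth node (placeholder endpoint).
	client, err := ethclient.Dial("http://127.0.0.1:8545")
	if err != nil {
		panic(err)
	}

	// Bind to the ERC20Mock pre-deployed via the genesis file (placeholder address).
	token, err := erc20.NewERC20Mock(common.HexToAddress("0x0000000000000000000000000000000000001234"), client)
	if err != nil {
		panic(err)
	}

	// Read-only call through the generated binding (placeholder account).
	balance, err := token.BalanceOf(&bind.CallOpts{}, common.HexToAddress("0x0000000000000000000000000000000000005678"))
	if err != nil {
		panic(err)
	}
	fmt.Println("balance:", balance)
}
```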
387
common/bytecode/erc20/ERC20Mock.json
Normal file
387
common/bytecode/erc20/ERC20Mock.json
Normal file
@@ -0,0 +1,387 @@
|
||||
{
|
||||
"contracts": {
|
||||
"tests/contracts/erc20/erc20.sol:ERC20Mock": {
|
||||
"abi": [
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "string",
|
||||
"name": "name",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"internalType": "string",
|
||||
"name": "symbol",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "initialAccount",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "initialBalance",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "payable",
|
||||
"type": "constructor"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "owner",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": false,
|
||||
"internalType": "uint256",
|
||||
"name": "value",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "Approval",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "from",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "to",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": false,
|
||||
"internalType": "uint256",
|
||||
"name": "value",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "Transfer",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "owner",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"name": "allowance",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "approve",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "owner",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "value",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "approveInternal",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "account",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"name": "balanceOf",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "account",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "burn",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "decimals",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint8",
|
||||
"name": "",
|
||||
"type": "uint8"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "subtractedValue",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "decreaseAllowance",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "addedValue",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "increaseAllowance",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "account",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "mint",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "name",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "string",
|
||||
"name": "",
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "symbol",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "string",
|
||||
"name": "",
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "totalSupply",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "to",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "transfer",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "from",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "to",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "transferFrom",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "from",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "to",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "value",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "transferInternal",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"version": "0.8.16+commit.07a7930e.Darwin.appleclang"
|
||||
}
|
||||
72
common/bytecode/greeter/Greeter.json
Normal file
72
common/bytecode/greeter/Greeter.json
Normal file
@@ -0,0 +1,72 @@
|
||||
{
|
||||
"contracts": {
|
||||
"greeter/Greeter.sol:Greeter": {
|
||||
"abi": [
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "num",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "constructor"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "retrieve",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "retrieve_failing",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "num",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "set_value",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "num",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "set_value_failing",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"version": "0.8.16+commit.07a7930e.Darwin.appleclang"
|
||||
}
|
||||
@@ -1,8 +1,8 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
@@ -11,13 +11,11 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/modern-go/reflect2"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/database"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
@@ -27,208 +25,172 @@ var (
|
||||
dbStartPort = 30000
|
||||
)
|
||||
|
||||
// AppAPI app interface.
|
||||
type AppAPI interface {
|
||||
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
|
||||
RunApp(waitResult func() bool)
|
||||
WaitExit()
|
||||
ExpectWithTimeout(t *testing.T, parallel bool, timeout time.Duration, keyword string)
|
||||
}
|
||||
|
||||
// App is collection struct of runtime docker images
|
||||
type App struct {
|
||||
l1gethImg ImgInstance
|
||||
l2gethImg ImgInstance
|
||||
L1gethImg GethImgInstance
|
||||
L2gethImg GethImgInstance
|
||||
DBImg ImgInstance
|
||||
|
||||
dbImg ImgInstance
|
||||
dbConfig *database.DBConfig
|
||||
dbFile string
|
||||
dbClient *sql.DB
|
||||
DBConfig *database.DBConfig
|
||||
DBConfigFile string
|
||||
|
||||
// common time stamp.
|
||||
timestamp int
|
||||
Timestamp int
|
||||
}
|
||||
|
||||
// NewDockerApp returns new instance of dokerApp struct
|
||||
func NewDockerApp() *App {
|
||||
timestamp := time.Now().Nanosecond()
|
||||
return &App{
|
||||
timestamp: timestamp,
|
||||
dbFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
|
||||
app := &App{
|
||||
Timestamp: timestamp,
|
||||
L1gethImg: newTestL1Docker(),
|
||||
L2gethImg: newTestL2Docker(),
|
||||
DBImg: newTestDBDocker("postgres"),
|
||||
DBConfigFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
|
||||
}
|
||||
if err := app.mockDBConfig(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return app
|
||||
}
|
||||
|
||||
// RunImages runs all images togather
|
||||
func (b *App) RunImages(t *testing.T) {
|
||||
b.runDBImage(t)
|
||||
b.runL1Geth(t)
|
||||
b.runL2Geth(t)
|
||||
b.RunDBImage(t)
|
||||
b.RunL1Geth(t)
|
||||
b.RunL2Geth(t)
|
||||
}
|
||||
|
||||
func (b *App) runDBImage(t *testing.T) {
|
||||
if b.dbImg != nil {
|
||||
// RunDBImage starts postgres docker container.
|
||||
func (b *App) RunDBImage(t *testing.T) {
|
||||
if b.DBImg.IsRunning() {
|
||||
return
|
||||
}
|
||||
b.dbImg = newTestDBDocker(t, "postgres")
|
||||
if err := b.mockDBConfig(); err != nil {
|
||||
_ = b.dbImg.Stop()
|
||||
b.dbImg = nil
|
||||
_ = os.Remove(b.dbFile)
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.NoError(t, b.DBImg.Start())
|
||||
var isRun bool
|
||||
// try 5 times until the db is ready.
|
||||
utils.TryTimes(10, func() bool {
|
||||
db, err := sqlx.Open("postgres", b.DBImg.Endpoint())
|
||||
isRun = err == nil && db != nil && db.Ping() == nil
|
||||
return isRun
|
||||
})
|
||||
assert.Equal(t, true, isRun)
|
||||
}
|
||||
|
||||
// RunDBApp runs DB app with command
|
||||
func (b *App) RunDBApp(t *testing.T, option, keyword string) {
|
||||
args := []string{option, "--config", b.dbFile}
|
||||
app := cmd.NewCmd("db_cli-test", args...)
|
||||
defer app.WaitExit()
|
||||
|
||||
// Wait expect result.
|
||||
app.ExpectWithTimeout(t, true, time.Second*3, keyword)
|
||||
app.RunApp(nil)
|
||||
}
|
||||
|
||||
// Free clear all running images
|
||||
// Free clear all running images, double check and recycle docker container.
|
||||
func (b *App) Free() {
|
||||
if b.l1gethImg != nil {
|
||||
_ = b.l1gethImg.Stop()
|
||||
b.l1gethImg = nil
|
||||
if b.L1gethImg.IsRunning() {
|
||||
_ = b.L1gethImg.Stop()
|
||||
}
|
||||
if b.l2gethImg != nil {
|
||||
_ = b.l2gethImg.Stop()
|
||||
b.l2gethImg = nil
|
||||
if b.L2gethImg.IsRunning() {
|
||||
_ = b.L2gethImg.Stop()
|
||||
}
|
||||
if b.dbImg != nil {
|
||||
_ = b.dbImg.Stop()
|
||||
b.dbImg = nil
|
||||
_ = os.Remove(b.dbFile)
|
||||
if b.DBImg.IsRunning() {
|
||||
_ = b.DBImg.Stop()
|
||||
_ = os.Remove(b.DBConfigFile)
|
||||
if !utils.IsNil(b.dbClient) {
|
||||
_ = b.dbClient.Close()
|
||||
b.dbClient = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// L1GethEndpoint returns l1gethimg endpoint
|
||||
func (b *App) L1GethEndpoint() string {
|
||||
if b.l1gethImg != nil {
|
||||
return b.l1gethImg.Endpoint()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// L2GethEndpoint returns l2gethimg endpoint
|
||||
func (b *App) L2GethEndpoint() string {
|
||||
if b.l2gethImg != nil {
|
||||
return b.l2gethImg.Endpoint()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// DBEndpoint returns the endpoint of the dbimg
|
||||
func (b *App) DBEndpoint() string {
|
||||
return b.dbImg.Endpoint()
|
||||
}
|
||||
|
||||
func (b *App) runL1Geth(t *testing.T) {
|
||||
if b.l1gethImg != nil {
|
||||
// RunL1Geth starts l1geth docker container.
|
||||
func (b *App) RunL1Geth(t *testing.T) {
|
||||
if b.L1gethImg.IsRunning() {
|
||||
return
|
||||
}
|
||||
b.l1gethImg = newTestL1Docker(t)
|
||||
assert.NoError(t, b.L1gethImg.Start())
|
||||
}
|
||||
|
||||
// L1Client returns a ethclient by dialing running l1geth
|
||||
func (b *App) L1Client() (*ethclient.Client, error) {
|
||||
if b.l1gethImg == nil || reflect2.IsNil(b.l1gethImg) {
|
||||
if utils.IsNil(b.L1gethImg) {
|
||||
return nil, fmt.Errorf("l1 geth is not running")
|
||||
}
|
||||
client, err := ethclient.Dial(b.l1gethImg.Endpoint())
|
||||
client, err := ethclient.Dial(b.L1gethImg.Endpoint())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (b *App) runL2Geth(t *testing.T) {
|
||||
if b.l2gethImg != nil {
|
||||
// RunL2Geth starts l2geth docker container.
|
||||
func (b *App) RunL2Geth(t *testing.T) {
|
||||
if b.L2gethImg.IsRunning() {
|
||||
return
|
||||
}
|
||||
b.l2gethImg = newTestL2Docker(t)
|
||||
assert.NoError(t, b.L2gethImg.Start())
|
||||
}
|
||||
|
||||
// L2Client returns a ethclient by dialing running l2geth
|
||||
func (b *App) L2Client() (*ethclient.Client, error) {
|
||||
if b.l2gethImg == nil || reflect2.IsNil(b.l2gethImg) {
|
||||
if utils.IsNil(b.L2gethImg) {
|
||||
return nil, fmt.Errorf("l2 geth is not running")
|
||||
}
|
||||
client, err := ethclient.Dial(b.l2gethImg.Endpoint())
|
||||
client, err := ethclient.Dial(b.L2gethImg.Endpoint())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// DBClient create and return *sql.DB instance.
|
||||
func (b *App) DBClient(t *testing.T) *sql.DB {
|
||||
if !utils.IsNil(b.dbClient) {
|
||||
return b.dbClient
|
||||
}
|
||||
var (
|
||||
cfg = b.DBConfig
|
||||
err error
|
||||
)
|
||||
b.dbClient, err = sql.Open(cfg.DriverName, cfg.DSN)
|
||||
assert.NoError(t, err)
|
||||
b.dbClient.SetMaxOpenConns(cfg.MaxOpenNum)
|
||||
b.dbClient.SetMaxIdleConns(cfg.MaxIdleNum)
|
||||
assert.NoError(t, b.dbClient.Ping())
|
||||
return b.dbClient
|
||||
}
|
||||
|
||||
func (b *App) mockDBConfig() error {
|
||||
if b.dbConfig == nil {
|
||||
|
||||
b.dbConfig = &database.DBConfig{
|
||||
DSN: "",
|
||||
DriverName: "postgres",
|
||||
MaxOpenNum: 200,
|
||||
MaxIdleNum: 20,
|
||||
}
|
||||
b.DBConfig = &database.DBConfig{
|
||||
DSN: "",
|
||||
DriverName: "postgres",
|
||||
MaxOpenNum: 200,
|
||||
MaxIdleNum: 20,
|
||||
}
|
||||
|
||||
if b.dbImg != nil {
|
||||
b.dbConfig.DSN = b.dbImg.Endpoint()
|
||||
if b.DBImg != nil {
|
||||
b.DBConfig.DSN = b.DBImg.Endpoint()
|
||||
}
|
||||
data, err := json.Marshal(b.dbConfig)
|
||||
data, err := json.Marshal(b.DBConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(b.dbFile, data, 0644) //nolint:gosec
|
||||
return os.WriteFile(b.DBConfigFile, data, 0644) //nolint:gosec
|
||||
}
|
||||
|
||||
func newTestL1Docker(t *testing.T) ImgInstance {
|
||||
func newTestL1Docker() GethImgInstance {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
imgL1geth := NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
|
||||
assert.NoError(t, imgL1geth.Start())
|
||||
|
||||
// try 3 times to get chainID until is ok.
|
||||
utils.TryTimes(10, func() bool {
|
||||
client, _ := ethclient.Dial(imgL1geth.Endpoint())
|
||||
if client != nil {
|
||||
if _, err := client.ChainID(context.Background()); err == nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return imgL1geth
|
||||
return NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
|
||||
}
|
||||
|
||||
func newTestL2Docker(t *testing.T) ImgInstance {
|
||||
func newTestL2Docker() GethImgInstance {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
imgL2geth := NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
|
||||
assert.NoError(t, imgL2geth.Start())
|
||||
|
||||
// try 3 times to get chainID until is ok.
|
||||
utils.TryTimes(10, func() bool {
|
||||
client, _ := ethclient.Dial(imgL2geth.Endpoint())
|
||||
if client != nil {
|
||||
if _, err := client.ChainID(context.Background()); err == nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return imgL2geth
|
||||
return NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
|
||||
}
|
||||
|
||||
func newTestDBDocker(t *testing.T, driverName string) ImgInstance {
|
||||
func newTestDBDocker(driverName string) ImgInstance {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
imgDB := NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
|
||||
assert.NoError(t, imgDB.Start())
|
||||
|
||||
// try 5 times until the db is ready.
|
||||
utils.TryTimes(10, func() bool {
|
||||
db, _ := sqlx.Open(driverName, imgDB.Endpoint())
|
||||
if db != nil {
|
||||
return db.Ping() == nil
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return imgDB
|
||||
return NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
|
||||
}
|
||||
|
||||
@@ -75,14 +75,16 @@ func (i *ImgDB) Stop() error {
|
||||
|
||||
// Endpoint return the dsn.
|
||||
func (i *ImgDB) Endpoint() string {
|
||||
if !i.running {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("postgres://postgres:%s@localhost:%d/%s?sslmode=disable", i.password, i.port, i.dbName)
|
||||
}
|
||||
|
||||
// IsRunning returns docker container's running status.
|
||||
func (i *ImgDB) IsRunning() bool {
|
||||
return i.running
|
||||
}
|
||||
|
||||
func (i *ImgDB) prepare() []string {
|
||||
cmd := []string{"docker", "run", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
|
||||
cmd := []string{"docker", "run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
|
||||
envs := []string{
|
||||
"-e", "POSTGRES_PASSWORD=" + i.password,
|
||||
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
|
||||
|
||||
@@ -3,11 +3,13 @@ package docker
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/utils"
|
||||
@@ -23,13 +25,14 @@ type ImgGeth struct {
|
||||
ipcPath string
|
||||
httpPort int
|
||||
wsPort int
|
||||
chainID *big.Int
|
||||
|
||||
running bool
|
||||
cmd *cmd.Cmd
|
||||
}
|
||||
|
||||
// NewImgGeth return geth img instance.
|
||||
func NewImgGeth(image, volume, ipc string, hPort, wPort int) ImgInstance {
|
||||
func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
|
||||
img := &ImgGeth{
|
||||
image: image,
|
||||
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
|
||||
@@ -53,14 +56,27 @@ func (i *ImgGeth) Start() error {
|
||||
_ = i.Stop()
|
||||
return fmt.Errorf("failed to start image: %s", i.image)
|
||||
}
|
||||
|
||||
// try 10 times to get chainID until is ok.
|
||||
utils.TryTimes(10, func() bool {
|
||||
client, err := ethclient.Dial(i.Endpoint())
|
||||
if err == nil && client != nil {
|
||||
i.chainID, err = client.ChainID(context.Background())
|
||||
return err == nil && i.chainID != nil
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsRunning returns docker container's running status.
|
||||
func (i *ImgGeth) IsRunning() bool {
|
||||
return i.running
|
||||
}
|
||||
|
||||
// Endpoint return the connection endpoint.
|
||||
func (i *ImgGeth) Endpoint() string {
|
||||
if !i.running {
|
||||
return ""
|
||||
}
|
||||
switch true {
|
||||
case i.httpPort != 0:
|
||||
return fmt.Sprintf("http://127.0.0.1:%d", i.httpPort)
|
||||
@@ -71,6 +87,11 @@ func (i *ImgGeth) Endpoint() string {
|
||||
}
|
||||
}
|
||||
|
||||
// ChainID return chainID.
|
||||
func (i *ImgGeth) ChainID() *big.Int {
|
||||
return i.chainID
|
||||
}
|
||||
|
||||
func (i *ImgGeth) isOk() bool {
|
||||
keyword := "WebSocket enabled"
|
||||
okCh := make(chan struct{}, 1)
|
||||
@@ -125,7 +146,7 @@ func (i *ImgGeth) Stop() error {
|
||||
}
|
||||
|
||||
func (i *ImgGeth) prepare() []string {
|
||||
cmds := []string{"docker", "run", "--name", i.name}
|
||||
cmds := []string{"docker", "run", "--rm", "--name", i.name}
|
||||
var ports []string
|
||||
if i.httpPort != 0 {
|
||||
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)
|
||||
|
||||
@@ -8,8 +8,6 @@ import (
|
||||
_ "github.com/lib/pq" //nolint:golint
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
_ "scroll-tech/database/cmd/app"
|
||||
|
||||
"scroll-tech/common/docker"
|
||||
)
|
||||
|
||||
@@ -25,50 +23,32 @@ func TestMain(m *testing.M) {
|
||||
base.Free()
|
||||
}
|
||||
|
||||
func TestStartProcess(t *testing.T) {
|
||||
base.RunImages(t)
|
||||
func TestDB(t *testing.T) {
|
||||
base.RunDBImage(t)
|
||||
|
||||
// migrate db.
|
||||
base.RunDBApp(t, "reset", "successful to reset")
|
||||
base.RunDBApp(t, "migrate", "current version:")
|
||||
db, err := sqlx.Open("postgres", base.DBImg.Endpoint())
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, db.Ping())
|
||||
}
|
||||
|
||||
func TestDocker(t *testing.T) {
|
||||
base.RunImages(t)
|
||||
t.Parallel()
|
||||
t.Run("testL1Geth", testL1Geth)
|
||||
t.Run("testL2Geth", testL2Geth)
|
||||
t.Run("testDB", testDB)
|
||||
}
|
||||
|
||||
func testL1Geth(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
func TestL1Geth(t *testing.T) {
|
||||
base.RunL1Geth(t)
|
||||
|
||||
client, err := base.L1Client()
|
||||
assert.NoError(t, err)
|
||||
|
||||
chainID, err := client.ChainID(ctx)
|
||||
chainID, err := client.ChainID(context.Background())
|
||||
assert.NoError(t, err)
|
||||
t.Logf("chainId: %s", chainID.String())
|
||||
}
|
||||
|
||||
func testL2Geth(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
func TestL2Geth(t *testing.T) {
|
||||
base.RunL2Geth(t)
|
||||
|
||||
client, err := base.L2Client()
|
||||
assert.NoError(t, err)
|
||||
|
||||
chainID, err := client.ChainID(ctx)
|
||||
chainID, err := client.ChainID(context.Background())
|
||||
assert.NoError(t, err)
|
||||
t.Logf("chainId: %s", chainID.String())
|
||||
}
|
||||
|
||||
func testDB(t *testing.T) {
|
||||
driverName := "postgres"
|
||||
|
||||
db, err := sqlx.Open(driverName, base.DBEndpoint())
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, db.Ping())
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
@@ -26,6 +27,13 @@ type ImgInstance interface {
|
||||
Start() error
|
||||
Stop() error
|
||||
Endpoint() string
|
||||
IsRunning() bool
|
||||
}
|
||||
|
||||
// GethImgInstance based on ImgInstance and add chainID interface.
|
||||
type GethImgInstance interface {
|
||||
ImgInstance
|
||||
ChainID() *big.Int
|
||||
}
|
||||
|
||||
// GetContainerID returns the ID of Container.
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,4 +1,4 @@
|
||||
FROM scrolltech/l2geth:prealpha-v5.1
|
||||
FROM scrolltech/l2geth:scroll-v3.1.4
|
||||
|
||||
RUN mkdir -p /l2geth/keystore
|
||||
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -5,9 +5,9 @@ go 1.18
|
||||
require (
|
||||
github.com/docker/docker v20.10.21+incompatible
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/lib/pq v1.10.6
|
||||
github.com/lib/pq v1.10.7
|
||||
github.com/mattn/go-colorable v0.1.13
|
||||
github.com/mattn/go-isatty v0.0.16
|
||||
github.com/mattn/go-isatty v0.0.18
|
||||
github.com/modern-go/reflect2 v1.0.2
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04
|
||||
@@ -32,8 +32,10 @@ require (
|
||||
github.com/ethereum/go-ethereum v1.11.4 // indirect
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/go-kit/kit v0.9.0 // indirect
|
||||
github.com/go-logfmt/logfmt v0.5.1 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-sql-driver/mysql v1.7.0 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
@@ -51,13 +53,14 @@ require (
|
||||
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
|
||||
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.14 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mitchellh/pointerstructure v1.2.0 // indirect
|
||||
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/onsi/gomega v1.26.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/opentracing/opentracing-go v1.1.0 // indirect
|
||||
@@ -65,7 +68,9 @@ require (
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/tsdb v0.7.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
github.com/rogpeppe/go-internal v1.10.0 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.5.2 // indirect
|
||||
@@ -73,19 +78,19 @@ require (
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/status-im/keycard-go v0.2.0 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.10 // indirect
|
||||
github.com/tklauser/numcpus v0.4.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.11 // indirect
|
||||
github.com/tklauser/numcpus v0.6.0 // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.7.0 // indirect
|
||||
golang.org/x/mod v0.8.0 // indirect
|
||||
golang.org/x/net v0.8.0 // indirect
|
||||
golang.org/x/mod v0.10.0 // indirect
|
||||
golang.org/x/net v0.9.0 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/sys v0.6.0 // indirect
|
||||
golang.org/x/text v0.8.0 // indirect
|
||||
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
|
||||
golang.org/x/tools v0.6.0 // indirect
|
||||
golang.org/x/sys v0.7.0 // indirect
|
||||
golang.org/x/text v0.9.0 // indirect
|
||||
golang.org/x/time v0.1.0 // indirect
|
||||
golang.org/x/tools v0.8.0 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
|
||||
|
||||
@@ -108,8 +108,9 @@ github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1T
|
||||
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
|
||||
@@ -119,8 +120,9 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
|
||||
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
@@ -244,8 +246,8 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
|
||||
github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
|
||||
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
|
||||
@@ -259,11 +261,13 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
|
||||
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
|
||||
@@ -297,8 +301,9 @@ github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q=
|
||||
github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
|
||||
@@ -335,10 +340,14 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
@@ -380,10 +389,10 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
|
||||
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
|
||||
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
|
||||
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
|
||||
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
|
||||
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
|
||||
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
|
||||
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
|
||||
@@ -444,8 +453,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
|
||||
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -467,8 +476,8 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -519,11 +528,12 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -531,14 +541,14 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
|
||||
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
|
||||
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -569,13 +579,12 @@ golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapK
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
|
||||
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
|
||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char};
|
||||
use libc::c_char;
|
||||
use std::cell::OnceCell;
|
||||
use std::panic;
|
||||
use std::ptr::null;
|
||||
use types::eth::BlockTrace;
|
||||
use zkevm::circuit::AGG_DEGREE;
|
||||
use zkevm::utils::{load_or_create_params, load_or_create_seed};
|
||||
use zkevm::utils::{load_params, load_seed};
|
||||
use zkevm::{circuit::DEGREE, prover::Prover};
|
||||
|
||||
static mut PROVER: OnceCell<Prover> = OnceCell::new();
|
||||
@@ -15,9 +17,9 @@ pub unsafe extern "C" fn init_prover(params_path: *const c_char, seed_path: *con
|
||||
|
||||
let params_path = c_char_to_str(params_path);
|
||||
let seed_path = c_char_to_str(seed_path);
|
||||
let params = load_or_create_params(params_path, *DEGREE).unwrap();
|
||||
let agg_params = load_or_create_params(params_path, *AGG_DEGREE).unwrap();
|
||||
let seed = load_or_create_seed(seed_path).unwrap();
|
||||
let params = load_params(params_path, *DEGREE).unwrap();
|
||||
let agg_params = load_params(params_path, *AGG_DEGREE).unwrap();
|
||||
let seed = load_seed(seed_path).unwrap();
|
||||
let p = Prover::from_params_and_seed(params, agg_params, seed);
|
||||
PROVER.set(p).unwrap();
|
||||
}
|
||||
@@ -27,13 +29,15 @@ pub unsafe extern "C" fn init_prover(params_path: *const c_char, seed_path: *con
|
||||
pub unsafe extern "C" fn create_agg_proof(trace_char: *const c_char) -> *const c_char {
|
||||
let trace_vec = c_char_to_vec(trace_char);
|
||||
let trace = serde_json::from_slice::<BlockTrace>(&trace_vec).unwrap();
|
||||
let proof = PROVER
|
||||
.get_mut()
|
||||
.unwrap()
|
||||
.create_agg_circuit_proof(&trace)
|
||||
.unwrap();
|
||||
let proof_bytes = serde_json::to_vec(&proof).unwrap();
|
||||
vec_to_c_char(proof_bytes)
|
||||
let proof_result = panic::catch_unwind(|| {
|
||||
let proof = PROVER
|
||||
.get_mut()
|
||||
.unwrap()
|
||||
.create_agg_circuit_proof(&trace)
|
||||
.unwrap();
|
||||
serde_json::to_vec(&proof).unwrap()
|
||||
});
|
||||
proof_result.map_or(null(), vec_to_c_char)
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
@@ -41,11 +45,13 @@ pub unsafe extern "C" fn create_agg_proof(trace_char: *const c_char) -> *const c
|
||||
pub unsafe extern "C" fn create_agg_proof_multi(trace_char: *const c_char) -> *const c_char {
|
||||
let trace_vec = c_char_to_vec(trace_char);
|
||||
let traces = serde_json::from_slice::<Vec<BlockTrace>>(&trace_vec).unwrap();
|
||||
let proof = PROVER
|
||||
.get_mut()
|
||||
.unwrap()
|
||||
.create_agg_circuit_proof_batch(traces.as_slice())
|
||||
.unwrap();
|
||||
let proof_bytes = serde_json::to_vec(&proof).unwrap();
|
||||
vec_to_c_char(proof_bytes)
|
||||
let proof_result = panic::catch_unwind(|| {
|
||||
let proof = PROVER
|
||||
.get_mut()
|
||||
.unwrap()
|
||||
.create_agg_circuit_proof_batch(traces.as_slice())
|
||||
.unwrap();
|
||||
serde_json::to_vec(&proof).unwrap()
|
||||
});
|
||||
proof_result.map_or(null(), vec_to_c_char)
|
||||
}
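With this change a panic inside proof generation no longer unwinds across the FFI boundary: `create_agg_proof` and `create_agg_proof_multi` return a null pointer when the prover panics instead of crashing the process. A minimal sketch of how a cgo caller could turn that null return into a Go error follows; the wrapper name, the `#cgo` directives, and the memory handling are illustrative assumptions, not the repository's actual wrapper.

```go
package prover

/*
#cgo LDFLAGS: -lzkp
#include <stdlib.h>
char* create_agg_proof_multi(char* trace);
*/
import "C"

import (
	"errors"
	"unsafe"
)

// CreateAggProofMulti serializes traces to JSON, calls into libzkp, and treats
// a NULL return (a panic caught on the Rust side) as a failed proof.
func CreateAggProofMulti(tracesJSON []byte) ([]byte, error) {
	cTraces := C.CString(string(tracesJSON))
	defer C.free(unsafe.Pointer(cTraces))

	ret := C.create_agg_proof_multi(cTraces)
	if ret == nil {
		return nil, errors.New("libzkp: aggregation proof generation panicked")
	}
	return []byte(C.GoString(ret)), nil
}
```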
|
||||
|
||||
@@ -2,9 +2,10 @@ use crate::utils::{c_char_to_str, c_char_to_vec};
|
||||
use libc::c_char;
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
use std::panic;
|
||||
use zkevm::circuit::{AGG_DEGREE, DEGREE};
|
||||
use zkevm::prover::AggCircuitProof;
|
||||
use zkevm::utils::load_or_create_params;
|
||||
use zkevm::utils::load_params;
|
||||
use zkevm::verifier::Verifier;
|
||||
|
||||
static mut VERIFIER: Option<&Verifier> = None;
|
||||
@@ -20,8 +21,8 @@ pub unsafe extern "C" fn init_verifier(params_path: *const c_char, agg_vk_path:
|
||||
let mut agg_vk = vec![];
|
||||
f.read_to_end(&mut agg_vk).unwrap();
|
||||
|
||||
let params = load_or_create_params(params_path, *DEGREE).unwrap();
|
||||
let agg_params = load_or_create_params(params_path, *AGG_DEGREE).unwrap();
|
||||
let params = load_params(params_path, *DEGREE).unwrap();
|
||||
let agg_params = load_params(params_path, *AGG_DEGREE).unwrap();
|
||||
|
||||
let v = Box::new(Verifier::from_params(params, agg_params, Some(agg_vk)));
|
||||
VERIFIER = Some(Box::leak(v))
|
||||
@@ -32,9 +33,11 @@ pub unsafe extern "C" fn init_verifier(params_path: *const c_char, agg_vk_path:
|
||||
pub unsafe extern "C" fn verify_agg_proof(proof: *const c_char) -> c_char {
|
||||
let proof_vec = c_char_to_vec(proof);
|
||||
let agg_proof = serde_json::from_slice::<AggCircuitProof>(proof_vec.as_slice()).unwrap();
|
||||
let verified = VERIFIER
|
||||
.unwrap()
|
||||
.verify_agg_circuit_proof(agg_proof)
|
||||
.is_ok();
|
||||
verified as c_char
|
||||
let verified = panic::catch_unwind(|| {
|
||||
VERIFIER
|
||||
.unwrap()
|
||||
.verify_agg_circuit_proof(agg_proof)
|
||||
.is_ok()
|
||||
});
|
||||
verified.unwrap_or(false) as c_char
|
||||
}
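The verifier gets the same `catch_unwind` treatment but returns a C `char`, where `0` now means either that the proof failed to verify or that the Rust side panicked. A sketch of the corresponding Go-side interpretation, assuming a matching `char verify_agg_proof(char* proof);` declaration in a cgo preamble like the one above:

```go
// VerifyAggProof hands a serialized AggCircuitProof to libzkp and maps the
// returned char onto a Go bool: non-zero means verified, zero means the
// verification failed or panicked.
func VerifyAggProof(proofJSON []byte) bool {
	cProof := C.CString(string(proofJSON))
	defer C.free(unsafe.Pointer(cProof))
	return C.verify_agg_proof(cProof) != 0
}
```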
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
)
|
||||
|
||||
// L1BlockStatus represents current l1 block processing status
|
||||
@@ -163,6 +165,7 @@ type SessionInfo struct {
|
||||
Rollers map[string]*RollerStatus `json:"rollers"`
|
||||
StartTimestamp int64 `json:"start_timestamp"`
|
||||
Attempts uint8 `json:"attempts,omitempty"`
|
||||
ProveType message.ProveType `json:"prove_type,omitempty"`
|
||||
}
|
||||
|
||||
// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
|
||||
@@ -256,3 +259,16 @@ type BlockBatch struct {
|
||||
CommittedAt *time.Time `json:"committed_at" db:"committed_at"`
|
||||
FinalizedAt *time.Time `json:"finalized_at" db:"finalized_at"`
|
||||
}
|
||||
|
||||
// AggTask is a wrapper type around db AggProveTask type.
|
||||
type AggTask struct {
|
||||
ID string `json:"id" db:"id"`
|
||||
StartBatchIndex uint64 `json:"start_batch_index" db:"start_batch_index"`
|
||||
StartBatchHash string `json:"start_batch_hash" db:"start_batch_hash"`
|
||||
EndBatchIndex uint64 `json:"end_batch_index" db:"end_batch_index"`
|
||||
EndBatchHash string `json:"end_batch_hash" db:"end_batch_hash"`
|
||||
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
|
||||
Proof []byte `json:"proof" db:"proof"`
|
||||
CreatedTime *time.Time `json:"created_time" db:"created_time"`
|
||||
UpdatedTime *time.Time `json:"updated_time" db:"updated_time"`
|
||||
}
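The `db` struct tags on `AggTask` mirror the columns of the aggregation-task table, so an sqlx-style library can scan query results straight into the struct. A minimal sketch under that assumption; the table name, the status value, and the import path are illustrative only.

```go
package orm

import (
	"github.com/jmoiron/sqlx"

	"scroll-tech/common/types"
)

// unassignedAggTasks loads aggregation tasks that have not yet been handed to
// an aggregator roller; sqlx maps columns via the db tags on types.AggTask.
func unassignedAggTasks(db *sqlx.DB) ([]*types.AggTask, error) {
	var tasks []*types.AggTask
	err := db.Select(&tasks,
		"SELECT * FROM agg_task WHERE proving_status = 1") // status 1: unassigned (illustrative value)
	return tasks, err
}
```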
|
||||
|
||||
@@ -22,6 +22,27 @@ const (
|
||||
StatusProofError
|
||||
)
|
||||
|
||||
// ProveType represents the type of roller.
|
||||
type ProveType uint8
|
||||
|
||||
func (r ProveType) String() string {
|
||||
switch r {
|
||||
case BasicProve:
|
||||
return "Basic Prove"
|
||||
case AggregatorProve:
|
||||
return "Aggregator Prove"
|
||||
default:
|
||||
return "Illegal Prove type"
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// BasicProve is default roller, it only generates zk proof from traces.
|
||||
BasicProve ProveType = iota
|
||||
// AggregatorProve generates zk proof from other zk proofs and aggregate them into one proof.
|
||||
AggregatorProve
|
||||
)
|
||||
|
||||
// AuthMsg is the first message exchanged from the Roller to the Sequencer.
|
||||
// It effectively acts as a registration, and makes the Roller identification
|
||||
// known to the Sequencer.
|
||||
@@ -36,10 +57,10 @@ type AuthMsg struct {
|
||||
type Identity struct {
|
||||
// Roller name
|
||||
Name string `json:"name"`
|
||||
// Roller RollerType
|
||||
RollerType ProveType `json:"roller_type,omitempty"`
|
||||
// Unverified Unix timestamp of message creation
|
||||
Timestamp uint32 `json:"timestamp"`
|
||||
// Roller public key
|
||||
PublicKey string `json:"publicKey"`
|
||||
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version.
|
||||
// curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha"
|
||||
Version string `json:"version"`
|
||||
@@ -56,13 +77,14 @@ func GenerateToken() (string, error) {
|
||||
return hex.EncodeToString(b), nil
|
||||
}
|
||||
|
||||
// Sign auth message
|
||||
func (a *AuthMsg) Sign(priv *ecdsa.PrivateKey) error {
|
||||
// SignWithKey auth message with private key and set public key in auth message's Identity
|
||||
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
|
||||
// Hash identity content
|
||||
hash, err := a.Identity.Hash()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sign register message
|
||||
sig, err := crypto.Sign(hash, priv)
|
||||
if err != nil {
|
||||
@@ -80,36 +102,27 @@ func (a *AuthMsg) Verify() (bool, error) {
|
||||
return false, err
|
||||
}
|
||||
sig := common.FromHex(a.Signature)
|
||||
// recover public key
|
||||
if a.Identity.PublicKey == "" {
|
||||
pk, err := crypto.SigToPub(hash, sig)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
a.Identity.PublicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
|
||||
}
|
||||
|
||||
return crypto.VerifySignature(common.FromHex(a.Identity.PublicKey), hash, sig[:len(sig)-1]), nil
|
||||
pk, err := crypto.SigToPub(hash, sig)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
|
||||
}
|
||||
|
||||
// PublicKey return public key from signature
|
||||
func (a *AuthMsg) PublicKey() (string, error) {
|
||||
if a.Identity.PublicKey == "" {
|
||||
hash, err := a.Identity.Hash()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
sig := common.FromHex(a.Signature)
|
||||
// recover public key
|
||||
pk, err := crypto.SigToPub(hash, sig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
a.Identity.PublicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
|
||||
return a.Identity.PublicKey, nil
|
||||
hash, err := a.Identity.Hash()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return a.Identity.PublicKey, nil
|
||||
sig := common.FromHex(a.Signature)
|
||||
// recover public key
|
||||
pk, err := crypto.SigToPub(hash, sig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
|
||||
}
|
||||
|
||||
// Hash returns the hash of the auth message, which should be the message used
|
||||
@@ -188,14 +201,19 @@ func (a *ProofMsg) PublicKey() (string, error) {
|
||||
|
||||
// TaskMsg is a wrapper type around db ProveTask type.
|
||||
type TaskMsg struct {
|
||||
ID string `json:"id"`
|
||||
Traces []*types.BlockTrace `json:"blockTraces"`
|
||||
ID string `json:"id"`
|
||||
Type ProveType `json:"type,omitempty"`
|
||||
// Only basic rollers need traces, aggregator rollers don't!
|
||||
Traces []*types.BlockTrace `json:"blockTraces,omitempty"`
|
||||
// Only aggregator rollers need proofs to aggregate, basic rollers don't!
|
||||
SubProofs [][]byte `json:"sub_proofs,omitempty"`
|
||||
}
|
||||
|
||||
// ProofDetail is the message received from rollers that contains zk proof, the status of
|
||||
// the proof generation succeeded, and an error message if proof generation failed.
|
||||
type ProofDetail struct {
|
||||
ID string `json:"id"`
|
||||
Type ProveType `json:"type,omitempty"`
|
||||
Status RespStatus `json:"status"`
|
||||
Proof *AggProof `json:"proof"`
|
||||
Error string `json:"error,omitempty"`
|
||||
@@ -19,7 +19,12 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
assert.NoError(t, authMsg.Sign(privkey))
|
||||
assert.NoError(t, authMsg.SignWithKey(privkey))
|
||||
|
||||
// check public key.
|
||||
pk, err := authMsg.PublicKey()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk)
|
||||
|
||||
ok, err := authMsg.Verify()
|
||||
assert.NoError(t, err)
|
||||
@@ -8,10 +8,30 @@ import (
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
// MockAppName a new type mock app.
|
||||
type MockAppName string
|
||||
|
||||
var (
|
||||
// EventWatcherApp the name of mock event-watcher app.
|
||||
EventWatcherApp MockAppName = "event-watcher-test"
|
||||
// GasOracleApp the name of mock gas-oracle app.
|
||||
GasOracleApp MockAppName = "gas-oracle-test"
|
||||
// MessageRelayerApp the name of mock message-relayer app.
|
||||
MessageRelayerApp MockAppName = "message-relayer-test"
|
||||
// RollupRelayerApp the name of mock rollup-relayer app.
|
||||
RollupRelayerApp MockAppName = "rollup-relayer-test"
|
||||
// CoordinatorApp the name of mock coordinator app.
|
||||
CoordinatorApp MockAppName = "coordinator-test"
|
||||
// DBCliApp the name of mock database app.
|
||||
DBCliApp MockAppName = "db_cli-test"
|
||||
// RollerApp the name of mock roller app.
|
||||
RollerApp MockAppName = "roller-test"
|
||||
)
|
||||
|
||||
// RegisterSimulation register initializer function for integration-test.
|
||||
func RegisterSimulation(app *cli.App, name string) {
|
||||
func RegisterSimulation(app *cli.App, name MockAppName) {
|
||||
// Run the app for integration-test
|
||||
reexec.Register(name, func() {
|
||||
reexec.Register(string(name), func() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
|
||||
@@ -3,6 +3,8 @@ package utils
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/modern-go/reflect2"
|
||||
)
|
||||
|
||||
// TryTimes try run several times until the function return true.
|
||||
@@ -42,3 +44,8 @@ func Loop(ctx context.Context, period time.Duration, f func()) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IsNil Check if the interface is empty.
|
||||
func IsNil(i interface{}) bool {
|
||||
return i == nil || reflect2.IsNil(i)
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var tag = "v3.0.7"
|
||||
var tag = "v3.0.14"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
|
||||
@@ -13,7 +13,7 @@ The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs in layer 2 an
|
||||
### batchWithdrawERC1155
|
||||
|
||||
```solidity
|
||||
function batchWithdrawERC1155(address _token, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external nonpayable
|
||||
function batchWithdrawERC1155(address _token, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external payable
|
||||
```
|
||||
|
||||
Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
|
||||
@@ -32,7 +32,7 @@ Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
|
||||
### batchWithdrawERC1155
|
||||
|
||||
```solidity
|
||||
function batchWithdrawERC1155(address _token, address _to, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external nonpayable
|
||||
function batchWithdrawERC1155(address _token, address _to, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external payable
|
||||
```
|
||||
|
||||
Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
|
||||
@@ -319,7 +319,7 @@ Update layer 2 to layer 1 token mapping.
|
||||
### withdrawERC1155
|
||||
|
||||
```solidity
|
||||
function withdrawERC1155(address _token, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external nonpayable
|
||||
function withdrawERC1155(address _token, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external payable
|
||||
```
|
||||
|
||||
Withdraw some ERC1155 NFT to caller's account on layer 1.
|
||||
@@ -338,7 +338,7 @@ Withdraw some ERC1155 NFT to caller's account on layer 1.
|
||||
### withdrawERC1155
|
||||
|
||||
```solidity
|
||||
function withdrawERC1155(address _token, address _to, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external nonpayable
|
||||
function withdrawERC1155(address _token, address _to, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external payable
|
||||
```
|
||||
|
||||
Withdraw some ERC1155 NFT to caller's account on layer 1.
|
||||
|
||||
@@ -13,7 +13,7 @@ The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs in layer 2 and
|
||||
### batchWithdrawERC721
|
||||
|
||||
```solidity
|
||||
function batchWithdrawERC721(address _token, uint256[] _tokenIds, uint256 _gasLimit) external nonpayable
|
||||
function batchWithdrawERC721(address _token, uint256[] _tokenIds, uint256 _gasLimit) external payable
|
||||
```
|
||||
|
||||
Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
|
||||
@@ -31,7 +31,7 @@ Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
|
||||
### batchWithdrawERC721
|
||||
|
||||
```solidity
|
||||
function batchWithdrawERC721(address _token, address _to, uint256[] _tokenIds, uint256 _gasLimit) external nonpayable
|
||||
function batchWithdrawERC721(address _token, address _to, uint256[] _tokenIds, uint256 _gasLimit) external payable
|
||||
```
|
||||
|
||||
Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
|
||||
@@ -266,7 +266,7 @@ Update layer 2 to layer 1 token mapping.
|
||||
### withdrawERC721
|
||||
|
||||
```solidity
|
||||
function withdrawERC721(address _token, uint256 _tokenId, uint256 _gasLimit) external nonpayable
|
||||
function withdrawERC721(address _token, uint256 _tokenId, uint256 _gasLimit) external payable
|
||||
```
|
||||
|
||||
Withdraw some ERC721 NFT to caller's account on layer 1.
|
||||
@@ -284,7 +284,7 @@ Withdraw some ERC721 NFT to caller's account on layer 1.
|
||||
### withdrawERC721
|
||||
|
||||
```solidity
|
||||
function withdrawERC721(address _token, address _to, uint256 _tokenId, uint256 _gasLimit) external nonpayable
|
||||
function withdrawERC721(address _token, address _to, uint256 _tokenId, uint256 _gasLimit) external payable
|
||||
```
|
||||
|
||||
Withdraw some ERC721 NFT to caller's account on layer 1.
|
||||
|
||||
@@ -216,12 +216,7 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
|
||||
bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, _to, _value, _messageNonce, _message);
|
||||
|
||||
// compute and deduct the messaging fee to fee vault.
|
||||
uint256 _fee = IL1MessageQueue(_messageQueue).estimateCrossDomainMessageFee(
|
||||
address(this),
|
||||
_counterpart,
|
||||
_xDomainCalldata,
|
||||
_gasLimit
|
||||
);
|
||||
uint256 _fee = IL1MessageQueue(_messageQueue).estimateCrossDomainMessageFee(_gasLimit);
|
||||
require(msg.value >= _fee + _value, "Insufficient msg.value");
|
||||
if (_fee > 0) {
|
||||
(bool _success, ) = feeVault.call{value: _fee}("");
|
||||
|
||||
@@ -85,7 +85,7 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
|
||||
// but it seems not a big problem.
|
||||
IERC20Upgradeable(_l1Token).safeTransfer(_to, _amount);
|
||||
|
||||
// @todo forward `_data` to `_to` in the near future
|
||||
_doCallback(_to, _data);
|
||||
|
||||
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
|
||||
}
|
||||
|
||||
@@ -74,7 +74,7 @@ contract L1ETHGateway is Initializable, ScrollGatewayBase, IL1ETHGateway {
|
||||
(bool _success, ) = _to.call{value: _amount}("");
|
||||
require(_success, "ETH transfer failed");
|
||||
|
||||
// @todo farward _data to `_to` in near future.
|
||||
_doCallback(_to, _data);
|
||||
|
||||
emit FinalizeWithdrawETH(_from, _to, _amount, _data);
|
||||
}
|
||||
|
||||
@@ -99,7 +99,7 @@ contract L1StandardERC20Gateway is Initializable, ScrollGatewayBase, L1ERC20Gate
|
||||
// but it seems not a big problem.
|
||||
IERC20(_l1Token).safeTransfer(_to, _amount);
|
||||
|
||||
// @todo forward `_data` to `_to` in the near future
|
||||
_doCallback(_to, _data);
|
||||
|
||||
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
|
||||
}
|
||||
|
||||
@@ -90,7 +90,7 @@ contract L1WETHGateway is Initializable, ScrollGatewayBase, L1ERC20Gateway {
|
||||
IWETH(_l1Token).deposit{value: _amount}();
|
||||
IERC20(_l1Token).safeTransfer(_to, _amount);
|
||||
|
||||
// @todo forward `_data` to `_to`.
|
||||
_doCallback(_to, _data);
|
||||
|
||||
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
|
||||
}
|
||||
|
||||
@@ -36,16 +36,8 @@ interface IL1MessageQueue {
|
||||
function getCrossDomainMessage(uint256 queueIndex) external view returns (bytes32);
|
||||
|
||||
/// @notice Return the amount of ETH should pay for cross domain message.
|
||||
/// @param sender The address of account who initiates the message in L1.
|
||||
/// @param target The address of account who will recieve the message in L2.
|
||||
/// @param message The content of the message.
|
||||
/// @param gasLimit Gas limit required to complete the message relay on L2.
|
||||
function estimateCrossDomainMessageFee(
|
||||
address sender,
|
||||
address target,
|
||||
bytes memory message,
|
||||
uint256 gasLimit
|
||||
) external view returns (uint256);
|
||||
function estimateCrossDomainMessageFee(uint256 gasLimit) external view returns (uint256);
|
||||
|
||||
/*****************************
|
||||
* Public Mutating Functions *
|
||||
|
||||
@@ -4,13 +4,6 @@ pragma solidity ^0.8.0;
|
||||
|
||||
interface IL2GasPriceOracle {
|
||||
/// @notice Estimate fee for cross chain message call.
|
||||
/// @param _sender The address of sender who invoke the call.
|
||||
/// @param _to The target address to receive the call.
|
||||
/// @param _message The message will be passed to the target address.
|
||||
function estimateCrossDomainMessageFee(
|
||||
address _sender,
|
||||
address _to,
|
||||
bytes memory _message,
|
||||
uint256 _gasLimit
|
||||
) external view returns (uint256);
|
||||
/// @param _gasLimit Gas limit required to complete the message relay on L2.
|
||||
function estimateCrossDomainMessageFee(uint256 _gasLimit) external view returns (uint256);
|
||||
}
|
||||
|
||||
@@ -61,15 +61,10 @@ contract L1MessageQueue is OwnableUpgradeable, IL1MessageQueue {
|
||||
}
|
||||
|
||||
/// @inheritdoc IL1MessageQueue
|
||||
function estimateCrossDomainMessageFee(
|
||||
address _sender,
|
||||
address _target,
|
||||
bytes memory _message,
|
||||
uint256 _gasLimit
|
||||
) external view override returns (uint256) {
|
||||
function estimateCrossDomainMessageFee(uint256 _gasLimit) external view override returns (uint256) {
|
||||
address _oracle = gasOracle;
|
||||
if (_oracle == address(0)) return 0;
|
||||
return IL2GasPriceOracle(_oracle).estimateCrossDomainMessageFee(_sender, _target, _message, _gasLimit);
|
||||
return IL2GasPriceOracle(_oracle).estimateCrossDomainMessageFee(_gasLimit);
|
||||
}
|
||||
|
||||
/*****************************
|
||||
|
||||
@@ -18,43 +18,14 @@ contract L2GasPriceOracle is OwnableUpgradeable, IL2GasPriceOracle {
|
||||
/// @param _newWhitelist The address of new whitelist contract.
|
||||
event UpdateWhitelist(address _oldWhitelist, address _newWhitelist);
|
||||
|
||||
/// @notice Emitted when current fee overhead is updated.
|
||||
/// @param overhead The current fee overhead updated.
|
||||
event OverheadUpdated(uint256 overhead);
|
||||
|
||||
/// @notice Emitted when current fee scalar is updated.
|
||||
/// @param scalar The current fee scalar updated.
|
||||
event ScalarUpdated(uint256 scalar);
|
||||
|
||||
/// @notice Emitted when current l2 base fee is updated.
|
||||
/// @param l2BaseFee The current l2 base fee updated.
|
||||
event L2BaseFeeUpdated(uint256 l2BaseFee);
|
||||
|
||||
/*************
|
||||
* Constants *
|
||||
*************/
|
||||
|
||||
/// @dev The precision used in the scalar.
|
||||
uint256 private constant PRECISION = 1e9;
|
||||
|
||||
/// @dev The maximum possible l1 fee overhead.
|
||||
/// Computed based on current l1 block gas limit.
|
||||
uint256 private constant MAX_OVERHEAD = 30000000 / 16;
|
||||
|
||||
/// @dev The maximum possible l1 fee scale.
|
||||
/// x1000 should be enough.
|
||||
uint256 private constant MAX_SCALE = 1000 * PRECISION;
|
||||
|
||||
/*************
|
||||
* Variables *
|
||||
*************/
|
||||
|
||||
/// @notice The current l1 fee overhead.
|
||||
uint256 public overhead;
|
||||
|
||||
/// @notice The current l1 fee scalar.
|
||||
uint256 public scalar;
|
||||
|
||||
/// @notice The latest known l2 base fee.
|
||||
uint256 public l2BaseFee;
|
||||
|
||||
@@ -73,46 +44,9 @@ contract L2GasPriceOracle is OwnableUpgradeable, IL2GasPriceOracle {
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @notice Return the current l1 base fee.
|
||||
function l1BaseFee() public view returns (uint256) {
|
||||
return block.basefee;
|
||||
}
|
||||
|
||||
/// @inheritdoc IL2GasPriceOracle
|
||||
function estimateCrossDomainMessageFee(
|
||||
address,
|
||||
address,
|
||||
bytes memory _message,
|
||||
uint256 _gasLimit
|
||||
) external view override returns (uint256) {
|
||||
unchecked {
|
||||
uint256 _l1GasUsed = getL1GasUsed(_message);
|
||||
uint256 _rollupFee = (_l1GasUsed * l1BaseFee() * scalar) / PRECISION;
|
||||
uint256 _l2Fee = _gasLimit * l2BaseFee;
|
||||
return _l2Fee + _rollupFee;
|
||||
}
|
||||
}
|
||||
|
||||
/// @notice Computes the amount of L1 gas used for a transaction. Adds the overhead which
|
||||
/// represents the per-transaction gas overhead of posting the transaction and state
|
||||
/// roots to L1. Adds 68 bytes of padding to account for the fact that the input does
|
||||
/// not have a signature.
|
||||
/// @param _data Unsigned fully RLP-encoded transaction to get the L1 gas for.
|
||||
/// @return Amount of L1 gas used to publish the transaction.
|
||||
function getL1GasUsed(bytes memory _data) public view returns (uint256) {
|
||||
uint256 _total = 0;
|
||||
uint256 _length = _data.length;
|
||||
unchecked {
|
||||
for (uint256 i = 0; i < _length; i++) {
|
||||
if (_data[i] == 0) {
|
||||
_total += 4;
|
||||
} else {
|
||||
_total += 16;
|
||||
}
|
||||
}
|
||||
uint256 _unsigned = _total + overhead;
|
||||
return _unsigned + (68 * 16);
|
||||
}
|
||||
function estimateCrossDomainMessageFee(uint256 _gasLimit) external view override returns (uint256) {
|
||||
return _gasLimit * l2BaseFee;
|
||||
}
|
||||
|
||||
/*****************************
|
||||
@@ -133,24 +67,6 @@ contract L2GasPriceOracle is OwnableUpgradeable, IL2GasPriceOracle {
|
||||
* Restricted Functions *
|
||||
************************/
|
||||
|
||||
/// @notice Allows the owner to modify the overhead.
|
||||
/// @param _overhead New overhead
|
||||
function setOverhead(uint256 _overhead) external onlyOwner {
|
||||
require(_overhead <= MAX_OVERHEAD, "exceed maximum overhead");
|
||||
|
||||
overhead = _overhead;
|
||||
emit OverheadUpdated(_overhead);
|
||||
}
|
||||
|
||||
/// Allows the owner to modify the scalar.
|
||||
/// @param _scalar The new scalar
|
||||
function setScalar(uint256 _scalar) external onlyOwner {
|
||||
require(_scalar <= MAX_SCALE, "exceed maximum scale");
|
||||
|
||||
scalar = _scalar;
|
||||
emit ScalarUpdated(_scalar);
|
||||
}
|
||||
|
||||
/// @notice Update whitelist contract.
|
||||
/// @dev This function can only called by contract owner.
|
||||
/// @param _newWhitelist The address of new whitelist contract.
|
||||
|
||||
@@ -264,6 +264,13 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
|
||||
|
||||
emit CommitBatch(publicInputHash);
|
||||
|
||||
// As we are getting close to sunsetting the Alpha testnet,
|
||||
// we now auto-finalize every batch to enable withdrawals.
|
||||
lastFinalizedBatchHash = publicInputHash;
|
||||
finalizedBatches[_batch.batchIndex] = publicInputHash;
|
||||
_batchInStorage.finalized = true;
|
||||
emit FinalizeBatch(publicInputHash);
|
||||
|
||||
return publicInputHash;
|
||||
}
|
||||
|
||||
|
||||
@@ -79,10 +79,10 @@ contract L2CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L2ERC20G
|
||||
require(msg.value == 0, "nonzero msg.value");
|
||||
require(_l1Token == tokenMapping[_l2Token], "l1 token mismatch");
|
||||
|
||||
// @todo forward `_callData` to `_to` using transferAndCall in the near future
|
||||
|
||||
IScrollStandardERC20(_l2Token).mint(_to, _amount);
|
||||
|
||||
_doCallback(_to, _data);
|
||||
|
||||
emit FinalizeDepositERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
|
||||
}
|
||||
|
||||
|
||||
@@ -72,7 +72,7 @@ contract L2ETHGateway is Initializable, ScrollGatewayBase, IL2ETHGateway {
|
||||
(bool _success, ) = _to.call{value: _amount}("");
|
||||
require(_success, "ETH transfer failed");
|
||||
|
||||
// @todo farward _data to `_to` in near future.
|
||||
_doCallback(_to, _data);
|
||||
|
||||
emit FinalizeDepositETH(_from, _to, _amount, _data);
|
||||
}
|
||||
|
||||
@@ -105,10 +105,10 @@ contract L2StandardERC20Gateway is Initializable, ScrollGatewayBase, L2ERC20Gate
|
||||
_deployL2Token(_deployData, _l1Token);
|
||||
}
|
||||
|
||||
// @todo forward `_callData` to `_to` using transferAndCall in the near future
|
||||
|
||||
IScrollStandardERC20(_l2Token).mint(_to, _amount);
|
||||
|
||||
_doCallback(_to, _callData);
|
||||
|
||||
emit FinalizeDepositERC20(_l1Token, _l2Token, _from, _to, _amount, _callData);
|
||||
}
|
||||
|
||||
|
||||
@@ -90,7 +90,7 @@ contract L2WETHGateway is Initializable, ScrollGatewayBase, L2ERC20Gateway {
|
||||
IWETH(_l2Token).deposit{value: _amount}();
|
||||
IERC20(_l2Token).safeTransfer(_to, _amount);
|
||||
|
||||
// @todo forward `_data` to `_to` in near future
|
||||
_doCallback(_to, _data);
|
||||
|
||||
emit FinalizeDepositERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
|
||||
}
|
||||
|
||||
@@ -43,7 +43,7 @@ interface IScrollMessenger {
|
||||
*****************************/
|
||||
|
||||
/// @notice Send cross chain message from L1 to L2 or L2 to L1.
|
||||
/// @param target The address of account who recieve the message.
|
||||
/// @param target The address of account who receive the message.
|
||||
/// @param value The amount of ether passed when call target contract.
|
||||
/// @param message The content of the message.
|
||||
/// @param gasLimit Gas limit required to complete the message relay on corresponding chain.
|
||||
@@ -55,7 +55,7 @@ interface IScrollMessenger {
|
||||
) external payable;
|
||||
|
||||
/// @notice Send cross chain message from L1 to L2 or L2 to L1.
|
||||
/// @param target The address of account who recieve the message.
|
||||
/// @param target The address of account who receive the message.
|
||||
/// @param value The amount of ether passed when call target contract.
|
||||
/// @param message The content of the message.
|
||||
/// @param gasLimit Gas limit required to complete the message relay on corresponding chain.
|
||||
|
||||
@@ -0,0 +1,7 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
interface IScrollGatewayCallback {
|
||||
function onScrollGatewayCallback(bytes memory data) external;
|
||||
}
|
||||
@@ -4,6 +4,7 @@ pragma solidity ^0.8.0;
|
||||
|
||||
import {IScrollGateway} from "./IScrollGateway.sol";
|
||||
import {IScrollMessenger} from "../IScrollMessenger.sol";
|
||||
import {IScrollGatewayCallback} from "../callbacks/IScrollGatewayCallback.sol";
|
||||
|
||||
abstract contract ScrollGatewayBase is IScrollGateway {
|
||||
/*************
|
||||
@@ -83,4 +84,17 @@ abstract contract ScrollGatewayBase is IScrollGateway {
|
||||
// for reentrancy guard
|
||||
_status = _NOT_ENTERED;
|
||||
}
|
||||
|
||||
/**********************
|
||||
* Internal Functions *
|
||||
**********************/
|
||||
|
||||
/// @dev Internal function to forward calldata to target contract.
|
||||
/// @param _to The address of contract to call.
|
||||
/// @param _data The calldata passed to the contract.
|
||||
function _doCallback(address _to, bytes memory _data) internal {
|
||||
if (_data.length > 0 && _to.code.length > 0) {
|
||||
IScrollGatewayCallback(_to).onScrollGatewayCallback(_data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ pragma solidity ^0.8.0;
|
||||
import {ERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/ERC20Upgradeable.sol";
|
||||
import {ERC20PermitUpgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/extensions/draft-ERC20PermitUpgradeable.sol";
|
||||
import {IScrollStandardERC20} from "./IScrollStandardERC20.sol";
|
||||
import {IERC677Receiver} from "./IERC677Receiver.sol";
|
||||
import {IERC677Receiver} from "../callbacks/IERC677Receiver.sol";
|
||||
|
||||
contract ScrollStandardERC20 is ERC20PermitUpgradeable, IScrollStandardERC20 {
|
||||
/// @inheritdoc IScrollStandardERC20
|
||||
|
||||
@@ -12,6 +12,7 @@ import {AddressAliasHelper} from "../libraries/common/AddressAliasHelper.sol";
|
||||
|
||||
import {L1GatewayTestBase} from "./L1GatewayTestBase.t.sol";
|
||||
import {MockScrollMessenger} from "./mocks/MockScrollMessenger.sol";
|
||||
import {MockGatewayRecipient} from "./mocks/MockGatewayRecipient.sol";
|
||||
|
||||
contract L1CustomERC20GatewayTest is L1GatewayTestBase {
|
||||
// from L1CustomERC20Gateway
|
||||
@@ -250,12 +251,10 @@ contract L1CustomERC20GatewayTest is L1GatewayTestBase {
|
||||
|
||||
function testFinalizeWithdrawERC20(
|
||||
address sender,
|
||||
address recipient,
|
||||
uint256 amount,
|
||||
bytes memory dataToCall
|
||||
) public {
|
||||
// blacklist some addresses
|
||||
hevm.assume(recipient != address(0));
|
||||
MockGatewayRecipient recipient = new MockGatewayRecipient();
|
||||
|
||||
gateway.updateTokenMapping(address(l1Token), address(l2Token));
|
||||
|
||||
@@ -270,7 +269,7 @@ contract L1CustomERC20GatewayTest is L1GatewayTestBase {
|
||||
address(l1Token),
|
||||
address(l2Token),
|
||||
sender,
|
||||
recipient,
|
||||
address(recipient),
|
||||
amount,
|
||||
dataToCall
|
||||
);
|
||||
@@ -291,7 +290,7 @@ contract L1CustomERC20GatewayTest is L1GatewayTestBase {
|
||||
// emit FinalizeWithdrawERC20 from L1StandardERC20Gateway
|
||||
{
|
||||
hevm.expectEmit(true, true, true, true);
|
||||
emit FinalizeWithdrawERC20(address(l1Token), address(l2Token), sender, recipient, amount, dataToCall);
|
||||
emit FinalizeWithdrawERC20(address(l1Token), address(l2Token), sender, address(recipient), amount, dataToCall);
|
||||
}
|
||||
|
||||
// emit RelayedMessage from L1ScrollMessenger
|
||||
@@ -301,11 +300,11 @@ contract L1CustomERC20GatewayTest is L1GatewayTestBase {
|
||||
}
|
||||
|
||||
uint256 gatewayBalance = l1Token.balanceOf(address(gateway));
|
||||
uint256 recipientBalance = l1Token.balanceOf(recipient);
|
||||
uint256 recipientBalance = l1Token.balanceOf(address(recipient));
|
||||
assertBoolEq(false, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
|
||||
l1Messenger.relayMessageWithProof(address(counterpartGateway), address(gateway), 0, 0, message, proof);
|
||||
assertEq(gatewayBalance - amount, l1Token.balanceOf(address(gateway)));
|
||||
assertEq(recipientBalance + amount, l1Token.balanceOf(recipient));
|
||||
assertEq(recipientBalance + amount, l1Token.balanceOf(address(recipient)));
|
||||
assertBoolEq(true, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
|
||||
}
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import {AddressAliasHelper} from "../libraries/common/AddressAliasHelper.sol";
|
||||
|
||||
import {L1GatewayTestBase} from "./L1GatewayTestBase.t.sol";
|
||||
import {MockScrollMessenger} from "./mocks/MockScrollMessenger.sol";
|
||||
import {MockGatewayRecipient} from "./mocks/MockGatewayRecipient.sol";
|
||||
|
||||
contract L1ETHGatewayTest is L1GatewayTestBase {
|
||||
// from L1ETHGateway
|
||||
@@ -188,12 +189,10 @@ contract L1ETHGatewayTest is L1GatewayTestBase {
|
||||
|
||||
function testFinalizeWithdrawETH(
|
||||
address sender,
|
||||
address recipient,
|
||||
uint256 amount,
|
||||
bytes memory dataToCall
|
||||
) public {
|
||||
hevm.assume(recipient.code.length == 0);
|
||||
hevm.assume(uint256(uint160(recipient)) > 100); // ignore some precompile contracts
|
||||
MockGatewayRecipient recipient = new MockGatewayRecipient();
|
||||
|
||||
amount = bound(amount, 1, address(this).balance / 2);
|
||||
|
||||
@@ -204,7 +203,7 @@ contract L1ETHGatewayTest is L1GatewayTestBase {
|
||||
bytes memory message = abi.encodeWithSelector(
|
||||
IL1ETHGateway.finalizeWithdrawETH.selector,
|
||||
sender,
|
||||
recipient,
|
||||
address(recipient),
|
||||
amount,
|
||||
dataToCall
|
||||
);
|
||||
@@ -225,7 +224,7 @@ contract L1ETHGatewayTest is L1GatewayTestBase {
|
||||
// emit FinalizeWithdrawETH from L1ETHGateway
|
||||
{
|
||||
hevm.expectEmit(true, true, false, true);
|
||||
emit FinalizeWithdrawETH(sender, recipient, amount, dataToCall);
|
||||
emit FinalizeWithdrawETH(sender, address(recipient), amount, dataToCall);
|
||||
}
|
||||
|
||||
// emit RelayedMessage from L1ScrollMessenger
|
||||
@@ -235,11 +234,11 @@ contract L1ETHGatewayTest is L1GatewayTestBase {
|
||||
}
|
||||
|
||||
uint256 messengerBalance = address(l1Messenger).balance;
|
||||
uint256 recipientBalance = recipient.balance;
|
||||
uint256 recipientBalance = address(recipient).balance;
|
||||
assertBoolEq(false, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
|
||||
l1Messenger.relayMessageWithProof(address(counterpartGateway), address(gateway), amount, 0, message, proof);
|
||||
assertEq(messengerBalance - amount, address(l1Messenger).balance);
|
||||
assertEq(recipientBalance + amount, recipient.balance);
|
||||
assertEq(recipientBalance + amount, address(recipient).balance);
|
||||
assertBoolEq(true, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
|
||||
}
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ import {L1GatewayTestBase} from "./L1GatewayTestBase.t.sol";
|
||||
import {MockScrollMessenger} from "./mocks/MockScrollMessenger.sol";
|
||||
import {TransferReentrantToken} from "./mocks/tokens/TransferReentrantToken.sol";
|
||||
import {FeeOnTransferToken} from "./mocks/tokens/FeeOnTransferToken.sol";
|
||||
import {MockGatewayRecipient} from "./mocks/MockGatewayRecipient.sol";
|
||||
|
||||
contract L1StandardERC20GatewayTest is L1GatewayTestBase {
|
||||
// from L1StandardERC20Gateway
|
||||
@@ -326,13 +327,10 @@ contract L1StandardERC20GatewayTest is L1GatewayTestBase {
|
||||
|
||||
function testFinalizeWithdrawERC20(
|
||||
address sender,
|
||||
address recipient,
|
||||
uint256 amount,
|
||||
bytes memory dataToCall
|
||||
) public {
|
||||
// blacklist some addresses
|
||||
hevm.assume(recipient != address(0));
|
||||
hevm.assume(recipient != address(gateway));
|
||||
MockGatewayRecipient recipient = new MockGatewayRecipient();
|
||||
|
||||
amount = bound(amount, 1, l1Token.balanceOf(address(this)));
|
||||
|
||||
@@ -345,7 +343,7 @@ contract L1StandardERC20GatewayTest is L1GatewayTestBase {
|
||||
address(l1Token),
|
||||
address(l2Token),
|
||||
sender,
|
||||
recipient,
|
||||
address(recipient),
|
||||
amount,
|
||||
dataToCall
|
||||
);
|
||||
@@ -366,7 +364,7 @@ contract L1StandardERC20GatewayTest is L1GatewayTestBase {
|
||||
// emit FinalizeWithdrawERC20 from L1StandardERC20Gateway
|
||||
{
|
||||
hevm.expectEmit(true, true, true, true);
|
||||
emit FinalizeWithdrawERC20(address(l1Token), address(l2Token), sender, recipient, amount, dataToCall);
|
||||
emit FinalizeWithdrawERC20(address(l1Token), address(l2Token), sender, address(recipient), amount, dataToCall);
|
||||
}
|
||||
|
||||
// emit RelayedMessage from L1ScrollMessenger
|
||||
@@ -376,11 +374,11 @@ contract L1StandardERC20GatewayTest is L1GatewayTestBase {
|
||||
}
|
||||
|
||||
uint256 gatewayBalance = l1Token.balanceOf(address(gateway));
|
||||
uint256 recipientBalance = l1Token.balanceOf(recipient);
|
||||
uint256 recipientBalance = l1Token.balanceOf(address(recipient));
|
||||
assertBoolEq(false, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
|
||||
l1Messenger.relayMessageWithProof(address(counterpartGateway), address(gateway), 0, 0, message, proof);
|
||||
assertEq(gatewayBalance - amount, l1Token.balanceOf(address(gateway)));
|
||||
assertEq(recipientBalance + amount, l1Token.balanceOf(recipient));
|
||||
assertEq(recipientBalance + amount, l1Token.balanceOf(address(recipient)));
|
||||
assertBoolEq(true, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ import {AddressAliasHelper} from "../libraries/common/AddressAliasHelper.sol";
|
||||
|
||||
import {L1GatewayTestBase} from "./L1GatewayTestBase.t.sol";
|
||||
import {MockScrollMessenger} from "./mocks/MockScrollMessenger.sol";
|
||||
import {MockGatewayRecipient} from "./mocks/MockGatewayRecipient.sol";
|
||||
|
||||
contract L1WETHGatewayTest is L1GatewayTestBase {
|
||||
// from L1WETHGateway
|
||||
@@ -278,12 +279,10 @@ contract L1WETHGatewayTest is L1GatewayTestBase {
|
||||
|
||||
function testFinalizeWithdrawERC20(
|
||||
address sender,
|
||||
address recipient,
|
||||
uint256 amount,
|
||||
bytes memory dataToCall
|
||||
) public {
|
||||
// blacklist some addresses
|
||||
hevm.assume(recipient != address(0));
|
||||
MockGatewayRecipient recipient = new MockGatewayRecipient();
|
||||
|
||||
amount = bound(amount, 1, l1weth.balanceOf(address(this)));
|
||||
|
||||
@@ -296,7 +295,7 @@ contract L1WETHGatewayTest is L1GatewayTestBase {
|
||||
address(l1weth),
|
||||
address(l2weth),
|
||||
sender,
|
||||
recipient,
|
||||
address(recipient),
|
||||
amount,
|
||||
dataToCall
|
||||
);
|
||||
@@ -317,7 +316,7 @@ contract L1WETHGatewayTest is L1GatewayTestBase {
|
||||
// emit FinalizeWithdrawERC20 from L1WETHGateway
|
||||
{
|
||||
hevm.expectEmit(true, true, true, true);
|
||||
emit FinalizeWithdrawERC20(address(l1weth), address(l2weth), sender, recipient, amount, dataToCall);
|
||||
emit FinalizeWithdrawERC20(address(l1weth), address(l2weth), sender, address(recipient), amount, dataToCall);
|
||||
}
|
||||
|
||||
// emit RelayedMessage from L1ScrollMessenger
|
||||
@@ -327,11 +326,11 @@ contract L1WETHGatewayTest is L1GatewayTestBase {
|
||||
}
|
||||
|
||||
uint256 messengerBalance = address(l1Messenger).balance;
|
||||
uint256 recipientBalance = l1weth.balanceOf(recipient);
|
||||
uint256 recipientBalance = l1weth.balanceOf(address(recipient));
|
||||
assertBoolEq(false, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
|
||||
l1Messenger.relayMessageWithProof(address(counterpartGateway), address(gateway), amount, 0, message, proof);
|
||||
assertEq(messengerBalance - amount, address(l1Messenger).balance);
|
||||
assertEq(recipientBalance + amount, l1weth.balanceOf(recipient));
|
||||
assertEq(recipientBalance + amount, l1weth.balanceOf(address(recipient)));
|
||||
assertBoolEq(true, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
|
||||
}
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import {L2GatewayRouter} from "../L2/gateways/L2GatewayRouter.sol";
|
||||
|
||||
import {L2GatewayTestBase} from "./L2GatewayTestBase.t.sol";
|
||||
import {MockScrollMessenger} from "./mocks/MockScrollMessenger.sol";
|
||||
import {MockGatewayRecipient} from "./mocks/MockGatewayRecipient.sol";
|
||||
|
||||
contract L2CustomERC20GatewayTest is L2GatewayTestBase {
|
||||
// from L1CustomERC20Gateway
|
||||
@@ -231,13 +232,10 @@ contract L2CustomERC20GatewayTest is L2GatewayTestBase {
|
||||
|
||||
function testFinalizeDepositERC20(
|
||||
address sender,
|
||||
address recipient,
|
||||
uint256 amount,
|
||||
bytes memory dataToCall
|
||||
) public {
|
||||
// blacklist some addresses
|
||||
hevm.assume(recipient != address(0));
|
||||
hevm.assume(recipient != address(gateway));
|
||||
MockGatewayRecipient recipient = new MockGatewayRecipient();
|
||||
|
||||
gateway.updateTokenMapping(address(l2Token), address(l1Token));
|
||||
|
||||
@@ -249,7 +247,7 @@ contract L2CustomERC20GatewayTest is L2GatewayTestBase {
|
||||
address(l1Token),
|
||||
address(l2Token),
|
||||
sender,
|
||||
recipient,
|
||||
address(recipient),
|
||||
amount,
|
||||
dataToCall
|
||||
);
|
||||
@@ -265,7 +263,7 @@ contract L2CustomERC20GatewayTest is L2GatewayTestBase {
|
||||
// emit FinalizeDepositERC20 from L2CustomERC20Gateway
|
||||
{
|
||||
hevm.expectEmit(true, true, true, true);
|
||||
emit FinalizeDepositERC20(address(l1Token), address(l2Token), sender, recipient, amount, dataToCall);
|
||||
emit FinalizeDepositERC20(address(l1Token), address(l2Token), sender, address(recipient), amount, dataToCall);
|
||||
}
|
||||
|
||||
// emit RelayedMessage from L2ScrollMessenger
|
||||
@@ -275,11 +273,11 @@ contract L2CustomERC20GatewayTest is L2GatewayTestBase {
|
||||
}
|
||||
|
||||
uint256 gatewayBalance = l2Token.balanceOf(address(gateway));
|
||||
uint256 recipientBalance = l2Token.balanceOf(recipient);
|
||||
uint256 recipientBalance = l2Token.balanceOf(address(recipient));
|
||||
assertBoolEq(false, l2Messenger.isL1MessageExecuted(keccak256(xDomainCalldata)));
|
||||
l2Messenger.relayMessage(address(counterpartGateway), address(gateway), 0, 0, message);
|
||||
assertEq(gatewayBalance, l2Token.balanceOf(address(gateway)));
|
||||
assertEq(recipientBalance + amount, l2Token.balanceOf(recipient));
|
||||
assertEq(recipientBalance + amount, l2Token.balanceOf(address(recipient)));
|
||||
assertBoolEq(true, l2Messenger.isL1MessageExecuted(keccak256(xDomainCalldata)));
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ import {IL2ETHGateway, L2ETHGateway} from "../L2/gateways/L2ETHGateway.sol";
|
||||
|
||||
import {L2GatewayTestBase} from "./L2GatewayTestBase.t.sol";
|
||||
import {MockScrollMessenger} from "./mocks/MockScrollMessenger.sol";
|
||||
import {MockGatewayRecipient} from "./mocks/MockGatewayRecipient.sol";
|
||||
|
||||
contract L2ETHGatewayTest is L2GatewayTestBase {
|
||||
// from L2ETHGateway
|
||||
@@ -184,8 +185,7 @@ contract L2ETHGatewayTest is L2GatewayTestBase {
|
||||
uint256 amount,
|
||||
bytes memory dataToCall
|
||||
) public {
|
||||
hevm.assume(recipient.code.length == 0);
|
||||
hevm.assume(uint256(uint160(recipient)) > 100); // ignore some precompile contracts
|
||||
MockGatewayRecipient recipient = new MockGatewayRecipient();
|
||||
|
||||
amount = bound(amount, 1, address(this).balance / 2);
|
||||
|
||||
@@ -196,7 +196,7 @@ contract L2ETHGatewayTest is L2GatewayTestBase {
|
||||
bytes memory message = abi.encodeWithSelector(
|
||||
IL2ETHGateway.finalizeDepositETH.selector,
|
||||
sender,
|
||||
recipient,
|
||||
address(recipient),
|
||||
amount,
|
||||
dataToCall
|
||||
);
|
||||
@@ -212,7 +212,7 @@ contract L2ETHGatewayTest is L2GatewayTestBase {
|
||||
// emit FinalizeDepositETH from L2ETHGateway
|
||||
{
|
||||
hevm.expectEmit(true, true, false, true);
|
||||
emit FinalizeDepositETH(sender, recipient, amount, dataToCall);
|
||||
emit FinalizeDepositETH(sender, address(recipient), amount, dataToCall);
|
||||
}
|
||||
|
||||
// emit RelayedMessage from L2ScrollMessenger
|
||||
@@ -222,11 +222,11 @@ contract L2ETHGatewayTest is L2GatewayTestBase {
|
||||
}
|
||||
|
||||
uint256 messengerBalance = address(l2Messenger).balance;
|
||||
uint256 recipientBalance = recipient.balance;
|
||||
uint256 recipientBalance = address(recipient).balance;
|
||||
assertBoolEq(false, l2Messenger.isL1MessageExecuted(keccak256(xDomainCalldata)));
|
||||
l2Messenger.relayMessage(address(counterpartGateway), address(gateway), amount, 0, message);
|
||||
assertEq(messengerBalance - amount, address(l2Messenger).balance);
|
||||
assertEq(recipientBalance + amount, recipient.balance);
|
||||
assertEq(recipientBalance + amount, address(recipient).balance);
|
||||
assertBoolEq(true, l2Messenger.isL1MessageExecuted(keccak256(xDomainCalldata)));
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ import {ScrollStandardERC20Factory} from "../libraries/token/ScrollStandardERC20
|
||||
|
||||
import {L2GatewayTestBase} from "./L2GatewayTestBase.t.sol";
|
||||
import {MockScrollMessenger} from "./mocks/MockScrollMessenger.sol";
|
||||
import {MockGatewayRecipient} from "./mocks/MockGatewayRecipient.sol";
|
||||
|
||||
contract L2StandardERC20GatewayTest is L2GatewayTestBase {
|
||||
// from L2StandardERC20Gateway
|
||||
@@ -263,13 +264,10 @@ contract L2StandardERC20GatewayTest is L2GatewayTestBase {
|
||||
|
||||
function testFinalizeDepositERC20(
|
||||
address sender,
|
||||
address recipient,
|
||||
uint256 amount,
|
||||
bytes memory dataToCall
|
||||
) public {
|
||||
// blacklist some addresses
|
||||
hevm.assume(recipient != address(0));
|
||||
hevm.assume(recipient != address(gateway));
|
||||
MockGatewayRecipient recipient = new MockGatewayRecipient();
|
||||
|
||||
amount = bound(amount, 1, l2Token.balanceOf(address(this)));
|
||||
|
||||
@@ -279,7 +277,7 @@ contract L2StandardERC20GatewayTest is L2GatewayTestBase {
|
||||
address(l1Token),
|
||||
address(l2Token),
|
||||
sender,
|
||||
recipient,
|
||||
address(recipient),
|
||||
amount,
|
||||
dataToCall
|
||||
);
|
||||
@@ -295,7 +293,7 @@ contract L2StandardERC20GatewayTest is L2GatewayTestBase {
|
||||
// emit FinalizeDepositERC20 from L2StandardERC20Gateway
|
||||
{
|
||||
hevm.expectEmit(true, true, true, true);
|
||||
emit FinalizeDepositERC20(address(l1Token), address(l2Token), sender, recipient, amount, dataToCall);
|
||||
emit FinalizeDepositERC20(address(l1Token), address(l2Token), sender, address(recipient), amount, dataToCall);
|
||||
}
|
||||
|
||||
// emit RelayedMessage from L2ScrollMessenger
|
||||
@@ -305,11 +303,11 @@ contract L2StandardERC20GatewayTest is L2GatewayTestBase {
|
||||
}
|
||||
|
||||
uint256 gatewayBalance = l2Token.balanceOf(address(gateway));
|
||||
uint256 recipientBalance = l2Token.balanceOf(recipient);
|
||||
uint256 recipientBalance = l2Token.balanceOf(address(recipient));
|
||||
assertBoolEq(false, l2Messenger.isL1MessageExecuted(keccak256(xDomainCalldata)));
|
||||
l2Messenger.relayMessage(address(counterpartGateway), address(gateway), 0, 0, message);
|
||||
assertEq(gatewayBalance, l2Token.balanceOf(address(gateway)));
|
||||
assertEq(recipientBalance + amount, l2Token.balanceOf(recipient));
|
||||
assertEq(recipientBalance + amount, l2Token.balanceOf(address(recipient)));
|
||||
assertBoolEq(true, l2Messenger.isL1MessageExecuted(keccak256(xDomainCalldata)));
|
||||
}
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import {IL2ERC20Gateway, L2WETHGateway} from "../L2/gateways/L2WETHGateway.sol";
|
||||
|
||||
import {L2GatewayTestBase} from "./L2GatewayTestBase.t.sol";
|
||||
import {MockScrollMessenger} from "./mocks/MockScrollMessenger.sol";
|
||||
import {MockGatewayRecipient} from "./mocks/MockGatewayRecipient.sol";
|
||||
|
||||
contract L2WETHGatewayTest is L2GatewayTestBase {
|
||||
// from L2WETHGateway
|
||||
@@ -275,8 +276,7 @@ contract L2WETHGatewayTest is L2GatewayTestBase {
|
||||
uint256 amount,
|
||||
bytes memory dataToCall
|
||||
) public {
|
||||
// blacklist some addresses
|
||||
hevm.assume(recipient != address(0));
|
||||
MockGatewayRecipient recipient = new MockGatewayRecipient();
|
||||
|
||||
amount = bound(amount, 1, l2weth.balanceOf(address(this)));
|
||||
|
||||
@@ -289,7 +289,7 @@ contract L2WETHGatewayTest is L2GatewayTestBase {
|
||||
address(l1weth),
|
||||
address(l2weth),
|
||||
sender,
|
||||
recipient,
|
||||
address(recipient),
|
||||
amount,
|
||||
dataToCall
|
||||
);
|
||||
@@ -305,7 +305,7 @@ contract L2WETHGatewayTest is L2GatewayTestBase {
|
||||
// emit FinalizeDepositERC20 from L2WETHGateway
|
||||
{
|
||||
hevm.expectEmit(true, true, true, true);
|
||||
emit FinalizeDepositERC20(address(l1weth), address(l2weth), sender, recipient, amount, dataToCall);
|
||||
emit FinalizeDepositERC20(address(l1weth), address(l2weth), sender, address(recipient), amount, dataToCall);
|
||||
}
|
||||
|
||||
// emit RelayedMessage from L2ScrollMessenger
|
||||
@@ -315,11 +315,11 @@ contract L2WETHGatewayTest is L2GatewayTestBase {
|
||||
}
|
||||
|
||||
uint256 messengerBalance = address(l2Messenger).balance;
|
||||
uint256 recipientBalance = l2weth.balanceOf(recipient);
|
||||
uint256 recipientBalance = l2weth.balanceOf(address(recipient));
|
||||
assertBoolEq(false, l2Messenger.isL1MessageExecuted(keccak256(xDomainCalldata)));
|
||||
l2Messenger.relayMessage(address(counterpartGateway), address(gateway), amount, 0, message);
|
||||
assertEq(messengerBalance - amount, address(l2Messenger).balance);
|
||||
assertEq(recipientBalance + amount, l2weth.balanceOf(recipient));
|
||||
assertEq(recipientBalance + amount, l2weth.balanceOf(address(recipient)));
|
||||
assertBoolEq(true, l2Messenger.isL1MessageExecuted(keccak256(xDomainCalldata)));
|
||||
}
|
||||
|
||||
|
||||
contracts/src/test/mocks/MockGatewayRecipient.sol (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import {IScrollGatewayCallback} from "../../libraries/callbacks/IScrollGatewayCallback.sol";
|
||||
|
||||
contract MockGatewayRecipient is IScrollGatewayCallback {
|
||||
event ReceiveCall(bytes data);
|
||||
|
||||
function onScrollGatewayCallback(bytes memory data) external {
|
||||
emit ReceiveCall(data);
|
||||
}
|
||||
|
||||
receive() external payable {}
|
||||
}
|
||||
@@ -16,7 +16,7 @@ test:
|
||||
libzkp:
|
||||
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
|
||||
rm -rf ./verifier/lib && cp -r ../common/libzkp/interface ./verifier/lib
|
||||
find ../common | grep libzktrie.so | xargs -i cp {} ./verifier/lib/
|
||||
find ../common | grep libzktrie.so | xargs -I{} cp {} ./verifier/lib/
|
||||
|
||||
coordinator: libzkp ## Builds the Coordinator instance.
|
||||
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
|
||||
|
||||
@@ -1,6 +1,12 @@
|
||||
# Coordinator
|
||||
|
||||
This repo contains the Scroll coordinator.
|
||||
This directory contains the Scroll Coordinator module.
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
See [monorepo prerequisites](../README.md#prerequisites).
|
||||
|
||||
|
||||
## Build
|
||||
|
||||
@@ -8,17 +14,67 @@ This repo contains the Scroll coordinator.
|
||||
make clean
|
||||
make coordinator
|
||||
```
|
||||
The built coordinator binary is in the `build/bin` directory.
|
||||
|
||||
|
||||
## Test
|
||||
|
||||
Note: Our coordinator test code doesn't currently support Apple Silicon (M1/M2) chips.
|
||||
|
||||
When developing the coordinator, use the following command to mock verifier results and run coordinator tests:
|
||||
|
||||
```bash
|
||||
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
|
||||
```
|
||||
With this tag, the coordinator compiles verifier/mock.go instead of verifier/verifier.go, so proof verification always returns true.
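A minimal sketch of the build-tag arrangement this relies on: the mock file is only compiled when the `mock_verifier` tag is set, and it unconditionally accepts proofs. The file layout follows the description above; the exact method signature is an assumption.

```go
//go:build mock_verifier
// +build mock_verifier

package verifier

import "scroll-tech/common/types/message"

// Verifier is the stand-in compiled when the mock_verifier tag is present.
type Verifier struct{}

// VerifyProof always reports success so coordinator logic can be exercised
// without running the real zk verifier.
func (v *Verifier) VerifyProof(proof *message.AggProof) (bool, error) {
	return true, nil
}
```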
|
||||
|
||||
Lint the files before testing or committing:
|
||||
|
||||
```bash
|
||||
make lint
|
||||
```
|
||||
|
||||
|
||||
## Configure
|
||||
|
||||
The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `RollerManagerConfig` in [`config/config.go`](config/config.go) for more details.
|
||||
|
||||
|
||||
## Start
|
||||
|
||||
* use default ports and config.json
|
||||
|
||||
* Using default ports and config.json:
|
||||
```bash
|
||||
./build/bin/coordinator --http
|
||||
```
|
||||
|
||||
* use specified ports and config.json
|
||||
|
||||
* Using manually specified ports and config.json:
|
||||
```bash
|
||||
./build/bin/coordinator --config ./config.json --http --http.addr localhost --http.port 8390
|
||||
```
|
||||
|
||||
* For other flags, refer to [`cmd/app/flags.go`](cmd/app/flags.go).
|
||||
|
||||
|
||||
## Codeflow
|
||||
|
||||
### cmd/app/app.go
|
||||
|
||||
This file defines the main entry point for the coordinator application, setting up the necessary modules and handling graceful shutdown. After loading the config.json file, the coordinator (`cmd/app/app.go`) sets up and starts the HTTP and WebSocket servers on the configured ports and addresses; `flags.go` is used to parse the command-line flags. It then creates a new `RollerManager` (`manager.go`) and starts listening.
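A rough sketch of what that entry point looks like with the urfave/cli framework used across this monorepo; the flag set is trimmed to the two flags mentioned in this README, and everything else is illustrative.

```go
package app

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

// Run parses flags, loads the configuration, and starts the coordinator.
func Run() {
	app := &cli.App{
		Name: "coordinator",
		Flags: []cli.Flag{
			&cli.StringFlag{Name: "config", Value: "./config.json", Usage: "path to config.json"},
			&cli.BoolFlag{Name: "http", Usage: "enable the HTTP-RPC server"},
		},
		Action: func(ctx *cli.Context) error {
			// load config, create the RollerManager, start the HTTP/WebSocket
			// servers, and block until a shutdown signal arrives.
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```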
|
||||
|
||||
### manager.go
|
||||
|
||||
`manager.go` calls `rollers.go` for prover (aka "roller") management functions. In the process, `rollers.go` calls `client.go`, initializing a prover client. For communication between prover clients and the coordinator manager, `api.go` is used.
|
||||
|
||||
`manager.go` uses either `verifier.go` or `mock.go` (for development/testing purposes) to verify the proofs submitted by provers. After verification, `manager.go` will call `roller.go` to update the state of the prover, and then return the result (whether the proof verification process was successful) to the prover.
|
||||
|
||||
### api.go
|
||||
|
||||
This file contains the implementation of the RPC API for the coordinator manager. The API allows prover clients to interact with the coordinator manager through functions such as `requestToken`, `register`, and `submitProof`.
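Concretely, those methods are served over go-ethereum's rpc package (imported elsewhere in this diff). A sketch of how such an API surface is typically registered; the `roller` namespace name is an assumption.

```go
import "github.com/scroll-tech/go-ethereum/rpc"

// APIs returns the RPC descriptors served to rollers; the Manager itself
// implements RequestToken, Register, and SubmitProof.
func APIs(manager *Manager) []rpc.API {
	return []rpc.API{
		{
			Namespace: "roller",
			Service:   manager,
			Public:    true,
		},
	}
}
```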
|
||||
|
||||
### rollers.go
|
||||
|
||||
This file contains the logic for handling prover-specific tasks, such as assigning tasks to provers, handling completed tasks, and managing prover metrics.
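The task payloads come from `common/types/message` (see the `TaskMsg` change earlier in this diff): basic rollers receive block traces, aggregator rollers receive sub-proofs. A compressed sketch of how a dispatcher might build each variant; the helper names are illustrative, and the import path for `BlockTrace` is assumed from the scroll-tech go-ethereum fork.

```go
import (
	"github.com/scroll-tech/go-ethereum/core/types"

	"scroll-tech/common/types/message"
)

// basicTask packs traces for a basic (zkEVM) roller.
func basicTask(id string, traces []*types.BlockTrace) *message.TaskMsg {
	return &message.TaskMsg{ID: id, Type: message.BasicProve, Traces: traces}
}

// aggTask packs previously verified proofs for an aggregator roller.
func aggTask(id string, subProofs [][]byte) *message.TaskMsg {
	return &message.TaskMsg{ID: id, Type: message.AggregatorProve, SubProofs: subProofs}
}
```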
|
||||
|
||||
### client/client.go
|
||||
|
||||
This file contains the `Client` struct that is callable on the prover side and responsible for communicating with the coordinator through RPC. `RequestToken`, `RegisterAndSubscribe`, and `SubmitProof` are used by `rollers.go`.
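A condensed sketch of that flow from the roller's point of view; the constructor and method signatures here are assumptions based on the description above, with retries and detailed error handling omitted.

```go
// runRoller sketches the prover-side loop against the coordinator client.
func runRoller(ctx context.Context, cli *client.Client, authMsg *message.AuthMsg) error {
	// 1. obtain a login token, then 2. register and subscribe for tasks.
	if _, err := cli.RequestToken(ctx, authMsg); err != nil {
		return err
	}
	taskCh := make(chan *message.TaskMsg, 16)
	sub, err := cli.RegisterAndSubscribe(ctx, taskCh, authMsg)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()

	// 3. for every task, generate a proof and submit it back.
	for range taskCh {
		proof := &message.ProofMsg{} // filled in by the prover in the real flow
		if _, err := cli.SubmitProof(ctx, proof); err != nil {
			return err
		}
	}
	return nil
}
```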
|
||||
|
||||
@@ -10,8 +10,8 @@ import (
|
||||
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/common/message"
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types/message"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
coordinator/api_test.go (new file, 225 lines)
@@ -0,0 +1,225 @@
|
||||
package coordinator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
cmap "github.com/orcaman/concurrent-map"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/config"
|
||||
)
|
||||
|
||||
func geneAuthMsg(t *testing.T) *message.AuthMsg {
|
||||
authMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: "roller_test1",
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
privKey, err := crypto.GenerateKey()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, authMsg.SignWithKey(privKey))
|
||||
return authMsg
|
||||
}
|
||||
|
||||
var rollerManager *Manager
|
||||
|
||||
func init() {
|
||||
rmConfig := config.RollerManagerConfig{}
|
||||
rmConfig.Verifier = &config.VerifierConfig{MockMode: true}
|
||||
rollerManager, _ = New(context.Background(), &rmConfig, nil, nil)
|
||||
}
|
||||
|
||||
func TestManager_RequestToken(t *testing.T) {
|
||||
convey.Convey("auth msg verify failure", t, func() {
|
||||
tmpAuthMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: "roller_test_request_token",
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
token, err := rollerManager.RequestToken(tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, token)
|
||||
})
|
||||
|
||||
convey.Convey("token has already been distributed", t, func() {
|
||||
tmpAuthMsg := geneAuthMsg(t)
|
||||
key, _ := tmpAuthMsg.PublicKey()
|
||||
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
|
||||
rollerManager.tokenCache.Set(key, tokenCacheStored, time.Hour)
|
||||
token, err := rollerManager.RequestToken(tmpAuthMsg)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, token, tokenCacheStored)
|
||||
})
|
||||
|
||||
convey.Convey("token generation failure", t, func() {
|
||||
tmpAuthMsg := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
|
||||
return "", errors.New("token generation failed")
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
token, err := rollerManager.RequestToken(tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, token)
|
||||
})
|
||||
|
||||
convey.Convey("token generation success", t, func() {
|
||||
tmpAuthMsg := geneAuthMsg(t)
|
||||
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
|
||||
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
|
||||
return tokenCacheStored, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
token, err := rollerManager.RequestToken(tmpAuthMsg)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tokenCacheStored, token)
|
||||
})
|
||||
}
|
||||
|
||||
func TestManager_Register(t *testing.T) {
|
||||
convey.Convey("auth msg verify failure", t, func() {
|
||||
tmpAuthMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: "roller_test_register",
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, subscription)
|
||||
})
|
||||
|
||||
convey.Convey("verify token failure", t, func() {
|
||||
tmpAuthMsg := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyMethodFunc(rollerManager, "VerifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return false, errors.New("verify token failure")
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, subscription)
|
||||
})
|
||||
|
||||
convey.Convey("register failure", t, func() {
|
||||
tmpAuthMsg := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyMethodFunc(rollerManager, "VerifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
patchGuard.ApplyPrivateMethod(rollerManager, "register", func(*Manager, string, *message.Identity) (<-chan *message.TaskMsg, error) {
|
||||
return nil, errors.New("register error")
|
||||
})
|
||||
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, subscription)
|
||||
})
|
||||
|
||||
convey.Convey("notifier failure", t, func() {
|
||||
tmpAuthMsg := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyMethodFunc(rollerManager, "VerifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
patchGuard.ApplyFunc(rpc.NotifierFromContext, func(ctx context.Context) (*rpc.Notifier, bool) {
|
||||
return nil, false
|
||||
})
|
||||
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err, rpc.ErrNotificationsUnsupported)
|
||||
assert.Equal(t, *subscription, rpc.Subscription{})
|
||||
})
|
||||
}
|
||||
|
||||
func TestManager_SubmitProof(t *testing.T) {
|
||||
id := "10000"
|
||||
proof := &message.ProofMsg{
|
||||
ProofDetail: &message.ProofDetail{
|
||||
ID: id,
|
||||
Status: message.StatusOk,
|
||||
Proof: &message.AggProof{},
|
||||
},
|
||||
}
|
||||
|
||||
var rp rollerNode
|
||||
rp.TaskIDs = cmap.New()
|
||||
rp.TaskIDs.Set(id, id)
|
||||
|
||||
convey.Convey("verify failure", t, func() {
|
||||
var s *message.ProofMsg
|
||||
patchGuard := gomonkey.ApplyMethodFunc(s, "Verify", func() (bool, error) {
|
||||
return false, errors.New("proof verify error")
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
isSuccess, err := rollerManager.SubmitProof(proof)
|
||||
assert.False(t, isSuccess)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
convey.Convey("existTaskIDForRoller failure", t, func() {
|
||||
var s *cmap.ConcurrentMap
|
||||
patchGuard := gomonkey.ApplyMethodFunc(s, "Get", func(key string) (interface{}, bool) {
|
||||
return nil, true
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
var pm *message.ProofMsg
|
||||
patchGuard.ApplyMethodFunc(pm, "Verify", func() (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
isSuccess, err := rollerManager.SubmitProof(proof)
|
||||
assert.False(t, isSuccess)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
convey.Convey("handleZkProof failure", t, func() {
|
||||
var pm *message.ProofMsg
|
||||
patchGuard := gomonkey.ApplyMethodFunc(pm, "Verify", func() (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
var s cmap.ConcurrentMap
|
||||
patchGuard.ApplyMethodFunc(s, "Get", func(key string) (interface{}, bool) {
|
||||
return &rp, true
|
||||
})
|
||||
|
||||
patchGuard.ApplyPrivateMethod(rollerManager, "handleZkProof", func(manager *Manager, pk string, msg *message.ProofDetail) error {
|
||||
return errors.New("handle zk proof error")
|
||||
})
|
||||
|
||||
isSuccess, err := rollerManager.SubmitProof(proof)
|
||||
assert.Error(t, err)
|
||||
assert.False(t, isSuccess)
|
||||
})
|
||||
|
||||
convey.Convey("SubmitProof success", t, func() {
|
||||
var pm *message.ProofMsg
|
||||
patchGuard := gomonkey.ApplyMethodFunc(pm, "Verify", func() (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
var s cmap.ConcurrentMap
|
||||
patchGuard.ApplyMethodFunc(s, "Get", func(key string) (interface{}, bool) {
|
||||
return &rp, true
|
||||
})
|
||||
|
||||
patchGuard.ApplyPrivateMethod(rollerManager, "handleZkProof", func(manager *Manager, pk string, msg *message.ProofDetail) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
isSuccess, err := rollerManager.SubmitProof(proof)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, isSuccess)
|
||||
})
|
||||
}
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/common/message"
|
||||
"scroll-tech/common/types/message"
|
||||
)
|
||||
|
||||
// Client defines typed wrappers for the Ethereum RPC API.
|
||||
|
||||
@@ -40,7 +40,7 @@ func init() {
|
||||
}
|
||||
|
||||
// Register `coordinator-test` app for integration-test.
|
||||
utils.RegisterSimulation(app, "coordinator-test")
|
||||
utils.RegisterSimulation(app, utils.CoordinatorApp)
|
||||
}
|
||||
|
||||
func action(ctx *cli.Context) error {
|
||||
|
||||
coordinator/cmd/app/mock_app.go (new file, +103 lines)
@@ -0,0 +1,103 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
coordinatorConfig "scroll-tech/coordinator/config"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
wsStartPort int64 = 40000
|
||||
)
|
||||
|
||||
// CoordinatorApp coordinator-test client manager.
|
||||
type CoordinatorApp struct {
|
||||
Config *coordinatorConfig.Config
|
||||
|
||||
base *docker.App
|
||||
|
||||
originFile string
|
||||
coordinatorFile string
|
||||
WSPort int64
|
||||
|
||||
args []string
|
||||
docker.AppAPI
|
||||
}
|
||||
|
||||
// NewCoordinatorApp return a new coordinatorApp manager.
|
||||
func NewCoordinatorApp(base *docker.App, file string) *CoordinatorApp {
|
||||
coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", base.Timestamp)
|
||||
port, _ := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
wsPort := port.Int64() + wsStartPort
|
||||
coordinatorApp := &CoordinatorApp{
|
||||
base: base,
|
||||
originFile: file,
|
||||
coordinatorFile: coordinatorFile,
|
||||
WSPort: wsPort,
|
||||
args: []string{"--log.debug", "--config", coordinatorFile, "--ws", "--ws.port", strconv.Itoa(int(wsPort))},
|
||||
}
|
||||
if err := coordinatorApp.MockConfig(true); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return coordinatorApp
|
||||
}
|
||||
|
||||
// RunApp run coordinator-test child process by multi parameters.
|
||||
func (c *CoordinatorApp) RunApp(t *testing.T, args ...string) {
|
||||
c.AppAPI = cmd.NewCmd(string(utils.CoordinatorApp), append(c.args, args...)...)
|
||||
c.AppAPI.RunApp(func() bool { return c.AppAPI.WaitResult(t, time.Second*20, "Start coordinator successfully") })
|
||||
}
|
||||
|
||||
// Free stop and release coordinator-test.
|
||||
func (c *CoordinatorApp) Free() {
|
||||
if !utils.IsNil(c.AppAPI) {
|
||||
c.AppAPI.WaitExit()
|
||||
}
|
||||
_ = os.Remove(c.coordinatorFile)
|
||||
}
|
||||
|
||||
// WSEndpoint returns ws endpoint.
|
||||
func (c *CoordinatorApp) WSEndpoint() string {
|
||||
return fmt.Sprintf("ws://localhost:%d", c.WSPort)
|
||||
}
|
||||
|
||||
// MockConfig creates a new coordinator config.
|
||||
func (c *CoordinatorApp) MockConfig(store bool) error {
|
||||
base := c.base
|
||||
cfg, err := coordinatorConfig.NewConfig(c.originFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Reset roller manager config for manager test cases.
|
||||
cfg.RollerManagerConfig = &coordinatorConfig.RollerManagerConfig{
|
||||
RollersPerSession: 1,
|
||||
Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
|
||||
CollectionTime: 1,
|
||||
TokenTimeToLive: 1,
|
||||
}
|
||||
cfg.DBConfig.DSN = base.DBImg.Endpoint()
|
||||
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
|
||||
c.Config = cfg
|
||||
|
||||
if !store {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, err := json.Marshal(c.Config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(c.coordinatorFile, data, 0644)
|
||||
}
|
||||
@@ -10,7 +10,8 @@
|
||||
"params_path": "",
|
||||
"agg_vk_path": ""
|
||||
},
|
||||
"max_verifier_workers": 10
|
||||
"max_verifier_workers": 10,
|
||||
"order_session": "ASC"
|
||||
},
|
||||
"db_config": {
|
||||
"driver_name": "postgres",
|
||||
|
||||
@@ -3,6 +3,7 @@ module scroll-tech/coordinator
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04
|
||||
@@ -11,6 +12,15 @@ require (
|
||||
golang.org/x/sync v0.1.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/mattn/go-isatty v0.0.18 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rogpeppe/go-internal v1.10.0 // indirect
|
||||
golang.org/x/text v0.9.0 // indirect
|
||||
golang.org/x/time v0.1.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
@@ -19,19 +29,23 @@ require (
|
||||
github.com/ethereum/go-ethereum v1.11.4 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/gopherjs/gopherjs v1.17.2 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.14 // indirect
|
||||
github.com/jtolds/gls v4.20.0+incompatible // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.5.2 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.10 // indirect
|
||||
github.com/tklauser/numcpus v0.4.0 // indirect
|
||||
github.com/smartystreets/assertions v1.13.1 // indirect
|
||||
github.com/smartystreets/goconvey v1.8.0
|
||||
github.com/tklauser/go-sysconf v0.3.11 // indirect
|
||||
github.com/tklauser/numcpus v0.6.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.7.0 // indirect
|
||||
golang.org/x/sys v0.6.0 // indirect
|
||||
golang.org/x/sys v0.7.0 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0 h1:PDiKKybR596O6FHW+RVSG0Z7uGCBNbmbUXh3uCNQ7Hc=
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
@@ -31,6 +33,9 @@ github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
|
||||
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
|
||||
@@ -44,6 +49,8 @@ github.com/iden3/go-iden3-crypto v0.0.14/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBe
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
@@ -52,8 +59,10 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
|
||||
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
|
||||
@@ -69,8 +78,12 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -80,6 +93,12 @@ github.com/scroll-tech/zktrie v0.5.2 h1:U34jPXMLGOlRHfdvYp5VVgOcC0RuPeJmcS3bWotC
|
||||
github.com/scroll-tech/zktrie v0.5.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU=
|
||||
github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
|
||||
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
|
||||
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
@@ -89,10 +108,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
|
||||
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
|
||||
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
|
||||
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
|
||||
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
|
||||
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
|
||||
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
|
||||
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
|
||||
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
|
||||
@@ -101,20 +120,27 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
|
||||
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
|
||||
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
|
||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
|
||||
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
||||
@@ -18,13 +18,13 @@ import (
|
||||
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/common/message"
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/database"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils/workerpool"
|
||||
|
||||
"scroll-tech/database"
|
||||
|
||||
"scroll-tech/coordinator/config"
|
||||
"scroll-tech/coordinator/verifier"
|
||||
)
|
||||
@@ -51,6 +51,7 @@ const (
|
||||
|
||||
type rollerProofStatus struct {
|
||||
id string
|
||||
typ message.ProveType
|
||||
pk string
|
||||
status types.RollerProveStatus
|
||||
}
|
||||
@@ -161,14 +162,30 @@ func (m *Manager) isRunning() bool {
|
||||
// Loop keeps the manager running.
|
||||
func (m *Manager) Loop() {
|
||||
var (
|
||||
tick = time.NewTicker(time.Second * 2)
|
||||
tasks []*types.BlockBatch
|
||||
tick = time.NewTicker(time.Second * 2)
|
||||
tasks []*types.BlockBatch
|
||||
aggTasks []*types.AggTask
|
||||
)
|
||||
defer tick.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-tick.C:
|
||||
// load and send aggregator tasks
|
||||
if len(aggTasks) == 0 && m.orm != nil {
|
||||
var err error
|
||||
aggTasks, err = m.orm.GetUnassignedAggTasks()
|
||||
if err != nil {
|
||||
log.Error("failed to get unassigned aggregator proving tasks", "error", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Select aggregator type roller and send message
|
||||
for len(aggTasks) > 0 && m.StartAggProofGenerationSession(aggTasks[0], nil) {
|
||||
aggTasks = aggTasks[1:]
|
||||
}
|
||||
|
||||
// load and send basic tasks
|
||||
if len(tasks) == 0 && m.orm != nil {
|
||||
var err error
|
||||
// TODO: add cache
|
||||
@@ -177,15 +194,15 @@ func (m *Manager) Loop() {
|
||||
fmt.Sprintf(
|
||||
"ORDER BY index %s LIMIT %d;",
|
||||
m.cfg.OrderSession,
|
||||
m.GetNumberOfIdleRollers(),
|
||||
m.GetNumberOfIdleRollers(message.BasicProve),
|
||||
),
|
||||
); err != nil {
|
||||
log.Error("failed to get unassigned proving tasks", "error", err)
|
||||
log.Error("failed to get unassigned basic proving tasks", "error", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Select roller and send message
|
||||
for len(tasks) > 0 && m.StartProofGenerationSession(tasks[0], nil) {
|
||||
// Select basic type roller and send message
|
||||
for len(tasks) > 0 && m.StartBasicProofGenerationSession(tasks[0], nil) {
|
||||
tasks = tasks[1:]
|
||||
}
|
||||
case <-m.ctx.Done():
|
||||
@@ -208,31 +225,51 @@ func (m *Manager) restorePrevSessions() {
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
if hashes, err := m.orm.GetAssignedBatchHashes(); err != nil {
|
||||
log.Error("failed to get assigned batch hashes from db", "error", err)
|
||||
} else if prevSessions, err := m.orm.GetSessionInfosByHashes(hashes); err != nil {
|
||||
log.Error("failed to recover roller session info from db", "error", err)
|
||||
} else {
|
||||
for _, v := range prevSessions {
|
||||
sess := &session{
|
||||
info: v,
|
||||
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
|
||||
}
|
||||
m.sessions[sess.info.ID] = sess
|
||||
|
||||
log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
|
||||
for _, roller := range sess.info.Rollers {
|
||||
log.Info(
|
||||
"restore roller info for session",
|
||||
"session id", sess.info.ID,
|
||||
"roller name", roller.Name,
|
||||
"public key", roller.PublicKey,
|
||||
"proof status", roller.Status)
|
||||
}
|
||||
|
||||
go m.CollectProofs(sess)
|
||||
}
|
||||
var hashes []string
|
||||
// load assigned aggregator tasks from db
|
||||
aggTasks, err := m.orm.GetAssignedAggTasks()
|
||||
if err != nil {
|
||||
log.Error("failed to load assigned aggregator tasks from db", "error", err)
|
||||
return
|
||||
}
|
||||
for _, aggTask := range aggTasks {
|
||||
hashes = append(hashes, aggTask.ID)
|
||||
}
|
||||
// load assigned basic tasks from db
|
||||
batchHashes, err := m.orm.GetAssignedBatchHashes()
|
||||
if err != nil {
|
||||
log.Error("failed to get assigned batch batchHashes from db", "error", err)
|
||||
return
|
||||
}
|
||||
hashes = append(hashes, batchHashes...)
|
||||
|
||||
prevSessions, err := m.orm.GetSessionInfosByHashes(hashes)
|
||||
if err != nil {
|
||||
log.Error("failed to recover roller session info from db", "error", err)
|
||||
return
|
||||
}
|
||||
for _, v := range prevSessions {
|
||||
sess := &session{
|
||||
info: v,
|
||||
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
|
||||
}
|
||||
m.sessions[sess.info.ID] = sess
|
||||
|
||||
log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
|
||||
for _, roller := range sess.info.Rollers {
|
||||
log.Info(
|
||||
"restore roller info for session",
|
||||
"session id", sess.info.ID,
|
||||
"roller name", roller.Name,
|
||||
"prove type", sess.info.ProveType,
|
||||
"public key", roller.PublicKey,
|
||||
"proof status", roller.Status)
|
||||
}
|
||||
|
||||
go m.CollectProofs(sess)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// HandleZkProof handle a ZkProof submitted from a roller.
|
||||
@@ -257,7 +294,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
// Ensure this roller is eligible to participate in the session.
|
||||
roller, ok := sess.info.Rollers[pk]
|
||||
if !ok {
|
||||
return fmt.Errorf("roller %s (%s) is not eligible to partake in proof session %v", roller.Name, roller.PublicKey, msg.ID)
|
||||
return fmt.Errorf("roller %s %s (%s) is not eligible to partake in proof session %v", roller.Name, sess.info.ProveType, roller.PublicKey, msg.ID)
|
||||
}
|
||||
if roller.Status == types.RollerProofValid {
|
||||
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
|
||||
@@ -268,6 +305,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
"roller has already submitted valid proof in proof session",
|
||||
"roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey,
|
||||
"prove type", sess.info.ProveType,
|
||||
"proof id", msg.ID,
|
||||
)
|
||||
return nil
|
||||
@@ -277,6 +315,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
"proof id", msg.ID,
|
||||
"roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey,
|
||||
"prove type", sess.info.ProveType,
|
||||
"proof time", proofTimeSec,
|
||||
)
|
||||
|
||||
@@ -284,8 +323,15 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
// TODO: maybe we should use db tx for the whole process?
|
||||
// Roll back current proof's status.
|
||||
if dbErr != nil {
|
||||
if err := m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskUnassigned); err != nil {
|
||||
log.Error("fail to reset task status as Unassigned", "msg.ID", msg.ID)
|
||||
if msg.Type == message.BasicProve {
|
||||
if err := m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskUnassigned); err != nil {
|
||||
log.Error("fail to reset basic task status as Unassigned", "msg.ID", msg.ID)
|
||||
}
|
||||
}
|
||||
if msg.Type == message.AggregatorProve {
|
||||
if err := m.orm.UpdateAggTaskStatus(msg.ID, types.ProvingTaskUnassigned); err != nil {
|
||||
log.Error("fail to reset aggregator task status as Unassigned", "msg.ID", msg.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
// set proof status
|
||||
@@ -294,16 +340,18 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
status = types.RollerProofValid
|
||||
}
|
||||
// notify the session that the roller finishes the proving process
|
||||
sess.finishChan <- rollerProofStatus{msg.ID, pk, status}
|
||||
sess.finishChan <- rollerProofStatus{msg.ID, msg.Type, pk, status}
|
||||
}()
|
||||
|
||||
if msg.Status != message.StatusOk {
|
||||
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsGeneratedFailedTimeTimer(roller.PublicKey, proofTime)
|
||||
log.Info(
|
||||
"proof generated by roller failed",
|
||||
"proof id", msg.ID,
|
||||
"roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey,
|
||||
"prove type", msg.Type,
|
||||
"proof time", proofTimeSec,
|
||||
"error", msg.Error,
|
||||
)
|
||||
@@ -311,46 +359,67 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
}
|
||||
|
||||
// store proof content
|
||||
if dbErr = m.orm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof.Proof, msg.Proof.FinalPair, proofTimeSec); dbErr != nil {
|
||||
log.Error("failed to store proof into db", "error", dbErr)
|
||||
return dbErr
|
||||
if msg.Type == message.BasicProve {
|
||||
if dbErr = m.orm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof.Proof, msg.Proof.FinalPair, proofTimeSec); dbErr != nil {
|
||||
log.Error("failed to store basic proof into db", "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskProved); dbErr != nil {
|
||||
log.Error("failed to update basic task status as proved", "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
}
|
||||
if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskProved); dbErr != nil {
|
||||
log.Error("failed to update task status as proved", "error", dbErr)
|
||||
return dbErr
|
||||
if msg.Type == message.AggregatorProve {
|
||||
if dbErr = m.orm.UpdateProofForAggTask(msg.ID, msg.Proof); dbErr != nil {
|
||||
log.Error("failed to store aggregator proof into db", "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
}
|
||||
|
||||
coordinatorProofsReceivedTotalCounter.Inc(1)
|
||||
|
||||
var err error
|
||||
success, err = m.verifyProof(msg.Proof)
|
||||
if err != nil {
|
||||
var verifyErr error
|
||||
// TODO: wrap both basic verifier and aggregator verifier
|
||||
success, verifyErr = m.verifyProof(msg.Proof)
|
||||
if verifyErr != nil {
|
||||
// TODO: this is only a temp workaround for testnet, we should return err in real cases
|
||||
success = false
|
||||
log.Error("Failed to verify zk proof", "proof id", msg.ID, "error", err)
|
||||
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
// TODO: Roller needs to be slashed if proof is invalid.
|
||||
} else {
|
||||
log.Info("Verify zk proof successfully", "verification result", success, "proof id", msg.ID)
|
||||
}
|
||||
|
||||
if success {
|
||||
if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskVerified); dbErr != nil {
|
||||
log.Error(
|
||||
"failed to update proving_status",
|
||||
"msg.ID", msg.ID,
|
||||
"status", types.ProvingTaskVerified,
|
||||
"error", dbErr)
|
||||
return dbErr
|
||||
if msg.Type == message.AggregatorProve {
|
||||
if dbErr = m.orm.UpdateAggTaskStatus(msg.ID, types.ProvingTaskVerified); dbErr != nil {
|
||||
log.Error(
|
||||
"failed to update aggregator proving_status",
|
||||
"msg.ID", msg.ID,
|
||||
"status", types.ProvingTaskVerified,
|
||||
"error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
}
|
||||
if msg.Type == message.BasicProve {
|
||||
if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskVerified); dbErr != nil {
|
||||
log.Error(
|
||||
"failed to update basic proving_status",
|
||||
"msg.ID", msg.ID,
|
||||
"status", types.ProvingTaskVerified,
|
||||
"error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
}
|
||||
|
||||
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(roller.PublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "proof time", proofTimeSec)
|
||||
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec)
|
||||
} else {
|
||||
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsVerifiedFailedTimeTimer(roller.PublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "proof time", proofTimeSec)
|
||||
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -366,7 +435,13 @@ func (m *Manager) CollectProofs(sess *session) {
|
||||
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
|
||||
// Check if session can be replayed
|
||||
if sess.info.Attempts < m.cfg.SessionAttempts {
|
||||
if m.StartProofGenerationSession(nil, sess) {
|
||||
var success bool
|
||||
if sess.info.ProveType == message.AggregatorProve {
|
||||
success = m.StartAggProofGenerationSession(nil, sess)
|
||||
} else if sess.info.ProveType == message.BasicProve {
|
||||
success = m.StartBasicProofGenerationSession(nil, sess)
|
||||
}
|
||||
if success {
|
||||
m.mu.Lock()
|
||||
for pk := range sess.info.Rollers {
|
||||
m.freeTaskIDForRoller(pk, sess.info.ID)
|
||||
@@ -384,9 +459,17 @@ func (m *Manager) CollectProofs(sess *session) {
|
||||
// Note that this is only a workaround for testnet here.
|
||||
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
|
||||
// so as to re-distribute the task in the future
|
||||
if err := m.orm.UpdateProvingStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("fail to reset task_status as Unassigned", "id", sess.info.ID, "err", err)
|
||||
if sess.info.ProveType == message.BasicProve {
|
||||
if err := m.orm.UpdateProvingStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("fail to reset basic task_status as Unassigned", "id", sess.info.ID, "err", err)
|
||||
}
|
||||
}
|
||||
if sess.info.ProveType == message.AggregatorProve {
|
||||
if err := m.orm.UpdateAggTaskStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("fail to reset aggregator task_status as Unassigned", "id", sess.info.ID, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
for pk := range sess.info.Rollers {
|
||||
m.freeTaskIDForRoller(pk, sess.info.ID)
|
||||
@@ -401,9 +484,17 @@ func (m *Manager) CollectProofs(sess *session) {
|
||||
m.mu.Lock()
|
||||
sess.info.Rollers[ret.pk].Status = ret.status
|
||||
if sess.isSessionFailed() {
|
||||
if err := m.orm.UpdateProvingStatus(ret.id, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("failed to update proving_status as failed", "msg.ID", ret.id, "error", err)
|
||||
if ret.typ == message.BasicProve {
|
||||
if err := m.orm.UpdateProvingStatus(ret.id, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("failed to update basic proving_status as failed", "msg.ID", ret.id, "error", err)
|
||||
}
|
||||
}
|
||||
if ret.typ == message.AggregatorProve {
|
||||
if err := m.orm.UpdateAggTaskStatus(ret.id, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("failed to update aggregator proving_status as failed", "msg.ID", ret.id, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
coordinatorSessionsFailedTotalCounter.Inc(1)
|
||||
}
|
||||
if err := m.orm.SetSessionInfo(sess.info); err != nil {
|
||||
@@ -477,20 +568,21 @@ func (m *Manager) APIs() []rpc.API {
|
||||
}
|
||||
}
|
||||
|
||||
// StartProofGenerationSession starts a proof generation session
|
||||
func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSession *session) (success bool) {
|
||||
// StartBasicProofGenerationSession starts a basic proof generation session
|
||||
func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevSession *session) (success bool) {
|
||||
var taskId string
|
||||
if task != nil {
|
||||
taskId = task.Hash
|
||||
} else {
|
||||
taskId = prevSession.info.ID
|
||||
}
|
||||
if m.GetNumberOfIdleRollers() == 0 {
|
||||
log.Warn("no idle roller when starting proof generation session", "id", taskId)
|
||||
if m.GetNumberOfIdleRollers(message.BasicProve) == 0 {
|
||||
log.Warn("no idle basic roller when starting proof generation session", "id", taskId)
|
||||
return false
|
||||
}
|
||||
|
||||
log.Info("start proof generation session", "id", taskId)
|
||||
log.Info("start basic proof generation session", "id", taskId)
|
||||
|
||||
defer func() {
|
||||
if !success {
|
||||
if task != nil {
|
||||
@@ -529,17 +621,17 @@ func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSessio
|
||||
}
|
||||
}
|
||||
|
||||
// Dispatch task to rollers.
|
||||
// Dispatch task to basic rollers.
|
||||
rollers := make(map[string]*types.RollerStatus)
|
||||
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
|
||||
roller := m.selectRoller()
|
||||
roller := m.selectRoller(message.BasicProve)
|
||||
if roller == nil {
|
||||
log.Info("selectRoller returns nil")
|
||||
break
|
||||
}
|
||||
log.Info("roller is picked", "session id", taskId, "name", roller.Name, "public key", roller.PublicKey)
|
||||
// send trace to roller
|
||||
if !roller.sendTask(taskId, traces) {
|
||||
if !roller.sendTask(&message.TaskMsg{ID: taskId, Type: message.BasicProve, Traces: traces}) {
|
||||
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskId)
|
||||
continue
|
||||
}
|
||||
@@ -548,7 +640,7 @@ func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSessio
|
||||
}
|
||||
// No roller assigned.
|
||||
if len(rollers) == 0 {
|
||||
log.Error("no roller assigned", "id", taskId, "number of idle rollers", m.GetNumberOfIdleRollers())
|
||||
log.Error("no roller assigned", "id", taskId, "number of idle basic rollers", m.GetNumberOfIdleRollers(message.BasicProve))
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -563,6 +655,7 @@ func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSessio
|
||||
info: &types.SessionInfo{
|
||||
ID: taskId,
|
||||
Rollers: rollers,
|
||||
ProveType: message.BasicProve,
|
||||
StartTimestamp: time.Now().Unix(),
|
||||
Attempts: 1,
|
||||
},
|
||||
@@ -576,6 +669,7 @@ func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSessio
|
||||
log.Info(
|
||||
"assigned proof to roller",
|
||||
"session id", sess.info.ID,
|
||||
"session type", sess.info.ProveType,
|
||||
"roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey,
|
||||
"proof status", roller.Status)
|
||||
@@ -595,20 +689,110 @@ func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSessio
|
||||
return true
|
||||
}
|
||||
|
||||
// IsRollerIdle determines whether this roller is idle.
|
||||
func (m *Manager) IsRollerIdle(hexPk string) bool {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
// We need to iterate over all sessions because finished sessions will be deleted until the
|
||||
// timeout. So a busy roller could be marked as idle in a finished session.
|
||||
for _, sess := range m.sessions {
|
||||
for pk, roller := range sess.info.Rollers {
|
||||
if pk == hexPk && roller.Status == types.RollerAssigned {
|
||||
return false
|
||||
// StartAggProofGenerationSession starts an aggregator proof generation.
|
||||
func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSession *session) (success bool) {
|
||||
var taskId string
|
||||
if task != nil {
|
||||
taskId = task.ID
|
||||
} else {
|
||||
taskId = prevSession.info.ID
|
||||
}
|
||||
if m.GetNumberOfIdleRollers(message.AggregatorProve) == 0 {
|
||||
log.Warn("no idle common roller when starting proof generation session", "id", taskId)
|
||||
return false
|
||||
}
|
||||
|
||||
log.Info("start aggregator proof generation session", "id", taskId)
|
||||
|
||||
defer func() {
|
||||
if !success {
|
||||
if task != nil {
|
||||
if err := m.orm.UpdateAggTaskStatus(taskId, types.ProvingTaskUnassigned); err != nil {
|
||||
log.Error("fail to reset task_status as Unassigned", "id", taskId, "err", err)
|
||||
} else if err := m.orm.UpdateAggTaskStatus(taskId, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("fail to reset task_status as Failed", "id", taskId, "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
// get agg task from db
|
||||
subProofs, err := m.orm.GetSubProofsByAggTaskID(taskId)
|
||||
if err != nil {
|
||||
log.Error("failed to get sub proofs for aggregator task", "id", taskId, "error", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Dispatch task to basic rollers.
|
||||
rollers := make(map[string]*types.RollerStatus)
|
||||
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
|
||||
roller := m.selectRoller(message.AggregatorProve)
|
||||
if roller == nil {
|
||||
log.Info("selectRoller returns nil")
|
||||
break
|
||||
}
|
||||
log.Info("roller is picked", "session id", taskId, "name", roller.Name, "type", roller.Type, "public key", roller.PublicKey)
|
||||
// send trace to roller
|
||||
if !roller.sendTask(&message.TaskMsg{
|
||||
ID: taskId,
|
||||
Type: message.AggregatorProve,
|
||||
SubProofs: subProofs,
|
||||
}) {
|
||||
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskId)
|
||||
continue
|
||||
}
|
||||
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
|
||||
rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
|
||||
}
|
||||
// No roller assigned.
|
||||
if len(rollers) == 0 {
|
||||
log.Error("no roller assigned", "id", taskId, "number of idle aggregator rollers", m.GetNumberOfIdleRollers(message.AggregatorProve))
|
||||
return false
|
||||
}
|
||||
|
||||
// Update session proving status as assigned.
|
||||
if err = m.orm.UpdateAggTaskStatus(taskId, types.ProvingTaskAssigned); err != nil {
|
||||
log.Error("failed to update task status", "id", taskId, "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Create a proof generation session.
|
||||
sess := &session{
|
||||
info: &types.SessionInfo{
|
||||
ID: taskId,
|
||||
Rollers: rollers,
|
||||
ProveType: message.AggregatorProve,
|
||||
StartTimestamp: time.Now().Unix(),
|
||||
Attempts: 1,
|
||||
},
|
||||
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
|
||||
}
|
||||
if prevSession != nil {
|
||||
sess.info.Attempts += prevSession.info.Attempts
|
||||
}
|
||||
|
||||
for _, roller := range sess.info.Rollers {
|
||||
log.Info(
|
||||
"assigned proof to roller",
|
||||
"session id", sess.info.ID,
|
||||
"session type", sess.info.ProveType,
|
||||
"roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey,
|
||||
"proof status", roller.Status)
|
||||
}
|
||||
|
||||
// Store session info.
|
||||
if err = m.orm.SetSessionInfo(sess.info); err != nil {
|
||||
log.Error("db set session info fail", "session id", sess.info.ID, "error", err)
|
||||
return false
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
m.sessions[taskId] = sess
|
||||
m.mu.Unlock()
|
||||
go m.CollectProofs(sess)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -623,7 +807,7 @@ func (m *Manager) VerifyToken(authMsg *message.AuthMsg) (bool, error) {
|
||||
pubkey, _ := authMsg.PublicKey()
|
||||
// GetValue returns nil if value is expired
|
||||
if token, ok := m.tokenCache.Get(pubkey); !ok || token != authMsg.Identity.Token {
|
||||
return false, errors.New("failed to find corresponding token")
|
||||
return false, fmt.Errorf("failed to find corresponding token. roller name: %s. roller pk: %s.", authMsg.Identity.Name, pubkey)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -10,6 +10,8 @@ import (
|
||||
"math/big"
|
||||
"net/http"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -26,38 +28,33 @@ import (
|
||||
|
||||
"scroll-tech/coordinator"
|
||||
client2 "scroll-tech/coordinator/client"
|
||||
coordinator_config "scroll-tech/coordinator/config"
|
||||
"scroll-tech/coordinator/verifier"
|
||||
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/message"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
bridge_config "scroll-tech/bridge/config"
|
||||
|
||||
coordinator_config "scroll-tech/coordinator/config"
|
||||
)
|
||||
|
||||
var (
|
||||
cfg *bridge_config.Config
|
||||
base *docker.App
|
||||
|
||||
base *docker.App
|
||||
batchData *types.BatchData
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
base = docker.NewDockerApp()
|
||||
m.Run()
|
||||
base.Free()
|
||||
}
|
||||
|
||||
func randomURL() string {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
|
||||
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
|
||||
}
|
||||
|
||||
func setEnv(t *testing.T) (err error) {
|
||||
// Load config.
|
||||
cfg, err = bridge_config.NewConfig("../bridge/config.json")
|
||||
assert.NoError(t, err)
|
||||
base.RunImages(t)
|
||||
|
||||
// Create db container.
|
||||
cfg.DBConfig.DSN = base.DBEndpoint()
|
||||
|
||||
base.RunDBImage(t)
|
||||
templateBlockTrace, err := os.ReadFile("../common/testdata/blockTrace_02.json")
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -87,11 +84,11 @@ func TestApis(t *testing.T) {
|
||||
t.Run("TestSeveralConnections", testSeveralConnections)
|
||||
t.Run("TestValidProof", testValidProof)
|
||||
t.Run("TestInvalidProof", testInvalidProof)
|
||||
t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
|
||||
t.Run("TestTimedoutProof", testTimedoutProof)
|
||||
t.Run("TestIdleRollerSelection", testIdleRollerSelection)
|
||||
// TODO: Restart roller alone when received task, can add this test case in integration-test.
|
||||
//t.Run("TestRollerReconnect", testRollerReconnect)
|
||||
t.Run("TestGracefulRestart", testGracefulRestart)
|
||||
t.Run("TestListRollers", testListRollers)
|
||||
|
||||
// Teardown
|
||||
t.Cleanup(func() {
|
||||
@@ -100,15 +97,12 @@ func TestApis(t *testing.T) {
|
||||
}
|
||||
|
||||
func testHandshake(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
l2db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
|
||||
defer l2db.Close()
|
||||
// Reset db.
|
||||
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))
|
||||
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
|
||||
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
|
||||
defer func() {
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
@@ -117,19 +111,16 @@ func testHandshake(t *testing.T) {
|
||||
roller := newMockRoller(t, "roller_test", wsURL)
|
||||
defer roller.close()
|
||||
|
||||
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers())
|
||||
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
|
||||
}
|
||||
|
||||
func testFailedHandshake(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
l2db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
|
||||
defer l2db.Close()
|
||||
// Reset db.
|
||||
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))
|
||||
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
|
||||
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
|
||||
defer func() {
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
@@ -154,7 +145,7 @@ func testFailedHandshake(t *testing.T) {
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
assert.NoError(t, authMsg.Sign(privkey))
|
||||
assert.NoError(t, authMsg.SignWithKey(privkey))
|
||||
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
|
||||
assert.Error(t, err)
|
||||
|
||||
@@ -172,30 +163,27 @@ func testFailedHandshake(t *testing.T) {
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
assert.NoError(t, authMsg.Sign(privkey))
|
||||
assert.NoError(t, authMsg.SignWithKey(privkey))
|
||||
token, err := client.RequestToken(ctx, authMsg)
|
||||
assert.NoError(t, err)
|
||||
|
||||
authMsg.Identity.Token = token
|
||||
assert.NoError(t, authMsg.Sign(privkey))
|
||||
assert.NoError(t, authMsg.SignWithKey(privkey))
|
||||
|
||||
<-time.After(6 * time.Second)
|
||||
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
|
||||
assert.Error(t, err)
|
||||
|
||||
assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers())
|
||||
assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
|
||||
}
|
||||
|
||||
func testSeveralConnections(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
l2db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
|
||||
defer l2db.Close()
|
||||
// Reset db.
|
||||
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))
|
||||
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
|
||||
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
|
||||
defer func() {
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
@@ -216,7 +204,7 @@ func testSeveralConnections(t *testing.T) {
|
||||
assert.NoError(t, eg.Wait())
|
||||
|
||||
// check roller's idle connections
|
||||
assert.Equal(t, batch, rollerManager.GetNumberOfIdleRollers())
|
||||
assert.Equal(t, batch, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
|
||||
|
||||
// close connection
|
||||
for _, roller := range rollers {
|
||||
@@ -230,7 +218,7 @@ func testSeveralConnections(t *testing.T) {
|
||||
for {
|
||||
select {
|
||||
case <-tick:
|
||||
if rollerManager.GetNumberOfIdleRollers() == 0 {
|
||||
if rollerManager.GetNumberOfIdleRollers(message.BasicProve) == 0 {
|
||||
return
|
||||
}
|
||||
case <-tickStop:
|
||||
@@ -242,14 +230,14 @@ func testSeveralConnections(t *testing.T) {
|
||||
|
||||
func testValidProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 3, wsURL)
rollerManager, handler := setupCoordinator(t, base.DBConfig, 3, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -260,7 +248,11 @@ func testValidProof(t *testing.T) {
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
// only roller 0 submits valid proof.
rollers[i].waitTaskAndSendProof(t, time.Second, false, i == 0)
proofStatus := verifiedSuccess
if i > 0 {
proofStatus = generatedFailed
}
rollers[i].waitTaskAndSendProof(t, time.Second, false, proofStatus)
}
defer func() {
// close connection
@@ -268,7 +260,7 @@ func testValidProof(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.BasicProve))

var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
@@ -301,14 +293,14 @@ func testValidProof(t *testing.T) {

func testInvalidProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 3, wsURL)
rollerManager, handler := setupCoordinator(t, base.DBConfig, 3, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -318,7 +310,7 @@ func testInvalidProof(t *testing.T) {
rollers := make([]*mockRoller, 3)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
rollers[i].waitTaskAndSendProof(t, time.Second, false, false)
rollers[i].waitTaskAndSendProof(t, time.Second, false, verifiedFailed)
}
defer func() {
// close connection
@@ -326,7 +318,65 @@ func testInvalidProof(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.BasicProve))

var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
assert.NoError(t, err)
for i := range hashes {
assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
hashes[i] = batchData.Hash().Hex()
}
assert.NoError(t, dbTx.Commit())

// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(hashes) > 0 {
select {
case <-tick:
status, err := l2db.GetProvingStatusByHash(hashes[0])
assert.NoError(t, err)
if status == types.ProvingTaskFailed {
hashes = hashes[1:]
}
case <-tickStop:
t.Error("failed to check proof status")
return
}
}
}

func testProofGeneratedFailed(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 3, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()

// create mock rollers.
rollers := make([]*mockRoller, 3)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
rollers[i].waitTaskAndSendProof(t, time.Second, false, generatedFailed)
}
defer func() {
// close connection
for _, roller := range rollers {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.BasicProve))

var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
@@ -359,14 +409,14 @@ func testInvalidProof(t *testing.T) {

func testTimedoutProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -378,7 +428,7 @@ func testTimedoutProof(t *testing.T) {
// close connection
roller1.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.BasicProve))

var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
@@ -411,12 +461,12 @@ func testTimedoutProof(t *testing.T) {

// create second mock roller, that will send valid proof.
roller2 := newMockRoller(t, "roller_test"+strconv.Itoa(1), wsURL)
roller2.waitTaskAndSendProof(t, time.Second, false, true)
roller2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
defer func() {
// close connection
roller2.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.BasicProve))

// wait manager to finish first CollectProofs
<-time.After(60 * time.Second)
@@ -439,14 +489,14 @@ func testTimedoutProof(t *testing.T) {

func testIdleRollerSelection(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -456,7 +506,7 @@ func testIdleRollerSelection(t *testing.T) {
rollers := make([]*mockRoller, 20)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
rollers[i].waitTaskAndSendProof(t, time.Second, false, true)
rollers[i].waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
}
defer func() {
// close connection
@@ -465,7 +515,7 @@ func testIdleRollerSelection(t *testing.T) {
}
}()

assert.Equal(t, len(rollers), rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, len(rollers), rollerManager.GetNumberOfIdleRollers(message.BasicProve))

var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
@@ -498,7 +548,7 @@ func testIdleRollerSelection(t *testing.T) {

func testGracefulRestart(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
l2db, err := database.NewOrmFactory(base.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
@@ -514,12 +564,12 @@ func testGracefulRestart(t *testing.T) {

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)

// create mock roller
roller := newMockRoller(t, "roller_test", wsURL)
// wait 10 seconds, coordinator restarts before roller submits proof
roller.waitTaskAndSendProof(t, 10*time.Second, false, true)
roller.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)

// wait for coordinator to dispatch task
<-time.After(5 * time.Second)
@@ -531,7 +581,7 @@ func testGracefulRestart(t *testing.T) {
rollerManager.Stop()

// Setup new coordinator and ws server.
newRollerManager, newHandler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
newRollerManager, newHandler := setupCoordinator(t, base.DBConfig, 1, wsURL)
defer func() {
newHandler.Shutdown(context.Background())
newRollerManager.Stop()
@@ -549,7 +599,7 @@ func testGracefulRestart(t *testing.T) {
}

// will overwrite the roller client for `SubmitProof`
roller.waitTaskAndSendProof(t, time.Millisecond*500, true, true)
roller.waitTaskAndSendProof(t, time.Millisecond*500, true, verifiedSuccess)
defer roller.close()

// verify proof status
@@ -575,6 +625,54 @@ func testGracefulRestart(t *testing.T) {
}
}

func testListRollers(t *testing.T) {
// Create db handler and reset db.
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))

// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, base.DBConfig, 1, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()

var names = []string{
"roller_test_1",
"roller_test_2",
"roller_test_3",
}

roller1 := newMockRoller(t, names[0], wsURL)
roller2 := newMockRoller(t, names[1], wsURL)
roller3 := newMockRoller(t, names[2], wsURL)
defer func() {
roller1.close()
roller2.close()
}()

// test ListRollers API
rollers, err := rollerManager.ListRollers()
assert.NoError(t, err)
var rollersName []string
for _, roller := range rollers {
rollersName = append(rollersName, roller.Name)
}
sort.Strings(rollersName)
assert.True(t, reflect.DeepEqual(names, rollersName))

// test ListRollers if one roller closed.
roller3.close()
rollers, err = rollerManager.ListRollers()
assert.NoError(t, err)
var newRollersName []string
for _, roller := range rollers {
newRollersName = append(newRollersName, roller.Name)
}
sort.Strings(newRollersName)
assert.True(t, reflect.DeepEqual(names[:2], newRollersName))
}

func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, rollersPerSession uint8, wsURL string) (rollerManager *coordinator.Manager, handler *http.Server) {
// Get db handler.
db, err := database.NewOrmFactory(dbCfg)
@@ -644,14 +742,14 @@ func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscript
Timestamp: uint32(time.Now().Unix()),
},
}
_ = authMsg.Sign(r.privKey)
_ = authMsg.SignWithKey(r.privKey)

token, err := client.RequestToken(context.Background(), authMsg)
if err != nil {
return nil, nil, err
}
authMsg.Identity.Token = token
_ = authMsg.Sign(r.privKey)
_ = authMsg.SignWithKey(r.privKey)

sub, err := client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
if err != nil {
@@ -669,8 +767,16 @@ func (r *mockRoller) releaseTasks() {
})
}

type proofStatus uint32

const (
verifiedSuccess proofStatus = iota
verifiedFailed
generatedFailed
)

// Wait for the proof task, after receiving the proof task, roller submits proof after proofTime secs.
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, validProof bool) {
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, proofStatus proofStatus) {
// simulating the case that the roller first disconnects and then reconnects to the coordinator
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
if reconnect {
@@ -686,10 +792,10 @@ func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration,
r.releaseTasks()

r.stopCh = make(chan struct{})
go r.loop(t, r.client, proofTime, validProof, r.stopCh)
go r.loop(t, r.client, proofTime, proofStatus, r.stopCh)
}

func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, validProof bool, stopCh chan struct{}) {
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, proofStatus proofStatus, stopCh chan struct{}) {
for {
select {
case task := <-r.taskCh:
@@ -707,8 +813,10 @@ func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.D
Proof: &message.AggProof{},
},
}
if !validProof {
if proofStatus == generatedFailed {
proof.Status = message.StatusProofError
} else if proofStatus == verifiedFailed {
proof.ProofDetail.Proof.Proof = []byte(verifier.InvalidTestProof)
}
assert.NoError(t, proof.Sign(r.privKey))
ok, err := client.SubmitProof(context.Background(), proof)

@@ -3,7 +3,6 @@ package coordinator
import (
"time"

"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
)

@@ -20,8 +19,6 @@ func (m *Manager) updateMetricRollerProofsLastFinishedTimestampGauge(pk string)
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
} else {
log.Error("rollerProofsLastFinishedTimestampGauge is nil", "roller pk", pk)
}
}
}
@@ -31,8 +28,6 @@ func (m *Manager) updateMetricRollerProofsLastAssignedTimestampGauge(pk string)
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
} else {
log.Error("rollerProofsLastAssignedTimestampGauge is nil", "roller pk", pk)
}
}
}
@@ -42,8 +37,6 @@ func (m *Manager) updateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsVerifiedSuccessTimeTimer.Update(d)
} else {
log.Error("rollerProofsVerifiedSuccessTimeTimer is nil", "roller pk", pk)
}
}
}
@@ -53,8 +46,15 @@ func (m *Manager) updateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d t
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsVerifiedFailedTimeTimer.Update(d)
} else {
log.Error("rollerProofsVerifiedFailedTimeTimer is nil", "roller pk", pk)
}
}
}

func (m *Manager) updateMetricRollerProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsGeneratedFailedTimeTimer.Update(d)
}
}
}

@@ -7,19 +7,20 @@ import (
"time"

cmap "github.com/orcaman/concurrent-map"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"

"scroll-tech/common/message"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)

// rollerNode records roller status and send task to connected roller.
type rollerNode struct {
// Roller name
Name string
// Roller type
Type message.ProveType
// Roller public key
PublicKey string
// Roller version
@@ -36,13 +37,10 @@ type rollerNode struct {
*rollerMetrics
}

func (r *rollerNode) sendTask(id string, traces []*geth_types.BlockTrace) bool {
func (r *rollerNode) sendTask(msg *message.TaskMsg) bool {
select {
case r.taskChan <- &message.TaskMsg{
ID: id,
Traces: traces,
}:
r.TaskIDs.Set(id, struct{}{})
case r.taskChan <- msg:
r.TaskIDs.Set(msg.ID, struct{}{})
default:
log.Warn("roller channel is full", "roller name", r.Name, "public key", r.PublicKey)
return false
@@ -77,6 +75,7 @@ func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *m
}
node = &rollerNode{
Name: identity.Name,
Type: identity.RollerType,
Version: identity.Version,
PublicKey: pubkey,
TaskIDs: *taskIDs,
@@ -88,7 +87,7 @@ func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *m
roller := node.(*rollerNode)
// avoid reconnection too frequently.
if time.Since(roller.registerTime) < 60 {
log.Warn("roller reconnect too frequently", "roller_name", identity.Name, "public key", pubkey)
log.Warn("roller reconnect too frequently", "roller_name", identity.Name, "roller_type", identity.RollerType, "public key", pubkey)
return nil, fmt.Errorf("roller reconnect too frequently")
}
// update register time and status
@@ -117,31 +116,27 @@ func (m *Manager) freeTaskIDForRoller(pk string, id string) {
}

// GetNumberOfIdleRollers return the count of idle rollers.
func (m *Manager) GetNumberOfIdleRollers() (count int) {
for i, pk := range m.rollerPool.Keys() {
func (m *Manager) GetNumberOfIdleRollers(rollerType message.ProveType) (count int) {
for _, pk := range m.rollerPool.Keys() {
if val, ok := m.rollerPool.Get(pk); ok {
r := val.(*rollerNode)
if r.TaskIDs.Count() == 0 {
if r.TaskIDs.Count() == 0 && r.Type == rollerType {
count++
}
} else {
log.Error("rollerPool Get fail", "pk", pk, "idx", i, "pk len", pk)
}
}
return count
}

func (m *Manager) selectRoller() *rollerNode {
func (m *Manager) selectRoller(rollerType message.ProveType) *rollerNode {
pubkeys := m.rollerPool.Keys()
for len(pubkeys) > 0 {
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
if val, ok := m.rollerPool.Get(pubkeys[idx.Int64()]); ok {
r := val.(*rollerNode)
if r.TaskIDs.Count() == 0 {
if r.TaskIDs.Count() == 0 && r.Type == rollerType {
return r
}
} else {
log.Error("rollerPool Get fail", "pk", pubkeys[idx.Int64()], "idx", idx.Int64(), "pk len", len(pubkeys))
}
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
}
Some files were not shown because too many files have changed in this diff