Compare commits

...

13 Commits

Author SHA1 Message Date
Péter Garamvölgyi
ff30622314 feat: auto-finalize batches on Alpha testnet (#791) 2023-08-15 15:41:54 +08:00
HAOYUatHZ
4bca06a4c1 Merge remote-tracking branch 'origin/develop' into alpha 2023-05-09 15:52:48 +08:00
georgehao
e086c2739f test(bridge): add watcher tests (#451)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: maskpp <maskpp266@gmail.com>
2023-05-09 15:46:24 +08:00
Max Wolff
4dfa81d890 feat(contracts): remove L1 Fee in Oracle (#454) 2023-05-09 15:09:42 +08:00
HAOYUatHZ
4549439dab chore(coordinator): add more logs to VerifyToken (#456) 2023-05-08 22:30:52 +08:00
maskpp
b4d8e5ffd7 test:bump l2geth docker version (#455) 2023-05-08 19:46:21 +08:00
Lawliet-Chan
5b49c81692 feat(roller&coordinator): add aggregation roller communication protocol (#432)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-05-06 20:19:43 +08:00
Péter Garamvölgyi
071a7772dd fix already executed revert message (#407)
Co-authored-by: maskpp <maskpp266@gmail.com>
2023-04-03 16:19:40 +02:00
Péter Garamvölgyi
9d61149311 Merge tag 'v3.0.1' into alpha
fetch block transaction data instead of trace
2023-04-03 15:32:10 +02:00
HAOYUatHZ
0b607507b7 Merge pull request #381 from scroll-tech/develop
merge `develop` into `alpha`
2023-03-21 21:36:04 +08:00
HAOYUatHZ
462e85e591 Merge pull request #372 from scroll-tech/develop
merge develop into alpha
2023-03-18 16:24:42 +08:00
HAOYUatHZ
f46391c92b Merge pull request #370 from scroll-tech/develop
merge `develop` into `alpha`
2023-03-16 10:29:48 +08:00
HAOYUatHZ
10cececb60 Merge pull request #367 from scroll-tech/develop
merge develop into alpha
2023-03-15 09:07:27 +08:00
32 changed files with 1406 additions and 242 deletions

View File

@@ -104,7 +104,7 @@ var L1MessageQueueMetaData = &bind.MetaData{
// L2GasPriceOracleMetaData contains all meta data concerning the L2GasPriceOracle contract.
var L2GasPriceOracleMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"L2BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l2BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\
"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL2BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_overhead\",\"type\":\"uint256\"}],\"name\":\"setOverhead\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_scalar\",\"type\":\"uint256\"}],\"name\":\"setScalar\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"L2BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldWhitelist\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"UpdateWhitelist\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l2BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL2BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\
":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"updateWhitelist\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"whitelist\",\"outputs\":[{\"internalType\":\"contract IWhitelist\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}
// L2ScrollMessengerMetaData contains all meta data concerning the L2ScrollMessenger contract.

View File

@@ -7,6 +7,7 @@ require (
github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
golang.org/x/sync v0.1.0
@@ -22,8 +23,10 @@ require (
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.14 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -35,6 +38,7 @@ require (
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.5.2 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/smartystreets/assertions v1.13.1 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect

View File

@@ -36,6 +36,8 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
@@ -49,6 +51,7 @@ github.com/iden3/go-iden3-crypto v0.0.14/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBe
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -96,7 +99,11 @@ github.com/scroll-tech/zktrie v0.5.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ7
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU=
github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=

View File

@@ -36,7 +36,7 @@ func testBatchProposerProposeBatch(t *testing.T) {
commitCalldataSizeLimit: 500,
orm: db,
}
patchGuard1 := gomonkey.ApplyMethodFunc(p.orm, "GetL2WrappedBlocks", func(fields map[string]interface{}, args ...string) ([]*types.WrappedBlock, error) {
patchGuard := gomonkey.ApplyMethodFunc(p.orm, "GetL2WrappedBlocks", func(fields map[string]interface{}, args ...string) ([]*types.WrappedBlock, error) {
hash, _ := fields["hash"].(string)
if hash == "blockWithLongData" {
longData := strings.Repeat("0", 1000)
@@ -52,11 +52,10 @@ func testBatchProposerProposeBatch(t *testing.T) {
}},
}}, nil
})
defer patchGuard1.Reset()
patchGuard2 := gomonkey.ApplyPrivateMethod(p, "createBatchForBlocks", func(*BatchProposer, []*types.BlockInfo) error {
defer patchGuard.Reset()
patchGuard.ApplyPrivateMethod(p, "createBatchForBlocks", func(*BatchProposer, []*types.BlockInfo) error {
return nil
})
defer patchGuard2.Reset()
block1 := &types.BlockInfo{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix()) - 200}
block2 := &types.BlockInfo{Number: 2, GasUsed: 200, TxNum: 2, BlockTimestamp: uint64(time.Now().Unix())}

View File

@@ -101,6 +101,24 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
}
}
// ProcessedBlockHeight returns the highest L1 block height the watcher has
// already processed.
// Currently only used by unit tests.
func (w *L1WatcherClient) ProcessedBlockHeight() uint64 {
	return w.processedBlockHeight
}
// Confirmations returns the confirmation setting the watcher uses when
// resolving the latest confirmed L1 block.
// Currently only used by unit tests.
func (w *L1WatcherClient) Confirmations() rpc.BlockNumber {
	return w.confirmations
}
// SetConfirmations overrides the watcher's confirmation setting.
// Currently only used by unit tests.
func (w *L1WatcherClient) SetConfirmations(confirmations rpc.BlockNumber) {
	w.confirmations = confirmations
}
// FetchBlockHeader pull latest L1 blocks and save in DB
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
fromBlock := int64(w.processedBlockHeight) + 1

View File

@@ -2,21 +2,34 @@ package watcher
import (
"context"
"errors"
"math/big"
"testing"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
commonTypes "scroll-tech/common/types"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func testStartWatcher(t *testing.T) {
// Create db handler and reset db.
func setupL1Watcher(t *testing.T) (*L1WatcherClient, database.OrmFactory) {
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
client, err := ethclient.Dial(base.L1gethImg.Endpoint())
assert.NoError(t, err)
@@ -25,4 +38,477 @@ func testStartWatcher(t *testing.T) {
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
assert.NoError(t, watcher.FetchContractEvent())
return watcher, db
}
// testFetchContractEvent checks that a freshly constructed L1 watcher can
// fetch contract events without error.
func testFetchContractEvent(t *testing.T) {
	l1Watcher, ormFactory := setupL1Watcher(t)
	defer ormFactory.Close()
	fetchErr := l1Watcher.FetchContractEvent()
	assert.NoError(t, fetchErr)
}
// testL1WatcherClientFetchBlockHeader exercises FetchBlockHeader across its
// main branches: a no-op range (toBlock < fromBlock), a header-fetch failure,
// a DB insert failure, and the happy path. The eth client and the ORM are
// monkey-patched, so no live node is required beyond setup.
func testL1WatcherClientFetchBlockHeader(t *testing.T) {
	watcher, db := setupL1Watcher(t)
	defer db.Close()
	convey.Convey("test toBlock < fromBlock", t, func() {
		// processedBlockHeight is a uint64, so the only value that cannot be
		// decremented is exactly zero; `<= 0` on an unsigned value is
		// misleading and flagged by staticcheck.
		var blockHeight uint64
		if watcher.ProcessedBlockHeight() == 0 {
			blockHeight = 0
		} else {
			blockHeight = watcher.ProcessedBlockHeight() - 1
		}
		err := watcher.FetchBlockHeader(blockHeight)
		assert.NoError(t, err)
	})
	convey.Convey("test get header from client error", t, func() {
		// Patch HeaderByNumber on the (nil) client so every lookup fails.
		var c *ethclient.Client
		patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
			return nil, ethereum.NotFound
		})
		defer patchGuard.Reset()
		var blockHeight uint64 = 10
		err := watcher.FetchBlockHeader(blockHeight)
		assert.Error(t, err)
	})
	convey.Convey("insert l1 block error", t, func() {
		// Header fetch succeeds, but persisting the block fails.
		var c *ethclient.Client
		patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
			if height == nil {
				height = big.NewInt(100)
			}
			t.Log(height)
			return &types.Header{
				BaseFee: big.NewInt(100),
			}, nil
		})
		defer patchGuard.Reset()
		patchGuard.ApplyMethodFunc(db, "InsertL1Blocks", func(ctx context.Context, blocks []*commonTypes.L1BlockInfo) error {
			return errors.New("insert failed")
		})
		var blockHeight uint64 = 10
		err := watcher.FetchBlockHeader(blockHeight)
		assert.Error(t, err)
	})
	convey.Convey("fetch block header success", t, func() {
		// Both the header fetch and the DB insert succeed.
		var c *ethclient.Client
		patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
			if height == nil {
				height = big.NewInt(100)
			}
			t.Log(height)
			return &types.Header{
				BaseFee: big.NewInt(100),
			}, nil
		})
		defer patchGuard.Reset()
		patchGuard.ApplyMethodFunc(db, "InsertL1Blocks", func(ctx context.Context, blocks []*commonTypes.L1BlockInfo) error {
			return nil
		})
		var blockHeight uint64 = 10
		err := watcher.FetchBlockHeader(blockHeight)
		assert.NoError(t, err)
	})
}
// testL1WatcherClientFetchContractEvent walks FetchContractEvent through each
// failure branch and finally the success path. A single shared patchGuard is
// extended step by step, so the ORDER of the Convey blocks below is
// significant: every later block relies on the stubs installed before it.
func testL1WatcherClientFetchContractEvent(t *testing.T) {
	watcher, db := setupL1Watcher(t)
	defer db.Close()
	// Use a named block tag so the confirmed height is resolved via
	// HeaderByNumber (which is patched below).
	watcher.SetConfirmations(rpc.SafeBlockNumber)
	convey.Convey("get latest confirmed block number failure", t, func() {
		var c *ethclient.Client
		patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
			return nil, ethereum.NotFound
		})
		defer patchGuard.Reset()
		err := watcher.FetchContractEvent()
		assert.Error(t, err)
	})
	// From here on the header lookup always succeeds at height 100.
	var c *ethclient.Client
	patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
		if height == nil {
			height = big.NewInt(100)
		}
		t.Log(height)
		return &types.Header{
			Number:  big.NewInt(100),
			BaseFee: big.NewInt(100),
		}, nil
	})
	defer patchGuard.Reset()
	convey.Convey("filter logs failure", t, func() {
		targetErr := errors.New("call filter failure")
		patchGuard.ApplyMethodFunc(c, "FilterLogs", func(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) {
			return nil, targetErr
		})
		err := watcher.FetchContractEvent()
		assert.EqualError(t, err, targetErr.Error())
	})
	// Subsequent branches receive exactly one (zero-address) log to parse.
	patchGuard.ApplyMethodFunc(c, "FilterLogs", func(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) {
		return []types.Log{
			{
				Address: common.HexToAddress("0x0000000000000000000000000000000000000000"),
			},
		}, nil
	})
	convey.Convey("parse bridge event logs failure", t, func() {
		targetErr := errors.New("parse log failure")
		patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []geth_types.Log) ([]*commonTypes.L1Message, []relayedMessage, []rollupEvent, error) {
			return nil, nil, nil, targetErr
		})
		err := watcher.FetchContractEvent()
		assert.Equal(t, err.Error(), targetErr.Error())
	})
	// Fixed parse result for the remaining branches: two rollup events
	// (finalized + committed) and two relayed messages (one successful, one
	// failed); no queued L1 messages.
	patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []geth_types.Log) ([]*commonTypes.L1Message, []relayedMessage, []rollupEvent, error) {
		rollupEvents := []rollupEvent{
			{
				batchHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
				txHash:    common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
				status:    commonTypes.RollupFinalized,
			},
			{
				batchHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
				txHash:    common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"),
				status:    commonTypes.RollupCommitted,
			},
		}
		relayedMessageEvents := []relayedMessage{
			{
				msgHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
				txHash:       common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
				isSuccessful: true,
			},
			{
				msgHash:      common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
				txHash:       common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"),
				isSuccessful: false,
			},
		}
		return nil, relayedMessageEvents, rollupEvents, nil
	})
	convey.Convey("db get rollup status by hash list failure", t, func() {
		targetErr := errors.New("get db failure")
		patchGuard.ApplyMethodFunc(db, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
			return nil, targetErr
		})
		err := watcher.FetchContractEvent()
		assert.Equal(t, err.Error(), targetErr.Error())
	})
	convey.Convey("rollup status mismatch batch hashes length", t, func() {
		// Only one status comes back for two batch hashes; the mismatch is
		// tolerated — no error expected.
		patchGuard.ApplyMethodFunc(db, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
			s := []commonTypes.RollupStatus{
				commonTypes.RollupFinalized,
			}
			return s, nil
		})
		err := watcher.FetchContractEvent()
		assert.NoError(t, err)
	})
	// Matching statuses so the later branches reach the DB update calls.
	patchGuard.ApplyMethodFunc(db, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
		s := []commonTypes.RollupStatus{
			commonTypes.RollupPending,
			commonTypes.RollupCommitting,
		}
		return s, nil
	})
	convey.Convey("db update RollupFinalized status failure", t, func() {
		targetErr := errors.New("UpdateFinalizeTxHashAndRollupStatus RollupFinalized failure")
		patchGuard.ApplyMethodFunc(db, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
			return targetErr
		})
		err := watcher.FetchContractEvent()
		assert.Equal(t, targetErr.Error(), err.Error())
	})
	patchGuard.ApplyMethodFunc(db, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
		return nil
	})
	convey.Convey("db update RollupCommitted status failure", t, func() {
		targetErr := errors.New("UpdateCommitTxHashAndRollupStatus RollupCommitted failure")
		patchGuard.ApplyMethodFunc(db, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
			return targetErr
		})
		err := watcher.FetchContractEvent()
		assert.Equal(t, targetErr.Error(), err.Error())
	})
	patchGuard.ApplyMethodFunc(db, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
		return nil
	})
	convey.Convey("db update layer2 status and layer1 hash failure", t, func() {
		targetErr := errors.New("UpdateLayer2StatusAndLayer1Hash failure")
		patchGuard.ApplyMethodFunc(db, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
			return targetErr
		})
		err := watcher.FetchContractEvent()
		assert.Equal(t, targetErr.Error(), err.Error())
	})
	patchGuard.ApplyMethodFunc(db, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
		return nil
	})
	convey.Convey("db save l1 message failure", t, func() {
		targetErr := errors.New("SaveL1Messages failure")
		patchGuard.ApplyMethodFunc(db, "SaveL1Messages", func(context.Context, []*commonTypes.L1Message) error {
			return targetErr
		})
		err := watcher.FetchContractEvent()
		assert.Equal(t, targetErr.Error(), err.Error())
	})
	patchGuard.ApplyMethodFunc(db, "SaveL1Messages", func(context.Context, []*commonTypes.L1Message) error {
		return nil
	})
	convey.Convey("FetchContractEvent success", t, func() {
		// All collaborators are now stubbed to succeed.
		err := watcher.FetchContractEvent()
		assert.NoError(t, err)
	})
}
// testParseBridgeEventLogsL1QueueTransactionEventSignature covers
// parseBridgeEventLogs for a QueueTransaction log: once with the ABI unpack
// stubbed to fail, once with it stubbed to produce a populated event.
func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
	l1Watcher, ormFactory := setupL1Watcher(t)
	defer ormFactory.Close()
	// A single log carrying the QueueTransaction topic; its payload is
	// supplied by the UnpackLog stubs below.
	queueLogs := []geth_types.Log{
		{
			Topics:      []common.Hash{bridge_abi.L1QueueTransactionEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}
	convey.Convey("unpack QueueTransaction log failure", t, func() {
		unpackErr := errors.New("UnpackLog QueueTransaction failure")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
			return unpackErr
		})
		defer patches.Reset()
		l2Messages, relayedMessages, rollupEvents, err := l1Watcher.parseBridgeEventLogs(queueLogs)
		assert.EqualError(t, err, unpackErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
		assert.Empty(t, rollupEvents)
	})
	convey.Convey("L1QueueTransactionEventSignature success", t, func() {
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
			evt := out.(*bridge_abi.L1QueueTransactionEvent)
			evt.QueueIndex = big.NewInt(100)
			evt.Data = []byte("test data")
			evt.Sender = common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
			evt.Value = big.NewInt(1000)
			evt.Target = common.HexToAddress("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
			evt.GasLimit = big.NewInt(10)
			return nil
		})
		defer patches.Reset()
		l2Messages, relayedMessages, rollupEvents, err := l1Watcher.parseBridgeEventLogs(queueLogs)
		assert.NoError(t, err)
		assert.Empty(t, relayedMessages)
		assert.Empty(t, rollupEvents)
		assert.Len(t, l2Messages, 1)
		assert.Equal(t, l2Messages[0].Value, big.NewInt(1000).String())
	})
}
// testParseBridgeEventLogsL1RelayedMessageEventSignature covers
// parseBridgeEventLogs for a RelayedMessage log: a failing ABI unpack, then a
// successful unpack yielding exactly one relayed message.
func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
	l1Watcher, ormFactory := setupL1Watcher(t)
	defer ormFactory.Close()
	relayedLogs := []geth_types.Log{
		{
			Topics:      []common.Hash{bridge_abi.L1RelayedMessageEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}
	convey.Convey("unpack RelayedMessage log failure", t, func() {
		unpackErr := errors.New("UnpackLog RelayedMessage failure")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
			return unpackErr
		})
		defer patches.Reset()
		l2Messages, relayedMessages, rollupEvents, err := l1Watcher.parseBridgeEventLogs(relayedLogs)
		assert.EqualError(t, err, unpackErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
		assert.Empty(t, rollupEvents)
	})
	convey.Convey("L1RelayedMessageEventSignature success", t, func() {
		wantHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
			evt := out.(*bridge_abi.L1RelayedMessageEvent)
			evt.MessageHash = wantHash
			return nil
		})
		defer patches.Reset()
		l2Messages, relayedMessages, rollupEvents, err := l1Watcher.parseBridgeEventLogs(relayedLogs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Empty(t, rollupEvents)
		assert.Len(t, relayedMessages, 1)
		assert.Equal(t, relayedMessages[0].msgHash, wantHash)
	})
}
// testParseBridgeEventLogsL1FailedRelayedMessageEventSignature covers
// parseBridgeEventLogs for a FailedRelayedMessage log: a failing ABI unpack,
// then a successful unpack yielding exactly one relayed message.
func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T) {
	l1Watcher, ormFactory := setupL1Watcher(t)
	defer ormFactory.Close()
	failedLogs := []geth_types.Log{
		{
			Topics:      []common.Hash{bridge_abi.L1FailedRelayedMessageEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}
	convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
		unpackErr := errors.New("UnpackLog FailedRelayedMessage failure")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
			return unpackErr
		})
		defer patches.Reset()
		l2Messages, relayedMessages, rollupEvents, err := l1Watcher.parseBridgeEventLogs(failedLogs)
		assert.EqualError(t, err, unpackErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
		assert.Empty(t, rollupEvents)
	})
	convey.Convey("L1FailedRelayedMessageEventSignature success", t, func() {
		wantHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
			evt := out.(*bridge_abi.L1FailedRelayedMessageEvent)
			evt.MessageHash = wantHash
			return nil
		})
		defer patches.Reset()
		l2Messages, relayedMessages, rollupEvents, err := l1Watcher.parseBridgeEventLogs(failedLogs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Empty(t, rollupEvents)
		assert.Len(t, relayedMessages, 1)
		assert.Equal(t, relayedMessages[0].msgHash, wantHash)
	})
}
// testParseBridgeEventLogsL1CommitBatchEventSignature covers
// parseBridgeEventLogs for a CommitBatch log: a failing ABI unpack, then a
// successful unpack yielding one rollup event in the committed state.
func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
	l1Watcher, ormFactory := setupL1Watcher(t)
	defer ormFactory.Close()
	commitLogs := []geth_types.Log{
		{
			Topics:      []common.Hash{bridge_abi.L1CommitBatchEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}
	convey.Convey("unpack CommitBatch log failure", t, func() {
		unpackErr := errors.New("UnpackLog CommitBatch failure")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
			return unpackErr
		})
		defer patches.Reset()
		l2Messages, relayedMessages, rollupEvents, err := l1Watcher.parseBridgeEventLogs(commitLogs)
		assert.EqualError(t, err, unpackErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
		assert.Empty(t, rollupEvents)
	})
	convey.Convey("L1CommitBatchEventSignature success", t, func() {
		wantHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
			evt := out.(*bridge_abi.L1CommitBatchEvent)
			evt.BatchHash = wantHash
			return nil
		})
		defer patches.Reset()
		l2Messages, relayedMessages, rollupEvents, err := l1Watcher.parseBridgeEventLogs(commitLogs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
		assert.Len(t, rollupEvents, 1)
		assert.Equal(t, rollupEvents[0].batchHash, wantHash)
		assert.Equal(t, rollupEvents[0].status, commonTypes.RollupCommitted)
	})
}
// testParseBridgeEventLogsL1FinalizeBatchEventSignature covers
// parseBridgeEventLogs for a FinalizeBatch log: a failing ABI unpack, then a
// successful unpack yielding one rollup event in the finalized state.
func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
	l1Watcher, ormFactory := setupL1Watcher(t)
	defer ormFactory.Close()
	finalizeLogs := []geth_types.Log{
		{
			Topics:      []common.Hash{bridge_abi.L1FinalizeBatchEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}
	convey.Convey("unpack FinalizeBatch log failure", t, func() {
		unpackErr := errors.New("UnpackLog FinalizeBatch failure")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
			return unpackErr
		})
		defer patches.Reset()
		l2Messages, relayedMessages, rollupEvents, err := l1Watcher.parseBridgeEventLogs(finalizeLogs)
		assert.EqualError(t, err, unpackErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
		assert.Empty(t, rollupEvents)
	})
	convey.Convey("L1FinalizeBatchEventSignature success", t, func() {
		wantHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
			evt := out.(*bridge_abi.L1FinalizeBatchEvent)
			evt.BatchHash = wantHash
			return nil
		})
		defer patches.Reset()
		l2Messages, relayedMessages, rollupEvents, err := l1Watcher.parseBridgeEventLogs(finalizeLogs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
		assert.Len(t, rollupEvents, 1)
		assert.Equal(t, rollupEvents[0].batchHash, wantHash)
		assert.Equal(t, rollupEvents[0].status, commonTypes.RollupFinalized)
	})
}

View File

@@ -3,22 +3,28 @@ package watcher
import (
"context"
"crypto/ecdsa"
"errors"
"math/big"
"strconv"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"
"scroll-tech/bridge/utils"
cutils "scroll-tech/common/utils"
@@ -26,6 +32,17 @@ import (
"scroll-tech/database/migrate"
)
// setupL2Watcher builds an L2WatcherClient backed by a freshly reset test
// database, using the shared package-level test config (cfg) and L2 client
// (l2Cli).
//
// Bug fix: the original used `defer db.Close()`, which closed the database
// as soon as this helper returned — the returned watcher was then holding a
// dead connection for the whole test. The close is now deferred to test
// teardown via t.Cleanup, so the DB stays open for the watcher's lifetime.
func setupL2Watcher(t *testing.T) *L2WatcherClient {
	db, err := database.NewOrmFactory(cfg.DBConfig)
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
	t.Cleanup(func() {
		// Best-effort close at test end, matching the original's
		// ignore-the-error behavior.
		db.Close()
	})

	l2cfg := cfg.L2Config
	watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
	return watcher
}
func testCreateNewWatcherAndStop(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
@@ -254,3 +271,172 @@ func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.Privat
// loopToFetchEvent starts a background goroutine that calls
// watcher.FetchContractEvent every 2 seconds via cutils.Loop.
// NOTE(review): the goroutine's lifetime is bounded only by subCtx —
// callers must cancel the context to stop it, or it leaks past the test.
func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
go cutils.Loop(subCtx, 2*time.Second, watcher.FetchContractEvent)
}
// testParseBridgeEventLogsL2SentMessageEventSignature exercises
// parseBridgeEventLogs on a log carrying the L2 SentMessage event topic:
// first the UnpackLog failure path, then the path where unpacking succeeds.
func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
	watcher := setupL2Watcher(t)
	logs := []geth_types.Log{
		{
			Topics:      []common.Hash{bridge_abi.L2SentMessageEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}

	convey.Convey("unpack SentMessage log failure", t, func() {
		wantErr := errors.New("UnpackLog SentMessage failure")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			return wantErr
		})
		defer patches.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, wantErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

	convey.Convey("L2SentMessageEventSignature success", t, func() {
		var (
			sender  = common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
			target  = common.HexToAddress("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
			value   = big.NewInt(1000)
			nonce   = big.NewInt(100)
			payload = []byte("test for L2SentMessageEventSignature")
		)
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			ev := out.(*bridge_abi.L2SentMessageEvent)
			ev.Sender = sender
			ev.Value = value
			ev.Target = target
			ev.MessageNonce = nonce
			ev.Message = payload
			return nil
		})
		defer patches.Reset()

		// NOTE(review): even with a successful unpack, parseBridgeEventLogs
		// is expected to return an error here — presumably from downstream
		// processing of the message; this test only pins err != nil.
		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.Error(t, err)
		assert.Empty(t, relayedMessages)
		assert.Empty(t, l2Messages)
	})
}
// testParseBridgeEventLogsL2RelayedMessageEventSignature exercises
// parseBridgeEventLogs on a log carrying the L2 RelayedMessage event topic:
// the UnpackLog failure path, and the success path which must surface the
// relayed message hash.
func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
	watcher := setupL2Watcher(t)
	logs := []geth_types.Log{
		{
			Topics:      []common.Hash{bridge_abi.L2RelayedMessageEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}

	convey.Convey("unpack RelayedMessage log failure", t, func() {
		wantErr := errors.New("UnpackLog RelayedMessage failure")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			return wantErr
		})
		defer patches.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, wantErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

	convey.Convey("L2RelayedMessageEventSignature success", t, func() {
		wantHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			ev := out.(*bridge_abi.L2RelayedMessageEvent)
			ev.MessageHash = wantHash
			return nil
		})
		defer patches.Reset()

		// One relayed message comes back carrying the injected hash.
		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Len(t, relayedMessages, 1)
		assert.Equal(t, relayedMessages[0].msgHash, wantHash)
	})
}
// testParseBridgeEventLogsL2FailedRelayedMessageEventSignature exercises
// parseBridgeEventLogs on a log carrying the L2 FailedRelayedMessage event
// topic: the UnpackLog failure path, and the success path which — like the
// relayed case — must surface the message hash.
func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) {
	watcher := setupL2Watcher(t)
	logs := []geth_types.Log{
		{
			Topics:      []common.Hash{bridge_abi.L2FailedRelayedMessageEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}

	convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
		wantErr := errors.New("UnpackLog FailedRelayedMessage failure")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			return wantErr
		})
		defer patches.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, wantErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

	convey.Convey("L2FailedRelayedMessageEventSignature success", t, func() {
		wantHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			ev := out.(*bridge_abi.L2FailedRelayedMessageEvent)
			ev.MessageHash = wantHash
			return nil
		})
		defer patches.Reset()

		// A failed relay still yields one relayedMessages entry with the hash.
		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Len(t, relayedMessages, 1)
		assert.Equal(t, relayedMessages[0].msgHash, wantHash)
	})
}
// testParseBridgeEventLogsL2AppendMessageEventSignature exercises
// parseBridgeEventLogs on a log carrying the L2 AppendMessage event topic:
// the UnpackLog failure path, and the success path. Note that a successful
// AppendMessage unpack produces neither an l2Messages nor a relayedMessages
// entry — both result slices stay empty.
func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
	watcher := setupL2Watcher(t)
	logs := []geth_types.Log{
		{
			Topics:      []common.Hash{bridge_abi.L2AppendMessageEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}

	convey.Convey("unpack AppendMessage log failure", t, func() {
		wantErr := errors.New("UnpackLog AppendMessage failure")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			return wantErr
		})
		defer patches.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, wantErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

	convey.Convey("L2AppendMessageEventSignature success", t, func() {
		wantHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		patches := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log geth_types.Log) error {
			ev := out.(*bridge_abi.L2AppendMessageEvent)
			ev.MessageHash = wantHash
			ev.Index = big.NewInt(100)
			return nil
		})
		defer patches.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})
}

View File

@@ -77,13 +77,26 @@ func TestFunction(t *testing.T) {
if err := setupEnv(t); err != nil {
t.Fatal(err)
}
// Run l1 watcher test cases.
t.Run("TestStartWatcher", testStartWatcher)
t.Run("TestStartWatcher", testFetchContractEvent)
t.Run("TestL1WatcherClientFetchBlockHeader", testL1WatcherClientFetchBlockHeader)
t.Run("TestL1WatcherClientFetchContractEvent", testL1WatcherClientFetchContractEvent)
t.Run("TestParseBridgeEventLogsL1QueueTransactionEventSignature", testParseBridgeEventLogsL1QueueTransactionEventSignature)
t.Run("TestParseBridgeEventLogsL1RelayedMessageEventSignature", testParseBridgeEventLogsL1RelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL1FailedRelayedMessageEventSignature", testParseBridgeEventLogsL1FailedRelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL1CommitBatchEventSignature", testParseBridgeEventLogsL1CommitBatchEventSignature)
t.Run("TestParseBridgeEventLogsL1FinalizeBatchEventSignature", testParseBridgeEventLogsL1FinalizeBatchEventSignature)
// Run l2 watcher test cases.
t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)
t.Run("TestParseBridgeEventLogsL2SentMessageEventSignature", testParseBridgeEventLogsL2SentMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2RelayedMessageEventSignature", testParseBridgeEventLogsL2RelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2FailedRelayedMessageEventSignature", testParseBridgeEventLogsL2FailedRelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2AppendMessageEventSignature", testParseBridgeEventLogsL2AppendMessageEventSignature)
// Run batch proposer test cases.
t.Run("TestBatchProposerProposeBatch", testBatchProposerProposeBatch)

View File

@@ -1,4 +1,4 @@
FROM scrolltech/l2geth:prealpha-v5.1
FROM scrolltech/l2geth:scroll-v3.1.4
RUN mkdir -p /l2geth/keystore

View File

@@ -16,7 +16,13 @@
"period": 3,
"epoch": 30000
},
"zktrie": true
"scroll": {
"useZktrie": true,
"maxTxPerBlock": 44,
"feeVaultAddress": "0x5300000000000000000000000000000000000005",
"enableEIP2718": false,
"enableEIP1559": false
}
},
"nonce": "0x0",
"timestamp": "0x61bc34a0",
@@ -50,5 +56,5 @@
"number": "0x0",
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"baseFeePerGas": "0x2710"
"baseFeePerGas": null
}

View File

@@ -5,6 +5,8 @@ import (
"database/sql"
"fmt"
"time"
"scroll-tech/common/types/message"
)
// L1BlockStatus represents current l1 block processing status
@@ -163,6 +165,7 @@ type SessionInfo struct {
Rollers map[string]*RollerStatus `json:"rollers"`
StartTimestamp int64 `json:"start_timestamp"`
Attempts uint8 `json:"attempts,omitempty"`
ProveType message.ProveType `json:"prove_type,omitempty"`
}
// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
@@ -256,3 +259,16 @@ type BlockBatch struct {
CommittedAt *time.Time `json:"committed_at" db:"committed_at"`
FinalizedAt *time.Time `json:"finalized_at" db:"finalized_at"`
}
// AggTask is a wrapper type around db AggProveTask type.
type AggTask struct {
ID string `json:"id" db:"id"`
StartBatchIndex uint64 `json:"start_batch_index" db:"start_batch_index"`
StartBatchHash string `json:"start_batch_hash" db:"start_batch_hash"`
EndBatchIndex uint64 `json:"end_batch_index" db:"end_batch_index"`
EndBatchHash string `json:"end_batch_hash" db:"end_batch_hash"`
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
Proof []byte `json:"proof" db:"proof"`
CreatedTime *time.Time `json:"created_time" db:"created_time"`
UpdatedTime *time.Time `json:"updated_time" db:"updated_time"`
}

View File

@@ -22,6 +22,27 @@ const (
StatusProofError
)
// ProveType represents the type of roller.
type ProveType uint8
func (r ProveType) String() string {
switch r {
case BasicProve:
return "Basic Prove"
case AggregatorProve:
return "Aggregator Prove"
default:
return "Illegal Prove type"
}
}
const (
// BasicProve is default roller, it only generates zk proof from traces.
BasicProve ProveType = iota
// AggregatorProve generates zk proof from other zk proofs and aggregate them into one proof.
AggregatorProve
)
// AuthMsg is the first message exchanged from the Roller to the Sequencer.
// It effectively acts as a registration, and makes the Roller identification
// known to the Sequencer.
@@ -36,6 +57,8 @@ type AuthMsg struct {
type Identity struct {
// Roller name
Name string `json:"name"`
// Roller RollerType
RollerType ProveType `json:"roller_type,omitempty"`
// Unverified Unix timestamp of message creation
Timestamp uint32 `json:"timestamp"`
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version.
@@ -178,14 +201,19 @@ func (a *ProofMsg) PublicKey() (string, error) {
// TaskMsg is a wrapper type around db ProveTask type.
type TaskMsg struct {
ID string `json:"id"`
Traces []*types.BlockTrace `json:"blockTraces"`
ID string `json:"id"`
Type ProveType `json:"type,omitempty"`
// Only basic rollers need traces, aggregator rollers don't!
Traces []*types.BlockTrace `json:"blockTraces,omitempty"`
// Only aggregator rollers need proofs to aggregate, basic rollers don't!
SubProofs [][]byte `json:"sub_proofs,omitempty"`
}
// ProofDetail is the message received from rollers that contains zk proof, the status of
// the proof generation succeeded, and an error message if proof generation failed.
type ProofDetail struct {
ID string `json:"id"`
Type ProveType `json:"type,omitempty"`
Status RespStatus `json:"status"`
Proof *AggProof `json:"proof"`
Error string `json:"error,omitempty"`

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v3.0.12"
var tag = "v3.0.14"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -216,12 +216,7 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, _to, _value, _messageNonce, _message);
// compute and deduct the messaging fee to fee vault.
uint256 _fee = IL1MessageQueue(_messageQueue).estimateCrossDomainMessageFee(
address(this),
_counterpart,
_xDomainCalldata,
_gasLimit
);
uint256 _fee = IL1MessageQueue(_messageQueue).estimateCrossDomainMessageFee(_gasLimit);
require(msg.value >= _fee + _value, "Insufficient msg.value");
if (_fee > 0) {
(bool _success, ) = feeVault.call{value: _fee}("");

View File

@@ -36,16 +36,8 @@ interface IL1MessageQueue {
function getCrossDomainMessage(uint256 queueIndex) external view returns (bytes32);
/// @notice Return the amount of ETH should pay for cross domain message.
/// @param sender The address of account who initiates the message in L1.
/// @param target The address of account who will recieve the message in L2.
/// @param message The content of the message.
/// @param gasLimit Gas limit required to complete the message relay on L2.
function estimateCrossDomainMessageFee(
address sender,
address target,
bytes memory message,
uint256 gasLimit
) external view returns (uint256);
function estimateCrossDomainMessageFee(uint256 gasLimit) external view returns (uint256);
/*****************************
* Public Mutating Functions *

View File

@@ -4,13 +4,6 @@ pragma solidity ^0.8.0;
interface IL2GasPriceOracle {
/// @notice Estimate fee for cross chain message call.
/// @param _sender The address of sender who invoke the call.
/// @param _to The target address to receive the call.
/// @param _message The message will be passed to the target address.
function estimateCrossDomainMessageFee(
address _sender,
address _to,
bytes memory _message,
uint256 _gasLimit
) external view returns (uint256);
/// @param _gasLimit Gas limit required to complete the message relay on L2.
function estimateCrossDomainMessageFee(uint256 _gasLimit) external view returns (uint256);
}

View File

@@ -61,15 +61,10 @@ contract L1MessageQueue is OwnableUpgradeable, IL1MessageQueue {
}
/// @inheritdoc IL1MessageQueue
function estimateCrossDomainMessageFee(
address _sender,
address _target,
bytes memory _message,
uint256 _gasLimit
) external view override returns (uint256) {
function estimateCrossDomainMessageFee(uint256 _gasLimit) external view override returns (uint256) {
address _oracle = gasOracle;
if (_oracle == address(0)) return 0;
return IL2GasPriceOracle(_oracle).estimateCrossDomainMessageFee(_sender, _target, _message, _gasLimit);
return IL2GasPriceOracle(_oracle).estimateCrossDomainMessageFee(_gasLimit);
}
/*****************************

View File

@@ -18,43 +18,14 @@ contract L2GasPriceOracle is OwnableUpgradeable, IL2GasPriceOracle {
/// @param _newWhitelist The address of new whitelist contract.
event UpdateWhitelist(address _oldWhitelist, address _newWhitelist);
/// @notice Emitted when current fee overhead is updated.
/// @param overhead The current fee overhead updated.
event OverheadUpdated(uint256 overhead);
/// @notice Emitted when current fee scalar is updated.
/// @param scalar The current fee scalar updated.
event ScalarUpdated(uint256 scalar);
/// @notice Emitted when current l2 base fee is updated.
/// @param l2BaseFee The current l2 base fee updated.
event L2BaseFeeUpdated(uint256 l2BaseFee);
/*************
* Constants *
*************/
/// @dev The precision used in the scalar.
uint256 private constant PRECISION = 1e9;
/// @dev The maximum possible l1 fee overhead.
/// Computed based on current l1 block gas limit.
uint256 private constant MAX_OVERHEAD = 30000000 / 16;
/// @dev The maximum possible l1 fee scale.
/// x1000 should be enough.
uint256 private constant MAX_SCALE = 1000 * PRECISION;
/*************
* Variables *
*************/
/// @notice The current l1 fee overhead.
uint256 public overhead;
/// @notice The current l1 fee scalar.
uint256 public scalar;
/// @notice The latest known l2 base fee.
uint256 public l2BaseFee;
@@ -73,46 +44,9 @@ contract L2GasPriceOracle is OwnableUpgradeable, IL2GasPriceOracle {
* Public View Functions *
*************************/
/// @notice Return the current l1 base fee.
function l1BaseFee() public view returns (uint256) {
return block.basefee;
}
/// @inheritdoc IL2GasPriceOracle
function estimateCrossDomainMessageFee(
address,
address,
bytes memory _message,
uint256 _gasLimit
) external view override returns (uint256) {
unchecked {
uint256 _l1GasUsed = getL1GasUsed(_message);
uint256 _rollupFee = (_l1GasUsed * l1BaseFee() * scalar) / PRECISION;
uint256 _l2Fee = _gasLimit * l2BaseFee;
return _l2Fee + _rollupFee;
}
}
/// @notice Computes the amount of L1 gas used for a transaction. Adds the overhead which
/// represents the per-transaction gas overhead of posting the transaction and state
/// roots to L1. Adds 68 bytes of padding to account for the fact that the input does
/// not have a signature.
/// @param _data Unsigned fully RLP-encoded transaction to get the L1 gas for.
/// @return Amount of L1 gas used to publish the transaction.
function getL1GasUsed(bytes memory _data) public view returns (uint256) {
uint256 _total = 0;
uint256 _length = _data.length;
unchecked {
for (uint256 i = 0; i < _length; i++) {
if (_data[i] == 0) {
_total += 4;
} else {
_total += 16;
}
}
uint256 _unsigned = _total + overhead;
return _unsigned + (68 * 16);
}
function estimateCrossDomainMessageFee(uint256 _gasLimit) external view override returns (uint256) {
return _gasLimit * l2BaseFee;
}
/*****************************
@@ -133,24 +67,6 @@ contract L2GasPriceOracle is OwnableUpgradeable, IL2GasPriceOracle {
* Restricted Functions *
************************/
/// @notice Allows the owner to modify the overhead.
/// @param _overhead New overhead
function setOverhead(uint256 _overhead) external onlyOwner {
require(_overhead <= MAX_OVERHEAD, "exceed maximum overhead");
overhead = _overhead;
emit OverheadUpdated(_overhead);
}
/// Allows the owner to modify the scalar.
/// @param _scalar The new scalar
function setScalar(uint256 _scalar) external onlyOwner {
require(_scalar <= MAX_SCALE, "exceed maximum scale");
scalar = _scalar;
emit ScalarUpdated(_scalar);
}
/// @notice Update whitelist contract.
/// @dev This function can only called by contract owner.
/// @param _newWhitelist The address of new whitelist contract.

View File

@@ -264,6 +264,13 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
emit CommitBatch(publicInputHash);
// As we are getting close to sunsetting the Alpha testnet,
// we now auto-finalize every batch to enable withdrawals.
lastFinalizedBatchHash = publicInputHash;
finalizedBatches[_batch.batchIndex] = publicInputHash;
_batchInStorage.finalized = true;
emit FinalizeBatch(publicInputHash);
return publicInputHash;
}

View File

@@ -21,11 +21,10 @@ import (
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils/workerpool"
"scroll-tech/database"
"scroll-tech/common/utils/workerpool"
"scroll-tech/coordinator/config"
"scroll-tech/coordinator/verifier"
)
@@ -52,6 +51,7 @@ const (
type rollerProofStatus struct {
id string
typ message.ProveType
pk string
status types.RollerProveStatus
}
@@ -162,14 +162,30 @@ func (m *Manager) isRunning() bool {
// Loop keeps the manager running.
func (m *Manager) Loop() {
var (
tick = time.NewTicker(time.Second * 2)
tasks []*types.BlockBatch
tick = time.NewTicker(time.Second * 2)
tasks []*types.BlockBatch
aggTasks []*types.AggTask
)
defer tick.Stop()
for {
select {
case <-tick.C:
// load and send aggregator tasks
if len(aggTasks) == 0 && m.orm != nil {
var err error
aggTasks, err = m.orm.GetUnassignedAggTasks()
if err != nil {
log.Error("failed to get unassigned aggregator proving tasks", "error", err)
continue
}
}
// Select aggregator type roller and send message
for len(aggTasks) > 0 && m.StartAggProofGenerationSession(aggTasks[0], nil) {
aggTasks = aggTasks[1:]
}
// load and send basic tasks
if len(tasks) == 0 && m.orm != nil {
var err error
// TODO: add cache
@@ -178,15 +194,15 @@ func (m *Manager) Loop() {
fmt.Sprintf(
"ORDER BY index %s LIMIT %d;",
m.cfg.OrderSession,
m.GetNumberOfIdleRollers(),
m.GetNumberOfIdleRollers(message.BasicProve),
),
); err != nil {
log.Error("failed to get unassigned proving tasks", "error", err)
log.Error("failed to get unassigned basic proving tasks", "error", err)
continue
}
}
// Select roller and send message
for len(tasks) > 0 && m.StartProofGenerationSession(tasks[0], nil) {
// Select basic type roller and send message
for len(tasks) > 0 && m.StartBasicProofGenerationSession(tasks[0], nil) {
tasks = tasks[1:]
}
case <-m.ctx.Done():
@@ -209,31 +225,51 @@ func (m *Manager) restorePrevSessions() {
m.mu.Lock()
defer m.mu.Unlock()
if hashes, err := m.orm.GetAssignedBatchHashes(); err != nil {
log.Error("failed to get assigned batch hashes from db", "error", err)
} else if prevSessions, err := m.orm.GetSessionInfosByHashes(hashes); err != nil {
log.Error("failed to recover roller session info from db", "error", err)
} else {
for _, v := range prevSessions {
sess := &session{
info: v,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.sessions[sess.info.ID] = sess
log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
for _, roller := range sess.info.Rollers {
log.Info(
"restore roller info for session",
"session id", sess.info.ID,
"roller name", roller.Name,
"public key", roller.PublicKey,
"proof status", roller.Status)
}
go m.CollectProofs(sess)
}
var hashes []string
// load assigned aggregator tasks from db
aggTasks, err := m.orm.GetAssignedAggTasks()
if err != nil {
log.Error("failed to load assigned aggregator tasks from db", "error", err)
return
}
for _, aggTask := range aggTasks {
hashes = append(hashes, aggTask.ID)
}
// load assigned basic tasks from db
batchHashes, err := m.orm.GetAssignedBatchHashes()
if err != nil {
log.Error("failed to get assigned batch batchHashes from db", "error", err)
return
}
hashes = append(hashes, batchHashes...)
prevSessions, err := m.orm.GetSessionInfosByHashes(hashes)
if err != nil {
log.Error("failed to recover roller session info from db", "error", err)
return
}
for _, v := range prevSessions {
sess := &session{
info: v,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.sessions[sess.info.ID] = sess
log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
for _, roller := range sess.info.Rollers {
log.Info(
"restore roller info for session",
"session id", sess.info.ID,
"roller name", roller.Name,
"prove type", sess.info.ProveType,
"public key", roller.PublicKey,
"proof status", roller.Status)
}
go m.CollectProofs(sess)
}
}
// HandleZkProof handle a ZkProof submitted from a roller.
@@ -258,7 +294,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
// Ensure this roller is eligible to participate in the session.
roller, ok := sess.info.Rollers[pk]
if !ok {
return fmt.Errorf("roller %s (%s) is not eligible to partake in proof session %v", roller.Name, roller.PublicKey, msg.ID)
return fmt.Errorf("roller %s %s (%s) is not eligible to partake in proof session %v", roller.Name, sess.info.ProveType, roller.PublicKey, msg.ID)
}
if roller.Status == types.RollerProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
@@ -269,6 +305,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
"roller has already submitted valid proof in proof session",
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"prove type", sess.info.ProveType,
"proof id", msg.ID,
)
return nil
@@ -278,6 +315,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
"proof id", msg.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"prove type", sess.info.ProveType,
"proof time", proofTimeSec,
)
@@ -285,8 +323,15 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
// TODO: maybe we should use db tx for the whole process?
// Roll back current proof's status.
if dbErr != nil {
if err := m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset task status as Unassigned", "msg.ID", msg.ID)
if msg.Type == message.BasicProve {
if err := m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset basic task status as Unassigned", "msg.ID", msg.ID)
}
}
if msg.Type == message.AggregatorProve {
if err := m.orm.UpdateAggTaskStatus(msg.ID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset aggregator task status as Unassigned", "msg.ID", msg.ID)
}
}
}
// set proof status
@@ -295,7 +340,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
status = types.RollerProofValid
}
// notify the session that the roller finishes the proving process
sess.finishChan <- rollerProofStatus{msg.ID, pk, status}
sess.finishChan <- rollerProofStatus{msg.ID, msg.Type, pk, status}
}()
if msg.Status != message.StatusOk {
@@ -306,6 +351,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
"proof id", msg.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"prove type", msg.Type,
"proof time", proofTimeSec,
"error", msg.Error,
)
@@ -313,45 +359,67 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
}
// store proof content
if dbErr = m.orm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof.Proof, msg.Proof.FinalPair, proofTimeSec); dbErr != nil {
log.Error("failed to store proof into db", "error", dbErr)
return dbErr
if msg.Type == message.BasicProve {
if dbErr = m.orm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof.Proof, msg.Proof.FinalPair, proofTimeSec); dbErr != nil {
log.Error("failed to store basic proof into db", "error", dbErr)
return dbErr
}
if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskProved); dbErr != nil {
log.Error("failed to update basic task status as proved", "error", dbErr)
return dbErr
}
}
if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskProved); dbErr != nil {
log.Error("failed to update task status as proved", "error", dbErr)
return dbErr
if msg.Type == message.AggregatorProve {
if dbErr = m.orm.UpdateProofForAggTask(msg.ID, msg.Proof); dbErr != nil {
log.Error("failed to store aggregator proof into db", "error", dbErr)
return dbErr
}
}
coordinatorProofsReceivedTotalCounter.Inc(1)
var verifyErr error
// TODO: wrap both basic verifier and aggregator verifier
success, verifyErr = m.verifyProof(msg.Proof)
if verifyErr != nil {
// TODO: this is only a temp workaround for testnet, we should return err in real cases
success = false
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof time", proofTimeSec, "error", verifyErr)
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
// TODO: Roller needs to be slashed if proof is invalid.
}
if success {
if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskVerified); dbErr != nil {
log.Error(
"failed to update proving_status",
"msg.ID", msg.ID,
"status", types.ProvingTaskVerified,
"error", dbErr)
return dbErr
if msg.Type == message.AggregatorProve {
if dbErr = m.orm.UpdateAggTaskStatus(msg.ID, types.ProvingTaskVerified); dbErr != nil {
log.Error(
"failed to update aggregator proving_status",
"msg.ID", msg.ID,
"status", types.ProvingTaskVerified,
"error", dbErr)
return dbErr
}
}
if msg.Type == message.BasicProve {
if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskVerified); dbErr != nil {
log.Error(
"failed to update basic proving_status",
"msg.ID", msg.ID,
"status", types.ProvingTaskVerified,
"error", dbErr)
return dbErr
}
}
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(roller.PublicKey, proofTime)
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof time", proofTimeSec)
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec)
} else {
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
m.updateMetricRollerProofsVerifiedFailedTimeTimer(roller.PublicKey, proofTime)
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof time", proofTimeSec, "error", verifyErr)
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
}
return nil
}
@@ -367,7 +435,13 @@ func (m *Manager) CollectProofs(sess *session) {
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
// Check if session can be replayed
if sess.info.Attempts < m.cfg.SessionAttempts {
if m.StartProofGenerationSession(nil, sess) {
var success bool
if sess.info.ProveType == message.AggregatorProve {
success = m.StartAggProofGenerationSession(nil, sess)
} else if sess.info.ProveType == message.BasicProve {
success = m.StartBasicProofGenerationSession(nil, sess)
}
if success {
m.mu.Lock()
for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
@@ -385,9 +459,17 @@ func (m *Manager) CollectProofs(sess *session) {
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
if err := m.orm.UpdateProvingStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", sess.info.ID, "err", err)
if sess.info.ProveType == message.BasicProve {
if err := m.orm.UpdateProvingStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset basic task_status as Unassigned", "id", sess.info.ID, "err", err)
}
}
if sess.info.ProveType == message.AggregatorProve {
if err := m.orm.UpdateAggTaskStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset aggregator task_status as Unassigned", "id", sess.info.ID, "err", err)
}
}
m.mu.Lock()
for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
@@ -402,9 +484,17 @@ func (m *Manager) CollectProofs(sess *session) {
m.mu.Lock()
sess.info.Rollers[ret.pk].Status = ret.status
if sess.isSessionFailed() {
if err := m.orm.UpdateProvingStatus(ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update proving_status as failed", "msg.ID", ret.id, "error", err)
if ret.typ == message.BasicProve {
if err := m.orm.UpdateProvingStatus(ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update basic proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
if ret.typ == message.AggregatorProve {
if err := m.orm.UpdateAggTaskStatus(ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update aggregator proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}
if err := m.orm.SetSessionInfo(sess.info); err != nil {
@@ -478,20 +568,21 @@ func (m *Manager) APIs() []rpc.API {
}
}
// StartProofGenerationSession starts a proof generation session
func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSession *session) (success bool) {
// StartBasicProofGenerationSession starts a basic proof generation session
func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevSession *session) (success bool) {
var taskId string
if task != nil {
taskId = task.Hash
} else {
taskId = prevSession.info.ID
}
if m.GetNumberOfIdleRollers() == 0 {
log.Warn("no idle roller when starting proof generation session", "id", taskId)
if m.GetNumberOfIdleRollers(message.BasicProve) == 0 {
log.Warn("no idle basic roller when starting proof generation session", "id", taskId)
return false
}
log.Info("start proof generation session", "id", taskId)
log.Info("start basic proof generation session", "id", taskId)
defer func() {
if !success {
if task != nil {
@@ -530,17 +621,17 @@ func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSessio
}
}
// Dispatch task to rollers.
// Dispatch task to basic rollers.
rollers := make(map[string]*types.RollerStatus)
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller()
roller := m.selectRoller(message.BasicProve)
if roller == nil {
log.Info("selectRoller returns nil")
break
}
log.Info("roller is picked", "session id", taskId, "name", roller.Name, "public key", roller.PublicKey)
// send trace to roller
if !roller.sendTask(taskId, traces) {
if !roller.sendTask(&message.TaskMsg{ID: taskId, Type: message.BasicProve, Traces: traces}) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskId)
continue
}
@@ -549,7 +640,7 @@ func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSessio
}
// No roller assigned.
if len(rollers) == 0 {
log.Error("no roller assigned", "id", taskId, "number of idle rollers", m.GetNumberOfIdleRollers())
log.Error("no roller assigned", "id", taskId, "number of idle basic rollers", m.GetNumberOfIdleRollers(message.BasicProve))
return false
}
@@ -564,6 +655,7 @@ func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSessio
info: &types.SessionInfo{
ID: taskId,
Rollers: rollers,
ProveType: message.BasicProve,
StartTimestamp: time.Now().Unix(),
Attempts: 1,
},
@@ -577,6 +669,114 @@ func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSessio
log.Info(
"assigned proof to roller",
"session id", sess.info.ID,
"session type", sess.info.ProveType,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof status", roller.Status)
}
// Store session info.
if err = m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "session id", sess.info.ID, "error", err)
return false
}
m.mu.Lock()
m.sessions[taskId] = sess
m.mu.Unlock()
go m.CollectProofs(sess)
return true
}
// StartAggProofGenerationSession starts an aggregator proof generation.
func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSession *session) (success bool) {
var taskId string
if task != nil {
taskId = task.ID
} else {
taskId = prevSession.info.ID
}
if m.GetNumberOfIdleRollers(message.AggregatorProve) == 0 {
log.Warn("no idle common roller when starting proof generation session", "id", taskId)
return false
}
log.Info("start aggregator proof generation session", "id", taskId)
defer func() {
if !success {
if task != nil {
if err := m.orm.UpdateAggTaskStatus(taskId, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", taskId, "err", err)
} else if err := m.orm.UpdateAggTaskStatus(taskId, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Failed", "id", taskId, "err", err)
}
}
}
}()
// get agg task from db
subProofs, err := m.orm.GetSubProofsByAggTaskID(taskId)
if err != nil {
log.Error("failed to get sub proofs for aggregator task", "id", taskId, "error", err)
return false
}
// Dispatch task to basic rollers.
rollers := make(map[string]*types.RollerStatus)
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.AggregatorProve)
if roller == nil {
log.Info("selectRoller returns nil")
break
}
log.Info("roller is picked", "session id", taskId, "name", roller.Name, "type", roller.Type, "public key", roller.PublicKey)
// send trace to roller
if !roller.sendTask(&message.TaskMsg{
ID: taskId,
Type: message.AggregatorProve,
SubProofs: subProofs,
}) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskId)
continue
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
}
// No roller assigned.
if len(rollers) == 0 {
log.Error("no roller assigned", "id", taskId, "number of idle aggregator rollers", m.GetNumberOfIdleRollers(message.AggregatorProve))
return false
}
// Update session proving status as assigned.
if err = m.orm.UpdateAggTaskStatus(taskId, types.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", taskId, "err", err)
return false
}
// Create a proof generation session.
sess := &session{
info: &types.SessionInfo{
ID: taskId,
Rollers: rollers,
ProveType: message.AggregatorProve,
StartTimestamp: time.Now().Unix(),
Attempts: 1,
},
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
if prevSession != nil {
sess.info.Attempts += prevSession.info.Attempts
}
for _, roller := range sess.info.Rollers {
log.Info(
"assigned proof to roller",
"session id", sess.info.ID,
"session type", sess.info.ProveType,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof status", roller.Status)
@@ -607,7 +807,7 @@ func (m *Manager) VerifyToken(authMsg *message.AuthMsg) (bool, error) {
pubkey, _ := authMsg.PublicKey()
// GetValue returns nil if value is expired
if token, ok := m.tokenCache.Get(pubkey); !ok || token != authMsg.Identity.Token {
return false, errors.New("failed to find corresponding token")
return false, fmt.Errorf("failed to find corresponding token. roller name: %s. roller pk: %s.", authMsg.Identity.Name, pubkey)
}
return true, nil
}

View File

@@ -111,7 +111,7 @@ func testHandshake(t *testing.T) {
roller := newMockRoller(t, "roller_test", wsURL)
defer roller.close()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
}
func testFailedHandshake(t *testing.T) {
@@ -174,7 +174,7 @@ func testFailedHandshake(t *testing.T) {
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
assert.Error(t, err)
assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
}
func testSeveralConnections(t *testing.T) {
@@ -204,7 +204,7 @@ func testSeveralConnections(t *testing.T) {
assert.NoError(t, eg.Wait())
// check roller's idle connections
assert.Equal(t, batch, rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, batch, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
// close connection
for _, roller := range rollers {
@@ -218,7 +218,7 @@ func testSeveralConnections(t *testing.T) {
for {
select {
case <-tick:
if rollerManager.GetNumberOfIdleRollers() == 0 {
if rollerManager.GetNumberOfIdleRollers(message.BasicProve) == 0 {
return
}
case <-tickStop:
@@ -260,7 +260,7 @@ func testValidProof(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
@@ -318,7 +318,7 @@ func testInvalidProof(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
@@ -376,7 +376,7 @@ func testProofGeneratedFailed(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
@@ -428,7 +428,7 @@ func testTimedoutProof(t *testing.T) {
// close connection
roller1.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()
@@ -466,7 +466,7 @@ func testTimedoutProof(t *testing.T) {
// close connection
roller2.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.BasicProve))
// wait manager to finish first CollectProofs
<-time.After(60 * time.Second)
@@ -515,7 +515,7 @@ func testIdleRollerSelection(t *testing.T) {
}
}()
assert.Equal(t, len(rollers), rollerManager.GetNumberOfIdleRollers())
assert.Equal(t, len(rollers), rollerManager.GetNumberOfIdleRollers(message.BasicProve))
var hashes = make([]string, 1)
dbTx, err := l2db.Beginx()

View File

@@ -7,7 +7,6 @@ import (
"time"
cmap "github.com/orcaman/concurrent-map"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
@@ -20,6 +19,8 @@ import (
type rollerNode struct {
// Roller name
Name string
// Roller type
Type message.ProveType
// Roller public key
PublicKey string
// Roller version
@@ -36,13 +37,10 @@ type rollerNode struct {
*rollerMetrics
}
func (r *rollerNode) sendTask(id string, traces []*geth_types.BlockTrace) bool {
func (r *rollerNode) sendTask(msg *message.TaskMsg) bool {
select {
case r.taskChan <- &message.TaskMsg{
ID: id,
Traces: traces,
}:
r.TaskIDs.Set(id, struct{}{})
case r.taskChan <- msg:
r.TaskIDs.Set(msg.ID, struct{}{})
default:
log.Warn("roller channel is full", "roller name", r.Name, "public key", r.PublicKey)
return false
@@ -77,6 +75,7 @@ func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *m
}
node = &rollerNode{
Name: identity.Name,
Type: identity.RollerType,
Version: identity.Version,
PublicKey: pubkey,
TaskIDs: *taskIDs,
@@ -88,7 +87,7 @@ func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *m
roller := node.(*rollerNode)
// avoid reconnection too frequently.
if time.Since(roller.registerTime) < 60 {
log.Warn("roller reconnect too frequently", "roller_name", identity.Name, "public key", pubkey)
log.Warn("roller reconnect too frequently", "roller_name", identity.Name, "roller_type", identity.RollerType, "public key", pubkey)
return nil, fmt.Errorf("roller reconnect too frequently")
}
// update register time and status
@@ -117,11 +116,11 @@ func (m *Manager) freeTaskIDForRoller(pk string, id string) {
}
// GetNumberOfIdleRollers return the count of idle rollers.
func (m *Manager) GetNumberOfIdleRollers() (count int) {
func (m *Manager) GetNumberOfIdleRollers(rollerType message.ProveType) (count int) {
for _, pk := range m.rollerPool.Keys() {
if val, ok := m.rollerPool.Get(pk); ok {
r := val.(*rollerNode)
if r.TaskIDs.Count() == 0 {
if r.TaskIDs.Count() == 0 && r.Type == rollerType {
count++
}
}
@@ -129,13 +128,13 @@ func (m *Manager) GetNumberOfIdleRollers() (count int) {
return count
}
func (m *Manager) selectRoller() *rollerNode {
func (m *Manager) selectRoller(rollerType message.ProveType) *rollerNode {
pubkeys := m.rollerPool.Keys()
for len(pubkeys) > 0 {
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
if val, ok := m.rollerPool.Get(pubkeys[idx.Int64()]); ok {
r := val.(*rollerNode)
if r.TaskIDs.Count() == 0 {
if r.TaskIDs.Count() == 0 && r.Type == rollerType {
return r
}
}

View File

@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, 6, int(cur))
assert.Equal(t, 7, int(cur))
}
func testMigrate(t *testing.T) {

View File

@@ -0,0 +1,38 @@
-- +goose Up
-- +goose StatementBegin
-- agg_task stores one aggregation proving task per row; each task
-- aggregates the proofs of the contiguous batch range
-- [start_batch_index, end_batch_index].
create table agg_task
(
    -- Task identifier; also used as the proving session ID.
    id VARCHAR NOT NULL,
    -- First batch covered by this task (inclusive).
    start_batch_index BIGINT NOT NULL,
    start_batch_hash VARCHAR NOT NULL,
    -- Last batch covered by this task (inclusive).
    end_batch_index BIGINT NOT NULL,
    end_batch_hash VARCHAR NOT NULL,
    -- Defaults to 1, the initial (unassigned) proving status
    -- queried by GetUnassignedAggTasks.
    proving_status SMALLINT DEFAULT 1,
    -- Aggregation proof blob; NULL until the task is proved.
    proof BYTEA DEFAULT NULL,
    created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP
);
-- Enforce uniqueness of task IDs.
create unique index agg_task_hash_uindex
on agg_task (id);
-- Keep updated_time current on every row update.
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_time = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON agg_task FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists agg_task;
-- +goose StatementEnd

96
database/orm/agg_task.go Normal file
View File

@@ -0,0 +1,96 @@
package orm
import (
"encoding/json"
"github.com/jmoiron/sqlx"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
// aggTaskOrm implements AggTaskOrm on top of the agg_task table.
type aggTaskOrm struct {
	db *sqlx.DB
}
// Compile-time check that aggTaskOrm satisfies the AggTaskOrm interface.
var _ AggTaskOrm = (*aggTaskOrm)(nil)
// NewAggTaskOrm creates an AggTaskOrm instance backed by the given DB handle.
func NewAggTaskOrm(db *sqlx.DB) AggTaskOrm {
	orm := aggTaskOrm{db: db}
	return &orm
}
// GetSubProofsByAggTaskID returns the proofs of all verified block batches
// in the batch range covered by the given aggregation task.
func (a *aggTaskOrm) GetSubProofsByAggTaskID(id string) ([][]byte, error) {
	var (
		startIdx uint64
		endIdx   uint64
	)
	// Resolve the task's batch range first.
	row := a.db.QueryRow("SELECT start_batch_index, end_batch_index FROM agg_task where id = $1", id)
	if err := row.Scan(&startIdx, &endIdx); err != nil {
		return nil, err
	}
	rows, err := a.db.Queryx("SELECT proof FROM block_batch WHERE index>=$1 AND index<=$2 and proving_status = $3", startIdx, endIdx, types.ProvingTaskVerified)
	if err != nil {
		return nil, err
	}
	// Close the result set on every return path to release the connection.
	defer rows.Close()
	var subProofs [][]byte
	for rows.Next() {
		var proofByt []byte
		if err = rows.Scan(&proofByt); err != nil {
			return nil, err
		}
		subProofs = append(subProofs, proofByt)
	}
	// Surface any error that terminated the iteration early.
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return subProofs, nil
}
// GetUnassignedAggTasks returns all aggregation tasks that have not yet
// been assigned to a roller.
func (a *aggTaskOrm) GetUnassignedAggTasks() ([]*types.AggTask, error) {
	// Use the status constant rather than the magic literal 1 so the query
	// stays consistent with GetAssignedAggTasks and the ProvingStatus enum.
	rows, err := a.db.Queryx("SELECT * FROM agg_task where proving_status = $1;", types.ProvingTaskUnassigned)
	if err != nil {
		return nil, err
	}
	// Release the underlying connection once the rows are consumed.
	defer rows.Close()
	return a.rowsToAggTask(rows)
}
// GetAssignedAggTasks returns all aggregation tasks that are currently
// assigned to rollers or already proved (awaiting verification).
func (a *aggTaskOrm) GetAssignedAggTasks() ([]*types.AggTask, error) {
	rows, err := a.db.Queryx(`SELECT * FROM agg_task WHERE proving_status IN ($1, $2)`, types.ProvingTaskAssigned, types.ProvingTaskProved)
	if err != nil {
		return nil, err
	}
	// Release the underlying connection once the rows are consumed.
	defer rows.Close()
	return a.rowsToAggTask(rows)
}
// InsertAggTask stores a new aggregation task that covers the batch range
// [startBatchIndex, endBatchIndex]; the new row keeps the table's default
// proving status.
func (a *aggTaskOrm) InsertAggTask(id string, startBatchIndex uint64, startBatchHash string, endBatchIndex uint64, endBatchHash string) error {
	const insertSQL = "INSERT INTO agg_task (id, start_batch_index, start_batch_hash, end_batch_index, end_batch_hash) VALUES ($1, $2, $3, $4, $5)"
	_, err := a.db.Exec(insertSQL, id, startBatchIndex, startBatchHash, endBatchIndex, endBatchHash)
	return err
}
// UpdateAggTaskStatus sets the proving_status column of the given
// aggregation task.
func (a *aggTaskOrm) UpdateAggTaskStatus(aggTaskID string, status types.ProvingStatus) error {
	query := a.db.Rebind("update agg_task set proving_status = ? where id = ?;")
	_, err := a.db.Exec(query, status, aggTaskID)
	return err
}
// UpdateProofForAggTask persists the JSON-encoded aggregation proof and
// advances the task's status to ProvingTaskProved in a single statement.
func (a *aggTaskOrm) UpdateProofForAggTask(aggTaskID string, proof *message.AggProof) error {
	proofBytes, err := json.Marshal(proof)
	if err != nil {
		return err
	}
	query := a.db.Rebind("update agg_task set proving_status = ?, proof = ? where id = ?;")
	_, err = a.db.Exec(query, types.ProvingTaskProved, proofBytes, aggTaskID)
	return err
}
// rowsToAggTask scans every row of the result set into an AggTask slice.
// It closes rows before returning and reports any error that cut the
// iteration short.
func (a *aggTaskOrm) rowsToAggTask(rows *sqlx.Rows) ([]*types.AggTask, error) {
	// Ensure the result set is closed on every return path; Close is
	// idempotent, so callers may also defer their own Close.
	defer rows.Close()
	var tasks []*types.AggTask
	for rows.Next() {
		task := new(types.AggTask)
		if err := rows.StructScan(task); err != nil {
			return nil, err
		}
		tasks = append(tasks, task)
	}
	// rows.Next returning false may hide a scan/connection error.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return tasks, nil
}

View File

@@ -5,6 +5,7 @@ import (
"database/sql"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/common"
@@ -45,6 +46,16 @@ type SessionInfoOrm interface {
SetSessionInfo(rollersInfo *types.SessionInfo) error
}
// AggTaskOrm provides the persistence operations for aggregation proving
// tasks (the agg_task table).
type AggTaskOrm interface {
	// GetAssignedAggTasks returns tasks in the assigned or proved state.
	GetAssignedAggTasks() ([]*types.AggTask, error)
	// GetUnassignedAggTasks returns tasks not yet dispatched to a roller.
	GetUnassignedAggTasks() ([]*types.AggTask, error)
	// GetSubProofsByAggTaskID returns the verified batch proofs in the task's range.
	GetSubProofsByAggTaskID(id string) ([][]byte, error)
	// InsertAggTask stores a new task covering the given batch range.
	InsertAggTask(id string, startBatchIndex uint64, startBatchHash string, endBatchIndex uint64, endBatchHash string) error
	// UpdateAggTaskStatus sets the task's proving status.
	UpdateAggTaskStatus(aggTaskID string, status types.ProvingStatus) error
	// UpdateProofForAggTask stores the aggregation proof and marks the task proved.
	UpdateProofForAggTask(aggTaskID string, proof *message.AggProof) error
}
// BlockBatchOrm block_batch operation interface
type BlockBatchOrm interface {
GetBlockBatches(fields map[string]interface{}, args ...string) ([]*types.BlockBatch, error)

View File

@@ -15,6 +15,7 @@ type OrmFactory interface {
orm.L1MessageOrm
orm.L2MessageOrm
orm.SessionInfoOrm
orm.AggTaskOrm
GetDB() *sqlx.DB
Beginx() (*sqlx.Tx, error)
Close() error
@@ -27,6 +28,7 @@ type ormFactory struct {
orm.L1MessageOrm
orm.L2MessageOrm
orm.SessionInfoOrm
orm.AggTaskOrm
*sqlx.DB
}
@@ -51,6 +53,7 @@ func NewOrmFactory(cfg *DBConfig) (OrmFactory, error) {
L2MessageOrm: orm.NewL2MessageOrm(db),
L1BlockOrm: orm.NewL1BlockOrm(db),
SessionInfoOrm: orm.NewSessionInfoOrm(db),
AggTaskOrm: orm.NewAggTaskOrm(db),
DB: db,
}, nil
}

View File

@@ -16,6 +16,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
abi "scroll-tech/bridge/abi"
@@ -71,6 +72,13 @@ var (
Layer2Hash: "hash1",
},
}
proof1 = []byte{1}
subProofs = [][]byte{proof1}
aggTask1 = &types.AggTask{ID: "test-agg-1"}
aggTask2 = &types.AggTask{ID: "test-agg-2"}
wrappedBlock *types.WrappedBlock
batchData1 *types.BatchData
batchData2 *types.BatchData
@@ -81,6 +89,7 @@ var (
ormLayer2 orm.L2MessageOrm
ormBatch orm.BlockBatchOrm
ormSession orm.SessionInfoOrm
ormAggTask orm.AggTaskOrm
)
func setupEnv(t *testing.T) error {
@@ -99,6 +108,7 @@ func setupEnv(t *testing.T) error {
ormLayer2 = orm.NewL2MessageOrm(db)
ormBatch = orm.NewBlockBatchOrm(db)
ormSession = orm.NewSessionInfoOrm(db)
ormAggTask = orm.NewAggTaskOrm(db)
templateBlockTrace, err := os.ReadFile("../common/testdata/blockTrace_02.json")
if err != nil {
@@ -116,6 +126,11 @@ func setupEnv(t *testing.T) error {
}
batchData1 = types.NewBatchData(parentBatch, []*types.WrappedBlock{wrappedBlock}, nil)
aggTask1.StartBatchIndex = batchData1.Batch.BatchIndex
aggTask1.EndBatchIndex = batchData1.Batch.BatchIndex
aggTask1.StartBatchHash = batchData1.Hash().Hex()
aggTask1.EndBatchHash = batchData1.Hash().Hex()
templateBlockTrace, err = os.ReadFile("../common/testdata/blockTrace_03.json")
if err != nil {
return err
@@ -156,6 +171,7 @@ func TestOrmFactory(t *testing.T) {
defer func() {
base.Free()
}()
t.Log("test database DSN is ", base.DBConfig.DSN)
if err := setupEnv(t); err != nil {
t.Fatal(err)
}
@@ -169,6 +185,8 @@ func TestOrmFactory(t *testing.T) {
t.Run("testOrmBlockBatch", testOrmBlockBatch)
t.Run("testOrmSessionInfo", testOrmSessionInfo)
t.Run("testOrmAggTask", testOrmAggTask)
}
func testOrmBlockTraces(t *testing.T) {
@@ -326,7 +344,7 @@ func testOrmBlockBatch(t *testing.T) {
provingStatus, err := ormBatch.GetProvingStatusByHash(batchHash1)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskUnassigned, provingStatus)
err = ormBatch.UpdateProofByHash(context.Background(), batchHash1, []byte{1}, []byte{2}, 1200)
err = ormBatch.UpdateProofByHash(context.Background(), batchHash1, proof1, []byte{2}, 1200)
assert.NoError(t, err)
err = ormBatch.UpdateProvingStatus(batchHash1, types.ProvingTaskVerified)
assert.NoError(t, err)
@@ -424,3 +442,95 @@ func testOrmSessionInfo(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 0, len(sessionInfos))
}
// testOrmAggTask exercises the AggTaskOrm round trip: it seeds two block
// batches, marks the first verified with a proof, then inserts two
// aggregation tasks and walks the first through the
// assigned -> proved -> verified states, checking the unassigned/assigned
// queries at each step.
func testOrmAggTask(t *testing.T) {
	// Create db handler and reset db.
	factory, err := database.NewOrmFactory(base.DBConfig)
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(factory.GetDB().DB))
	// Seed two batches and attach their L2 blocks inside one transaction.
	dbTx, err := factory.Beginx()
	assert.NoError(t, err)
	err = ormBatch.NewBatchInDBTx(dbTx, batchData1)
	assert.NoError(t, err)
	batchHash1 := batchData1.Hash().Hex()
	err = ormBlock.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
		batchData1.Batch.Blocks[0].BlockNumber}, batchHash1)
	assert.NoError(t, err)
	err = ormBatch.NewBatchInDBTx(dbTx, batchData2)
	assert.NoError(t, err)
	batchHash2 := batchData2.Hash().Hex()
	err = ormBlock.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
		batchData2.Batch.Blocks[0].BlockNumber,
		batchData2.Batch.Blocks[1].BlockNumber}, batchHash2)
	assert.NoError(t, err)
	err = dbTx.Commit()
	assert.NoError(t, err)
	// Both batches should now exist and be pending.
	batches, err := ormBatch.GetBlockBatches(map[string]interface{}{})
	assert.NoError(t, err)
	assert.Equal(t, int(2), len(batches))
	batcheHashes, err := ormBatch.GetPendingBatches(10)
	assert.NoError(t, err)
	assert.Equal(t, int(2), len(batcheHashes))
	assert.Equal(t, batchHash1, batcheHashes[0])
	assert.Equal(t, batchHash2, batcheHashes[1])
	// Committing batch 1 removes it from the pending set.
	err = ormBatch.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash1, "commit_tx_1", types.RollupCommitted)
	assert.NoError(t, err)
	batcheHashes, err = ormBatch.GetPendingBatches(10)
	assert.NoError(t, err)
	assert.Equal(t, int(1), len(batcheHashes))
	assert.Equal(t, batchHash2, batcheHashes[0])
	// Attach a proof to batch 1 and mark it verified so it can serve as a
	// sub-proof for the aggregation task.
	provingStatus, err := ormBatch.GetProvingStatusByHash(batchHash1)
	assert.NoError(t, err)
	assert.Equal(t, types.ProvingTaskUnassigned, provingStatus)
	err = ormBatch.UpdateProofByHash(context.Background(), batchHash1, proof1, []byte{2}, 1200)
	assert.NoError(t, err)
	err = ormBatch.UpdateProvingStatus(batchHash1, types.ProvingTaskVerified)
	assert.NoError(t, err)
	provingStatus, err = ormBatch.GetProvingStatusByHash(batchHash1)
	assert.NoError(t, err)
	assert.Equal(t, types.ProvingTaskVerified, provingStatus)
	// set agg task into db
	err = ormAggTask.InsertAggTask(aggTask1.ID, aggTask1.StartBatchIndex, aggTask1.StartBatchHash, aggTask1.EndBatchIndex, aggTask1.EndBatchHash)
	assert.NoError(t, err)
	err = ormAggTask.InsertAggTask(aggTask2.ID, aggTask2.StartBatchIndex, aggTask2.StartBatchHash, aggTask2.EndBatchIndex, aggTask2.EndBatchHash)
	assert.NoError(t, err)
	// get subProofs by hash: aggTask1 covers batch 1, whose proof is proof1.
	getsProofs, err := ormAggTask.GetSubProofsByAggTaskID(aggTask1.ID)
	assert.NoError(t, err)
	assert.Equal(t, subProofs, getsProofs)
	// get unassigned agg tasks: both freshly inserted tasks qualify.
	tasks, err := ormAggTask.GetUnassignedAggTasks()
	assert.NoError(t, err)
	assert.Equal(t, tasks[0].ID, aggTask1.ID)
	assert.Equal(t, tasks[1].ID, aggTask2.ID)
	// update status agg proof into db
	err = ormAggTask.UpdateAggTaskStatus(aggTask1.ID, types.ProvingTaskAssigned)
	assert.NoError(t, err)
	// check assigned agg task
	assigns, err := ormAggTask.GetAssignedAggTasks()
	assert.NoError(t, err)
	assert.Equal(t, assigns[0].ID, aggTask1.ID)
	// insert aggregator proof
	err = ormAggTask.UpdateProofForAggTask(aggTask1.ID, &message.AggProof{})
	assert.NoError(t, err)
	// mark verified
	err = ormAggTask.UpdateAggTaskStatus(aggTask1.ID, types.ProvingTaskVerified)
	assert.NoError(t, err)
	// get left unassigned task: only aggTask2 should remain unassigned.
	unassignTasks, err := ormAggTask.GetUnassignedAggTasks()
	assert.NoError(t, err)
	assert.Equal(t, unassignTasks[0].ID, aggTask2.ID)
}

View File

@@ -79,6 +79,7 @@ github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB
github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
@@ -119,9 +120,11 @@ github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572 h1:+R8G1+Ftumd0DaveLgMIjrFPcAS4G8MsVXWXiyZL5BY=
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f h1:C43yEtQ6NIf4ftFXD/V55gnGFgPbMQobd//YlnLjUJ8=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
github.com/consensys/gnark-crypto v0.9.1-0.20230105202408-1a7a29904a7c h1:llSLg4o9EgH3SrXky+Q5BqEYqV76NGKo07K5Ps2pIKo=
github.com/consensys/gnark-crypto v0.9.1-0.20230105202408-1a7a29904a7c/go.mod h1:CkbdF9hbRidRJYMRzmfX8TMOr95I2pYXRHF18MzRrvA=
github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -130,6 +133,7 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 h1:6IrxszG5G+O7zhtkWxq6+unVvnrm1fqV2Pe+T95DUzw=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c h1:/ovYnF02fwL0kvspmy9AuyKg1JhdTRUgPw4nUxd9oZM=
@@ -137,6 +141,7 @@ github.com/dave/jennifer v1.2.0 h1:S15ZkFMRoJ36mGAQgWL1tnr0NQJh9rZ8qatseX/VbBc=
github.com/dchest/blake512 v1.0.0 h1:oDFEQFIqFSeuA34xLtXZ/rWxCXdSjirjzPhey5EUvmA=
github.com/dchest/blake512 v1.0.0/go.mod h1:FV1x7xPPLWukZlpDpWQ88rF/SFwZ5qbskrzhLMB92JI=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI=
github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
@@ -150,6 +155,7 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E=
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo=
github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
@@ -161,6 +167,7 @@ github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRP
github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3vYNQgR+/ZIy0RmeUDZo7Y8zeQ=
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7 h1:kgvzE5wLsLa7XKfV85VZl40QXaMCaeFtHpPwJ8fhotY=
github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7/go.mod h1:yRkwfj0CBpOGre+TwBsqPV0IH0Pk73e4PXJOeNDboGs=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 h1:tYwu/z8Y0NkkzGEh3z21mSWggMg4LwLRFucLS7TjARg=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
@@ -187,6 +194,7 @@ github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmV
github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 h1:IZqZOB2fydHte3kUgxrzK5E1fW7RQGeDwE8F/ZZnUYc=
github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732 h1:AB7YjNrzlVHsYz06zCULVV2zYCEft82P86dSmtwxKL0=
github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732/go.mod h1:o/XfIXWi4/GqbQirfRm5uTbXMG5NpqxkxblnbZ+QM9I=
github.com/getkin/kin-openapi v0.61.0 h1:6awGqF5nG5zkVpMsAih1QH4VgzS8phTxECUWIFo7zko=
github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c=
@@ -202,19 +210,19 @@ github.com/go-chi/chi/v5 v5.0.0 h1:DBPx88FjZJH3FsICfDAfIfnb7XxKIYVGG6lOPlhENAg=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72 h1:b+9H1GAsx5RsjvDFLoS5zkNBzIQMuVKUYQDmxU3N5XE=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
@@ -309,16 +317,19 @@ github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2
github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro=
github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8=
github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE=
github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8=
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTjegPdCiOxVOJtRIy7/8RldvMTsyPYH10=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
@@ -327,7 +338,6 @@ github.com/labstack/echo/v4 v4.2.1 h1:LF5Iq7t/jrtUuSutNuiEWtB5eiHfZ5gSe2pcu5exjQ
github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd h1:HvFwW+cm9bCbZ/+vuGNq7CRWXql8c0y8nGeYpqmpvmk=
@@ -340,11 +350,14 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEHAFYJcPTf0wY5q0exFNJZVWa1U=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
@@ -357,10 +370,13 @@ github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5Vgl
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86 h1:D6paGObi5Wud7xg83MaEFyjxQB1W5bz5d0IFppr+ymk=
github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c h1:bY6ktFuJkt+ZXkX0RChQch2FtHpWQLVS8Qo1YasiIVk=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY=
github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs=
github.com/paulmach/orb v0.7.1 h1:Zha++Z5OX/l168sqHK3k4z18LDvr+YAO/VjK0ReQ9rU=
@@ -375,10 +391,13 @@ github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
@@ -398,8 +417,11 @@ github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPO
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636 h1:aSISeOcal5irEhJd1M+IrApc0PdcN7e7Aj4yuEnOrfQ=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
@@ -407,18 +429,23 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344 h1:m+8fKfQwCAy1QjzINvKe/pYtLjo2dl59x2w9YSEJxuY=
github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
@@ -468,7 +495,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNT
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -479,7 +505,6 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -492,16 +517,17 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
gonum.org/v1/gonum v0.6.0 h1:DJy6UzXbahnGUf1ujUNkh/NEtK14qMo2nvlBPs4U5yw=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
@@ -530,8 +556,11 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=

View File

@@ -5,6 +5,8 @@ import (
"os"
"path/filepath"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/log"
)
@@ -20,9 +22,10 @@ type Config struct {
// ProverConfig load zk prover config.
type ProverConfig struct {
ParamsPath string `json:"params_path"`
SeedPath string `json:"seed_path"`
DumpDir string `json:"dump_dir,omitempty"`
ParamsPath string `json:"params_path"`
SeedPath string `json:"seed_path"`
ProveType message.ProveType `json:"prove_type,omitempty"` // 0: basic roller (default type), 1: aggregator roller
DumpDir string `json:"dump_dir,omitempty"`
}
// NewConfig returns a new instance of Config.

View File

@@ -51,15 +51,20 @@ func NewProver(cfg *config.ProverConfig) (*Prover, error) {
// Prove call rust ffi to generate proof, if first failed, try again.
func (p *Prover) Prove(task *message.TaskMsg) (*message.AggProof, error) {
tracesByt, err := json.Marshal(task.Traces)
if err != nil {
return nil, err
var proofByt []byte
if p.cfg.ProveType == message.BasicProve {
tracesByt, err := json.Marshal(task.Traces)
if err != nil {
return nil, err
}
proofByt = p.prove(tracesByt)
} else if p.cfg.ProveType == message.AggregatorProve {
// TODO: aggregator prove
}
proofByt := p.prove(tracesByt)
// dump proof
err = p.dumpProof(task.ID, proofByt)
err := p.dumpProof(task.ID, proofByt)
if err != nil {
log.Error("Dump proof failed", "task-id", task.ID, "error", err)
}

View File

@@ -86,6 +86,11 @@ func NewRoller(cfg *config.Config) (*Roller, error) {
}, nil
}
// Type returns roller type.
func (r *Roller) Type() message.ProveType {
return r.cfg.Prover.ProveType
}
// PublicKey translate public key to hex and return.
func (r *Roller) PublicKey() string {
return common.Bytes2Hex(crypto.CompressPubkey(&r.priv.PublicKey))
@@ -113,9 +118,10 @@ func (r *Roller) Register() error {
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.cfg.RollerName,
Timestamp: uint32(timestamp),
Version: version.Version,
Name: r.cfg.RollerName,
RollerType: r.Type(),
Timestamp: uint32(timestamp),
Version: version.Version,
},
}
// Sign request token message
@@ -227,6 +233,7 @@ func (r *Roller) prove() error {
Status: message.StatusProofError,
Error: err.Error(),
ID: task.Task.ID,
Type: task.Task.Type,
Proof: &message.AggProof{},
}
log.Error("prove block failed!", "task-id", task.Task.ID)
@@ -234,6 +241,7 @@ func (r *Roller) prove() error {
proofMsg = &message.ProofDetail{
Status: message.StatusOk,
ID: task.Task.ID,
Type: task.Task.Type,
Proof: proof,
}
log.Info("prove block successfully!", "task-id", task.Task.ID)
@@ -245,6 +253,7 @@ func (r *Roller) prove() error {
Status: message.StatusProofError,
Error: "zk proving panic",
ID: task.Task.ID,
Type: task.Task.Type,
Proof: &message.AggProof{},
}
}