Compare commits

18 Commits

Author SHA1 Message Date
vincent 2b7b7dab86 fix 2023-07-05 13:24:10 +08:00
vincent 075d25ac87 update select 2023-07-05 13:14:57 +08:00
vincent f87dca41e6 bump version 2023-07-05 12:25:29 +08:00
vincent 507ee571f6 fix 2023-07-05 12:10:49 +08:00
vincent 9651e1ca6e update db insert logic 2023-07-05 11:55:09 +08:00
ChuhanJin 672c2dd49c fix(bridge-history-api): fix relayed_msg table unique index (#611) 2023-07-05 10:58:35 +08:00
Co-authored-by: vincent <419436363@qq.com>
ChuhanJin 3d9fce26b6 refactor(bridge-history-api): add combine constraint (#585) 2023-07-04 13:48:51 +08:00
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
georgehao 95124ce70e feat(coordinator): refactor coordinator session info (#604) 2023-07-03 21:36:01 +08:00
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Péter Garamvölgyi f8d4855f26 fix: use QueueIndex as nonce for L1MessageTx (#603) 2023-06-30 15:55:14 +02:00
ChuhanJin e303fafefc feat(bridge): user claim part2: disable bridge relay (#571) 2023-06-30 16:23:32 +08:00
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Péter Garamvölgyi f00c400993 fix(chunk proposer): treat L2 tx gas check as soft limit (#602) 2023-06-29 23:32:16 +08:00
Péter Garamvölgyi bb6428848f fix: improve import genesis logs (#601) 2023-06-29 17:18:54 +02:00
HAOYUatHZ df97200a41 feat(roller): return concrete error for get traces error (#595) 2023-06-29 19:09:40 +08:00
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Péter Garamvölgyi c8146ebb1a feat(rollup-relayer): commit genesis batch to L1 during startup (#596) 2023-06-29 15:55:21 +08:00
Lawliet-Chan 5390ec93b4 refactor(zkp): Zk test automation (#589) 2023-06-29 09:23:58 +08:00
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Péter Garamvölgyi 3ce671098a fix(contracts): update deployment scripts (#594) 2023-06-28 11:52:25 +02:00
Xi Lin 7e9fb0c667 feat(bridge): add function to decode batch range from calldata (#562) 2023-06-28 10:36:15 +08:00
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
ChuhanJin d7d09c2ac7 feat(bridge-history-api): support let user relay l2 msg (#563) 2023-06-28 10:21:42 +08:00
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
77 changed files with 1973 additions and 2329346 deletions

View File

@@ -1,7 +1,5 @@
.PHONY: check update dev_docker build_test_docker run_test_docker clean
ZKP_VERSION=release-1220
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -40,16 +38,5 @@ build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app scroll_test_image
test_zkp: ## Test zkp prove and verify, roller/prover generates the proof and coordinator/verifier verifies it
mkdir -p test_params
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params19 -O ./test_params/params19
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params26 -O ./test_params/params26
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_seed -O test_seed
rm -rf ./roller/assets/test_params && mv test_params ./roller/assets/ && mv test_seed ./roller/assets/
cd ./roller && make test-gpu-prover
rm -rf ./coordinator/assets/test_params && mv ./roller/assets/test_params ./coordinator/assets/ && mv ./roller/assets/agg_proof ./coordinator/assets/
cd ./coordinator && make test-gpu-verifier
clean: ## Empty out the bin folder
@rm -rf build/bin

View File

@@ -43,6 +43,8 @@ var (
// ScrollChainABI holds information about ScrollChain's context and available invokable methods.
ScrollChainABI *abi.ABI
// ScrollChainV2ABI holds information about ScrollChainV2's context and available invokable methods.
ScrollChainV2ABI *abi.ABI
// L1ScrollMessengerABI holds information about L1ScrollMessenger's context and available invokable methods.
L1ScrollMessengerABI *abi.ABI
// L1MessageQueueABI holds information about L1MessageQueue contract's context and available invokable methods.
@@ -116,6 +118,7 @@ func init() {
// scroll monorepo
ScrollChainABI, _ = ScrollChainMetaData.GetAbi()
ScrollChainV2ABI, _ = ScrollChainV2MetaData.GetAbi()
L1ScrollMessengerABI, _ = L1ScrollMessengerMetaData.GetAbi()
L1MessageQueueABI, _ = L1MessageQueueMetaData.GetAbi()
L2GasPriceOracleABI, _ = L2GasPriceOracleMetaData.GetAbi()
@@ -198,6 +201,11 @@ var ScrollChainMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"CommitBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"FinalizeBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"RevertBatch\",\"type\":\"event\"},{\"inputs\":[{\"components\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"parentHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"blockNumber\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"timestamp\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint64\",\"name\":\"gasLimit\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"numTransactions\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"numL1Messages\",\"type\":\"uint16\"}],\"internalType\":\"structIScrollChain.BlockContext[]\",\"name\":\"blocks\",\"type\":\"tuple[]\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawTrieRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"batchIndex\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"parentBatchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"l2Transactions\",\"type\":\"bytes\"}],\"internalType\":\"structIScrollChain.Batch\",\"name\":\"batch\",\"type\":\"tuple\"}],\"name\":\"commitBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"parentHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"blockNumber\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"timestamp\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint64\",\"name\":\"gasLimit\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"numTransactions\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"numL1Messages\",\"type\":\"uint16\"}],\"internalType\":\"structIScrollChain.BlockContext[]\",\"name\":\"blocks\",\"type\":\"tuple[]\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawTrieRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"batchIndex\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"parentBatchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"l2Transactions\",\"type\":\"bytes\"}],\"internalType\":\"structIScrollChain.Batch[]\",\"name\":\"batches\",\"type\":\"tuple[]\"}],\"name\":\"commitBatches\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256[]\",\"name\":\"proof\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256[]\",\"name\":\"instances\",\"type\":
\"uint256[]\"}],\"name\":\"finalizeBatchWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"getL2MessageRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"isBatchFinalized\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchId\",\"type\":\"bytes32\"}],\"name\":\"revertBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
}
// ScrollChainV2MetaData contains all meta data concerning the ScrollChain contract.
var ScrollChainV2MetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"CommitBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"}],\"name\":\"FinalizeBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"RevertBatch\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"parentBatchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes[]\",\"name\":\"chunks\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"skippedL1MessageBitmap\",\"type\":\"bytes\"}],\"name\":\"commitBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"committedBatches\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"aggrProof\",\"type\":\"bytes\"}],\"name\":\"finalizeBatchWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"finalizedStateRoots\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"isBatchFinalized\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"count\",\"type\":\"uint256\"}],\"name\":\"revertBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"withdrawRoots\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}
// L1ScrollMessengerMetaData contains all meta data concerning the L1ScrollMessenger contract.
var L1ScrollMessengerMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"FailedRelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"RelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"SentMessage\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"merkleProof\",\"type\":\"bytes\"}],\"internalType\":\"structIL1ScrollMessenger.L2MessageProof\",\"name\":\"proof\",\"type\":\"tuple\"}],\"name\":\"relayMessageWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"queueIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"oldGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"newGasLimit\",\"type\":\"uint32\"}],\"name\":\"replayMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"sendMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"xDomainMessageSender\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
@@ -337,6 +345,11 @@ type IScrollChainBatch struct {
L2Transactions []byte
}
// L1CommitBatchEvent represents a CommitBatch event raised by the ScrollChain contract.
type L1CommitBatchEvent struct {
BatchHash common.Hash
}
// IScrollChainBlockContext is an auto generated low-level Go binding around an user-defined struct.
type IScrollChainBlockContext struct {
BlockHash common.Hash

View File

@@ -13,6 +13,7 @@ import (
"bridge-history-api/config"
"bridge-history-api/cross_msg"
"bridge-history-api/cross_msg/message_proof"
"bridge-history-api/db"
cutils "bridge-history-api/utils"
)
@@ -54,6 +55,7 @@ func action(ctx *cli.Context) error {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
db, err := db.NewOrmFactory(cfg)
if err != nil {
log.Crit("failed to connect to db", "config file", cfgFile, "error", err)
}
defer db.Close()
@@ -98,13 +100,20 @@ func action(ctx *cli.Context) error {
go l2crossMsgFetcher.Start()
defer l2crossMsgFetcher.Stop()
- l1BlocktimeFetcher := cross_msg.NewBlocktimestampFetcher(subCtx, uint(cfg.L1.Confirmation), int(cfg.L1.BlockTime), l1client, db.UpdateL1Blocktimestamp, db.GetL1EarliestNoBlocktimestampHeight)
- go l1BlocktimeFetcher.Start()
- defer l1BlocktimeFetcher.Stop()
+ // BlockTimestamp fetcher for l1 and l2
+ l1BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db.UpdateL1BlockTimestamp, db.GetL1EarliestNoBlockTimestampHeight)
+ go l1BlockTimeFetcher.Start()
+ defer l1BlockTimeFetcher.Stop()
- l2BlocktimeFetcher := cross_msg.NewBlocktimestampFetcher(subCtx, uint(cfg.L2.Confirmation), int(cfg.L2.BlockTime), l2client, db.UpdateL2Blocktimestamp, db.GetL2EarliestNoBlocktimestampHeight)
- go l2BlocktimeFetcher.Start()
- defer l2BlocktimeFetcher.Stop()
+ l2BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, db.UpdateL2BlockTimestamp, db.GetL2EarliestNoBlockTimestampHeight)
+ go l2BlockTimeFetcher.Start()
+ defer l2BlockTimeFetcher.Stop()
+ // Proof updater and batch fetcher
+ l2msgProofUpdater := message_proof.NewMsgProofUpdater(subCtx, cfg.L1.Confirmation, cfg.BatchInfoFetcher.BatchIndexStartBlock, db)
+ batchFetcher := cross_msg.NewBatchInfoFetcher(subCtx, common.HexToAddress(cfg.BatchInfoFetcher.ScrollChainAddr), cfg.BatchInfoFetcher.BatchIndexStartBlock, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db, l2msgProofUpdater)
+ go batchFetcher.Start()
+ defer batchFetcher.Stop()
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
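The hunk stops right after the interrupt channel is created. For context, the conventional shape of the rest of this pattern, as a generic Go sketch (not the file's actual tail, which this diff does not show):

// Generic CTRL-C handling sketch; the real main.go continues beyond this hunk.
signal.Notify(interrupt, os.Interrupt)
<-interrupt
// returning from action runs the deferred Stop() calls above in reverse order,
// shutting the fetchers down gracefully
return nil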

View File

@@ -1,4 +1,8 @@
{
"batchInfoFetcher": {
"batchIndexStartBlock": 9091265,
"ScrollChainAddr": "0xcD00DB804C819175B381b2B44Aa16A391c8a01D6"
},
"l1": {
"confirmation": 64,
"endpoint": "https://rpc.ankr.com/eth_goerli",

View File

@@ -6,6 +6,11 @@ import (
"path/filepath"
)
type BatchInfoFetcherConfig struct {
BatchIndexStartBlock uint64 `json:"batchIndexStartBlock"`
ScrollChainAddr string `json:"ScrollChainAddr"`
}
// DBConfig db config
type DBConfig struct {
// data source name
@@ -41,8 +46,9 @@ type Config struct {
L2 *LayerConfig `json:"l2"`
// data source name
- DB *DBConfig `json:"db"`
- Server *ServerConfig `json:"server"`
+ DB *DBConfig `json:"db"`
+ Server *ServerConfig `json:"server"`
+ BatchInfoFetcher *BatchInfoFetcherConfig `json:"batchInfoFetcher"`
}
// NewConfig returns a new instance of Config.

View File

@@ -0,0 +1,105 @@
package cross_msg
import (
"context"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"bridge-history-api/cross_msg/message_proof"
"bridge-history-api/db"
"bridge-history-api/utils"
)
type BatchInfoFetcher struct {
ctx context.Context
scrollChainAddr common.Address
batchInfoStartNumber uint64
confirmation uint64
blockTimeInSec int
client *ethclient.Client
db db.OrmFactory
msgProofUpdater *message_proof.MsgProofUpdater
}
func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db db.OrmFactory, msgProofUpdater *message_proof.MsgProofUpdater) *BatchInfoFetcher {
return &BatchInfoFetcher{
ctx: ctx,
scrollChainAddr: scrollChainAddr,
batchInfoStartNumber: batchInfoStartNumber,
confirmation: confirmation,
blockTimeInSec: blockTimeInSec,
client: client,
db: db,
msgProofUpdater: msgProofUpdater,
}
}
func (b *BatchInfoFetcher) Start() {
log.Info("BatchInfoFetcher Start")
// Fetch batch info at the beginning,
// then start the msg proof updater once the db has some bridge batches
err := b.fetchBatchInfo()
if err != nil {
log.Error("fetch batch info at begining failed: ", "err", err)
}
go b.msgProofUpdater.Start()
go func() {
tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
for {
select {
case <-b.ctx.Done():
tick.Stop()
return
case <-tick.C:
err := b.fetchBatchInfo()
if err != nil {
log.Error("fetch batch info failed: ", "err", err)
}
}
}
}()
}
func (b *BatchInfoFetcher) Stop() {
log.Info("BatchInfoFetcher Stop")
b.msgProofUpdater.Stop()
}
func (b *BatchInfoFetcher) fetchBatchInfo() error {
number, err := utils.GetSafeBlockNumber(b.ctx, b.client, b.confirmation)
if err != nil {
log.Error("Can not get latest block number: ", "err", err)
return err
}
latestBatch, err := b.db.GetLatestRollupBatch()
if err != nil {
log.Error("Can not get latest BatchInfo: ", "err", err)
return err
}
var startHeight uint64
if latestBatch == nil {
startHeight = b.batchInfoStartNumber
} else {
startHeight = latestBatch.CommitHeight + 1
}
for from := startHeight; number >= from; from += uint64(fetchLimit) {
to := from + uint64(fetchLimit) - 1
// number - confirmation can never be less than 0 given the for loop condition,
// but watch out for overflow
if to > number {
to = number
}
// filter logs to fetch batches
err = FetchAndSaveBatchIndex(b.ctx, b.client, b.db, int64(from), int64(to), b.scrollChainAddr)
if err != nil {
log.Error("Can not fetch and save from chain: ", "err", err)
return err
}
}
return nil
}
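The loop in fetchBatchInfo above walks the chain in fetchLimit-sized windows, clamping the final window to the safe head. A minimal self-contained sketch of the same pagination pattern (walkRanges and the process callback are illustrative names, not part of this change):

package main

import "fmt"

const fetchLimit = uint64(3000)

// walkRanges visits [startHeight, safeHead] in fetchLimit-sized windows,
// clamping the last window so `to` never exceeds the safe head.
func walkRanges(startHeight, safeHead uint64, process func(from, to uint64) error) error {
	for from := startHeight; from <= safeHead; from += fetchLimit {
		to := from + fetchLimit - 1
		if to > safeHead {
			to = safeHead // clamp the final window
		}
		if err := process(from, to); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = walkRanges(100, 7000, func(from, to uint64) error {
		fmt.Println(from, to) // each window is inclusive on both ends
		return nil
	})
}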

View File

@@ -9,30 +9,30 @@ import (
"github.com/ethereum/go-ethereum/log"
)
- type GetEarliestNoBlocktimestampHeightFunc func() (uint64, error)
- type UpdateBlocktimestampFunc func(height uint64, timestamp time.Time) error
+ type GetEarliestNoBlockTimestampHeightFunc func() (uint64, error)
+ type UpdateBlockTimestampFunc func(height uint64, timestamp time.Time) error
- type BlocktimestampFetcher struct {
+ type BlockTimestampFetcher struct {
ctx context.Context
- confirmation uint
+ confirmation uint64
blockTimeInSec int
client *ethclient.Client
- updateBlocktimestampFunc UpdateBlocktimestampFunc
- getEarliestNoBlocktimestampHeightFunc GetEarliestNoBlocktimestampHeightFunc
+ updateBlockTimestampFunc UpdateBlockTimestampFunc
+ getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc
}
- func NewBlocktimestampFetcher(ctx context.Context, confirmation uint, blockTimeInSec int, client *ethclient.Client, updateBlocktimestampFunc UpdateBlocktimestampFunc, getEarliestNoBlocktimestampHeightFunc GetEarliestNoBlocktimestampHeightFunc) *BlocktimestampFetcher {
- return &BlocktimestampFetcher{
+ func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTimeInSec int, client *ethclient.Client, updateBlockTimestampFunc UpdateBlockTimestampFunc, getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc) *BlockTimestampFetcher {
+ return &BlockTimestampFetcher{
ctx: ctx,
confirmation: confirmation,
blockTimeInSec: blockTimeInSec,
client: client,
- getEarliestNoBlocktimestampHeightFunc: getEarliestNoBlocktimestampHeightFunc,
- updateBlocktimestampFunc: updateBlocktimestampFunc,
+ getEarliestNoBlockTimestampHeightFunc: getEarliestNoBlockTimestampHeightFunc,
+ updateBlockTimestampFunc: updateBlockTimestampFunc,
}
}
- func (b *BlocktimestampFetcher) Start() {
+ func (b *BlockTimestampFetcher) Start() {
go func() {
tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
for {
@@ -46,23 +46,23 @@ func (b *BlocktimestampFetcher) Start() {
log.Error("Can not get latest block number", "err", err)
continue
}
- startHeight, err := b.getEarliestNoBlocktimestampHeightFunc()
+ startHeight, err := b.getEarliestNoBlockTimestampHeightFunc()
if err != nil {
log.Error("Can not get latest record without block timestamp", "err", err)
continue
}
- for height := startHeight; number >= height+uint64(b.confirmation) && height > 0; {
+ for height := startHeight; number >= height+b.confirmation && height > 0; {
block, err := b.client.HeaderByNumber(b.ctx, new(big.Int).SetUint64(height))
if err != nil {
log.Error("Can not get block by number", "err", err)
break
}
- err = b.updateBlocktimestampFunc(height, time.Unix(int64(block.Time), 0))
+ err = b.updateBlockTimestampFunc(height, time.Unix(int64(block.Time), 0))
if err != nil {
- log.Error("Can not update blocktimstamp into DB ", "err", err)
+ log.Error("Can not update blockTimestamp into DB ", "err", err)
break
}
- height, err = b.getEarliestNoBlocktimestampHeightFunc()
+ height, err = b.getEarliestNoBlockTimestampHeightFunc()
if err != nil {
log.Error("Can not get latest record without block timestamp", "err", err)
break
@@ -73,7 +73,6 @@
}()
}
- func (b *BlocktimestampFetcher) Stop() {
- log.Info("BlocktimestampFetcher Stop")
- b.ctx.Done()
+ func (b *BlockTimestampFetcher) Stop() {
+ log.Info("BlockTimestampFetcher Stop")
}

View File

@@ -27,13 +27,12 @@ type CrossMsgFetcher struct {
reorgHandling ReorgHandling
addressList []common.Address
cachedHeaders []*types.Header
- mu *sync.Mutex
+ mu sync.Mutex
reorgStartCh chan struct{}
reorgEndCh chan struct{}
}
func NewCrossMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFactory, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*CrossMsgFetcher, error) {
newMU := &sync.Mutex{}
crossMsgFetcher := &CrossMsgFetcher{
ctx: ctx,
config: config,
@@ -41,7 +40,6 @@ func NewCrossMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.O
client: client,
worker: worker,
reorgHandling: reorg,
- mu: newMU,
addressList: addressList,
cachedHeaders: make([]*types.Header, 0),
reorgStartCh: make(chan struct{}),
@@ -77,7 +75,7 @@ func (c *CrossMsgFetcher) Start() {
return
case <-tick.C:
c.mu.Lock()
- c.forwardFetchAndSaveMissingEvents(0)
+ c.forwardFetchAndSaveMissingEvents(1)
c.mu.Unlock()
}
}
@@ -97,7 +95,6 @@ func (c *CrossMsgFetcher) Start() {
}
func (c *CrossMsgFetcher) Stop() {
- c.db.Close()
log.Info("CrossMsgFetcher Stop")
}
@@ -106,7 +103,7 @@ func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64)
// if we fetch to the latest block, shall not exceed cachedHeaders
var number uint64
var err error
if len(c.cachedHeaders) != 0 && confirmation <= 0 {
if len(c.cachedHeaders) != 0 && confirmation == 0 {
number = c.cachedHeaders[len(c.cachedHeaders)-1].Number.Uint64() - 1
} else {
number, err = utils.GetSafeBlockNumber(c.ctx, c.client, confirmation)
@@ -119,22 +116,22 @@ func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64)
log.Error(fmt.Sprintf("%s: invalid get/fetch function", c.worker.Name))
return
}
- processed_height, err := c.worker.G(c.db)
+ processedHeight, err := c.worker.G(c.db)
if err != nil {
log.Error(fmt.Sprintf("%s: can not get latest processed block height", c.worker.Name))
}
- log.Info(fmt.Sprintf("%s: ", c.worker.Name), "height", processed_height)
- if processed_height <= 0 || processed_height < int64(c.config.StartHeight) {
- processed_height = int64(c.config.StartHeight)
+ log.Info(fmt.Sprintf("%s: ", c.worker.Name), "height", processedHeight)
+ if processedHeight <= 0 || processedHeight < int64(c.config.StartHeight) {
+ processedHeight = int64(c.config.StartHeight)
} else {
- processed_height += 1
+ processedHeight += 1
}
- for n := processed_height; n <= int64(number); n += FETCH_LIMIT {
- iter_end := n + FETCH_LIMIT - 1
- if iter_end > int64(number) {
- iter_end = int64(number)
+ for from := processedHeight; from <= int64(number); from += fetchLimit {
+ to := from + fetchLimit - 1
+ if to > int64(number) {
+ to = int64(number)
}
- err := c.worker.F(c.ctx, c.client, c.db, n, iter_end, c.addressList)
+ err := c.worker.F(c.ctx, c.client, c.db, from, to, c.addressList)
if err != nil {
log.Error(fmt.Sprintf("%s: failed!", c.worker.Name), "err", err)
break

View File

@@ -6,7 +6,6 @@ import (
geth "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
@@ -20,7 +19,7 @@ import (
// Todo : read from config
var (
// the number of blocks fetch per round
- FETCH_LIMIT = int64(3000)
+ fetchLimit = int64(3000)
)
// FetchAndSave is a function type that fetches events from blockchain and saves them to database
@@ -36,11 +35,6 @@ type FetchEventWorker struct {
Name string
}
type msgHashWrapper struct {
msgHash common.Hash
txHash common.Hash
}
func GetLatestL1ProcessedHeight(db db.OrmFactory) (int64, error) {
crossHeight, err := db.GetLatestL1ProcessedHeight()
if err != nil {
@@ -70,15 +64,22 @@ func GetLatestL2ProcessedHeight(db db.OrmFactory) (int64, error) {
log.Error("failed to get L2 relayed message processed height", "err", err)
return 0, err
}
if crossHeight > relayedHeight {
return crossHeight, nil
} else {
return relayedHeight, nil
l2SentHeight, err := db.GetLatestSentMsgHeightOnL2()
if err != nil {
log.Error("failed to get L2 sent message processed height", "err", err)
return 0, err
}
maxHeight := crossHeight
if maxHeight < relayedHeight {
maxHeight = relayedHeight
}
if maxHeight < l2SentHeight {
maxHeight = l2SentHeight
}
return maxHeight, nil
}
func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
@@ -99,11 +100,19 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
log.Warn("Failed to get l1 event logs", "err", err)
return err
}
- depositL1CrossMsgs, msgHashes, relayedMsg, err := parseBackendL1EventLogs(logs)
+ depositL1CrossMsgs, msgHashes, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
if err != nil {
log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
for i := range depositL1CrossMsgs {
for _, msgHash := range msgHashes {
if depositL1CrossMsgs[i].Layer1Hash == msgHash.TxHash.Hex() {
depositL1CrossMsgs[i].MsgHash = msgHash.MsgHash.Hex()
break
}
}
}
dbTx, err := database.Beginx()
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
@@ -120,11 +129,6 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
dbTx.Rollback()
log.Crit("l1FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
}
err = updateL1CrossMsgMsgHash(ctx, dbTx, database, msgHashes)
if err != nil {
dbTx.Rollback()
log.Crit("l1FetchAndSaveEvents: Failed to update msgHash in L1 cross msg", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we can not insert into DB, something must be wrong; an on-call member needs to handle the database manually
@@ -157,11 +161,22 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
log.Warn("Failed to get l2 event logs", "err", err)
return err
}
- depositL2CrossMsgs, msgHashes, relayedMsg, err := parseBackendL2EventLogs(logs)
+ depositL2CrossMsgs, relayedMsg, L2SentMsgWrappers, err := utils.ParseBackendL2EventLogs(logs)
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
var l2SentMsgs []*orm.L2SentMsg
for i := range depositL2CrossMsgs {
for _, l2SentMsgWrapper := range L2SentMsgWrappers {
if depositL2CrossMsgs[i].Layer2Hash == l2SentMsgWrapper.TxHash.Hex() {
depositL2CrossMsgs[i].MsgHash = l2SentMsgWrapper.L2SentMsg.MsgHash
l2SentMsgWrapper.L2SentMsg.TxSender = depositL2CrossMsgs[i].Sender
l2SentMsgs = append(l2SentMsgs, l2SentMsgWrapper.L2SentMsg)
break
}
}
}
dbTx, err := database.Beginx()
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
@@ -178,11 +193,15 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
}
- err = updateL2CrossMsgMsgHash(ctx, dbTx, database, msgHashes)
- if err != nil {
- dbTx.Rollback()
- log.Crit("l2FetchAndSaveEvents: Failed to update msgHash in L2 cross msg", "err", err)
+ if len(l2SentMsgs) > 0 {
+ err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs)
+ if err != nil {
+ dbTx.Rollback()
+ log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
+ }
}
err = dbTx.Commit()
if err != nil {
// if we can not insert into DB, something must be wrong; an on-call member needs to handle the database manually
@@ -194,234 +213,61 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
return nil
}
func parseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []msgHashWrapper, []*orm.RelayedMsg, error) {
// Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up
var l1CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg
var msgHashes []msgHashWrapper
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1DepositETHSig:
event := backendabi.DepositETH{}
err := utils.UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
if err != nil {
log.Warn("Failed to unpack DepositETH event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer1Hash: vlog.TxHash.Hex(),
})
case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC20 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ERC20),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
})
case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := utils.UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC721 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenID: event.TokenID.Uint64(),
})
case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenID: event.TokenID.Uint64(),
Amount: event.Amount.String(),
})
case backendabi.L1SentMessageEventSignature:
event := backendabi.L1SentMessageEvent{}
err := utils.UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
msgHash := utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
msgHashes = append(msgHashes, msgHashWrapper{
msgHash: msgHash,
txHash: vlog.TxHash})
case backendabi.L1RelayedMessageEventSignature:
event := backendabi.L1RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
Height: vlog.BlockNumber,
Layer1Hash: vlog.TxHash.Hex(),
})
}
func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, scrollChainAddr common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{scrollChainAddr},
Topics: make([][]common.Hash, 1),
}
return l1CrossMsg, msgHashes, relayedMsgs, nil
query.Topics[0] = make([]common.Hash, 1)
query.Topics[0][0] = backendabi.L1CommitBatchEventSignature
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get batch commit event logs", "err", err)
return err
}
rollupBatches, err := utils.ParseBatchInfoFromScrollChain(ctx, client, logs)
if err != nil {
log.Error("FetchAndSaveBatchIndex: Failed to parse batch commit msg event logs", "err", err)
return err
}
dbTx, err := database.Beginx()
if err != nil {
log.Error("FetchAndSaveBatchIndex: Failed to begin db transaction", "err", err)
return err
}
err = database.BatchInsertRollupBatchDBTx(dbTx, rollupBatches)
if err != nil {
dbTx.Rollback()
log.Crit("FetchAndSaveBatchIndex: Failed to insert batch commit msg event logs", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we can not insert into DB, something must be wrong; an on-call member needs to handle the database manually
dbTx.Rollback()
log.Error("FetchAndSaveBatchIndex: Failed to commit db transaction", "err", err)
return err
}
return nil
}
func parseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []msgHashWrapper, []*orm.RelayedMsg, error) {
// Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up
var l2CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg
var msgHashes []msgHashWrapper
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2WithdrawETHSig:
event := backendabi.DepositETH{}
err := utils.UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawETH event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer2Hash: vlog.TxHash.Hex(),
})
case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ERC20),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
})
case backendabi.L2WithdrawERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := utils.UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenID: event.TokenID.Uint64(),
})
case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenID: event.TokenID.Uint64(),
Amount: event.Amount.String(),
})
case backendabi.L2SentMessageEventSignature:
event := backendabi.L2SentMessageEvent{}
err := utils.UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
msgHash := utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
msgHashes = append(msgHashes, msgHashWrapper{
msgHash: msgHash,
txHash: vlog.TxHash})
case backendabi.L2RelayedMessageEventSignature:
event := backendabi.L2RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
Height: vlog.BlockNumber,
Layer2Hash: vlog.TxHash.Hex(),
})
}
}
return l2CrossMsg, msgHashes, relayedMsgs, nil
}
- func updateL1CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []msgHashWrapper) error {
+ func updateL1CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
for _, msgHash := range msgHashes {
- err := database.UpdateL1CrossMsgHashDBTx(ctx, dbTx, msgHash.txHash, msgHash.msgHash)
+ err := database.UpdateL1CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
if err != nil {
- log.Error("updateL1CrossMsgMsgHash: can not update layer1 cross msg MsgHash", "layer1 hash", msgHash.txHash, "err", err)
+ log.Error("updateL1CrossMsgMsgHash: can not update layer1 cross msg MsgHash", "layer1 hash", msgHash.TxHash, "err", err)
continue
}
}
return nil
}
- func updateL2CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []msgHashWrapper) error {
+ func updateL2CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
for _, msgHash := range msgHashes {
- err := database.UpdateL2CrossMsgHashDBTx(ctx, dbTx, msgHash.txHash, msgHash.msgHash)
+ err := database.UpdateL2CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
if err != nil {
- log.Error("updateL2CrossMsgMsgHash: can not update layer2 cross msg MsgHash", "layer2 hash", msgHash.txHash, "err", err)
+ log.Error("updateL2CrossMsgMsgHash: can not update layer2 cross msg MsgHash", "layer2 hash", msgHash.TxHash, "err", err)
continue
}
}
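For reference, a FetchEventWorker pairs a latest-processed-height getter with a fetch-and-save function, as seen from the c.worker.G and c.worker.F call sites in crossMsgFetcher. A hypothetical wiring sketch; the F and G field types are inferred from those call sites and from the signatures of GetLatestL1ProcessedHeight and L1FetchAndSaveEvents above, not shown in this diff:

// Hypothetical in-package wiring; only Name appears explicitly in this diff.
var l1Worker = &FetchEventWorker{
	Name: "L1 cross message worker",  // used as the log prefix in crossMsgFetcher
	G:    GetLatestL1ProcessedHeight, // assumed func(db.OrmFactory) (int64, error)
	F:    L1FetchAndSaveEvents,       // assumed func(ctx, client, db, from, to, addrList) error
}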

View File

@@ -0,0 +1,241 @@
package message_proof
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"bridge-history-api/db"
"bridge-history-api/db/orm"
)
type MsgProofUpdater struct {
ctx context.Context
db db.OrmFactory
withdrawTrie *WithdrawTrie
}
func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock uint64, db db.OrmFactory) *MsgProofUpdater {
return &MsgProofUpdater{
ctx: ctx,
db: db,
withdrawTrie: NewWithdrawTrie(),
}
}
func (m *MsgProofUpdater) Start() {
log.Info("MsgProofUpdater Start")
m.initialize(m.ctx)
go func() {
tick := time.NewTicker(10 * time.Second)
for {
select {
case <-m.ctx.Done():
tick.Stop()
return
case <-tick.C:
latestBatch, err := m.db.GetLatestRollupBatch()
if err != nil {
log.Warn("MsgProofUpdater: Can not get latest RollupBatch: ", "err", err)
continue
}
if latestBatch == nil {
continue
}
latestBatchIndexWithProof, err := m.db.GetLatestL2SentMsgBatchIndex()
if err != nil {
log.Error("MsgProofUpdater: Can not get latest L2SentMsgBatchIndex: ", "err", err)
continue
}
var start uint64
if latestBatchIndexWithProof < 0 {
start = 1
} else {
start = uint64(latestBatchIndexWithProof) + 1
}
for i := start; i <= latestBatch.BatchIndex; i++ {
batch, err := m.db.GetRollupBatchByIndex(i)
if err != nil {
log.Error("MsgProofUpdater: Can not get RollupBatch: ", "err", err, "index", i)
break
}
// get all l2 messages in this batch
msgs, proofs, err := m.appendL2Messages(batch.StartBlockNumber, batch.EndBlockNumber)
if err != nil {
log.Error("MsgProofUpdater: can not append l2messages", "startBlockNumber", batch.StartBlockNumber, "endBlockNumber", batch.EndBlockNumber, "err", err)
break
}
err = m.updateMsgProof(msgs, proofs, batch.BatchIndex)
if err != nil {
log.Error("MsgProofUpdater: can not update msg proof", "err", err)
break
}
}
}
}
}()
}
func (m *MsgProofUpdater) Stop() {
log.Info("MsgProofUpdater Stop")
}
func (m *MsgProofUpdater) initialize(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
default:
err := m.initializeWithdrawTrie()
if err != nil {
log.Error("can not initialize withdraw trie", "err", err)
// give it some time to retry
time.Sleep(10 * time.Second)
continue
}
return
}
}
}
func (m *MsgProofUpdater) initializeWithdrawTrie() error {
var batch *orm.RollupBatch
firstMsg, err := m.db.GetL2SentMessageByNonce(0)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return fmt.Errorf("failed to get first l2 message: %v", err)
}
// no l2 message
// TODO: check whether we really don't have an l2 sent message with nonce 0
if firstMsg == nil {
log.Info("No first l2sentmsg in db")
return nil
}
// if no batch, return and wait for next try round
batch, err = m.db.GetLatestRollupBatch()
if err != nil {
return fmt.Errorf("failed to get latest batch: %v", err)
}
if batch == nil {
return fmt.Errorf("no batch found")
}
var batches []*orm.RollupBatch
batchIndex := batch.BatchIndex
for {
var msg *orm.L2SentMsg
msg, err = m.db.GetLatestL2SentMsgLEHeight(batch.EndBlockNumber)
if err != nil {
log.Warn("failed to get l2 sent message less than height", "endBlocknum", batch.EndBlockNumber, "err", err)
}
if msg != nil && msg.MsgProof != "" {
log.Info("Found latest l2 sent msg with proof: ", "msg_proof", msg.MsgProof, "height", msg.Height, "msg_hash", msg.MsgHash)
// initialize withdrawTrie
proofBytes := common.Hex2Bytes(msg.MsgProof)
m.withdrawTrie.Initialize(msg.Nonce, common.HexToHash(msg.MsgHash), proofBytes)
break
}
// append unprocessed batch
batches = append(batches, batch)
if batchIndex == 1 {
// otherwise batchIndex would underflow,
// and batchIndex 0 is not in DB
// TODO: check whether a batch with index 0 could exist in the future
break
}
// iterate for next batch
batchIndex--
batch, err = m.db.GetRollupBatchByIndex(batchIndex)
if err != nil {
return fmt.Errorf("failed to get block batch %v: %v", batchIndex, err)
}
}
log.Info("Build withdraw trie with pending messages")
for i := len(batches) - 1; i >= 0; i-- {
b := batches[i]
msgs, proofs, err := m.appendL2Messages(b.StartBlockNumber, b.EndBlockNumber)
if err != nil {
return err
}
err = m.updateMsgProof(msgs, proofs, b.BatchIndex)
if err != nil {
return err
}
}
log.Info("Build withdraw trie finished")
return nil
}
func (m *MsgProofUpdater) updateMsgProof(msgs []*orm.L2SentMsg, proofs [][]byte, batchIndex uint64) error {
if len(msgs) == 0 {
return nil
}
// this should not happen, but double-check
if len(msgs) != len(proofs) {
return fmt.Errorf("illegal state: len(msgs) != len(proofs)")
}
dbTx, err := m.db.Beginx()
if err != nil {
return err
}
for i, msg := range msgs {
proofHex := common.Bytes2Hex(proofs[i])
log.Debug("updateMsgProof", "msgHash", msg.MsgHash, "batchIndex", batchIndex, "proof", proofHex)
if dbTxErr := m.db.UpdateL2MessageProofInDBTx(m.ctx, dbTx, msg.MsgHash, proofHex, batchIndex); dbTxErr != nil {
if err := dbTx.Rollback(); err != nil {
log.Error("dbTx.Rollback()", "err", err)
}
return dbTxErr
}
}
if dbTxErr := dbTx.Commit(); dbTxErr != nil {
if err := dbTx.Rollback(); err != nil {
log.Error("dbTx.Rollback()", "err", err)
}
return dbTxErr
}
return nil
}
// appendL2Messages will append all messages between firstBlock and lastBlock (both inclusive) to withdrawTrie and compute corresponding merkle proof of each message.
func (m *MsgProofUpdater) appendL2Messages(firstBlock, lastBlock uint64) ([]*orm.L2SentMsg, [][]byte, error) {
var msgProofs [][]byte
messages, err := m.db.GetL2SentMsgMsgHashByHeightRange(firstBlock, lastBlock)
if err != nil {
log.Error("GetL2SentMsgMsgHashByHeightRange failed", "error", err, "firstBlock", firstBlock, "lastBlock", lastBlock)
return messages, msgProofs, err
}
if len(messages) == 0 {
return messages, msgProofs, nil
}
// double check whether nonce is matched
if messages[0].Nonce != m.withdrawTrie.NextMessageNonce {
log.Error("L2 message nonce mismatch", "expected", m.withdrawTrie.NextMessageNonce, "found", messages[0].Nonce)
return messages, msgProofs, fmt.Errorf("l2 message nonce mismatch, expected: %v, found: %v", m.withdrawTrie.NextMessageNonce, messages[0].Nonce)
}
var hashes []common.Hash
for _, msg := range messages {
hashes = append(hashes, common.HexToHash(msg.MsgHash))
}
msgProofs = m.withdrawTrie.AppendMessages(hashes)
return messages, msgProofs, nil
}

View File

@@ -1,4 +1,4 @@
- package cross_msg
+ package message_proof
import (
"github.com/ethereum/go-ethereum/common"
@@ -42,7 +42,6 @@ func NewWithdrawTrie() *WithdrawTrie {
func (w *WithdrawTrie) Initialize(currentMessageNonce uint64, msgHash common.Hash, proofBytes []byte) {
proof := DecodeBytesToMerkleProof(proofBytes)
branches := RecoverBranchFromProof(proof, currentMessageNonce, msgHash)
w.height = len(proof)
w.branches = branches
w.NextMessageNonce = currentMessageNonce + 1
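As the tests later in this diff exercise, the trie is either rebuilt from a stored proof via Initialize or grown with AppendMessages, which appears to return one encoded proof per appended hash. A small usage sketch under those assumptions (exampleAppend is illustrative, not part of the change):

package message_proof

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// exampleAppend shows the append-then-prove flow used by MsgProofUpdater:
// proofs[i] is the encoded merkle proof for hashes[i] against the trie's
// current message root.
func exampleAppend() ([][]byte, common.Hash) {
	trie := NewWithdrawTrie()
	hashes := []common.Hash{
		common.BigToHash(big.NewInt(1)),
		common.BigToHash(big.NewInt(2)),
	}
	proofs := trie.AppendMessages(hashes)
	return proofs, trie.MessageRoot()
}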

View File

@@ -1,4 +1,4 @@
- package cross_msg_test
+ package message_proof
import (
"math/big"
@@ -7,7 +7,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/assert"
"bridge-history-api/cross_msg"
"bridge-history-api/utils"
)
@@ -19,29 +18,29 @@ func TestUpdateBranchWithNewMessage(t *testing.T) {
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
}
- cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
+ UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
if branches[0] != common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001") {
t.Fatalf("Invalid root, want %s, got %s", "0x0000000000000000000000000000000000000000000000000000000000000001", branches[0].Hex())
}
- cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
+ UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
if branches[1] != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
t.Fatalf("Invalid root, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", branches[1].Hex())
}
- cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
+ UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
if branches[2] != common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c") {
t.Fatalf("Invalid root, want %s, got %s", "0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c", branches[2].Hex())
}
- cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
+ UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
if branches[2] != common.HexToHash("0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36") {
t.Fatalf("Invalid root, want %s, got %s", "0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36", branches[2].Hex())
}
}
func TestDecodeEncodeMerkleProof(t *testing.T) {
- proof := cross_msg.DecodeBytesToMerkleProof(common.Hex2Bytes("2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904"))
+ proof := DecodeBytesToMerkleProof(common.Hex2Bytes("2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904"))
if len(proof) != 4 {
t.Fatalf("proof length mismatch, want %d, got %d", 4, len(proof))
}
@@ -58,7 +57,7 @@ func TestDecodeEncodeMerkleProof(t *testing.T) {
t.Fatalf("proof[3] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904", proof[0].Hex())
}
- bytes := cross_msg.EncodeMerkleProofToBytes(proof)
+ bytes := EncodeMerkleProofToBytes(proof)
if common.Bytes2Hex(bytes) != "2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904" {
t.Fatalf("wrong encoded bytes")
}
@@ -72,32 +71,32 @@ func TestRecoverBranchFromProof(t *testing.T) {
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
}
- proof := cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
- tmpBranches := cross_msg.RecoverBranchFromProof(proof, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
+ proof := UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
+ tmpBranches := RecoverBranchFromProof(proof, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
- proof = cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
- tmpBranches = cross_msg.RecoverBranchFromProof(proof, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
+ proof = UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
+ tmpBranches = RecoverBranchFromProof(proof, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
- proof = cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
- tmpBranches = cross_msg.RecoverBranchFromProof(proof, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
+ proof = UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
+ tmpBranches = RecoverBranchFromProof(proof, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
- proof = cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
- tmpBranches = cross_msg.RecoverBranchFromProof(proof, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
+ proof = UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
+ tmpBranches = RecoverBranchFromProof(proof, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
@@ -107,7 +106,7 @@ func TestRecoverBranchFromProof(t *testing.T) {
func TestWithdrawTrieOneByOne(t *testing.T) {
for initial := 0; initial < 128; initial++ {
- withdrawTrie := cross_msg.NewWithdrawTrie()
+ withdrawTrie := NewWithdrawTrie()
var hashes []common.Hash
for i := 0; i < initial; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
@@ -126,7 +125,7 @@ func TestWithdrawTrieOneByOne(t *testing.T) {
})
assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(i+1))
assert.Equal(t, expectedRoot.String(), withdrawTrie.MessageRoot().String())
proof := cross_msg.DecodeBytesToMerkleProof(proofBytes[0])
proof := DecodeBytesToMerkleProof(proofBytes[0])
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
assert.Equal(t, expectedRoot.String(), verifiedRoot.String())
}
@@ -153,7 +152,7 @@ func TestWithdrawTrieMultiple(t *testing.T) {
}
for finish := initial; finish < 100; finish++ {
withdrawTrie := cross_msg.NewWithdrawTrie()
withdrawTrie := NewWithdrawTrie()
withdrawTrie.AppendMessages(hashes)
var newHashes []common.Hash
@@ -167,7 +166,7 @@ func TestWithdrawTrieMultiple(t *testing.T) {
for i := initial; i <= finish; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
proof := cross_msg.DecodeBytesToMerkleProof(proofBytes[i-initial])
proof := DecodeBytesToMerkleProof(proofBytes[i-initial])
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
assert.Equal(t, expectedRoots[finish].String(), verifiedRoot.String())
}
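Note: verifyMerkleProof, used throughout these tests, is not part of this diff. Below is a minimal sketch of such a verifier, assuming the same utils.Keccak2 pairing hash used above; the package layout and import path are assumptions, not code from this PR.
package cross_msg
import (
	"github.com/ethereum/go-ethereum/common"
	"bridge-history-api/utils"
)
// verifyMerkleProof folds the sibling hashes in proof into a root,
// choosing left/right order from the bits of the leaf index.
func verifyMerkleProof(index uint64, leaf common.Hash, proof []common.Hash) common.Hash {
	root := leaf
	for _, sibling := range proof {
		if index%2 == 0 {
			root = utils.Keccak2(root, sibling) // leaf is the left child
		} else {
			root = utils.Keccak2(sibling, root) // leaf is the right child
		}
		index /= 2
	}
	return root
}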

View File

@@ -94,6 +94,11 @@ func L2ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) e
dbTx.Rollback()
log.Crit("delete l2 relayed hash from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL2SentMsgAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
log.Crit("delete l2 sent msg from height", "height", reorgHeight, "err", err)
}
err = dbTx.Commit()
if err != nil {
dbTx.Rollback()

View File

@@ -3,7 +3,7 @@
create table cross_message
(
id BIGSERIAL PRIMARY KEY,
msg_hash VARCHAR NOT NULL DEFAULT '',
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
@@ -12,30 +12,30 @@ create table cross_message
layer2_hash VARCHAR NOT NULL DEFAULT '',
layer1_token VARCHAR NOT NULL DEFAULT '',
layer2_token VARCHAR NOT NULL DEFAULT '',
token_id BIGINT NOT NULL DEFAULT 0,
asset SMALLINT NOT NULL,
msg_type SMALLINT NOT NULL,
is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
token_ids TEXT NOT NULL DEFAULT '',
token_amounts TEXT NOT NULL DEFAULT '',
block_timestamp TIMESTAMP(0) DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
create unique index uk_msg_hash_msg_type
on cross_message (msg_hash, msg_type) where deleted_at IS NULL;
comment
on column cross_message.asset is 'ETH, ERC20, ERC721, ERC1155';
comment
on column cross_message.msg_type is 'unknown, l1msg, l2msg';
comment
on column cross_message.is_deleted is 'NotDeleted false, Deleted true';
CREATE INDEX idx_l1_msg_index ON cross_message (layer1_hash, deleted_at);
CREATE INDEX valid_l1_msg_index ON cross_message (layer1_hash, is_deleted);
CREATE INDEX idx_l2_msg_index ON cross_message (layer2_hash, deleted_at);
CREATE INDEX valid_l2_msg_index ON cross_message (layer2_hash, is_deleted);
CREATE INDEX valid_height_index ON cross_message (height, msg_type, is_deleted);
CREATE INDEX idx_height_msg_type_index ON cross_message (height, msg_type, deleted_at);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
@@ -49,22 +49,6 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
ON cross_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
CREATE OR REPLACE FUNCTION deleted_at_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
UPDATE cross_message SET deleted_at = NOW() WHERE id = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER deleted_at_trigger
AFTER UPDATE ON cross_message
FOR EACH ROW
EXECUTE FUNCTION deleted_at_trigger();
-- +goose StatementEnd
-- +goose Down
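Context for the schema change above: dropping the is_deleted flag (and its deleted_at_trigger) in favor of a deleted_at timestamp plus partial unique indexes means soft-deleted rows stop participating in uniqueness checks, so a message wiped by a reorg can be re-inserted. An illustrative SQL sketch (hashes and values are made up):
-- Soft-delete a message that was picked up before a reorg.
UPDATE cross_message
SET deleted_at = current_timestamp
WHERE msg_hash = '0xabc' AND msg_type = 2;
-- uk_msg_hash_msg_type only covers rows WHERE deleted_at IS NULL,
-- so the same (msg_hash, msg_type) pair can now be inserted again.
INSERT INTO cross_message (msg_hash, height, sender, target, amount, asset, msg_type)
VALUES ('0xabc', 1200, '0xsender', '0xtarget', '0', 0, 2);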

View File

@@ -7,17 +7,17 @@ create table relayed_msg
height BIGINT NOT NULL,
layer1_hash VARCHAR NOT NULL DEFAULT '',
layer2_hash VARCHAR NOT NULL DEFAULT '',
is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
comment
on column relayed_msg.is_deleted is 'NotDeleted, Deleted';
create unique index uk_msg_hash_l1_hash_l2_hash
on relayed_msg (msg_hash, layer1_hash, layer2_hash) where deleted_at IS NULL;
create unique index relayed_msg_hash_uindex
on relayed_msg (msg_hash);
CREATE INDEX idx_l1_msg_relayed_msg ON relayed_msg (layer1_hash, deleted_at);
CREATE INDEX idx_l2_msg_relayed_msg ON relayed_msg (layer2_hash, deleted_at);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
@@ -31,22 +31,6 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
ON relayed_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
CREATE OR REPLACE FUNCTION deleted_at_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
UPDATE relayed_msg SET deleted_at = NOW() WHERE id = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER deleted_at_trigger
AFTER UPDATE ON relayed_msg
FOR EACH ROW
EXECUTE FUNCTION deleted_at_trigger();
-- +goose StatementEnd
-- +goose Down

View File

@@ -0,0 +1,44 @@
-- +goose Up
-- +goose StatementBegin
create table l2_sent_msg
(
id BIGSERIAL PRIMARY KEY,
tx_sender VARCHAR NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
nonce BIGINT NOT NULL,
batch_index BIGINT NOT NULL DEFAULT 0,
msg_proof TEXT NOT NULL DEFAULT '',
msg_data TEXT NOT NULL DEFAULT '',
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
create unique index uk_msg_hash
on l2_sent_msg (msg_hash) where deleted_at IS NULL;
create unique index uk_nonce
on l2_sent_msg (nonce) where deleted_at IS NULL;
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l2_sent_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l2_sent_msg;
-- +goose StatementEnd
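For illustration, the update_timestamp trigger above keeps updated_at current on every modification, e.g. when a message proof is back-filled later (values are made up):
INSERT INTO l2_sent_msg (tx_sender, sender, target, value, msg_hash, height, nonce)
VALUES ('0xtx', '0xsender', '0xtarget', '0', '0xaaa', 100, 0);
-- The BEFORE UPDATE trigger rewrites updated_at on every change:
UPDATE l2_sent_msg SET msg_proof = 'deadbeef', batch_index = 1 WHERE nonce = 0;
-- created_at keeps its original value; updated_at is now current.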

View File

@@ -0,0 +1,39 @@
-- +goose Up
-- +goose StatementBegin
create table rollup_batch
(
id BIGSERIAL PRIMARY KEY,
batch_index BIGINT NOT NULL,
commit_height BIGINT NOT NULL,
start_block_number BIGINT NOT NULL,
end_block_number BIGINT NOT NULL,
batch_hash VARCHAR NOT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
create unique index uk_batch_index
on rollup_batch (batch_index) where deleted_at IS NULL;
create unique index uk_batch_hash
on rollup_batch (batch_hash) where deleted_at IS NULL;
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON rollup_batch FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists rollup_batch;
-- +goose StatementEnd

View File

@@ -0,0 +1,70 @@
package orm
import (
"database/sql"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)
type rollupBatchOrm struct {
db *sqlx.DB
}
type RollupBatch struct {
ID uint64 `json:"id" db:"id"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
BatchHash string `json:"batch_hash" db:"batch_hash"`
CommitHeight uint64 `json:"commit_height" db:"commit_height"`
StartBlockNumber uint64 `json:"start_block_number" db:"start_block_number"`
EndBlockNumber uint64 `json:"end_block_number" db:"end_block_number"`
}
// NewRollupBatchOrm creates a RollupBatchOrm instance
func NewRollupBatchOrm(db *sqlx.DB) RollupBatchOrm {
return &rollupBatchOrm{db: db}
}
func (b *rollupBatchOrm) BatchInsertRollupBatchDBTx(dbTx *sqlx.Tx, batches []*RollupBatch) error {
if len(batches) == 0 {
return nil
}
var err error
batchMaps := make([]map[string]interface{}, len(batches))
for i, batch := range batches {
batchMaps[i] = map[string]interface{}{
"commit_height": batch.CommitHeight,
"batch_index": batch.BatchIndex,
"batch_hash": batch.BatchHash,
"start_block_number": batch.StartBlockNumber,
"end_block_number": batch.EndBlockNumber,
}
}
_, err = dbTx.NamedExec(`insert into rollup_batch(commit_height, batch_index, batch_hash, start_block_number, end_block_number) values(:commit_height, :batch_index, :batch_hash, :start_block_number, :end_block_number);`, batchMaps)
if err != nil {
log.Error("BatchInsertRollupBatchDBTx: failed to insert batch event msgs", "err", err)
return err
}
return nil
}
func (b *rollupBatchOrm) GetLatestRollupBatch() (*RollupBatch, error) {
result := &RollupBatch{}
row := b.db.QueryRowx(`SELECT id, batch_index, commit_height, batch_hash, start_block_number, end_block_number FROM rollup_batch ORDER BY batch_index DESC LIMIT 1;`)
if err := row.StructScan(result); err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err
}
return result, nil
}
func (b *rollupBatchOrm) GetRollupBatchByIndex(index uint64) (*RollupBatch, error) {
result := &RollupBatch{}
row := b.db.QueryRowx(`SELECT id, batch_index, batch_hash, commit_height, start_block_number, end_block_number FROM rollup_batch WHERE batch_index = $1;`, index)
if err := row.StructScan(result); err != nil {
return nil, err
}
return result, nil
}

View File

@@ -40,31 +40,24 @@ const (
// CrossMsg represents a cross message from layer 1 to layer 2
type CrossMsg struct {
ID uint64 `json:"id" db:"id"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Amount string `json:"amount" db:"amount"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Layer1Token string `json:"layer1_token" db:"layer1_token"`
Layer2Token string `json:"layer2_token" db:"layer2_token"`
TokenID uint64 `json:"token_id" db:"token_id"`
Asset int `json:"asset" db:"asset"`
MsgType int `json:"msg_type" db:"msg_type"`
IsDeleted bool `json:"is_deleted" db:"is_deleted"`
Timestamp *time.Time `json:"timestamp" db:"block_timestamp"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
}
type RelayedMsg struct {
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
ID uint64 `json:"id" db:"id"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Amount string `json:"amount" db:"amount"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Layer1Token string `json:"layer1_token" db:"layer1_token"`
Layer2Token string `json:"layer2_token" db:"layer2_token"`
TokenIDs string `json:"token_ids" db:"token_ids"`
TokenAmounts string `json:"token_amounts" db:"token_amounts"`
Asset int `json:"asset" db:"asset"`
MsgType int `json:"msg_type" db:"msg_type"`
Timestamp *time.Time `json:"timestamp" db:"block_timestamp"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
}
// L1CrossMsgOrm provides operations on l1_cross_message table
@@ -72,27 +65,27 @@ type L1CrossMsgOrm interface {
GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, error)
GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error)
BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error
// UpdateL1CrossMsgHash invoked when SentMessage event is received
// UpdateL1CrossMsgHashDBTx invoked when SentMessage event is received
UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l1Hash, msgHash common.Hash) error
UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error
GetLatestL1ProcessedHeight() (int64, error)
DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
UpdateL1Blocktimestamp(height uint64, timestamp time.Time) error
GetL1EarliestNoBlocktimestampHeight() (uint64, error)
UpdateL1BlockTimestamp(height uint64, timestamp time.Time) error
GetL1EarliestNoBlockTimestampHeight() (uint64, error)
}
// L2CrossMsgOrm provides operations on l2_cross_message table
// L2CrossMsgOrm provides operations on cross_message table
type L2CrossMsgOrm interface {
GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error)
GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error)
BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error
// UpdateL2CrossMsgHash invoked when SentMessage event is received
// UpdateL2CrossMsgHashDBTx invoked when SentMessage event is received
UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error
UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error
GetLatestL2ProcessedHeight() (int64, error)
DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error
UpdateL2Blocktimestamp(height uint64, timestamp time.Time) error
GetL2EarliestNoBlocktimestampHeight() (uint64, error)
UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error
GetL2EarliestNoBlockTimestampHeight() (uint64, error)
}
type RelayedMsgOrm interface {
@@ -103,3 +96,21 @@ type RelayedMsgOrm interface {
DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
}
type L2SentMsgOrm interface {
BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2SentMsg) error
GetL2SentMsgByHash(msgHash string) (*L2SentMsg, error)
GetLatestSentMsgHeightOnL2() (int64, error)
GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error)
GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error)
GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error)
UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error
GetLatestL2SentMsgBatchIndex() (int64, error)
DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
}
type RollupBatchOrm interface {
GetLatestRollupBatch() (*RollupBatch, error)
GetRollupBatchByIndex(index uint64) (*RollupBatch, error)
BatchInsertRollupBatchDBTx(dbTx *sqlx.Tx, messages []*RollupBatch) error
}

View File

@@ -22,7 +22,7 @@ func NewL1CrossMsgOrm(db *sqlx.DB) L1CrossMsgOrm {
func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, error) {
result := &CrossMsg{}
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer1_hash = $1 AND msg_type = $2 AND NOT is_deleted;`, l1Hash.String(), Layer1Msg)
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer1_hash = $1 AND msg_type = $2 AND deleted_at IS NULL;`, l1Hash.String(), Layer1Msg)
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
@@ -36,7 +36,7 @@ func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, erro
// Warning: returns an empty slice if no data is found
func (l *l1CrossMsgOrm) GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = 1 AND NOT is_deleted;`, sender.String(), Layer1Msg)
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND deleted_at IS NULL;`, sender.String(), Layer1Msg)
for rows.Next() {
msg := &CrossMsg{}
@@ -65,25 +65,26 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
"target": msg.Target,
"amount": msg.Amount,
"asset": msg.Asset,
"msg_hash": msg.MsgHash,
"layer1_hash": msg.Layer1Hash,
"layer1_token": msg.Layer1Token,
"layer2_token": msg.Layer2Token,
"token_id": msg.TokenID,
"token_ids": msg.TokenIDs,
"msg_type": Layer1Msg,
}
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_id, amount, msg_type) select :height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type WHERE NOT EXISTS (SELECT 1 FROM cross_message WHERE layer1_hash = :layer1_hash AND NOT is_deleted);`, messageMaps[i])
if err != nil {
log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "l1hashes", msg.Layer1Hash, "heights", msg.Height, "err", err)
break
}
}
return err
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_ids, amount, msg_type, msg_hash) values(:height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type, :msg_hash);`, messageMaps)
if err != nil {
log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "err", err)
return err
}
return nil
}
// UpdateL1CrossMsgHashDBTx updates the l1 cross msg hash in db; no need to check msg_type since layer1_hash won't be empty if it's a layer1 msg
func (l *l1CrossMsgOrm) UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l1Hash, msgHash common.Hash) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer1_hash = ? AND NOT is_deleted;"), msgHash.String(), l1Hash.String()); err != nil {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l1Hash.String()); err != nil {
return err
}
return nil
@@ -91,7 +92,7 @@ func (l *l1CrossMsgOrm) UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx
}
func (l *l1CrossMsgOrm) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.l1_cross_message set msg_hash = ? where layer1_hash = ? AND NOT is_deleted;"), msgHash.String(), l1Hash.String()); err != nil {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.l1_cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l1Hash.String()); err != nil {
return err
}
return nil
@@ -99,7 +100,7 @@ func (l *l1CrossMsgOrm) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHas
}
func (l *l1CrossMsgOrm) GetLatestL1ProcessedHeight() (int64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND NOT is_deleted ORDER BY id DESC LIMIT 1;`, Layer1Msg)
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND deleted_at IS NULL ORDER BY id DESC LIMIT 1;`, Layer1Msg)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
@@ -114,21 +115,21 @@ func (l *l1CrossMsgOrm) GetLatestL1ProcessedHeight() (int64, error) {
}
func (l *l1CrossMsgOrm) DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
if _, err := l.db.Exec(`UPDATE cross_message SET is_deleted = true WHERE height > $1 AND msg_type = $2;`, height, Layer1Msg); err != nil {
if _, err := l.db.Exec(`UPDATE cross_message SET deleted_at = current_timestamp WHERE height > $1 AND msg_type = $2;`, height, Layer1Msg); err != nil {
return err
}
return nil
}
func (l *l1CrossMsgOrm) UpdateL1Blocktimestamp(height uint64, timestamp time.Time) error {
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND NOT is_deleted`, timestamp, height, Layer1Msg); err != nil {
func (l *l1CrossMsgOrm) UpdateL1BlockTimestamp(height uint64, timestamp time.Time) error {
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND deleted_at IS NULL`, timestamp, height, Layer1Msg); err != nil {
return err
}
return nil
}
func (l *l1CrossMsgOrm) GetL1EarliestNoBlocktimestampHeight() (uint64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND NOT is_deleted ORDER BY height ASC LIMIT 1;`, Layer1Msg)
func (l *l1CrossMsgOrm) GetL1EarliestNoBlockTimestampHeight() (uint64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND deleted_at IS NULL ORDER BY height ASC LIMIT 1;`, Layer1Msg)
var result uint64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows {

View File

@@ -22,7 +22,7 @@ func NewL2CrossMsgOrm(db *sqlx.DB) L2CrossMsgOrm {
func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error) {
result := &CrossMsg{}
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND deleted_at IS NULL;`, l2Hash.String())
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
@@ -32,11 +32,11 @@ func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, erro
return result, nil
}
// GetL2CrossMsgsByAddress returns all layer2 cross messages under given address
// GetL2CrossMsgByAddress returns all layer2 cross messages under given address
// Warning: returns an empty slice if no data is found
func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND NOT is_deleted;`, sender.String(), Layer2Msg)
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND deleted_at IS NULL;`, sender.String(), Layer2Msg)
for rows.Next() {
msg := &CrossMsg{}
@@ -55,7 +55,7 @@ func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossM
}
func (l *l2CrossMsgOrm) DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE cross_message SET is_deleted = true where height > $1 AND msg_type = $2 ;`, height, Layer2Msg)
_, err := dbTx.Exec(`UPDATE cross_message SET deleted_at = current_timestamp where height > $1 AND msg_type = $2 ;`, height, Layer2Msg)
if err != nil {
log.Error("DeleteL1CrossMsgAfterHeightDBTx: failed to delete", "height", height, "err", err)
return err
@@ -71,45 +71,44 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
var err error
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"height": msg.Height,
"sender": msg.Sender,
"target": msg.Target,
"asset": msg.Asset,
"msg_hash": msg.MsgHash,
"layer2_hash": msg.Layer2Hash,
"layer1_token": msg.Layer1Token,
"layer2_token": msg.Layer2Token,
"token_id": msg.TokenID,
"token_ids": msg.TokenIDs,
"amount": msg.Amount,
"msg_type": Layer2Msg,
}
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_id, amount, msg_type) select :height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type WHERE NOT EXISTS (SELECT 1 FROM cross_message WHERE layer2_hash = :layer2_hash AND NOT is_deleted);`, messageMaps[i])
if err != nil {
log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "layer2hash", msg.Layer2Hash, "heights", msg.Height, "err", err)
break
}
}
return err
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_ids, amount, msg_type, msg_hash) values(:height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type, :msg_hash);`, messageMaps)
if err != nil {
log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "err", err)
return err
}
return nil
}
func (l *l2CrossMsgOrm) UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer2_hash = ? AND NOT is_deleted;"), msgHash.String(), l2Hash.String()); err != nil {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update cross_message set msg_hash = ? where layer2_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l2Hash.String()); err != nil {
return err
}
return nil
}
func (l *l2CrossMsgOrm) UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer2_hash = ? AND NOT is_deleted;"), msgHash.String(), l2Hash.String()); err != nil {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update cross_message set msg_hash = ? where layer2_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l2Hash.String()); err != nil {
return err
}
return nil
}
func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND NOT is_deleted ORDER BY id DESC LIMIT 1;`, Layer2Msg)
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND deleted_at IS NULL ORDER BY id DESC LIMIT 1;`, Layer2Msg)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
@@ -123,15 +122,15 @@ func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
return 0, nil
}
func (l *l2CrossMsgOrm) UpdateL2Blocktimestamp(height uint64, timestamp time.Time) error {
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND NOT is_deleted`, timestamp, height, Layer2Msg); err != nil {
func (l *l2CrossMsgOrm) UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error {
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND deleted_at IS NULL`, timestamp, height, Layer2Msg); err != nil {
return err
}
return nil
}
func (l *l2CrossMsgOrm) GetL2EarliestNoBlocktimestampHeight() (uint64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND NOT is_deleted ORDER BY height ASC LIMIT 1;`, Layer2Msg)
func (l *l2CrossMsgOrm) GetL2EarliestNoBlockTimestampHeight() (uint64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND deleted_at IS NULL ORDER BY height ASC LIMIT 1;`, Layer2Msg)
var result uint64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows {

View File

@@ -0,0 +1,151 @@
package orm
import (
"context"
"database/sql"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)
type L2SentMsg struct {
ID uint64 `json:"id" db:"id"`
TxSender string `json:"tx_sender" db:"tx_sender"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Value string `json:"value" db:"value"`
Height uint64 `json:"height" db:"height"`
Nonce uint64 `json:"nonce" db:"nonce"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
MsgProof string `json:"msg_proof" db:"msg_proof"`
MsgData string `json:"msg_data" db:"msg_data"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
}
type l2SentMsgOrm struct {
db *sqlx.DB
}
// NewL2SentMsgOrm creates an L2SentMsgOrm instance
func NewL2SentMsgOrm(db *sqlx.DB) L2SentMsgOrm {
return &l2SentMsgOrm{db: db}
}
func (l *l2SentMsgOrm) GetL2SentMsgByHash(msgHash string) (*L2SentMsg, error) {
result := &L2SentMsg{}
row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msgHash)
if err := row.StructScan(result); err != nil {
return nil, err
}
return result, nil
}
func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2SentMsg) error {
if len(messages) == 0 {
return nil
}
var err error
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"tx_sender": msg.TxSender,
"sender": msg.Sender,
"target": msg.Target,
"value": msg.Value,
"msg_hash": msg.MsgHash,
"height": msg.Height,
"nonce": msg.Nonce,
"batch_index": msg.BatchIndex,
"msg_proof": msg.MsgProof,
"msg_data": msg.MsgData,
}
}
_, err = dbTx.NamedExec(`insert into l2_sent_msg(tx_sender, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:tx_sender, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
if err != nil {
log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err)
return err
}
return nil
}
func (l *l2SentMsgOrm) GetLatestSentMsgHeightOnL2() (int64, error) {
row := l.db.QueryRow(`SELECT height FROM l2_sent_msg WHERE deleted_at IS NULL ORDER BY nonce DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return 0, err
}
if result.Valid {
return result.Int64, nil
}
return 0, nil
}
func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND deleted_at IS NULL;"), proof, batch_index, msgHash); err != nil {
return err
}
return nil
}
func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {
row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE batch_index !=0 AND deleted_at IS NULL ORDER BY batch_index DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return -1, err
}
if result.Valid {
return result.Int64, nil
}
return -1, nil
}
func (l *l2SentMsgOrm) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error) {
var results []*L2SentMsg
rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE height >= $1 AND height <= $2 AND deleted_at IS NULL ORDER BY nonce ASC;`, startHeight, endHeight)
if err != nil {
return nil, err
}
for rows.Next() {
msg := &L2SentMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
return results, err
}
func (l *l2SentMsgOrm) GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error) {
result := &L2SentMsg{}
row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE nonce = $1 AND deleted_at IS NULL;`, nonce)
err := row.StructScan(result)
if err != nil {
return nil, err
}
return result, nil
}
func (l *l2SentMsgOrm) GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error) {
result := &L2SentMsg{}
row := l.db.QueryRowx(`select * from l2_sent_msg where height <= $1 AND deleted_at IS NULL order by nonce desc limit 1`, endBlockNumber)
err := row.StructScan(result)
if err != nil {
return nil, err
}
return result, nil
}
func (l *l2SentMsgOrm) DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE l2_sent_msg SET deleted_at = current_timestamp WHERE height > $1;`, height)
return err
}
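A hedged sketch of how these queries might be combined when back-filling withdraw proofs for a committed batch; computeProofFor is a hypothetical placeholder, and only the ORM calls come from this diff:
// batch, ctx, dbTx and l2SentMsgOrm are assumed to be in scope.
msgs, err := l2SentMsgOrm.GetL2SentMsgMsgHashByHeightRange(batch.StartBlockNumber, batch.EndBlockNumber)
if err != nil {
	return err
}
for _, msg := range msgs {
	proof := computeProofFor(msg) // hypothetical: derive the withdraw-trie proof
	if err := l2SentMsgOrm.UpdateL2MessageProofInDBTx(ctx, dbTx, msg.MsgHash, proof, batch.BatchIndex); err != nil {
		return err
	}
}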

View File

@@ -3,12 +3,18 @@ package orm
import (
"database/sql"
"errors"
"strings"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)
type RelayedMsg struct {
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
}
type relayedMsgOrm struct {
db *sqlx.DB
}
@@ -31,19 +37,18 @@ func (l *relayedMsgOrm) BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*Rel
"layer1_hash": msg.Layer1Hash,
"layer2_hash": msg.Layer2Hash,
}
_, err = dbTx.NamedExec(`insert into relayed_msg(msg_hash, height, layer1_hash, layer2_hash) values(:msg_hash, :height, :layer1_hash, :layer2_hash);`, messageMaps[i])
if err != nil && !strings.Contains(err.Error(), "pq: duplicate key value violates unique constraint \"relayed_msg_hash_uindex") {
log.Error("BatchInsertRelayedMsgDBTx: failed to insert l1 cross msgs", "msg_Hashe", msg.MsgHash, "height", msg.Height, "err", err)
break
}
}
return err
_, err = dbTx.NamedExec(`insert into relayed_msg(msg_hash, height, layer1_hash, layer2_hash) values(:msg_hash, :height, :layer1_hash, :layer2_hash);`, messageMaps)
if err != nil {
log.Error("BatchInsertRelayedMsgDBTx: failed to insert relayed msgs", "err", err)
return err
}
return nil
}
func (l *relayedMsgOrm) GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error) {
result := &RelayedMsg{}
row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND NOT is_deleted;`, msg_hash)
row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msg_hash)
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
@@ -54,7 +59,7 @@ func (l *relayedMsgOrm) GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error
}
func (l *relayedMsgOrm) GetLatestRelayedHeightOnL1() (int64, error) {
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer1_hash != '' AND NOT is_deleted ORDER BY height DESC LIMIT 1;`)
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer1_hash != '' AND deleted_at IS NULL ORDER BY height DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
@@ -69,7 +74,7 @@ func (l *relayedMsgOrm) GetLatestRelayedHeightOnL1() (int64, error) {
}
func (l *relayedMsgOrm) GetLatestRelayedHeightOnL2() (int64, error) {
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer2_hash != '' AND NOT is_deleted ORDER BY height DESC LIMIT 1;`)
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer2_hash != '' AND deleted_at IS NULL ORDER BY height DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
@@ -84,11 +89,11 @@ func (l *relayedMsgOrm) GetLatestRelayedHeightOnL2() (int64, error) {
}
func (l *relayedMsgOrm) DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE relayed_msg SET is_deleted = true WHERE height > $1 AND layer1_hash != '';`, height)
_, err := dbTx.Exec(`UPDATE relayed_msg SET deleted_at = current_timestamp WHERE height > $1 AND layer1_hash != '';`, height)
return err
}
func (l *relayedMsgOrm) DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE relayed_msg SET is_deleted = true WHERE height > $1 AND layer2_hash != '';`, height)
_, err := dbTx.Exec(`UPDATE relayed_msg SET deleted_at = current_timestamp WHERE height > $1 AND layer2_hash != '';`, height)
return err
}

View File

@@ -16,6 +16,8 @@ type OrmFactory interface {
orm.L1CrossMsgOrm
orm.L2CrossMsgOrm
orm.RelayedMsgOrm
orm.L2SentMsgOrm
orm.RollupBatchOrm
GetTotalCrossMsgCountByAddress(sender string) (uint64, error)
GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error)
GetDB() *sqlx.DB
@@ -27,6 +29,8 @@ type ormFactory struct {
orm.L1CrossMsgOrm
orm.L2CrossMsgOrm
orm.RelayedMsgOrm
orm.L2SentMsgOrm
orm.RollupBatchOrm
*sqlx.DB
}
@@ -45,10 +49,12 @@ func NewOrmFactory(cfg *config.Config) (OrmFactory, error) {
}
return &ormFactory{
L1CrossMsgOrm: orm.NewL1CrossMsgOrm(db),
L2CrossMsgOrm: orm.NewL2CrossMsgOrm(db),
RelayedMsgOrm: orm.NewRelayedMsgOrm(db),
DB: db,
L1CrossMsgOrm: orm.NewL1CrossMsgOrm(db),
L2CrossMsgOrm: orm.NewL2CrossMsgOrm(db),
RelayedMsgOrm: orm.NewRelayedMsgOrm(db),
L2SentMsgOrm: orm.NewL2SentMsgOrm(db),
RollupBatchOrm: orm.NewRollupBatchOrm(db),
DB: db,
}, nil
}
@@ -62,7 +68,7 @@ func (o *ormFactory) Beginx() (*sqlx.Tx, error) {
func (o *ormFactory) GetTotalCrossMsgCountByAddress(sender string) (uint64, error) {
var count uint64
row := o.DB.QueryRowx(`SELECT COUNT(*) FROM cross_message WHERE sender = $1 AND NOT is_deleted;`, sender)
row := o.DB.QueryRowx(`SELECT COUNT(*) FROM cross_message WHERE sender = $1 AND deleted_at IS NULL;`, sender)
if err := row.Scan(&count); err != nil {
return 0, err
}
@@ -72,7 +78,7 @@ func (o *ormFactory) GetTotalCrossMsgCountByAddress(sender string) (uint64, erro
func (o *ormFactory) GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error) {
para := sender
var results []*orm.CrossMsg
rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND NOT is_deleted ORDER BY block_timestamp DESC NULLS FIRST, id DESC LIMIT $2 OFFSET $3;`, para, limit, offset)
rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND deleted_at IS NULL ORDER BY block_timestamp DESC NULLS FIRST, id DESC LIMIT $2 OFFSET $3;`, para, limit, offset)
if err != nil || rows == nil {
return nil, err
}

View File

@@ -1,6 +1,7 @@
package service
import (
"strconv"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -19,15 +20,27 @@ type Finalized struct {
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
}
type UserClaimInfo struct {
From string `json:"from"`
To string `json:"to"`
Value string `json:"value"`
Nonce string `json:"nonce"`
BatchHash string `json:"batch_hash"`
Message string `json:"message"`
Proof string `json:"proof"`
BatchIndex string `json:"batch_index"`
}
type TxHistoryInfo struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
FinalizeTx *Finalized `json:"finalizeTx"`
CreatedAt *time.Time `json:"createdTime"`
Hash string `json:"hash"`
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
FinalizeTx *Finalized `json:"finalizeTx"`
ClaimInfo *UserClaimInfo `json:"claimInfo"`
CreatedAt *time.Time `json:"createdTime"`
}
// HistoryService example service.
@@ -47,6 +60,30 @@ type historyBackend struct {
db db.OrmFactory
}
func GetCrossTxClaimInfo(msgHash string, db db.OrmFactory) *UserClaimInfo {
l2sentMsg, err := db.GetL2SentMsgByHash(msgHash)
if err != nil {
log.Debug("GetCrossTxClaimInfo failed", "error", err)
return &UserClaimInfo{}
}
batch, err := db.GetRollupBatchByIndex(l2sentMsg.BatchIndex)
if err != nil {
log.Debug("GetCrossTxClaimInfo failed", "error", err)
return &UserClaimInfo{}
}
return &UserClaimInfo{
From: l2sentMsg.Sender,
To: l2sentMsg.Target,
Value: l2sentMsg.Value,
Nonce: strconv.FormatUint(l2sentMsg.Nonce, 10),
Message: l2sentMsg.MsgData,
Proof: "0x" + l2sentMsg.MsgProof,
BatchHash: batch.BatchHash,
BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
}
}
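To make the wire format concrete, here is a minimal sketch of what claimInfo serializes to, following the struct tags above; all values are illustrative, and it assumes "encoding/json" and "fmt" are imported:
info := UserClaimInfo{
	From:       "0xsender",
	To:         "0xtarget",
	Value:      "1000000000000000000",
	Nonce:      "7",
	BatchHash:  "0xbatchhash",
	Message:    "0xcalldata",
	Proof:      "0xproofbytes",
	BatchIndex: "3",
}
b, _ := json.Marshal(info)
fmt.Println(string(b))
// {"from":"0xsender","to":"0xtarget","value":"1000000000000000000","nonce":"7",
//  "batch_hash":"0xbatchhash","message":"0xcalldata","proof":"0xproofbytes","batch_index":"3"}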
func updateCrossTxHash(msgHash string, txInfo *TxHistoryInfo, db db.OrmFactory) {
relayed, err := db.GetRelayedMsgByHash(msgHash)
if err != nil {
@@ -76,6 +113,7 @@ func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, l
return txHistories, 0, err
}
result, err := h.db.GetCrossMsgsByAddressWithOffset(address.String(), offset, limit)
if err != nil {
return nil, 0, err
}
@@ -91,6 +129,7 @@ func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, l
FinalizeTx: &Finalized{
Hash: "",
},
ClaimInfo: GetCrossTxClaimInfo(msg.MsgHash, h.db),
}
updateCrossTxHash(msg.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
@@ -138,6 +177,7 @@ func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, erro
FinalizeTx: &Finalized{
Hash: "",
},
ClaimInfo: GetCrossTxClaimInfo(l2result.MsgHash, h.db),
}
updateCrossTxHash(l2result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)

View File

@@ -0,0 +1,310 @@
package utils
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
backendabi "bridge-history-api/abi"
"bridge-history-api/db/orm"
)
type MsgHashWrapper struct {
MsgHash common.Hash
TxHash common.Hash
}
type L2SentMsgWrapper struct {
L2SentMsg *orm.L2SentMsg
TxHash common.Hash
}
type CachedParsedTxCalldata struct {
CallDataIndex uint64
BatchIndices []uint64
StartBlocks []uint64
EndBlocks []uint64
}
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrapper, []*orm.RelayedMsg, error) {
// Need to use the contract ABI to parse the event logs
// Can only be tested after we have our contracts set up
var l1CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg
var msgHashes []MsgHashWrapper
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1DepositETHSig:
event := backendabi.DepositETH{}
err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
if err != nil {
log.Warn("Failed to unpack DepositETH event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer1Hash: vlog.TxHash.Hex(),
})
case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC20 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ERC20),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
})
case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC721 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
})
case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(),
})
case backendabi.L1SentMessageEventSignature:
event := backendabi.L1SentMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
msgHashes = append(msgHashes, MsgHashWrapper{
MsgHash: msgHash,
TxHash: vlog.TxHash})
case backendabi.L1RelayedMessageEventSignature:
event := backendabi.L1RelayedMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
Height: vlog.BlockNumber,
Layer1Hash: vlog.TxHash.Hex(),
})
}
}
return l1CrossMsg, msgHashes, relayedMsgs, nil
}
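A hedged sketch of how a watcher loop might feed logs into this parser via go-ethereum's FilterLogs; the contract addresses, block range, and surrounding variables are illustrative, not part of this hunk:
// ctx, client (*ethclient.Client), messengerAddr and gatewayAddr are assumed in scope;
// needs "math/big" and "github.com/ethereum/go-ethereum".
query := ethereum.FilterQuery{
	FromBlock: big.NewInt(100),
	ToBlock:   big.NewInt(200),
	Addresses: []common.Address{messengerAddr, gatewayAddr},
}
logs, err := client.FilterLogs(ctx, query)
if err != nil {
	return err
}
crossMsgs, msgHashes, relayedMsgs, err := ParseBackendL1EventLogs(logs)
if err != nil {
	return err
}
// ...then persist crossMsgs / msgHashes / relayedMsgs in one db transaction.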
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []L2SentMsgWrapper, error) {
// Need to use the contract ABI to parse the event logs
// Can only be tested after we have our contracts set up
var l2CrossMsg []*orm.CrossMsg
// this is used to confirm finalized l1 msgs
var relayedMsgs []*orm.RelayedMsg
var l2SentMsg []L2SentMsgWrapper
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2WithdrawETHSig:
event := backendabi.DepositETH{}
err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawETH event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer2Hash: vlog.TxHash.Hex(),
})
case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ERC20),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
})
case backendabi.L2WithdrawERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
})
case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(),
})
case backendabi.L2SentMessageEventSignature:
event := backendabi.L2SentMessageEvent{}
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err
}
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
l2SentMsg = append(l2SentMsg,
L2SentMsgWrapper{
TxHash: vlog.TxHash,
L2SentMsg: &orm.L2SentMsg{
Sender: event.Sender.Hex(),
Target: event.Target.Hex(),
Value: event.Value.String(),
MsgHash: msgHash.Hex(),
Height: vlog.BlockNumber,
Nonce: event.MessageNonce.Uint64(),
MsgData: hexutil.Encode(event.Message),
},
})
case backendabi.L2RelayedMessageEventSignature:
event := backendabi.L2RelayedMessageEvent{}
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
Height: vlog.BlockNumber,
Layer2Hash: vlog.TxHash.Hex(),
})
}
}
return l2CrossMsg, relayedMsgs, l2SentMsg, nil
}
func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
var rollupBatches []*orm.RollupBatch
cache := make(map[string]CachedParsedTxCalldata)
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1CommitBatchEventSignature:
event := backendabi.L1CommitBatchEvent{}
err := UnpackLog(backendabi.ScrollChainABI, &event, "CommitBatch", vlog)
if err != nil {
log.Warn("Failed to unpack CommitBatch event", "err", err)
return rollupBatches, err
}
if _, ok := cache[vlog.TxHash.Hex()]; ok {
c := cache[vlog.TxHash.Hex()]
c.CallDataIndex++
rollupBatches = append(rollupBatches, &orm.RollupBatch{
CommitHeight: vlog.BlockNumber,
BatchIndex: c.BatchIndices[c.CallDataIndex],
BatchHash: event.BatchHash.Hex(),
StartBlockNumber: c.StartBlocks[c.CallDataIndex],
EndBlockNumber: c.EndBlocks[c.CallDataIndex],
})
cache[vlog.TxHash.Hex()] = c
continue
}
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
if err != nil || isPending {
log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
return rollupBatches, err
}
indices, startBlocks, endBlocks, err := GetBatchRangeFromCalldataV1(commitTx.Data())
if err != nil {
log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().Hex(), "height", vlog.BlockNumber)
return rollupBatches, err
}
cache[vlog.TxHash.Hex()] = CachedParsedTxCalldata{
CallDataIndex: 0,
BatchIndices: indices,
StartBlocks: startBlocks,
EndBlocks: endBlocks,
}
rollupBatches = append(rollupBatches, &orm.RollupBatch{
CommitHeight: vlog.BlockNumber,
BatchIndex: indices[0],
BatchHash: event.BatchHash.Hex(),
StartBlockNumber: startBlocks[0],
EndBlockNumber: endBlocks[0],
})
default:
continue
}
}
return rollupBatches, nil
}

View File

@@ -3,6 +3,7 @@ package utils
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"math/big"
@@ -62,6 +63,53 @@ func ComputeMessageHash(
return common.BytesToHash(crypto.Keccak256(data))
}
type commitBatchArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
}
// GetBatchRangeFromCalldataV2 find the block range from calldata, both inclusive.
func GetBatchRangeFromCalldataV2(calldata []byte) (uint64, uint64, uint64, error) {
method := backendabi.ScrollChainV2ABI.Methods["commitBatch"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return 0, 0, 0, err
}
args := commitBatchArgs{}
err = method.Inputs.Copy(&args, values)
if err != nil {
return 0, 0, 0, err
}
var startBlock uint64
var finishBlock uint64
// decode batchIndex from ParentBatchHeader
if len(args.ParentBatchHeader) < 9 {
return 0, 0, 0, errors.New("invalid parent batch header")
}
batchIndex := binary.BigEndian.Uint64(args.ParentBatchHeader[1:9]) + 1
// decode blocks from chunk and assume that there's no empty chunk
// | 1 byte | 60 bytes | ... | 60 bytes |
// | num blocks | block 1 | ... | block n |
if len(args.Chunks) == 0 {
return 0, 0, 0, errors.New("invalid chunks")
}
chunk := args.Chunks[0]
block := chunk[1:61] // first block in chunk
startBlock = binary.BigEndian.Uint64(block[0:8])
chunk = args.Chunks[len(args.Chunks)-1]
lastBlockIndex := int(chunk[0]) - 1
block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
finishBlock = binary.BigEndian.Uint64(block[0:8])
return batchIndex, startBlock, finishBlock, err
}
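A self-contained worked example of the chunk layout decoded above — block i occupies bytes [1+60*i, 1+60*(i+1)), with its block number big-endian in the first 8 bytes:
package main
import (
	"encoding/binary"
	"fmt"
)
func main() {
	// Two-block chunk: | 1 byte num blocks | 60 bytes block 1 | 60 bytes block 2 |
	chunk := make([]byte, 1+2*60)
	chunk[0] = 2
	binary.BigEndian.PutUint64(chunk[1:9], 10)   // first block number = 10
	binary.BigEndian.PutUint64(chunk[61:69], 11) // second block number = 11
	start := binary.BigEndian.Uint64(chunk[1:9])
	lastIdx := int(chunk[0]) - 1
	finish := binary.BigEndian.Uint64(chunk[1+lastIdx*60 : 1+lastIdx*60+8])
	fmt.Println(start, finish) // prints: 10 11
}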
// GetBatchRangeFromCalldataV1 find the block range from calldata, both inclusive.
func GetBatchRangeFromCalldataV1(calldata []byte) ([]uint64, []uint64, []uint64, error) {
var batchIndices []uint64

View File

@@ -20,6 +20,22 @@ func TestKeccak2(t *testing.T) {
assert.Equal(t, "0xc0ffbd7f501bd3d49721b0724b2bff657cb2378f15d5a9b97cd7ea5bf630d512", c.Hex())
}
func TestGetBatchRangeFromCalldataV2(t *testing.T) {
// single chunk
batchIndex, start, finish, err := utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003d0100000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000100000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000"))
assert.NoError(t, err)
assert.Equal(t, start, uint64(1))
assert.Equal(t, finish, uint64(1))
assert.Equal(t, batchIndex, uint64(1))
// multiple chunk
batchIndex, start, finish, err = utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000007900000000000000000100000000000000010000000000000001038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000004c01000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030000000000010000000001000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b403000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005000300000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00050000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012c01000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa800000000000000000000000000000000000000000000000000000000000000aa"))
assert.NoError(t, err)
assert.Equal(t, start, uint64(10))
assert.Equal(t, finish, uint64(20))
assert.Equal(t, batchIndex, uint64(2))
}
func TestGetBatchRangeFromCalldataV1(t *testing.T) {
calldata, err := os.ReadFile("../testdata/commit-batches-0x3095e91db7ba4a6fbf4654d607db322e58ff5579c502219c8024acaea74cf311.txt")
assert.NoError(t, err)

File diff suppressed because one or more lines are too long

View File

@@ -73,6 +73,19 @@ func TestPackFinalizeBatchWithProof(t *testing.T) {
assert.NoError(err)
}
func TestPackImportGenesisBatch(t *testing.T) {
assert := assert.New(t)
l1RollupABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(err)
batchHeader := []byte{}
stateRoot := common.Hash{}
_, err = l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
assert.NoError(err)
}
func TestPackRelayL1Message(t *testing.T) {
assert := assert.New(t)

View File

@@ -83,7 +83,7 @@ func action(ctx *cli.Context) error {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err

View File

@@ -31,6 +31,7 @@ func init() {
app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Flags = append(app.Flags, cutils.RollupRelayerFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
@@ -70,7 +71,8 @@ func action(ctx *cli.Context) error {
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
initGenesis := ctx.Bool(cutils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
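
The flag read here is referenced but never shown in this diff. Below is a plausible definition using the urfave/cli v2 types this command already relies on; the flag name and usage string are hypothetical, and only the flag's existence in cutils is confirmed above:

package utils

import "github.com/urfave/cli/v2"

var (
	// ImportGenesisFlag enables committing the genesis batch to L1 during
	// startup. Hypothetical sketch; the real name may differ.
	ImportGenesisFlag = &cli.BoolFlag{
		Name:  "import-genesis",
		Usage: "commit the genesis batch to L1 during startup",
		Value: false,
	}

	// RollupRelayerFlags is the set appended to app.Flags in main above.
	RollupRelayerFlags = []cli.Flag{
		ImportGenesisFlag,
	}
)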

View File

@@ -3,8 +3,10 @@ package relayer
import (
"context"
"errors"
"fmt"
"math/big"
"sync"
"time"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
@@ -42,10 +44,10 @@ type Layer2Relayer struct {
l2Client *ethclient.Client
batchOrm *orm.Batch
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
l2MessageOrm *orm.L2Message
db *gorm.DB
batchOrm *orm.Batch
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
cfg *config.RelayerConfig
@@ -78,7 +80,7 @@ type Layer2Relayer struct {
}
// NewLayer2Relayer will return a new instance of Layer2Relayer
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool) (*Layer2Relayer, error) {
// @todo use different sender for relayer, block commit and proof finalize
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
@@ -115,11 +117,11 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
layer2Relayer := &Layer2Relayer{
ctx: ctx,
db: db,
batchOrm: orm.NewBatch(db),
l2MessageOrm: orm.NewL2Message(db),
l2BlockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
l2BlockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
l2Client: l2Client,
@@ -142,10 +144,127 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
processingCommitment: sync.Map{},
processingFinalization: sync.Map{},
}
// Initialize genesis before we do anything else
if initGenesis {
if err := layer2Relayer.initializeGenesis(); err != nil {
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
}
}
go layer2Relayer.handleConfirmLoop(ctx)
return layer2Relayer, nil
}
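
A brief usage sketch of the two modes of the extended constructor; the types are the ones from this file, and the comment reflects the batch-count guard in initializeGenesis below:

// startRelayer is a hedged usage sketch, not part of this change.
func startRelayer(ctx context.Context, client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, firstBoot bool) (*Layer2Relayer, error) {
	// With firstBoot=true the relayer imports the genesis chunk/batch and
	// commits it to L1 before it starts handling confirmations; on later
	// boots the batch-count guard in initializeGenesis turns this into a
	// no-op, so the rollup-relayer command simply forwards its CLI flag.
	return NewLayer2Relayer(ctx, client, db, cfg, firstBoot)
}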
func (r *Layer2Relayer) initializeGenesis() error {
if count, err := r.batchOrm.GetBatchCount(r.ctx); err != nil {
return fmt.Errorf("failed to get batch count: %v", err)
} else if count > 0 {
log.Info("genesis already imported", "batch count", count)
return nil
}
genesis, err := r.l2Client.HeaderByNumber(r.ctx, big.NewInt(0))
if err != nil {
return fmt.Errorf("failed to retrieve L2 genesis header: %v", err)
}
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
chunk := &bridgeTypes.Chunk{
Blocks: []*bridgeTypes.WrappedBlock{{
Header: genesis,
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
}},
}
err = r.db.Transaction(func(dbTX *gorm.DB) error {
var dbChunk *orm.Chunk
dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, dbTX)
if err != nil {
return fmt.Errorf("failed to insert chunk: %v", err)
}
if err = r.chunkOrm.UpdateProvingStatus(r.ctx, dbChunk.Hash, types.ProvingTaskVerified, dbTX); err != nil {
return fmt.Errorf("failed to update genesis chunk proving status: %v", err)
}
var batch *orm.Batch
batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*bridgeTypes.Chunk{chunk}, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
if err = r.chunkOrm.UpdateBatchHashInRange(r.ctx, 0, 0, batch.Hash, dbTX); err != nil {
return fmt.Errorf("failed to update batch hash for chunks: %v", err)
}
if err = r.batchOrm.UpdateProvingStatus(r.ctx, batch.Hash, types.ProvingTaskVerified, dbTX); err != nil {
return fmt.Errorf("failed to update genesis batch proving status: %v", err)
}
if err = r.batchOrm.UpdateRollupStatus(r.ctx, batch.Hash, types.RollupFinalized, dbTX); err != nil {
return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
}
// commit genesis batch on L1
// note: we do this inside the DB transaction so that we can revert all DB changes if this step fails
return r.commitGenesisBatch(batch.Hash, batch.BatchHeader, common.HexToHash(batch.StateRoot))
})
if err != nil {
return fmt.Errorf("update genesis transaction failed: %v", err)
}
log.Info("successfully imported genesis chunk and batch")
return nil
}
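
A hedged sketch of the database state this routine leaves behind, written as test-style assertions in the vein of the relayer tests later in this diff. Only GetBatchCount, GetLatestBatch, Index and the status setters are confirmed by the surrounding code; exact field types are assumed:

// Hedged sketch, not part of this change; fixtures as in the relayer tests below.
func testGenesisImportInvariants(t *testing.T) {
	// Assumes initializeGenesis has already run against this database
	// (e.g. via NewLayer2Relayer(..., true) with a reachable L1 endpoint).
	db := setupL2RelayerDB(t)
	defer bridgeUtils.CloseDB(db)
	ctx := context.Background()

	batchOrm := orm.NewBatch(db)

	count, err := batchOrm.GetBatchCount(ctx)
	assert.NoError(t, err)
	assert.Equal(t, 1, int(count)) // exactly one batch: the genesis batch

	batch, err := batchOrm.GetLatestBatch(ctx)
	assert.NoError(t, err)
	assert.Equal(t, uint64(0), batch.Index) // genesis batch has index 0
	// Its single chunk doubles as start and end chunk, and both chunk and
	// batch are already marked verified/finalized, since there is nothing
	// to prove for the genesis block. A repeat call to initializeGenesis
	// hits the batch-count guard above and becomes a no-op.
}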
func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
// encode "importGenesisBatch" transaction calldata
calldata, err := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if err != nil {
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, err)
}
// submit genesis batch to L1 rollup contract
txHash, err := r.rollupSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash.String(), "batchHash", batchHash)
// wait for confirmation
// we assume that no other transactions are sent before initializeGenesis completes
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
// create the timeout channel once, outside the loop: a time.After placed
// directly inside the select would be re-created on every ticker tick, so
// the 5-minute deadline would never fire
timeout := time.After(5 * time.Minute)
for {
select {
// print progress
case <-ticker.C:
log.Info("Waiting for confirmation", "pending count", r.rollupSender.PendingCount())
// timeout
case <-timeout:
return fmt.Errorf("import genesis timeout after 5 minutes, original txHash: %v", txHash.String())
// handle confirmation
case confirmation := <-r.rollupSender.ConfirmChan():
if confirmation.ID != batchHash {
return fmt.Errorf("unexpected import genesis confirmation id, expected: %v, got: %v", batchHash, confirmation.ID)
}
if !confirmation.IsSuccessful {
return fmt.Errorf("import genesis batch tx failed")
}
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String())
return nil
}
}
}
// ProcessGasPriceOracle relays the gas price to layer 1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
@@ -400,7 +519,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is CommitBatches transaction
if batchHash, ok := r.processingCommitment.Load(confirmation.ID); ok {
transactionType = "BatchesCommitment"

View File

@@ -35,7 +35,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t)
defer bridgeUtils.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
assert.NoError(t, err)
assert.NotNil(t, relayer)
}
@@ -45,7 +45,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
defer bridgeUtils.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
@@ -57,12 +57,12 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
batchHash, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*bridgeTypes.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*bridgeTypes.Chunk{chunk1, chunk2})
assert.NoError(t, err)
relayer.ProcessPendingBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitting, statuses[0])
@@ -73,36 +73,36 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
defer bridgeUtils.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
batchHash, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash, types.ProvingTaskVerified)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err)
proof := &message.AggProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
@@ -113,26 +113,26 @@ func testL2RelayerSkipBatches(t *testing.T) {
defer bridgeUtils.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus) string {
batchHash, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash, rollupStatus)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, rollupStatus)
assert.NoError(t, err)
proof := &message.AggProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash, provingStatus)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, provingStatus)
assert.NoError(t, err)
return batchHash
return batch.Hash
}
skipped := []string{
@@ -177,7 +177,7 @@ func testL2RelayerRollupConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -187,9 +187,9 @@ func testL2RelayerRollupConfirm(t *testing.T) {
batchOrm := orm.NewBatch(db)
batchHashes := make([]string, len(processingKeys))
for i := range batchHashes {
var err error
batchHashes[i], err = batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
assert.NoError(t, err)
batchHashes[i] = batch.Hash
}
for i, key := range processingKeys[:2] {
@@ -235,17 +235,17 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
defer bridgeUtils.CloseDB(db)
batchOrm := orm.NewBatch(db)
batchHash1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
assert.NoError(t, err)
batchHash2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
assert.NoError(t, err)
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -255,8 +255,8 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
}
confirmations := []BatchConfirmation{
{batchHash: batchHash1, isSuccessful: true},
{batchHash: batchHash2, isSuccessful: false},
{batchHash: batch1.Hash, isSuccessful: true},
{batchHash: batch2.Hash, isSuccessful: false},
}
for _, confirmation := range confirmations {
@@ -283,7 +283,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t)
defer bridgeUtils.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
assert.NoError(t, err)
assert.NotNil(t, relayer)

View File

@@ -72,14 +72,13 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
endChunkIndex := dbChunks[numChunks-1].Index
endChunkHash := dbChunks[numChunks-1].Hash
err = p.db.Transaction(func(dbTX *gorm.DB) error {
var batchHash string
batchHash, err = p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
if err != nil {
return err
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
if dbErr != nil {
return dbErr
}
err = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batchHash, dbTX)
if err != nil {
return err
dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
if dbErr != nil {
return dbErr
}
return nil
})

View File

@@ -106,15 +106,6 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
)
}
if totalTxGasUsed > p.maxTxGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx gas limit; block number: %v, gas used: %v, max gas limit: %v",
firstBlock.Header.Number,
totalTxGasUsed,
p.maxTxGasPerChunk,
)
}
if totalL1CommitGas > p.maxL1CommitGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
@@ -133,6 +124,16 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
)
}
// Check if the first block breaks any soft limits.
if totalTxGasUsed > p.maxTxGasPerChunk {
log.Warn(
"The first block in chunk exceeds l2 tx gas limit",
"block number", firstBlock.Header.Number,
"gas used", totalTxGasUsed,
"max gas limit", p.maxTxGasPerChunk,
)
}
for i, block := range blocks[1:] {
totalTxGasUsed += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
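
The practical effect of relocating this check deserves a note: an oversized first block now degrades to a warning plus a single-block chunk instead of permanently wedging chunk proposal. An illustration with assumed numbers, written as a comment sketch:

// Suppose maxTxGasPerChunk = 10,000,000 and the first candidate block used
// 12,000,000 gas (values chosen for illustration only):
//
//   before: proposeChunk() returned an error on every attempt, so the block
//           could never be chunked and the pipeline stalled;
//   after:  proposeChunk() logs a warning and still proposes a chunk holding
//           just that block, so only the hard limits (such as the L1 commit
//           gas check above) can reject a single block outright.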

View File

@@ -24,10 +24,9 @@ import (
)
var (
bridgeL1MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
bridgeL1MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
bridgeL1MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
bridgeL1MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
)
type rollupEvent struct {
@@ -41,7 +40,6 @@ type L1WatcherClient struct {
ctx context.Context
client *ethclient.Client
l1MessageOrm *orm.L1Message
l2MessageOrm *orm.L2Message
l1BlockOrm *orm.L1Block
batchOrm *orm.Batch
@@ -91,7 +89,6 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
l1MessageOrm: l1MessageOrm,
l1BlockOrm: l1BlockOrm,
batchOrm: orm.NewBatch(db),
l2MessageOrm: orm.NewL2Message(db),
confirmations: confirmations,
messengerAddress: messengerAddress,
@@ -227,18 +224,16 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
sentMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("Failed to parse emitted events log", "err", err)
return err
}
sentMessageCount := int64(len(sentMessageEvents))
relayedMessageCount := int64(len(relayedMessageEvents))
rollupEventCount := int64(len(rollupEvents))
bridgeL1MsgsSentEventsTotalCounter.Inc(sentMessageCount)
bridgeL1MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
bridgeL1MsgsRollupEventsTotalCounter.Inc(rollupEventCount)
log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RelayedMessageCount", relayedMessageCount, "RollupEventCount", rollupEventCount)
log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RollupEventCount", rollupEventCount)
// use rollup event to update rollup results db status
var batchHashes []string
@@ -272,21 +267,6 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
}
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
var msgStatus types.MsgStatus
if msg.isSuccessful {
msgStatus = types.MsgConfirmed
} else {
msgStatus = types.MsgFailed
}
if err = w.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err
}
}
if err = w.l1MessageOrm.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
return err
}
@@ -298,11 +278,10 @@ func (w *L1WatcherClient) FetchContractEvent() error {
return nil
}
func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1Message, []rollupEvent, error) {
// We need to use the contract ABI to parse the event logs.
// This can only be tested after our contracts are set up.
var l1Messages []*orm.L1Message
var relayedMessages []relayedMessage
var rollupEvents []rollupEvent
for _, vLog := range logs {
switch vLog.Topics[0] {
@@ -311,7 +290,7 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
err := utils.UnpackLog(w.messageQueueABI, &event, "QueueTransaction", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 QueueTransaction event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
return l1Messages, rollupEvents, err
}
msgHash := common.BytesToHash(crypto.Keccak256(event.Data))
@@ -327,38 +306,12 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
GasLimit: event.GasLimit.Uint64(),
Layer1Hash: vLog.TxHash.Hex(),
})
case bridgeAbi.L1RelayedMessageEventSignature:
event := bridgeAbi.L1RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 RelayedMessage event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: true,
})
case bridgeAbi.L1FailedRelayedMessageEventSignature:
event := bridgeAbi.L1FailedRelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 FailedRelayedMessage event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: false,
})
case bridgeAbi.L1CommitBatchEventSignature:
event := bridgeAbi.L1CommitBatchEvent{}
err := utils.UnpackLog(w.scrollChainABI, &event, "CommitBatch", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 CommitBatch event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
return l1Messages, rollupEvents, err
}
rollupEvents = append(rollupEvents, rollupEvent{
@@ -371,7 +324,7 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
err := utils.UnpackLog(w.scrollChainABI, &event, "FinalizeBatch", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
return l1Messages, rollupEvents, err
}
rollupEvents = append(rollupEvents, rollupEvent{
@@ -384,5 +337,5 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
}
}
return l1Messages, relayedMessages, rollupEvents, nil
return l1Messages, rollupEvents, nil
}

View File

@@ -159,14 +159,14 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
convey.Convey("parse bridge event logs failure", t, func() {
targetErr := errors.New("parse log failure")
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
return nil, nil, nil, targetErr
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []rollupEvent, error) {
return nil, nil, targetErr
})
err := watcher.FetchContractEvent()
assert.EqualError(t, err, targetErr.Error())
})
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []rollupEvent, error) {
rollupEvents := []rollupEvent{
{
batchHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
@@ -179,20 +179,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
status: commonTypes.RollupCommitted,
},
}
relayedMessageEvents := []relayedMessage{
{
msgHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
txHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
isSuccessful: true,
},
{
msgHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
txHash: common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"),
isSuccessful: false,
},
}
return nil, relayedMessageEvents, rollupEvents, nil
return nil, rollupEvents, nil
})
var batchOrm *orm.Batch
@@ -250,20 +237,6 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
return nil
})
var l2MessageOrm *orm.L2Message
convey.Convey("db update layer2 status and layer1 hash failure", t, func() {
targetErr := errors.New("UpdateLayer2StatusAndLayer1Hash failure")
patchGuard.ApplyMethodFunc(l2MessageOrm, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
return targetErr
})
err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error())
})
patchGuard.ApplyMethodFunc(l2MessageOrm, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
return nil
})
var l1MessageOrm *orm.L1Message
convey.Convey("db save l1 message failure", t, func() {
targetErr := errors.New("SaveL1Messages failure")
@@ -303,10 +276,9 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})
@@ -323,102 +295,14 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
assert.Len(t, l2Messages, 1)
assert.Equal(t, l2Messages[0].Value, big.NewInt(1000).String())
})
}
func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db)
logs := []types.Log{
{
Topics: []common.Hash{bridgeAbi.L1RelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack RelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog RelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})
convey.Convey("L1RelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridgeAbi.L1RelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, rollupEvents)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}
func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db)
logs := []types.Log{
{
Topics: []common.Hash{bridgeAbi.L1FailedRelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog FailedRelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})
convey.Convey("L1FailedRelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridgeAbi.L1FailedRelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, rollupEvents)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}
func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db)
@@ -437,10 +321,9 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})
@@ -453,10 +336,9 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Len(t, rollupEvents, 1)
assert.Equal(t, rollupEvents[0].batchHash, msgHash)
assert.Equal(t, rollupEvents[0].status, commonTypes.RollupCommitted)
@@ -481,10 +363,9 @@ func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})
@@ -497,10 +378,9 @@ func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
})
defer patchGuard.Reset()
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Len(t, rollupEvents, 1)
assert.Equal(t, rollupEvents[0].batchHash, msgHash)
assert.Equal(t, rollupEvents[0].status, commonTypes.RollupFinalized)

View File

@@ -2,7 +2,6 @@ package watcher
import (
"context"
"errors"
"fmt"
"math/big"
@@ -32,8 +31,6 @@ var (
bridgeL2MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL2MsgsAppendEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry)
bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)
@@ -44,12 +41,8 @@ type L2WatcherClient struct {
*ethclient.Client
db *gorm.DB
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch
l1MessageOrm *orm.L1Message
l2MessageOrm *orm.L2Message
confirmations rpc.BlockNumber
@@ -68,24 +61,29 @@ type L2WatcherClient struct {
// NewL2WatcherClient takes an l2geth instance and generates a new L2WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
l2MessageOrm := orm.NewL2Message(db)
savedHeight, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
if err != nil {
l1MessageOrm := orm.NewL1Message(db)
var savedHeight uint64
l1msg, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
if err != nil || l1msg == nil {
log.Warn("fetch height from db failed", "err", err)
savedHeight = 0
} else {
receipt, err := client.TransactionReceipt(ctx, common.HexToHash(l1msg.Layer2Hash))
if err != nil || receipt == nil {
log.Warn("get tx from l2 failed", "err", err)
savedHeight = 0
} else {
savedHeight = receipt.BlockNumber.Uint64()
}
}
w := L2WatcherClient{
ctx: ctx,
db: db,
Client: client,
l2BlockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
l1MessageOrm: orm.NewL1Message(db),
l2MessageOrm: l2MessageOrm,
processedMsgHeight: uint64(savedHeight),
processedMsgHeight: savedHeight,
confirmations: confirmations,
messengerAddress: messengerAddress,
@@ -98,78 +96,9 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
stopped: 0,
}
// Initialize genesis before we do anything else
if err := w.initializeGenesis(); err != nil {
panic(fmt.Sprintf("failed to initialize L2 genesis batch, err: %v", err))
}
return &w
}
func (w *L2WatcherClient) initializeGenesis() error {
if count, err := w.batchOrm.GetBatchCount(w.ctx); err != nil {
return fmt.Errorf("failed to get batch count: %v", err)
} else if count > 0 {
log.Info("genesis already imported", "batch count", count)
return nil
}
genesis, err := w.HeaderByNumber(w.ctx, big.NewInt(0))
if err != nil {
return fmt.Errorf("failed to retrieve L2 genesis header: %v", err)
}
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
chunk := &bridgeTypes.Chunk{
Blocks: []*bridgeTypes.WrappedBlock{{
Header: genesis,
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
}},
}
err = w.db.Transaction(func(dbTX *gorm.DB) error {
var dbChunk *orm.Chunk
dbChunk, err = w.chunkOrm.InsertChunk(w.ctx, chunk, dbTX)
if err != nil {
return fmt.Errorf("failed to insert chunk: %v", err)
}
if err = w.chunkOrm.UpdateProvingStatus(w.ctx, dbChunk.Hash, types.ProvingTaskVerified, dbTX); err != nil {
return fmt.Errorf("failed to update genesis chunk proving status: %v", err)
}
var batchHash string
batchHash, err = w.batchOrm.InsertBatch(w.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*bridgeTypes.Chunk{chunk}, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
if err = w.chunkOrm.UpdateBatchHashInRange(w.ctx, 0, 0, batchHash, dbTX); err != nil {
return fmt.Errorf("failed to update batch hash for chunks: %v", err)
}
if err = w.batchOrm.UpdateProvingStatus(w.ctx, batchHash, types.ProvingTaskVerified, dbTX); err != nil {
return fmt.Errorf("failed to update genesis batch proving status: %v", err)
}
if err = w.batchOrm.UpdateRollupStatus(w.ctx, batchHash, types.RollupFinalized, dbTX); err != nil {
return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
}
return nil
})
if err != nil {
return fmt.Errorf("update genesis transaction failed: %v", err)
}
log.Info("successfully imported genesis chunk and batch")
return nil
}
const blockTracesFetchLimit = uint64(10)
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
@@ -201,10 +130,20 @@ func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
txsData := make([]*gethTypes.TransactionData, len(txs))
for i, tx := range txs {
v, r, s := tx.RawSignatureValues()
nonce := tx.Nonce()
// We need QueueIndex in `NewBatchHeader`. However, `TransactionData`
// does not have this field. Since `L1MessageTx` does not have a nonce,
// we reuse the nonce field for storing the queue index.
if msg := tx.AsL1MessageTx(); msg != nil {
nonce = msg.QueueIndex
}
txsData[i] = &gethTypes.TransactionData{
Type: tx.Type(),
TxHash: tx.Hash().String(),
Nonce: tx.Nonce(),
Nonce: nonce,
ChainId: (*hexutil.Big)(tx.ChainId()),
Gas: tx.Gas(),
GasPrice: (*hexutil.Big)(tx.GasPrice()),
@@ -302,17 +241,15 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return
}
sentMessageCount := int64(len(sentMessageEvents))
relayedMessageCount := int64(len(relayedMessageEvents))
bridgeL2MsgsSentEventsTotalCounter.Inc(sentMessageCount)
bridgeL2MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
log.Info("L2 events types", "SentMessageCount", sentMessageCount, "RelayedMessageCount", relayedMessageCount)
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
@@ -329,71 +266,24 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
}
if err = w.l2MessageOrm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
log.Error("failed to save l2 messages", "err", err)
return
}
w.processedMsgHeight = uint64(to)
bridgeL2MsgsSyncHeightGauge.Update(to)
}
}
func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]orm.L2Message, []relayedMessage, error) {
func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]relayedMessage, error) {
// We need to use the contract ABI to parse the event logs.
// This can only be tested after our contracts are set up.
var l2Messages []orm.L2Message
var relayedMessages []relayedMessage
var lastAppendMsgHash common.Hash
var lastAppendMsgNonce uint64
for _, vLog := range logs {
switch vLog.Topics[0] {
case bridgeAbi.L2SentMessageEventSignature:
event := bridgeAbi.L2SentMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "SentMessage", vLog)
if err != nil {
log.Error("failed to unpack layer2 SentMessage event", "err", err)
return l2Messages, relayedMessages, err
}
computedMsgHash := utils.ComputeMessageHash(
event.Sender,
event.Target,
event.Value,
event.MessageNonce,
event.Message,
)
// The `AppendMessage` event is always emitted before the `SentMessage` event,
// so they should always match; double-check here anyway.
if event.MessageNonce.Uint64() != lastAppendMsgNonce {
errMsg := fmt.Sprintf("l2 message nonces mismatch: AppendMessage.nonce=%v, SentMessage.nonce=%v, tx_hash=%v",
lastAppendMsgNonce, event.MessageNonce.Uint64(), vLog.TxHash.Hex())
return l2Messages, relayedMessages, errors.New(errMsg)
}
if computedMsgHash != lastAppendMsgHash {
errMsg := fmt.Sprintf("l2 message hashes mismatch: AppendMessage.msg_hash=%v, SentMessage.msg_hash=%v, tx_hash=%v",
lastAppendMsgHash.Hex(), computedMsgHash.Hex(), vLog.TxHash.Hex())
return l2Messages, relayedMessages, errors.New(errMsg)
}
l2Messages = append(l2Messages, orm.L2Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: computedMsgHash.String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
case bridgeAbi.L2RelayedMessageEventSignature:
event := bridgeAbi.L2RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err)
return l2Messages, relayedMessages, err
return relayedMessages, err
}
relayedMessages = append(relayedMessages, relayedMessage{
@@ -406,7 +296,7 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]orm.L2Me
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err)
return l2Messages, relayedMessages, err
return relayedMessages, err
}
relayedMessages = append(relayedMessages, relayedMessage{
@@ -414,21 +304,9 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]orm.L2Me
txHash: vLog.TxHash,
isSuccessful: false,
})
case bridgeAbi.L2AppendMessageEventSignature:
event := bridgeAbi.L2AppendMessageEvent{}
err := utils.UnpackLog(w.messageQueueABI, &event, "AppendMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 AppendMessage event", "err", err)
return l2Messages, relayedMessages, err
}
lastAppendMsgHash = event.MessageHash
lastAppendMsgNonce = event.Index.Uint64()
bridgeL2MsgsAppendEventsTotalCounter.Inc(1)
default:
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
}
}
return l2Messages, relayedMessages, nil
return relayedMessages, nil
}

View File

@@ -21,7 +21,6 @@ import (
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
cutils "scroll-tech/common/utils"
bridgeAbi "scroll-tech/bridge/abi"
@@ -67,137 +66,6 @@ func testCreateNewWatcherAndStop(t *testing.T) {
assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
}
func testMonitorBridgeContract(t *testing.T) {
wc, db := setupL2Watcher(t)
subCtx, cancel := context.WithCancel(context.Background())
defer func() {
cancel()
defer utils.CloseDB(db)
}()
loopToFetchEvent(subCtx, wc)
previousHeight, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
// deploy mock bridge
_, tx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)
rc := prepareWatcherClient(l2Cli, db, address)
loopToFetchEvent(subCtx, rc)
// Call mock_bridge instance sendMessage to trigger emit events
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// extra block mined
toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message = []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
l2MessageOrm := orm.NewL2Message(db)
// check if we successfully stored events
assert.True(t, cutils.TryTimes(10, func() bool {
height, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
return err == nil && height > int64(previousHeight)
}))
// check l2 messages.
assert.True(t, cutils.TryTimes(10, func() bool {
msgs, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"status": types.MsgPending}, nil, 0)
return err == nil && len(msgs) == 2
}))
}
func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
_, db := setupL2Watcher(t)
subCtx, cancel := context.WithCancel(context.Background())
defer func() {
cancel()
defer utils.CloseDB(db)
}()
previousHeight, err := l2Cli.BlockNumber(context.Background()) // shadow the global previousHeight
assert.NoError(t, err)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
_, trx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
assert.NoError(t, err)
wc := prepareWatcherClient(l2Cli, db, address)
loopToFetchEvent(subCtx, wc)
// Call mock_bridge instance sendMessage to trigger emit events multiple times
numTransactions := 4
var tx *gethTypes.Transaction
for i := 0; i < numTransactions; i++ {
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
}
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// extra block mined
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
l2MessageOrm := orm.NewL2Message(db)
// check if we successfully stored events
assert.True(t, cutils.TryTimes(10, func() bool {
height, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
return err == nil && height > int64(previousHeight)
}))
assert.True(t, cutils.TryTimes(10, func() bool {
msgs, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"status": types.MsgPending}, nil, 0)
return err == nil && len(msgs) == 5
}))
}
func testFetchRunningMissingBlocks(t *testing.T) {
_, db := setupL2Watcher(t)
defer utils.CloseDB(db)
@@ -244,57 +112,6 @@ func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
go cutils.Loop(subCtx, 2*time.Second, watcher.FetchContractEvent)
}
func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t)
defer utils.CloseDB(db)
logs := []gethTypes.Log{
{
Topics: []common.Hash{
bridgeAbi.L2SentMessageEventSignature,
},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack SentMessage log failure", t, func() {
targetErr := errors.New("UnpackLog SentMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})
convey.Convey("L2SentMessageEventSignature success", t, func() {
tmpSendAddr := common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
tmpTargetAddr := common.HexToAddress("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
tmpValue := big.NewInt(1000)
tmpMessageNonce := big.NewInt(100)
tmpMessage := []byte("test for L2SentMessageEventSignature")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridgeAbi.L2SentMessageEvent)
tmpOut.Sender = tmpSendAddr
tmpOut.Value = tmpValue
tmpOut.Target = tmpTargetAddr
tmpOut.MessageNonce = tmpMessageNonce
tmpOut.Message = tmpMessage
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.Error(t, err)
assert.Empty(t, relayedMessages)
assert.Empty(t, l2Messages)
})
}
func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t)
defer utils.CloseDB(db)
@@ -314,9 +131,8 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})
@@ -329,9 +145,8 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
@@ -356,9 +171,8 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})
@@ -371,51 +185,9 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}
func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t)
defer utils.CloseDB(db)
logs := []gethTypes.Log{
{
Topics: []common.Hash{bridgeAbi.L2AppendMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack AppendMessage log failure", t, func() {
targetErr := errors.New("UnpackLog AppendMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})
convey.Convey("L2AppendMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridgeAbi.L2AppendMessageEvent)
tmpOut.MessageHash = msgHash
tmpOut.Index = big.NewInt(100)
return nil
})
defer patchGuard.Reset()
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})
}

View File

@@ -100,20 +100,14 @@ func TestFunction(t *testing.T) {
t.Run("TestL1WatcherClientFetchBlockHeader", testL1WatcherClientFetchBlockHeader)
t.Run("TestL1WatcherClientFetchContractEvent", testL1WatcherClientFetchContractEvent)
t.Run("TestParseBridgeEventLogsL1QueueTransactionEventSignature", testParseBridgeEventLogsL1QueueTransactionEventSignature)
t.Run("TestParseBridgeEventLogsL1RelayedMessageEventSignature", testParseBridgeEventLogsL1RelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL1FailedRelayedMessageEventSignature", testParseBridgeEventLogsL1FailedRelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL1CommitBatchEventSignature", testParseBridgeEventLogsL1CommitBatchEventSignature)
t.Run("TestParseBridgeEventLogsL1FinalizeBatchEventSignature", testParseBridgeEventLogsL1FinalizeBatchEventSignature)
// Run l2 watcher test cases.
t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)
t.Run("TestParseBridgeEventLogsL2SentMessageEventSignature", testParseBridgeEventLogsL2SentMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2RelayedMessageEventSignature", testParseBridgeEventLogsL2RelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2FailedRelayedMessageEventSignature", testParseBridgeEventLogsL2FailedRelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2AppendMessageEventSignature", testParseBridgeEventLogsL2AppendMessageEventSignature)
// Run chunk-proposer test cases.
t.Run("TestChunkProposer", testChunkProposer)

View File

@@ -189,9 +189,9 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
}
// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*bridgeTypes.Chunk, dbTX ...*gorm.DB) (string, error) {
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
if len(chunks) == 0 {
return "", errors.New("invalid args")
return nil, errors.New("invalid args")
}
db := o.db
@@ -202,7 +202,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
parentBatch, err := o.GetLatestBatch(ctx)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Error("failed to get the latest batch", "err", err)
return "", err
return nil, err
}
var batchIndex uint64
@@ -221,7 +221,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
parentBatchHeader, err = bridgeTypes.DecodeBatchHeader(parentBatch.BatchHeader)
if err != nil {
log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
return "", err
return nil, err
}
totalL1MessagePoppedBefore = parentBatchHeader.TotalL1MessagePopped()
@@ -233,7 +233,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
log.Error("failed to create batch header",
"index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
"parent hash", parentBatchHash, "number of chunks", len(chunks), "err", err)
return "", err
return nil, err
}
numChunks := len(chunks)
@@ -255,10 +255,10 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
if err := db.WithContext(ctx).Create(&newBatch).Error; err != nil {
log.Error("failed to insert batch", "batch", newBatch, "err", err)
return "", err
return nil, err
}
return newBatch.Hash, nil
return &newBatch, nil
}
// UpdateSkippedBatches updates the skipped batches in the database.

View File

@@ -52,6 +52,16 @@ func (m *L1Message) GetLayer1LatestWatchedHeight() (int64, error) {
return -1, nil
}
// GetLayer1LatestMessageWithLayer2Hash returns the latest L1 message that has a layer2_hash set
func (m *L1Message) GetLayer1LatestMessageWithLayer2Hash() (*L1Message, error) {
var msg *L1Message
err := m.db.Where("layer2_hash IS NOT NULL").Order("queue_index DESC").First(&msg).Error
if err != nil {
return nil, err
}
return msg, nil
}
// GetL1MessagesByStatus fetches a list of unprocessed messages with the given msg status
func (m *L1Message) GetL1MessagesByStatus(status types.MsgStatus, limit uint64) ([]L1Message, error) {
var msgs []L1Message

View File

@@ -1,127 +0,0 @@
package orm
import (
"context"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L2Message is the structure of a stored layer2 bridge message
type L2Message struct {
db *gorm.DB `gorm:"column:-"`
Nonce uint64 `json:"nonce" gorm:"column:nonce"`
MsgHash string `json:"msg_hash" gorm:"column:msg_hash"`
Height uint64 `json:"height" gorm:"column:height"`
Sender string `json:"sender" gorm:"column:sender"`
Value string `json:"value" gorm:"column:value"`
Target string `json:"target" gorm:"column:target"`
Calldata string `json:"calldata" gorm:"column:calldata"`
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash"`
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash;default:NULL"`
Proof string `json:"proof" gorm:"column:proof;default:NULL"`
Status int `json:"status" gorm:"column:status;default:1"`
}
// NewL2Message creates an L2Message instance
func NewL2Message(db *gorm.DB) *L2Message {
return &L2Message{db: db}
}
// TableName defines the L2Message table name
func (*L2Message) TableName() string {
return "l2_message"
}
// GetL2Messages fetches a list of messages matching the given fields
func (m *L2Message) GetL2Messages(fields map[string]interface{}, orderByList []string, limit int) ([]L2Message, error) {
var l2MsgList []L2Message
db := m.db
for key, value := range fields {
db = db.Where(key, value)
}
for _, orderBy := range orderByList {
db = db.Order(orderBy)
}
if limit != 0 {
db = db.Limit(limit)
}
if err := db.Find(&l2MsgList).Error; err != nil {
return nil, err
}
return l2MsgList, nil
}
// GetLayer2LatestWatchedHeight returns the latest height stored in the table
func (m *L2Message) GetLayer2LatestWatchedHeight() (int64, error) {
// @note This is not strictly correct, since some blocks may contain no messages.
// But it is only called at startup, so some redundancy is acceptable.
result := m.db.Model(&L2Message{}).Select("COALESCE(MAX(height), -1)").Row()
if result.Err() != nil {
return -1, result.Err()
}
var maxNumber int64
if err := result.Scan(&maxNumber); err != nil {
return 0, err
}
return maxNumber, nil
}
// GetL2MessageByNonce fetches a message by nonce
// for unit test
func (m *L2Message) GetL2MessageByNonce(nonce uint64) (*L2Message, error) {
var msg L2Message
err := m.db.Where("nonce", nonce).First(&msg).Error
if err != nil {
return nil, err
}
return &msg, nil
}
// SaveL2Messages batch saves a list of layer2 messages
func (m *L2Message) SaveL2Messages(ctx context.Context, messages []L2Message) error {
if len(messages) == 0 {
return nil
}
err := m.db.WithContext(ctx).Create(&messages).Error
if err != nil {
nonces := make([]uint64, 0, len(messages))
heights := make([]uint64, 0, len(messages))
for _, msg := range messages {
nonces = append(nonces, msg.Nonce)
heights = append(heights, msg.Height)
}
log.Error("failed to insert layer2Messages", "nonces", nonces, "heights", heights, "err", err)
}
return err
}
// UpdateLayer2Status updates message status, given message hash
func (m *L2Message) UpdateLayer2Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
err := m.db.Model(&L2Message{}).WithContext(ctx).Where("msg_hash", msgHash).Update("status", int(status)).Error
if err != nil {
return err
}
return nil
}
// UpdateLayer2StatusAndLayer1Hash updates message status and layer1 transaction hash, given message hash
func (m *L2Message) UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer1Hash string) error {
updateFields := map[string]interface{}{
"status": int(status),
"layer1_hash": layer1Hash,
}
err := m.db.Model(&L2Message{}).WithContext(ctx).Where("msg_hash", msgHash).Updates(updateFields).Error
if err != nil {
return err
}
return nil
}

View File

@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, 6, int(cur))
assert.Equal(t, 5, int(cur))
}
func testMigrate(t *testing.T) {

View File

@@ -30,4 +30,4 @@ on l1_block (number);
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_block;
-- +goose StatementEnd
-- +goose StatementEnd

View File

@@ -1,37 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table l2_message
(
nonce BIGINT NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
calldata TEXT NOT NULL,
layer2_hash VARCHAR NOT NULL,
layer1_hash VARCHAR DEFAULT NULL,
proof TEXT DEFAULT NULL,
status INTEGER DEFAULT 1,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);
comment
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';
create unique index l2_message_hash_uindex
on l2_message (msg_hash);
create unique index l2_message_nonce_uindex
on l2_message (nonce);
create index l2_message_height_index
on l2_message (height);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l2_message;
-- +goose StatementEnd

View File

@@ -188,20 +188,22 @@ func TestBatchOrm(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
hash1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
assert.NoError(t, err)
hash1 := batch1.Hash
batch1, err := batchOrm.GetBatchByIndex(context.Background(), 0)
batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
assert.NoError(t, err)
batchHeader1, err := bridgeTypes.DecodeBatchHeader(batch1.BatchHeader)
assert.NoError(t, err)
batchHash1 := batchHeader1.Hash().Hex()
assert.Equal(t, hash1, batchHash1)
hash2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
assert.NoError(t, err)
hash2 := batch2.Hash
batch2, err := batchOrm.GetBatchByIndex(context.Background(), 1)
batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err)
batchHeader2, err := bridgeTypes.DecodeBatchHeader(batch2.BatchHeader)
assert.NoError(t, err)

View File

@@ -36,7 +36,7 @@ func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64
// the next queue index that we need to process
nextIndex := totalL1MessagePoppedBefore
for _, chunk := range chunks {
for chunkID, chunk := range chunks {
// build data hash
totalL1MessagePoppedBeforeChunk := nextIndex
chunkHash, err := chunk.Hash(totalL1MessagePoppedBeforeChunk)
@@ -46,7 +46,7 @@ func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64
dataBytes = append(dataBytes, chunkHash.Bytes()...)
// build skip bitmap
for _, block := range chunk.Blocks {
for blockID, block := range chunk.Blocks {
for _, tx := range block.Transactions {
if tx.Type != types.L1MessageTxType {
continue
@@ -54,7 +54,7 @@ func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64
currentIndex := tx.Nonce
if currentIndex < nextIndex {
return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d", nextIndex, currentIndex)
return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d. Batch index: %d, chunk index in batch: %d, block index in chunk: %d, block hash: %v, transaction hash: %v", nextIndex, currentIndex, batchIndex, chunkID, blockID, block.Header.Hash(), tx.TxHash)
}
// mark skipped messages
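
To make the invariant behind this richer error message concrete: queue indices of L1 message transactions must be non-decreasing across the whole batch, and any gap is recorded as a skipped message. A self-contained sketch of that rule, with illustrative names rather than the bridge's actual API:

```go
package main

import "fmt"

// checkQueueIndices walks L1-message queue indices in batch order, starting
// from totalL1MessagePoppedBefore, and returns the indices that must be
// marked skipped. An index smaller than the running expectation means the
// batch payload is malformed.
func checkQueueIndices(start uint64, indices []uint64) ([]uint64, error) {
	next := start
	var skipped []uint64
	for _, cur := range indices {
		if cur < next {
			return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d", next, cur)
		}
		for ; next < cur; next++ { // gap: these messages are skipped
			skipped = append(skipped, next)
		}
		next = cur + 1
	}
	return skipped, nil
}

func main() {
	skipped, err := checkQueueIndices(10, []uint64{10, 11, 14})
	fmt.Println(skipped, err) // prints: [12 13] <nil>
}
```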

View File

@@ -68,7 +68,7 @@ func testImportL2GasPrice(t *testing.T) {
prepareContracts(t)
l2Cfg := bridgeApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// add fake chunk

View File

@@ -29,7 +29,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create L2Relayer
l2Cfg := bridgeApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// Create L1Watcher
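
Both relayer test sites now pass a trailing boolean to NewLayer2Relayer. The diff does not show the parameter's name; given the import-genesis flag introduced elsewhere in this change set, a plausible reading is a genesis-import switch, so treat the annotation below as an assumption rather than the relayer's documented API:

```go
// Hedged sketch: the final argument's meaning is inferred, not confirmed here.
l2Relayer, err := relayer.NewLayer2Relayer(
	context.Background(),
	l2Client,
	db,
	l2Cfg.RelayerConfig,
	false, // assumed: skip committing the genesis batch during startup in tests
)
assert.NoError(t, err)
```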

View File

@@ -5,8 +5,6 @@ import (
"database/sql"
"fmt"
"time"
"scroll-tech/common/types/message"
)
// L1BlockStatus represents current l1 block processing status
@@ -161,11 +159,18 @@ type RollerStatus struct {
// SessionInfo is assigned rollers info of a block batch (session)
type SessionInfo struct {
ID string `json:"id"`
Rollers map[string]*RollerStatus `json:"rollers"`
StartTimestamp int64 `json:"start_timestamp"`
Attempts uint8 `json:"attempts,omitempty"`
ProveType message.ProveType `json:"prove_type,omitempty"`
ID int `json:"id" db:"id"`
TaskID string `json:"task_id" db:"task_id"`
RollerPublicKey string `json:"roller_public_key" db:"roller_public_key"`
ProveType int16 `json:"prove_type" db:"prove_type"`
RollerName string `json:"roller_name" db:"roller_name"`
ProvingStatus int16 `json:"proving_status" db:"proving_status"`
FailureType int16 `json:"failure_type" db:"failure_type"`
Reward uint64 `json:"reward" db:"reward"`
Proof []byte `json:"proof" db:"proof"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
}
// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
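
SessionInfo is now a flat DB row, with prove_type and proving_status stored as SMALLINT columns. Consumers convert back to the typed enums at the boundary, a pattern used repeatedly by the coordinator code later in this diff; a minimal sketch (the helper is hypothetical and would live alongside this type):

```go
// Hypothetical helper mirroring the cast-at-the-boundary pattern used below.
func (si *SessionInfo) proofValid() bool {
	return RollerProveStatus(si.ProvingStatus) == RollerProofValid
}
```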

View File

@@ -16,6 +16,10 @@ var (
&MetricsAddr,
&MetricsPort,
}
// RollupRelayerFlags contains flags only used in rollup-relayer
RollupRelayerFlags = []cli.Flag{
&ImportGenesisFlag,
}
// ConfigFileFlag load json type config file.
ConfigFileFlag = cli.StringFlag{
Name: "config",
@@ -66,4 +70,10 @@ var (
Category: "METRICS",
Value: 6060,
}
// ImportGenesisFlag import genesis batch during startup
ImportGenesisFlag = cli.BoolFlag{
Name: "import-genesis",
Usage: "Import genesis batch into L1 contract during startup",
Value: false,
}
)
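
A minimal sketch of how a rollup-relayer entrypoint might read the new flag, assuming the urfave/cli conventions already used by the existing flags. Only RollupRelayerFlags and ImportGenesisFlag come from the code above; the rest is illustrative wiring:

```go
// Hypothetical entrypoint wiring for the new flag.
func main() {
	app := &cli.App{
		Name:  "rollup-relayer",
		Flags: RollupRelayerFlags,
		Action: func(ctx *cli.Context) error {
			importGenesis := ctx.Bool(ImportGenesisFlag.Name)
			// importGenesis would be threaded into the relayer setup,
			// matching the extra boolean now taken by NewLayer2Relayer
			// elsewhere in this diff.
			_ = importGenesis
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Println(err)
	}
}
```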

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.0.0"
var tag = "v4.0.7-fix"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -71,7 +71,7 @@ Reference testnet [run_deploy_contracts.sh](https://github.com/scroll-tech/testn
## Deployment using Foundry
Note: The Foundry scripts take parameters like `CHAIN_ID_L2` and `L1_ZK_ROLLUP_PROXY_ADDR` as environment variables.
Note: The Foundry scripts take parameters like `CHAIN_ID_L2` and `L1_SCROLL_CHAIN_PROXY_ADDR` as environment variables.
```bash
# allexport
@@ -101,4 +101,4 @@ $ source .env.l2_addresses
# Initialize contracts
$ forge script scripts/foundry/InitializeL1BridgeContracts.s.sol:InitializeL1BridgeContracts --rpc-url $SCROLL_L1_RPC --broadcast
$ forge script scripts/foundry/InitializeL2BridgeContracts.s.sol:InitializeL2BridgeContracts --rpc-url $SCROLL_L2_RPC --broadcast
```
```

View File

@@ -11,7 +11,7 @@ async function main() {
const ScrollChainCommitmentVerifier = await ethers.getContractFactory("ScrollChainCommitmentVerifier", deployer);
const L1ScrollChainAddress = process.env.L1_ZK_ROLLUP_PROXY_ADDR!;
const L1ScrollChainAddress = process.env.L1_SCROLL_CHAIN_PROXY_ADDR!;
let PoseidonUnit2Address = process.env.POSEIDON_UNIT2_ADDR;
if (!PoseidonUnit2Address) {

View File

@@ -7,18 +7,18 @@ import {console} from "forge-std/console.sol";
import {ProxyAdmin} from "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol";
import {TransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";
import {EnforcedTxGateway} from "../../src/L1/gateways/EnforcedTxGateway.sol";
import {L1CustomERC20Gateway} from "../../src/L1/gateways/L1CustomERC20Gateway.sol";
import {L1ERC1155Gateway} from "../../src/L1/gateways/L1ERC1155Gateway.sol";
import {L1ERC721Gateway} from "../../src/L1/gateways/L1ERC721Gateway.sol";
import {L1ETHGateway} from "../../src/L1/gateways/L1ETHGateway.sol";
import {L1GatewayRouter} from "../../src/L1/gateways/L1GatewayRouter.sol";
import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
import {L1ScrollMessenger} from "../../src/L1/L1ScrollMessenger.sol";
import {L1StandardERC20Gateway} from "../../src/L1/gateways/L1StandardERC20Gateway.sol";
import {L1WETHGateway} from "../../src/L1/gateways/L1WETHGateway.sol";
import {EnforcedTxGateway} from "../../src/L1/gateways/EnforcedTxGateway.sol";
import {RollupVerifier} from "../../src/libraries/verifier/RollupVerifier.sol";
import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
import {L2GasPriceOracle} from "../../src/L1/rollup/L2GasPriceOracle.sol";
import {MultipleVersionRollupVerifier} from "../../src/L1/rollup/MultipleVersionRollupVerifier.sol";
import {ScrollChain} from "../../src/L1/rollup/ScrollChain.sol";
import {Whitelist} from "../../src/L2/predeploys/Whitelist.sol";
@@ -29,14 +29,14 @@ contract DeployL1BridgeContracts is Script {
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
address L1_ZKEVM_VERIFIER_ADDR = vm.envAddress("L1_ZKEVM_VERIFIER_ADDR");
ProxyAdmin proxyAdmin;
function run() external {
vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);
// note: the RollupVerifier library is deployed implicitly
deployMultipleVersionRollupVerifier();
deployProxyAdmin();
deployL1Whitelist();
deployL1MessageQueue();
@@ -55,6 +55,12 @@ contract DeployL1BridgeContracts is Script {
vm.stopBroadcast();
}
function deployMultipleVersionRollupVerifier() internal {
MultipleVersionRollupVerifier rollupVerifier = new MultipleVersionRollupVerifier(L1_ZKEVM_VERIFIER_ADDR);
logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));
}
function deployProxyAdmin() internal {
proxyAdmin = new ProxyAdmin();
@@ -76,8 +82,8 @@ contract DeployL1BridgeContracts is Script {
new bytes(0)
);
logAddress("L1_ZK_ROLLUP_IMPLEMENTATION_ADDR", address(impl));
logAddress("L1_ZK_ROLLUP_PROXY_ADDR", address(proxy));
logAddress("L1_SCROLL_CHAIN_IMPLEMENTATION_ADDR", address(impl));
logAddress("L1_SCROLL_CHAIN_PROXY_ADDR", address(proxy));
}
function deployL1MessageQueue() internal {
@@ -170,8 +176,8 @@ contract DeployL1BridgeContracts is Script {
new bytes(0)
);
logAddress("ENFORCED_TX_GATEWAY_IMPLEMENTATION_ADDR", address(impl));
logAddress("ENFORCED_TX_GATEWAY_PROXY_ADDR", address(proxy));
logAddress("L1_ENFORCED_TX_GATEWAY_IMPLEMENTATION_ADDR", address(impl));
logAddress("L1_ENFORCED_TX_GATEWAY_PROXY_ADDR", address(proxy));
}
function deployL1CustomERC20Gateway() internal {

View File

@@ -1,36 +1,36 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.10;
import { Script } from "forge-std/Script.sol";
import { console } from "forge-std/console.sol";
import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";
import { ScrollChainCommitmentVerifier } from "../../src/L1/rollup/ScrollChainCommitmentVerifier.sol";
import {ScrollChainCommitmentVerifier} from "../../src/L1/rollup/ScrollChainCommitmentVerifier.sol";
contract DeployScrollChainCommitmentVerifier is Script {
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
address L1_ZK_ROLLUP_PROXY_ADDR = vm.envAddress("L1_ZK_ROLLUP_PROXY_ADDR");
address L1_SCROLL_CHAIN_PROXY_ADDR = vm.envAddress("L1_SCROLL_CHAIN_PROXY_ADDR");
address POSEIDON_UNIT2_ADDR = vm.envAddress("POSEIDON_UNIT2_ADDR");
address POSEIDON_UNIT2_ADDR = vm.envAddress("POSEIDON_UNIT2_ADDR");
function run() external {
vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);
function run() external {
vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);
deployScrollChainCommitmentVerifier();
deployScrollChainCommitmentVerifier();
vm.stopBroadcast();
}
vm.stopBroadcast();
}
function deployScrollChainCommitmentVerifier() internal {
ScrollChainCommitmentVerifier verifier = new ScrollChainCommitmentVerifier(
POSEIDON_UNIT2_ADDR,
L1_ZK_ROLLUP_PROXY_ADDR
);
function deployScrollChainCommitmentVerifier() internal {
ScrollChainCommitmentVerifier verifier = new ScrollChainCommitmentVerifier(
POSEIDON_UNIT2_ADDR,
L1_SCROLL_CHAIN_PROXY_ADDR
);
logAddress("L1_SCROLL_CHAIN_COMMITMENT_VERIFIER", address(verifier));
}
logAddress("L1_SCROLL_CHAIN_COMMITMENT_VERIFIER", address(verifier));
}
function logAddress(string memory name, address addr) internal view {
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
}
function logAddress(string memory name, address addr) internal view {
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
}
}

View File

@@ -20,14 +20,13 @@ contract InitializeL1BridgeContracts is Script {
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");
uint256 MAX_L2_TX_IN_CHUNK = vm.envOr("MAX_L2_TX_IN_CHUNK", uint256(44));
uint256 MAX_L2_TX_IN_CHUNK = vm.envUint("MAX_L2_TX_IN_CHUNK");
address L1_ROLLUP_OPERATOR_ADDR = vm.envAddress("L1_ROLLUP_OPERATOR_ADDR");
address L1_FEE_VAULT_ADDR = vm.envAddress("L1_FEE_VAULT_ADDR");
address L1_WHITELIST_ADDR = vm.envAddress("L1_WHITELIST_ADDR");
address L1_ZK_ROLLUP_PROXY_ADDR = vm.envAddress("L1_ZK_ROLLUP_PROXY_ADDR");
address L1_ROLLUP_VERIFIER_ADDR = vm.envAddress("L1_ROLLUP_VERIFIER_ADDR");
address L1_SCROLL_CHAIN_PROXY_ADDR = vm.envAddress("L1_SCROLL_CHAIN_PROXY_ADDR");
address L1_MESSAGE_QUEUE_PROXY_ADDR = vm.envAddress("L1_MESSAGE_QUEUE_PROXY_ADDR");
address L2_GAS_PRICE_ORACLE_PROXY_ADDR = vm.envAddress("L2_GAS_PRICE_ORACLE_PROXY_ADDR");
address L1_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L1_SCROLL_MESSENGER_PROXY_ADDR");
@@ -38,7 +37,8 @@ contract InitializeL1BridgeContracts is Script {
address L1_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ETH_GATEWAY_PROXY_ADDR");
address L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
address L1_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_WETH_GATEWAY_PROXY_ADDR");
address ENFORCED_TX_GATEWAY_PROXY_ADDR = vm.envAddress("ENFORCED_TX_GATEWAY_PROXY_ADDR");
address L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR = vm.envAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR");
address L1_ENFORCED_TX_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ENFORCED_TX_GATEWAY_PROXY_ADDR");
address L2_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L2_SCROLL_MESSENGER_PROXY_ADDR");
address L2_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L2_GATEWAY_ROUTER_PROXY_ADDR");
@@ -55,8 +55,12 @@ contract InitializeL1BridgeContracts is Script {
vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);
// initialize ScrollChain
ScrollChain(L1_ZK_ROLLUP_PROXY_ADDR).initialize(L1_MESSAGE_QUEUE_PROXY_ADDR, L1_ROLLUP_VERIFIER_ADDR, MAX_L2_TX_IN_CHUNK);
ScrollChain(L1_ZK_ROLLUP_PROXY_ADDR).updateSequencer(L1_ROLLUP_OPERATOR_ADDR, true);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).initialize(
L1_MESSAGE_QUEUE_PROXY_ADDR,
L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR,
MAX_L2_TX_IN_CHUNK
);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).updateSequencer(L1_ROLLUP_OPERATOR_ADDR, true);
// initialize L2GasPriceOracle
L2GasPriceOracle(L2_GAS_PRICE_ORACLE_PROXY_ADDR).initialize(0, 0, 0, 0);
@@ -65,8 +69,8 @@ contract InitializeL1BridgeContracts is Script {
// initialize L1MessageQueue
L1MessageQueue(L1_MESSAGE_QUEUE_PROXY_ADDR).initialize(
L1_SCROLL_MESSENGER_PROXY_ADDR,
L1_ZK_ROLLUP_PROXY_ADDR,
ENFORCED_TX_GATEWAY_PROXY_ADDR,
L1_SCROLL_CHAIN_PROXY_ADDR,
L1_ENFORCED_TX_GATEWAY_PROXY_ADDR,
L2_GAS_PRICE_ORACLE_PROXY_ADDR,
10000000
);
@@ -75,13 +79,13 @@ contract InitializeL1BridgeContracts is Script {
L1ScrollMessenger(payable(L1_SCROLL_MESSENGER_PROXY_ADDR)).initialize(
L2_SCROLL_MESSENGER_PROXY_ADDR,
L1_FEE_VAULT_ADDR,
L1_ZK_ROLLUP_PROXY_ADDR,
L1_SCROLL_CHAIN_PROXY_ADDR,
L1_MESSAGE_QUEUE_PROXY_ADDR
);
// initialize EnforcedTxGateway
EnforcedTxGateway(payable(ENFORCED_TX_GATEWAY_PROXY_ADDR)).initialize(
L2_SCROLL_MESSENGER_PROXY_ADDR,
EnforcedTxGateway(payable(L1_ENFORCED_TX_GATEWAY_PROXY_ADDR)).initialize(
L1_MESSAGE_QUEUE_PROXY_ADDR,
L1_FEE_VAULT_ADDR
);

View File

@@ -15,7 +15,7 @@ async function main() {
const [deployer] = await ethers.getSigners();
const rollupAddr = process.env.L1_ZK_ROLLUP_PROXY_ADDR || addressFile.get("ScrollChain.proxy") || "0x";
const rollupAddr = process.env.L1_SCROLL_CHAIN_PROXY_ADDR || addressFile.get("ScrollChain.proxy") || "0x";
console.log("Using rollup proxy address:", rollupAddr);
const ScrollChain = await ethers.getContractAt("ScrollChain", rollupAddr, deployer);

View File

@@ -51,10 +51,12 @@ func (m *Manager) ListRollers() ([]*RollerInfo, error) {
PublicKey: pk,
}
for id, sess := range m.sessions {
if _, ok := sess.info.Rollers[pk]; ok {
info.ActiveSessionStartTime = time.Unix(sess.info.StartTimestamp, 0)
info.ActiveSession = id
break
for _, sessionInfo := range sess.sessionInfos {
if sessionInfo.RollerPublicKey == pk {
info.ActiveSessionStartTime = *sessionInfo.CreatedAt
info.ActiveSession = id
break
}
}
}
res = append(res, info)
@@ -66,14 +68,14 @@ func (m *Manager) ListRollers() ([]*RollerInfo, error) {
func newSessionInfo(sess *session, status types.ProvingStatus, errMsg string, finished bool) *SessionInfo {
now := time.Now()
var nameList []string
for pk := range sess.info.Rollers {
nameList = append(nameList, sess.info.Rollers[pk].Name)
for _, sessionInfo := range sess.sessionInfos {
nameList = append(nameList, sessionInfo.RollerName)
}
info := SessionInfo{
ID: sess.info.ID,
ID: sess.taskID,
Status: status.String(),
AssignedRollers: nameList,
StartTime: time.Unix(sess.info.StartTimestamp, 0),
StartTime: *sess.sessionInfos[0].CreatedAt,
Error: errMsg,
}
if finished {

File diff suppressed because one or more lines are too long

Binary file not shown.

View File

@@ -57,7 +57,8 @@ type rollerProofStatus struct {
// Contains all the information on an ongoing proof generation session.
type session struct {
info *types.SessionInfo
taskID string
sessionInfos []*types.SessionInfo
// finish channel is used to pass the public key of the rollers who finished the proving process.
finishChan chan rollerProofStatus
}
@@ -248,24 +249,21 @@ func (m *Manager) restorePrevSessions() {
log.Error("failed to recover roller session info from db", "error", err)
return
}
sessionInfosMaps := make(map[string][]*types.SessionInfo)
for _, v := range prevSessions {
log.Info("restore roller info for session", "session start time", v.CreatedAt, "session id", v.TaskID, "roller name",
v.RollerName, "prove type", v.ProveType, "public key", v.RollerPublicKey, "proof status", v.ProvingStatus)
sessionInfosMaps[v.TaskID] = append(sessionInfosMaps[v.TaskID], v)
}
for taskID, sessionInfos := range sessionInfosMaps {
sess := &session{
info: v,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
taskID: taskID,
sessionInfos: sessionInfos,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.sessions[sess.info.ID] = sess
log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
for _, roller := range sess.info.Rollers {
log.Info(
"restore roller info for session",
"session id", sess.info.ID,
"roller name", roller.Name,
"prove type", sess.info.ProveType,
"public key", roller.PublicKey,
"proof status", roller.Status)
}
m.sessions[taskID] = sess
go m.CollectProofs(sess)
}
@@ -287,36 +285,40 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
if !ok {
return fmt.Errorf("proof generation session for id %v does not existID", msg.ID)
}
proofTime := time.Since(time.Unix(sess.info.StartTimestamp, 0))
var tmpSessionInfo *types.SessionInfo
for _, si := range sess.sessionInfos {
// find the session info that this proof message was sent for
if si.TaskID == msg.ID && si.RollerPublicKey == pk {
tmpSessionInfo = si
}
}
if tmpSessionInfo == nil {
return fmt.Errorf("proof generation session for id %v pk:%s does not existID", msg.ID, pk)
}
proofTime := time.Since(*tmpSessionInfo.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
// Ensure this roller is eligible to participate in the session.
roller, ok := sess.info.Rollers[pk]
if !ok {
return fmt.Errorf("roller %s %s (%s) is not eligible to partake in proof session %v", roller.Name, sess.info.ProveType, roller.PublicKey, msg.ID)
}
if roller.Status == types.RollerProofValid {
if types.RollerProveStatus(tmpSessionInfo.ProvingStatus) == types.RollerProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the roller for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn(
"roller has already submitted valid proof in proof session",
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"prove type", sess.info.ProveType,
"roller name", tmpSessionInfo.RollerName,
"roller pk", tmpSessionInfo.RollerPublicKey,
"prove type", tmpSessionInfo.ProveType,
"proof id", msg.ID,
)
return nil
}
log.Info(
"handling zk proof",
"proof id", msg.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"prove type", sess.info.ProveType,
"proof time", proofTimeSec,
)
log.Info("handling zk proof", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName, "roller pk",
tmpSessionInfo.RollerPublicKey, "prove type", tmpSessionInfo.ProveType, "proof time", proofTimeSec)
defer func() {
// TODO: maybe we should use db tx for the whole process?
@@ -344,12 +346,12 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
if msg.Status != message.StatusOk {
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
m.updateMetricRollerProofsGeneratedFailedTimeTimer(roller.PublicKey, proofTime)
m.updateMetricRollerProofsGeneratedFailedTimeTimer(tmpSessionInfo.RollerPublicKey, proofTime)
log.Info(
"proof generated by roller failed",
"proof id", msg.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"roller name", tmpSessionInfo.RollerName,
"roller pk", tmpSessionInfo.RollerPublicKey,
"prove type", msg.Type,
"proof time", proofTimeSec,
"error", msg.Error,
@@ -383,8 +385,8 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
if verifyErr != nil {
// TODO: this is only a temp workaround for testnet, we should return err in real cases
success = false
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", roller.Name,
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName,
"roller pk", tmpSessionInfo.RollerPublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
// TODO: Roller needs to be slashed if proof is invalid.
}
@@ -411,18 +413,32 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
}
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(roller.PublicKey, proofTime)
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", roller.Name,
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec)
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(tmpSessionInfo.RollerPublicKey, proofTime)
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName,
"roller pk", tmpSessionInfo.RollerPublicKey, "prove type", msg.Type, "proof time", proofTimeSec)
} else {
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
m.updateMetricRollerProofsVerifiedFailedTimeTimer(roller.PublicKey, proofTime)
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", roller.Name,
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
m.updateMetricRollerProofsVerifiedFailedTimeTimer(tmpSessionInfo.RollerPublicKey, proofTime)
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName,
"roller pk", tmpSessionInfo.RollerPublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
}
return nil
}
// checkAttemptsExceeded uses the count of session_info rows to check whether the attempt limit has been reached
func (m *Manager) checkAttemptsExceeded(hash string) bool {
sessionInfos, err := m.orm.GetSessionInfosByHashes([]string{hash})
if err != nil {
log.Error("get session info error", "hash id", hash, "error", err)
return true
}
if len(sessionInfos) >= int(m.cfg.SessionAttempts) {
return true
}
return false
}
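
The retry budget thus moves from the old in-memory Attempts counter to row counting: every assignment inserts one session_info row, so rows-per-task equals attempts. Note the fail-closed choice on a lookup error. Restated as a pure function for clarity (hypothetical, not part of the diff):

```go
// attemptsExceeded mirrors checkAttemptsExceeded above: a DB error counts as
// exceeded, so an unhealthy database can never cause unbounded retries.
func attemptsExceeded(rows int, lookupErr error, limit int) bool {
	if lookupErr != nil {
		return true
	}
	return rows >= limit
}
```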
// CollectProofs collects proofs corresponding to a proof generation session.
func (m *Manager) CollectProofs(sess *session) {
coordinatorSessionsActiveNumberGauge.Inc(1)
@@ -432,48 +448,47 @@ func (m *Manager) CollectProofs(sess *session) {
select {
//Execute after timeout, set in config.json. Consider all rollers failed.
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
// Check if session can be replayed
if sess.info.Attempts < m.cfg.SessionAttempts {
if !m.checkAttemptsExceeded(sess.taskID) {
var success bool
if sess.info.ProveType == message.AggregatorProve {
if message.ProveType(sess.sessionInfos[0].ProveType) == message.AggregatorProve {
success = m.StartAggProofGenerationSession(nil, sess)
} else if sess.info.ProveType == message.BasicProve {
} else if message.ProveType(sess.sessionInfos[0].ProveType) == message.BasicProve {
success = m.StartBasicProofGenerationSession(nil, sess)
}
if success {
m.mu.Lock()
for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
for _, v := range sess.sessionInfos {
m.freeTaskIDForRoller(v.RollerPublicKey, v.TaskID)
}
m.mu.Unlock()
log.Info("Retrying session", "session id:", sess.info.ID)
log.Info("Retrying session", "session id:", sess.taskID)
return
}
}
// record failed session.
errMsg := "proof generation session ended without receiving any valid proofs"
m.addFailedSession(sess, errMsg)
log.Warn(errMsg, "session id", sess.info.ID)
log.Warn(errMsg, "session id", sess.taskID)
// Set status as skipped.
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
if sess.info.ProveType == message.BasicProve {
if err := m.orm.UpdateProvingStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset basic task_status as Unassigned", "id", sess.info.ID, "err", err)
if message.ProveType(sess.sessionInfos[0].ProveType) == message.BasicProve {
if err := m.orm.UpdateProvingStatus(sess.taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset basic task_status as Unassigned", "id", sess.taskID, "err", err)
}
}
if sess.info.ProveType == message.AggregatorProve {
if err := m.orm.UpdateAggTaskStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset aggregator task_status as Unassigned", "id", sess.info.ID, "err", err)
if message.ProveType(sess.sessionInfos[0].ProveType) == message.AggregatorProve {
if err := m.orm.UpdateAggTaskStatus(sess.taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset aggregator task_status as Unassigned", "id", sess.taskID, "err", err)
}
}
m.mu.Lock()
for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
for _, v := range sess.sessionInfos {
m.freeTaskIDForRoller(v.RollerPublicKey, v.TaskID)
}
delete(m.sessions, sess.info.ID)
delete(m.sessions, sess.taskID)
m.mu.Unlock()
coordinatorSessionsTimeoutTotalCounter.Inc(1)
return
@@ -481,7 +496,12 @@ func (m *Manager) CollectProofs(sess *session) {
//Execute after one of the rollers finishes sending its proof; return early if all rollers have sent results.
case ret := <-sess.finishChan:
m.mu.Lock()
sess.info.Rollers[ret.pk].Status = ret.status
for idx := range sess.sessionInfos {
if sess.sessionInfos[idx].RollerPublicKey == ret.pk {
sess.sessionInfos[idx].ProvingStatus = int16(ret.status)
}
}
if sess.isSessionFailed() {
if ret.typ == message.BasicProve {
if err := m.orm.UpdateProvingStatus(ret.id, types.ProvingTaskFailed); err != nil {
@@ -493,12 +513,13 @@ func (m *Manager) CollectProofs(sess *session) {
log.Error("failed to update aggregator proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}
if err := m.orm.SetSessionInfo(sess.info); err != nil {
if err := m.orm.UpdateSessionInfoProvingStatus(m.ctx, ret.typ, ret.id, ret.pk, ret.status); err != nil {
log.Error("db set session info fail", "pk", ret.pk, "error", err)
}
//Check if all rollers have finished their tasks, and rollers with valid results are indexed by public key.
finished, validRollers := sess.isRollersFinished()
@@ -508,11 +529,10 @@ func (m *Manager) CollectProofs(sess *session) {
randIndex := rand.Int63n(int64(len(validRollers)))
_ = validRollers[randIndex]
// TODO: reward winner
for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
for _, sessionInfo := range sess.sessionInfos {
m.freeTaskIDForRoller(sessionInfo.RollerPublicKey, sessionInfo.TaskID)
delete(m.sessions, sessionInfo.TaskID)
}
delete(m.sessions, sess.info.ID)
m.mu.Unlock()
coordinatorSessionsSuccessTotalCounter.Inc(1)
@@ -528,14 +548,16 @@ func (m *Manager) CollectProofs(sess *session) {
// validRollers also collects the public keys of the rollers that finished their tasks correctly.
func (s *session) isRollersFinished() (bool, []string) {
var validRollers []string
for pk, roller := range s.info.Rollers {
if roller.Status == types.RollerProofValid {
validRollers = append(validRollers, pk)
for _, sessionInfo := range s.sessionInfos {
if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofValid {
validRollers = append(validRollers, sessionInfo.RollerPublicKey)
continue
}
if roller.Status == types.RollerProofInvalid {
if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofInvalid {
continue
}
// Some rollers are still proving.
return false, nil
}
@@ -543,8 +565,8 @@ func (s *session) isRollersFinished() (bool, []string) {
}
func (s *session) isSessionFailed() bool {
for _, roller := range s.info.Rollers {
if roller.Status != types.RollerProofInvalid {
for _, sessionInfo := range s.sessionInfos {
if types.RollerProveStatus(sessionInfo.ProvingStatus) != types.RollerProofInvalid {
return false
}
}
@@ -573,7 +595,7 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
if task != nil {
taskID = task.Hash
} else {
taskID = prevSession.info.ID
taskID = prevSession.taskID
}
if m.GetNumberOfIdleRollers(message.BasicProve) == 0 {
log.Warn("no idle basic roller when starting proof generation session", "id", taskID)
@@ -612,7 +634,7 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
}
// Dispatch task to basic rollers.
rollers := make(map[string]*types.RollerStatus)
var sessionInfos []*types.SessionInfo
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.BasicProve)
if roller == nil {
@@ -626,10 +648,27 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
continue
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
now := time.Now()
tmpSessionInfo := types.SessionInfo{
TaskID: taskID,
RollerPublicKey: roller.PublicKey,
ProveType: int16(message.BasicProve),
RollerName: roller.Name,
CreatedAt: &now,
ProvingStatus: int16(types.RollerAssigned),
}
// Store session info.
if err = m.orm.SetSessionInfo(&tmpSessionInfo); err != nil {
log.Error("db set session info fail", "session id", taskID, "error", err)
return false
}
sessionInfos = append(sessionInfos, &tmpSessionInfo)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.BasicProve, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", tmpSessionInfo.ProvingStatus)
}
// No roller assigned.
if len(rollers) == 0 {
if len(sessionInfos) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle basic rollers", m.GetNumberOfIdleRollers(message.BasicProve))
return false
}
@@ -642,33 +681,9 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
// Create a proof generation session.
sess := &session{
info: &types.SessionInfo{
ID: taskID,
Rollers: rollers,
ProveType: message.BasicProve,
StartTimestamp: time.Now().Unix(),
Attempts: 1,
},
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
if prevSession != nil {
sess.info.Attempts += prevSession.info.Attempts
}
for _, roller := range sess.info.Rollers {
log.Info(
"assigned proof to roller",
"session id", sess.info.ID,
"session type", sess.info.ProveType,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof status", roller.Status)
}
// Store session info.
if err = m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "session id", sess.info.ID, "error", err)
return false
taskID: taskID,
sessionInfos: sessionInfos,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.mu.Lock()
@@ -685,7 +700,7 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
if task != nil {
taskID = task.ID
} else {
taskID = prevSession.info.ID
taskID = prevSession.taskID
}
if m.GetNumberOfIdleRollers(message.AggregatorProve) == 0 {
log.Warn("no idle common roller when starting proof generation session", "id", taskID)
@@ -715,7 +730,7 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
}
// Dispatch task to aggregator rollers.
rollers := make(map[string]*types.RollerStatus)
var sessionInfos []*types.SessionInfo
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.AggregatorProve)
if roller == nil {
@@ -732,11 +747,29 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
continue
}
now := time.Now()
tmpSessionInfo := types.SessionInfo{
TaskID: taskID,
RollerPublicKey: roller.PublicKey,
ProveType: int16(message.AggregatorProve),
RollerName: roller.Name,
CreatedAt: &now,
ProvingStatus: int16(types.RollerAssigned),
}
// Store session info.
if err = m.orm.SetSessionInfo(&tmpSessionInfo); err != nil {
log.Error("db set session info fail", "session id", taskID, "error", err)
return false
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
sessionInfos = append(sessionInfos, &tmpSessionInfo)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.AggregatorProve, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", tmpSessionInfo.ProvingStatus)
}
// No roller assigned.
if len(rollers) == 0 {
if len(sessionInfos) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle aggregator rollers", m.GetNumberOfIdleRollers(message.AggregatorProve))
return false
}
@@ -749,33 +782,9 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
// Create a proof generation session.
sess := &session{
info: &types.SessionInfo{
ID: taskID,
Rollers: rollers,
ProveType: message.AggregatorProve,
StartTimestamp: time.Now().Unix(),
Attempts: 1,
},
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
if prevSession != nil {
sess.info.Attempts += prevSession.info.Attempts
}
for _, roller := range sess.info.Rollers {
log.Info(
"assigned proof to roller",
"session id", sess.info.ID,
"session type", sess.info.ProveType,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof status", roller.Status)
}
// Store session info.
if err = m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "session id", sess.info.ID, "error", err)
return false
taskID: taskID,
sessionInfos: sessionInfos,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.mu.Lock()
@@ -789,7 +798,7 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
func (m *Manager) addFailedSession(sess *session, errMsg string) {
m.mu.Lock()
defer m.mu.Unlock()
m.failedSessionInfos[sess.info.ID] = newSessionInfo(sess, types.ProvingTaskFailed, errMsg, true)
m.failedSessionInfos[sess.taskID] = newSessionInfo(sess, types.ProvingTaskFailed, errMsg, true)
}
// VerifyToken verifies the public key for token and expiration time

View File

@@ -53,8 +53,8 @@ func (m *Manager) reloadRollerAssignedTasks(pubkey string) *cmap.ConcurrentMap {
defer m.mu.RUnlock()
taskIDs := cmap.New()
for id, sess := range m.sessions {
for pk, roller := range sess.info.Rollers {
if pk == pubkey && roller.Status == types.RollerAssigned {
for _, sessionInfo := range sess.sessionInfos {
if sessionInfo.RollerPublicKey == pubkey && sessionInfo.ProvingStatus == int16(types.RollerAssigned) {
taskIDs.Set(id, struct{}{})
}
}

View File

@@ -4,6 +4,7 @@ package verifier_test
import (
"encoding/json"
"flag"
"io"
"os"
"testing"
@@ -16,23 +17,23 @@ import (
"github.com/stretchr/testify/assert"
)
const (
paramsPath = "../assets/test_params"
aggVkPath = "../assets/agg_vk"
proofPath = "../assets/agg_proof"
var (
paramsPath = flag.String("params", "/assets/test_params", "params dir")
aggVkPath = flag.String("vk", "/assets/agg_vk", "aggregation proof verification key path")
proofPath = flag.String("proof", "/assets/agg_proof", "aggregation proof path")
)
func TestFFI(t *testing.T) {
as := assert.New(t)
cfg := &config.VerifierConfig{
MockMode: false,
ParamsPath: paramsPath,
AggVkPath: aggVkPath,
ParamsPath: *paramsPath,
AggVkPath: *aggVkPath,
}
v, err := verifier.NewVerifier(cfg)
as.NoError(err)
f, err := os.Open(proofPath)
f, err := os.Open(*proofPath)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)

View File

@@ -3,12 +3,21 @@
create table session_info
(
hash VARCHAR NOT NULL,
rollers_info BYTEA NOT NULL
);
id BIGSERIAL PRIMARY KEY,
task_id VARCHAR NOT NULL,
roller_public_key VARCHAR NOT NULL,
prove_type SMALLINT DEFAULT 0,
roller_name VARCHAR NOT NULL,
proving_status SMALLINT DEFAULT 1,
failure_type SMALLINT DEFAULT 0,
reward BIGINT DEFAULT 0,
proof BYTEA DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL,
create unique index session_info_hash_uindex
on session_info (hash);
CONSTRAINT uk_session_unique UNIQUE (task_id, roller_public_key)
);
-- +goose StatementEnd

View File

@@ -44,6 +44,7 @@ type BlockTraceOrm interface {
type SessionInfoOrm interface {
GetSessionInfosByHashes(hashes []string) ([]*types.SessionInfo, error)
SetSessionInfo(rollersInfo *types.SessionInfo) error
UpdateSessionInfoProvingStatus(ctx context.Context, proveType message.ProveType, taskID string, pk string, status types.RollerProveStatus) error
}
// AggTaskOrm is aggregator task

View File

@@ -1,11 +1,12 @@
package orm
import (
"encoding/json"
"context"
"github.com/jmoiron/sqlx"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
type sessionInfoOrm struct {
@@ -23,7 +24,7 @@ func (o *sessionInfoOrm) GetSessionInfosByHashes(hashes []string) ([]*types.Sess
if len(hashes) == 0 {
return nil, nil
}
query, args, err := sqlx.In("SELECT rollers_info FROM session_info WHERE hash IN (?);", hashes)
query, args, err := sqlx.In("SELECT * FROM session_info WHERE task_id IN (?);", hashes)
if err != nil {
return nil, err
}
@@ -35,15 +36,11 @@ func (o *sessionInfoOrm) GetSessionInfosByHashes(hashes []string) ([]*types.Sess
var sessionInfos []*types.SessionInfo
for rows.Next() {
var infoBytes []byte
if err = rows.Scan(&infoBytes); err != nil {
var sessionInfo types.SessionInfo
if err = rows.StructScan(&sessionInfo); err != nil {
return nil, err
}
sessionInfo := &types.SessionInfo{}
if err = json.Unmarshal(infoBytes, sessionInfo); err != nil {
return nil, err
}
sessionInfos = append(sessionInfos, sessionInfo)
sessionInfos = append(sessionInfos, &sessionInfo)
}
if err = rows.Err(); err != nil {
return nil, err
@@ -53,11 +50,16 @@ func (o *sessionInfoOrm) GetSessionInfosByHashes(hashes []string) ([]*types.Sess
}
func (o *sessionInfoOrm) SetSessionInfo(rollersInfo *types.SessionInfo) error {
infoBytes, err := json.Marshal(rollersInfo)
if err != nil {
return err
}
sqlStr := "INSERT INTO session_info (hash, rollers_info) VALUES ($1, $2) ON CONFLICT (hash) DO UPDATE SET rollers_info = EXCLUDED.rollers_info;"
_, err = o.db.Exec(sqlStr, rollersInfo.ID, infoBytes)
sqlStr := "INSERT INTO session_info (task_id, roller_public_key, prove_type, roller_name, proving_status, failure_type, reward, proof, created_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) ON CONFLICT (task_id, roller_public_key) DO UPDATE SET proving_status = EXCLUDED.proving_status;"
_, err := o.db.Exec(sqlStr, rollersInfo.TaskID, rollersInfo.RollerPublicKey, rollersInfo.ProveType, rollersInfo.RollerName,
rollersInfo.ProvingStatus, rollersInfo.FailureType, rollersInfo.Reward, rollersInfo.Proof, rollersInfo.CreatedAt)
return err
}
// UpdateSessionInfoProvingStatus updates the session info proving status
func (o *sessionInfoOrm) UpdateSessionInfoProvingStatus(ctx context.Context, proveType message.ProveType, taskID string, pk string, status types.RollerProveStatus) error {
if _, err := o.db.ExecContext(ctx, o.db.Rebind("update session_info set proving_status = ? where prove_type = ? and task_id = ? and roller_public_key = ? ;"), int(status), int(proveType), taskID, pk); err != nil {
return err
}
return nil
}
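
A minimal usage sketch tying the two methods together, assuming sessionOrm wraps an *sqlx.DB opened against the migrated schema and ctx is an existing context.Context; the literal values are illustrative and mirror the unit test below:

```go
// Insert-or-update an assignment row, then flip its proving status once the
// roller reports back.
now := time.Now()
si := &types.SessionInfo{
	TaskID:          "task-1", // illustrative task hash
	RollerPublicKey: "0",
	RollerName:      "roller-0",
	ProveType:       int16(message.BasicProve),
	ProvingStatus:   int16(types.RollerAssigned),
	CreatedAt:       &now,
}
if err := sessionOrm.SetSessionInfo(si); err != nil {
	return err
}
return sessionOrm.UpdateSessionInfoProvingStatus(
	ctx, message.BasicProve, si.TaskID, si.RollerPublicKey, types.RollerProofValid)
```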

View File

@@ -410,31 +410,29 @@ func testOrmSessionInfo(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 0, len(sessionInfos))
now := time.Now()
sessionInfo := types.SessionInfo{
ID: batchHash,
Rollers: map[string]*types.RollerStatus{
"0": {
PublicKey: "0",
Name: "roller-0",
Status: types.RollerAssigned,
},
},
StartTimestamp: time.Now().Unix()}
TaskID: batchHash,
RollerName: "roller-0",
RollerPublicKey: "0",
ProvingStatus: int16(types.RollerAssigned),
CreatedAt: &now,
}
// insert
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
sessionInfos, err = ormSession.GetSessionInfosByHashes(hashes)
assert.NoError(t, err)
assert.Equal(t, 1, len(sessionInfos))
assert.Equal(t, sessionInfo, *sessionInfos[0])
assert.Equal(t, sessionInfo.RollerName, sessionInfos[0].RollerName)
// update
sessionInfo.Rollers["0"].Status = types.RollerProofValid
sessionInfo.ProvingStatus = int16(types.RollerProofValid)
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
sessionInfos, err = ormSession.GetSessionInfosByHashes(hashes)
assert.NoError(t, err)
assert.Equal(t, 1, len(sessionInfos))
assert.Equal(t, sessionInfo, *sessionInfos[0])
assert.Equal(t, sessionInfo.ProvingStatus, sessionInfos[0].ProvingStatus)
// delete
assert.NoError(t, ormBatch.UpdateProvingStatus(batchHash, types.ProvingTaskVerified))

File diff suppressed because one or more lines are too long

View File

@@ -4,6 +4,7 @@ package prover_test
import (
"encoding/json"
"flag"
"io"
"os"
"path/filepath"
@@ -16,23 +17,23 @@ import (
"scroll-tech/roller/prover"
)
const (
paramsPath = "../assets/test_params"
seedPath = "../assets/test_seed"
tracesPath = "../assets/traces"
proofDumpPath = "agg_proof"
var (
paramsPath = flag.String("params", "/assets/test_params", "params dir")
seedPath = flag.String("seed", "/assets/test_seed", "seed path")
tracesPath = flag.String("traces", "/assets/traces", "traces dir")
proofDumpPath = flag.String("dump", "/assets/agg_proof", "the path proofs are dumped to")
)
func TestFFI(t *testing.T) {
as := assert.New(t)
cfg := &config.ProverConfig{
ParamsPath: paramsPath,
SeedPath: seedPath,
ParamsPath: *paramsPath,
SeedPath: *seedPath,
}
prover, err := prover.NewProver(cfg)
as.NoError(err)
files, err := os.ReadDir(tracesPath)
files, err := os.ReadDir(*tracesPath)
as.NoError(err)
traces := make([]*types.BlockTrace, 0)
@@ -41,7 +42,7 @@ func TestFFI(t *testing.T) {
f *os.File
byt []byte
)
f, err = os.Open(filepath.Join(tracesPath, file.Name()))
f, err = os.Open(filepath.Join(*tracesPath, file.Name()))
as.NoError(err)
byt, err = io.ReadAll(f)
as.NoError(err)
@@ -54,10 +55,10 @@ func TestFFI(t *testing.T) {
t.Log("prove success")
// dump the proof
os.RemoveAll(proofDumpPath)
os.RemoveAll(*proofDumpPath)
proofByt, err := json.Marshal(proof)
as.NoError(err)
proofFile, err := os.Create(proofDumpPath)
proofFile, err := os.Create(*proofDumpPath)
as.NoError(err)
_, err = proofFile.Write(proofByt)
as.NoError(err)

View File

@@ -231,30 +231,38 @@ func (r *Roller) prove() error {
var traces []*types.BlockTrace
traces, err = r.getSortedTracesByHashes(task.Task.BlockHashes)
if err != nil {
return err
}
// If FFI panics during Prove, the roller will restart and re-enter the prove() function,
// and the proof will not be submitted.
var proof *message.AggProof
proof, err = r.prover.Prove(task.Task.ID, traces)
if err != nil {
proofMsg = &message.ProofDetail{
Status: message.StatusProofError,
Error: err.Error(),
Error: "get traces failed",
ID: task.Task.ID,
Type: task.Task.Type,
Proof: &message.AggProof{},
Proof: nil,
}
log.Error("prove block failed!", "task-id", task.Task.ID)
log.Error("get traces failed!", "task-id", task.Task.ID, "err", err)
} else {
proofMsg = &message.ProofDetail{
Status: message.StatusOk,
ID: task.Task.ID,
Type: task.Task.Type,
Proof: proof,
// If FFI panics during Prove, the roller will restart and re-enter the prove() function,
// and the proof will not be submitted.
var proof *message.AggProof
proof, err = r.prover.Prove(task.Task.ID, traces)
if err != nil {
proofMsg = &message.ProofDetail{
Status: message.StatusProofError,
Error: err.Error(),
ID: task.Task.ID,
Type: task.Task.Type,
Proof: nil,
}
log.Error("prove block failed!", "task-id", task.Task.ID)
} else {
proofMsg = &message.ProofDetail{
Status: message.StatusOk,
ID: task.Task.ID,
Type: task.Task.Type,
Proof: proof,
}
log.Info("prove block successfully!", "task-id", task.Task.ID)
}
log.Info("prove block successfully!", "task-id", task.Task.ID)
}
} else {
// when the roller has panicked more than 3 times,
@@ -264,7 +272,7 @@ func (r *Roller) prove() error {
Error: "zk proving panic",
ID: task.Task.ID,
Type: task.Task.Type,
Proof: &message.AggProof{},
Proof: nil,
}
}
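
The net effect of this restructure is a small error taxonomy for the proof message: a trace-fetch failure is now reported as a concrete, non-prover error, and failed paths carry a nil proof instead of an empty one. A hypothetical distillation of the branching; the field names come from the diff, but the helper itself and the exact type of Type are assumptions:

```go
// buildProofMsg condenses the three outcomes of prove() into one decision point.
func buildProofMsg(id string, typ message.ProveType, traceErr, proveErr error, proof *message.AggProof) *message.ProofDetail {
	switch {
	case traceErr != nil: // could not even fetch traces: concrete, non-prover error
		return &message.ProofDetail{Status: message.StatusProofError, Error: "get traces failed", ID: id, Type: typ, Proof: nil}
	case proveErr != nil: // the prover ran and failed
		return &message.ProofDetail{Status: message.StatusProofError, Error: proveErr.Error(), ID: id, Type: typ, Proof: nil}
	default: // success
		return &message.ProofDetail{Status: message.StatusOk, ID: id, Type: typ, Proof: proof}
	}
}
```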