scroll/bridge/l1/watcher.go
Haichen Shen fb7002bd6d feat(bridge): update the watcher and relayer based on the new contract (#305)
Co-authored-by: colinlyguo <651734127@qq.com>
Co-authored-by: zimpha <zimpha@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-02-22 18:15:44 +08:00


package l1

import (
"context"
"math/big"
"time"

geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"

"scroll-tech/common/types"
"scroll-tech/database"

bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
)

var (
bridgeL1MsgSyncHeightGauge = metrics.NewRegisteredGauge("bridge/l1/msg/sync/height", nil)
)

type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}

type rollupEvent struct {
batchHash common.Hash
txHash common.Hash
status types.RollupStatus
}

// Watcher will listen for smart contract events from Eth L1.
type Watcher struct {
ctx context.Context
client *ethclient.Client
db database.OrmFactory
// The number of blocks to wait before considering a block confirmed
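// (A positive value is interpreted by utils.GetLatestConfirmedBlockNumber,
// presumably as a depth below the chain head; go-ethereum's rpc package also
// defines negative named tags such as rpc.FinalizedBlockNumber.)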
confirmations rpc.BlockNumber
messengerAddress common.Address
messengerABI *abi.ABI
messageQueueAddress common.Address
messageQueueABI *abi.ABI
scrollChainAddress common.Address
scrollChainABI *abi.ABI
// The height up to which the watcher has retrieved event logs
processedMsgHeight uint64
// The height up to which the watcher has retrieved L1 block headers
processedBlockHeight uint64
stopCh chan bool
}

// NewWatcher returns a new instance of Watcher. The instance is not yet fully
// prepared; it still needs to be finalized and run by calling `watcher.Start`.
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *Watcher {
savedHeight, err := db.GetLayer1LatestWatchedHeight()
if err != nil {
log.Warn("Failed to fetch height from db", "err", err)
savedHeight = 0
}
if savedHeight < int64(startHeight) {
savedHeight = int64(startHeight)
}
savedL1BlockHeight, err := db.GetLatestL1BlockHeight()
if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
savedL1BlockHeight = 0
}
if savedL1BlockHeight < startHeight {
savedL1BlockHeight = startHeight
}
stopCh := make(chan bool)
return &Watcher{
ctx: ctx,
client: client,
db: db,
confirmations: confirmations,
messengerAddress: messengerAddress,
messengerABI: bridge_abi.L1ScrollMessengerABI,
messageQueueAddress: messageQueueAddress,
messageQueueABI: bridge_abi.L1MessageQueueABI,
scrollChainAddress: scrollChainAddress,
scrollChainABI: bridge_abi.ScrollChainABI,
processedMsgHeight: uint64(savedHeight),
processedBlockHeight: savedL1BlockHeight,
stopCh: stopCh,
}
}
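
// A minimal usage sketch (hypothetical; the client, database handle, start
// height, confirmation depth, and contract addresses are assumed to be wired
// up elsewhere):
//
//	watcher := NewWatcher(ctx, client, startHeight, confirmations, messengerAddr, queueAddr, chainAddr, db)
//	watcher.Start()
//	defer watcher.Stop()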

// Start the Watcher module: it runs two pollers, one syncing L1 block headers
// and one syncing bridge contract events, until Stop is called.
func (w *Watcher) Start() {
go func() {
ctx, cancel := context.WithCancel(w.ctx)
go func(ctx context.Context) {
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
number, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
continue
}
if err := w.FetchBlockHeader(number); err != nil {
log.Error("Failed to fetch L1 block header", "lastest", number, "err", err)
}
}
}
}(ctx)
go func(ctx context.Context) {
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
number, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
continue
}
if err := w.FetchContractEvent(number); err != nil {
log.Error("Failed to fetch bridge contract", "err", err)
}
}
}
}(ctx)
<-w.stopCh
cancel()
}()
}

// Stop the Watcher module, for a graceful shutdown.
func (w *Watcher) Stop() {
w.stopCh <- true
}

// Upper bound on the number of blocks processed per polling iteration.
const contractEventsBlocksFetchLimit = int64(10)

// FetchBlockHeader pulls the latest L1 block headers and saves them into the DB.
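// For example, with processedBlockHeight = 100 and blockHeight = 150, a single
// call fetches headers 101 through 110 (one fetch-limit window) and leaves the
// remaining blocks to later ticks.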
func (w *Watcher) FetchBlockHeader(blockHeight uint64) error {
fromBlock := int64(w.processedBlockHeight) + 1
toBlock := int64(blockHeight)
if toBlock < fromBlock {
return nil
}
// cap the window at contractEventsBlocksFetchLimit blocks per call
if toBlock >= fromBlock+contractEventsBlocksFetchLimit {
toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
}
var blocks []*types.L1BlockInfo
var err error
height := fromBlock
for ; height <= toBlock; height++ {
var block *geth_types.Header
block, err = w.client.HeaderByNumber(w.ctx, big.NewInt(height))
if err != nil {
log.Warn("Failed to get block", "height", height, "err", err)
break
}
// BaseFee is nil before EIP-1559 activation; guard against a nil dereference.
var baseFee uint64
if block.BaseFee != nil {
baseFee = block.BaseFee.Uint64()
}
blocks = append(blocks, &types.L1BlockInfo{
Number: uint64(height),
Hash: block.Hash().String(),
BaseFee: baseFee,
})
}
// the very first block already failed; return the error
if height == fromBlock {
return err
}
toBlock = height - 1
// insert the successfully fetched blocks
err = w.db.InsertL1Blocks(w.ctx, blocks)
if err != nil {
log.Warn("Failed to insert L1 block to db", "fromBlock", fromBlock, "toBlock", toBlock, "err", err)
return err
}
// update processed height
w.processedBlockHeight = uint64(toBlock)
return nil
}

// FetchContractEvent pulls the latest event logs from the watched bridge
// contracts and saves them into the DB.
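// For example, with processedMsgHeight = 100 and blockHeight = 125, the loop
// below issues three FilterLogs queries covering blocks 101-110, 111-120, and
// 121-125.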
func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
defer func() {
log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight)
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
to := from + contractEventsBlocksFetchLimit - 1
if to > toBlock {
to = toBlock
}
// note: heights are converted between uint64 and int64 here, which is safe
// while block numbers stay far below math.MaxInt64
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.scrollChainAddress,
w.messageQueueAddress,
},
Topics: make([][]common.Hash, 1),
}
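// Topics[0] acts as an OR filter: a log matches if its first topic (the event
// signature) equals any of the five signatures below.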
query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = bridge_abi.L1QueueTransactionEventSignature
query.Topics[0][1] = bridge_abi.L1RelayedMessageEventSignature
query.Topics[0][2] = bridge_abi.L1FailedRelayedMessageEventSignature
query.Topics[0][3] = bridge_abi.L1CommitBatchEventSignature
query.Topics[0][4] = bridge_abi.L1FinalizeBatchEventSignature
logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil {
log.Warn("Failed to get event logs", "err", err)
return err
}
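// no matching events in this window; still advance the processed height so the
// window is not fetched again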
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
bridgeL1MsgSyncHeightGauge.Update(to)
continue
}
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("Failed to parse emitted events log", "err", err)
return err
}
log.Info("L1 events types", "SentMessageCount", len(sentMessageEvents), "RelayedMessageCount", len(relayedMessageEvents), "RollupEventCount", len(rollupEvents))
// use rollup events to update the rollup status in the DB
var batchHashes []string
for _, event := range rollupEvents {
batchHashes = append(batchHashes, event.batchHash.String())
}
statuses, err := w.db.GetRollupStatusByHashList(batchHashes)
if err != nil {
log.Error("Failed to GetRollupStatusByHashList", "err", err)
return err
}
if len(statuses) != len(batchHashes) {
log.Error("Rollup status count does not match batch hash count", "RollupStatus.Length", len(statuses), "batchHashes.Length", len(batchHashes))
// do not advance the processed height, so this window is retried on the next tick
return nil
}
for index, event := range rollupEvents {
batchHash := event.batchHash.String()
status := statuses[index]
// only update when db status is before event status
if event.status > status {
if event.status == types.RollupFinalized {
err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
} else if event.status == types.RollupCommitted {
err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
}
if err != nil {
log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
return err
}
}
}
// Update relayed messages first so we never miss updating a submitted message,
// since syncing always resumes from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
var msgStatus types.MsgStatus
if msg.isSuccessful {
msgStatus = types.MsgConfirmed
} else {
msgStatus = types.MsgFailed
}
if err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err
}
}
if err = w.db.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
return err
}
w.processedMsgHeight = uint64(to)
bridgeL1MsgSyncHeightGauge.Update(to)
}
return nil
}

func (w *Watcher) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) {
// We need the contract ABIs to parse the event logs.
// This can only be tested once our contracts are set up.
var l1Messages []*types.L1Message
var relayedMessages []relayedMessage
var rollupEvents []rollupEvent
for _, vLog := range logs {
switch vLog.Topics[0] {
case bridge_abi.L1QueueTransactionEventSignature:
event := bridge_abi.L1QueueTransactionEvent{}
err := utils.UnpackLog(w.messageQueueABI, &event, "QueueTransaction", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 QueueTransaction event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
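// the message hash is the keccak256 digest of the raw queued-transaction payload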
msgHash := common.BytesToHash(crypto.Keccak256(event.Data))
l1Messages = append(l1Messages, &types.L1Message{
QueueIndex: event.QueueIndex.Uint64(),
MsgHash: msgHash.String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Data),
GasLimit: event.GasLimit.Uint64(),
Layer1Hash: vLog.TxHash.Hex(),
})
case bridge_abi.L1RelayedMessageEventSignature:
event := bridge_abi.L1RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 RelayedMessage event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: true,
})
case bridge_abi.L1FailedRelayedMessageEventSignature:
event := bridge_abi.L1FailedRelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 FailedRelayedMessage event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: false,
})
case bridge_abi.L1CommitBatchEventSignature:
event := bridge_abi.L1CommitBatchEvent{}
err := utils.UnpackLog(w.scrollChainABI, &event, "CommitBatch", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 CommitBatch event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
rollupEvents = append(rollupEvents, rollupEvent{
batchHash: event.BatchHash,
txHash: vLog.TxHash,
status: types.RollupCommitted,
})
case bridge_abi.L1FinalizeBatchEventSignature:
event := bridge_abi.L1FinalizeBatchEvent{}
err := utils.UnpackLog(w.scrollChainABI, &event, "FinalizeBatch", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
rollupEvents = append(rollupEvents, rollupEvent{
batchHash: event.BatchHash,
txHash: vLog.TxHash,
status: types.RollupFinalized,
})
default:
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
}
}
return l1Messages, relayedMessages, rollupEvents, nil
}