Compare commits

..

6 Commits

Author              SHA1        Message                                                                          Date
Péter Garamvölgyi   b1c49bd347  fix blob metadata encoding                                                       2024-03-05 14:48:26 +01:00
Péter Garamvölgyi   9fc23a54d3  feat: support multiple DA codecs (wip)                                           2024-03-04 13:18:12 +01:00
georgehao           e41fee6766  feat: remove unused code (#1152)                                                 2024-02-21 21:34:06 +08:00
                                Co-authored-by: colinlyguo <colinlyguo@scroll.io>
colin               5c3b358a22  refactor(event-watcher): remove duplicated L2 message syncing features (#1134)  2024-02-20 15:57:16 +08:00
                                Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
colin               58517f935f  fix(gas-oracle): update gas price with diff at least 1 wei (#1146)              2024-02-20 14:23:27 +08:00
                                Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Chomtana            dc98cf9c08  docs: fix word transfered -> transferred (#1114)                                 2024-02-19 22:49:47 +08:00
                                Co-authored-by: georgehao <haohongfan@gmail.com>
36 changed files with 899 additions and 437 deletions

View File

@@ -0,0 +1,422 @@
package codecv0
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"math/big"
"strings"
"github.com/ethereum/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"scroll-tech/common/types/encoding"
)
const CodecV0Version = 0
type DABlock struct {
BlockNumber uint64
Timestamp uint64
BaseFee *big.Int
GasLimit uint64
NumTransactions uint16
NumL1Messages uint16
}
type DAChunk struct {
Blocks []*DABlock
Transactions [][]*types.TransactionData
}
type DABatch struct {
Version uint8
BatchIndex uint64
L1MessagePopped uint64
TotalL1MessagePopped uint64
DataHash common.Hash
ParentBatchHash common.Hash
SkippedL1MessageBitmap []byte
}
func NewDABlock(block *encoding.Block, totalL1MessagePoppedBefore uint64) (*DABlock, error) {
if !block.Header.Number.IsUint64() {
return nil, errors.New("block number is not uint64")
}
// note: numL1Messages includes skipped messages
numL1Messages := block.NumL1Messages(totalL1MessagePoppedBefore)
if numL1Messages > math.MaxUint16 {
return nil, errors.New("number of L1 messages exceeds max uint16")
}
// note: numTransactions includes skipped messages
numL2Transactions := block.NumL2Transactions()
numTransactions := numL1Messages + numL2Transactions
if numTransactions > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
daBlock := DABlock{
BlockNumber: block.Header.Number.Uint64(),
Timestamp: block.Header.Time,
BaseFee: block.Header.BaseFee,
GasLimit: block.Header.GasLimit,
NumTransactions: uint16(numTransactions),
NumL1Messages: uint16(numL1Messages),
}
return &daBlock, nil
}
func (b *DABlock) Encode() ([]byte, error) {
bytes := make([]byte, 60)
binary.BigEndian.PutUint64(bytes[0:], b.BlockNumber)
binary.BigEndian.PutUint64(bytes[8:], b.Timestamp)
// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
binary.BigEndian.PutUint64(bytes[48:], b.GasLimit)
binary.BigEndian.PutUint16(bytes[56:], b.NumTransactions)
binary.BigEndian.PutUint16(bytes[58:], b.NumL1Messages)
return bytes, nil
}
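
As a reading aid, the layout implied by the byte offsets above, summarized as a Go comment (the offsets are read off the code; they are not stated explicitly elsewhere in the diff):

```go
// Byte layout of the 60-byte block context written by DABlock.Encode:
//
//   bytes  0..7    block number            (big-endian uint64)
//   bytes  8..15   timestamp               (big-endian uint64)
//   bytes 16..47   base fee                (32 bytes, currently always zero)
//   bytes 48..55   gas limit               (big-endian uint64)
//   bytes 56..57   number of transactions  (big-endian uint16)
//   bytes 58..59   number of L1 messages   (big-endian uint16)
//
// DAChunk.Hash below uses only the first 58 bytes of each block context, i.e.
// it drops the NumL1Messages field before hashing.
```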
func NewDAChunk(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64) (*DAChunk, error) {
var blocks []*DABlock
var txs [][]*types.TransactionData
for _, block := range chunk.Blocks {
b, err := NewDABlock(block, totalL1MessagePoppedBefore)
if err != nil {
return nil, err
}
blocks = append(blocks, b)
totalL1MessagePoppedBefore += block.NumL1Messages(totalL1MessagePoppedBefore)
txs = append(txs, block.Transactions)
}
daChunk := DAChunk{
Blocks: blocks,
Transactions: txs,
}
return &daChunk, nil
}
func (c *DAChunk) Encode() ([]byte, error) {
var chunkBytes []byte
chunkBytes = append(chunkBytes, byte(len(c.Blocks)))
var l2TxDataBytes []byte
for _, block := range c.Blocks {
blockBytes, err := block.Encode()
if err != nil {
return nil, err
}
chunkBytes = append(chunkBytes, blockBytes...)
}
for _, blockTxs := range c.Transactions {
for _, txData := range blockTxs {
if txData.Type == types.L1MessageTxType {
continue
}
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(txData)
if err != nil {
return nil, err
}
var txLen [4]byte
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
l2TxDataBytes = append(l2TxDataBytes, txLen[:]...)
l2TxDataBytes = append(l2TxDataBytes, rlpTxData...)
}
}
chunkBytes = append(chunkBytes, l2TxDataBytes...)
return chunkBytes, nil
}
func (c *DAChunk) Hash() (common.Hash, error) {
chunkBytes, err := c.Encode()
if err != nil {
return common.Hash{}, err
}
numBlocks := chunkBytes[0]
// concatenate block contexts
var dataBytes []byte
for i := 0; i < int(numBlocks); i++ {
// only the first 58 bytes of each BlockContext are needed for the hashing process
dataBytes = append(dataBytes, chunkBytes[1+60*i:60*i+59]...)
}
// concatenate l1 and l2 tx hashes
for _, blockTxs := range c.Transactions {
var l1TxHashes []byte
var l2TxHashes []byte
for _, txData := range blockTxs {
txHash := strings.TrimPrefix(txData.TxHash, "0x")
hashBytes, err := hex.DecodeString(txHash)
if err != nil {
return common.Hash{}, err
}
if txData.Type == types.L1MessageTxType {
l1TxHashes = append(l1TxHashes, hashBytes...)
} else {
l2TxHashes = append(l2TxHashes, hashBytes...)
}
}
dataBytes = append(dataBytes, l1TxHashes...)
dataBytes = append(dataBytes, l2TxHashes...)
}
hash := crypto.Keccak256Hash(dataBytes)
return hash, nil
}
func NewDABatch(batch *encoding.Batch, totalL1MessagePoppedBefore uint64) (*DABatch, error) {
// buffer for storing chunk hashes in order to compute the batch data hash
var dataBytes []byte
// skipped L1 message bitmap, an array of 256-bit bitmaps
var skippedBitmap []*big.Int
// the first queue index that belongs to this batch
baseIndex := batch.TotalL1MessagePoppedBefore
// the next queue index that we need to process
nextIndex := batch.TotalL1MessagePoppedBefore
for chunkID, chunk := range batch.Chunks {
// build data hash
totalL1MessagePoppedBeforeChunk := nextIndex
daChunk, err := NewDAChunk(chunk, totalL1MessagePoppedBeforeChunk)
if err != nil {
return nil, err
}
chunkHash, err := daChunk.Hash()
if err != nil {
return nil, err
}
dataBytes = append(dataBytes, chunkHash.Bytes()...)
// build skip bitmap
for blockID, block := range chunk.Blocks {
for _, tx := range block.Transactions {
if tx.Type != types.L1MessageTxType {
continue
}
currentIndex := tx.Nonce
if currentIndex < nextIndex {
return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d. Batch index: %d, chunk index in batch: %d, block index in chunk: %d, block hash: %v, transaction hash: %v", nextIndex, currentIndex, batch.Index, chunkID, blockID, block.Header.Hash(), tx.TxHash)
}
// mark skipped messages
for skippedIndex := nextIndex; skippedIndex < currentIndex; skippedIndex++ {
quo := int((skippedIndex - baseIndex) / 256)
rem := int((skippedIndex - baseIndex) % 256)
for len(skippedBitmap) <= quo {
bitmap := big.NewInt(0)
skippedBitmap = append(skippedBitmap, bitmap)
}
skippedBitmap[quo].SetBit(skippedBitmap[quo], rem, 1)
}
// process included message
quo := int((currentIndex - baseIndex) / 256)
for len(skippedBitmap) <= quo {
bitmap := big.NewInt(0)
skippedBitmap = append(skippedBitmap, bitmap)
}
nextIndex = currentIndex + 1
}
}
}
// compute data hash
dataHash := crypto.Keccak256Hash(dataBytes)
// compute skipped bitmap
bitmapBytes := make([]byte, len(skippedBitmap)*32)
for ii, num := range skippedBitmap {
bytes := num.Bytes()
padding := 32 - len(bytes)
copy(bitmapBytes[32*ii+padding:], bytes)
}
daBatch := DABatch{
Version: CodecV0Version,
BatchIndex: batch.Index,
L1MessagePopped: nextIndex - totalL1MessagePoppedBefore,
TotalL1MessagePopped: nextIndex,
DataHash: dataHash,
ParentBatchHash: batch.ParentBatchHash,
SkippedL1MessageBitmap: bitmapBytes,
}
return &daBatch, nil
}
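
To make the bitmap bookkeeping in NewDABatch above concrete, here is a small self-contained sketch (my own numbers, not from the diff): a batch whose first queue index is 1000 includes L1 messages with nonces 1000, 1003 and 1004, so queue indices 1001 and 1002 are skipped and end up as bits 1 and 2 of the first 256-bit bitmap word.

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	baseIndex := uint64(1000)              // first queue index that belongs to the batch
	included := []uint64{1000, 1003, 1004} // L1 message nonces seen while walking the blocks

	var skippedBitmap []*big.Int
	nextIndex := baseIndex
	for _, currentIndex := range included {
		// mark every queue index between nextIndex and currentIndex as skipped
		for skippedIndex := nextIndex; skippedIndex < currentIndex; skippedIndex++ {
			quo := int((skippedIndex - baseIndex) / 256)
			rem := int((skippedIndex - baseIndex) % 256)
			for len(skippedBitmap) <= quo {
				skippedBitmap = append(skippedBitmap, big.NewInt(0))
			}
			skippedBitmap[quo].SetBit(skippedBitmap[quo], rem, 1)
		}
		// make sure the bitmap word covering the included message exists, then advance
		quo := int((currentIndex - baseIndex) / 256)
		for len(skippedBitmap) <= quo {
			skippedBitmap = append(skippedBitmap, big.NewInt(0))
		}
		nextIndex = currentIndex + 1
	}

	fmt.Println(skippedBitmap[0].Text(2)) // "110": bits 1 and 2 (queue indices 1001 and 1002) are set
	fmt.Println(nextIndex - baseIndex)    // 5 messages popped by this batch: 3 included + 2 skipped
}
```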
func (b *DABatch) Encode() ([]byte, error) {
batchBytes := make([]byte, 89+len(b.SkippedL1MessageBitmap))
batchBytes[0] = b.Version
binary.BigEndian.PutUint64(batchBytes[1:], b.BatchIndex)
binary.BigEndian.PutUint64(batchBytes[9:], b.L1MessagePopped)
binary.BigEndian.PutUint64(batchBytes[17:], b.TotalL1MessagePopped)
copy(batchBytes[25:], b.DataHash[:])
copy(batchBytes[57:], b.ParentBatchHash[:])
copy(batchBytes[89:], b.SkippedL1MessageBitmap[:])
return batchBytes, nil
}
func (b *DABatch) Hash() (common.Hash, error) {
bytes, err := b.Encode()
if err != nil {
return common.Hash{}, err
}
return crypto.Keccak256Hash(bytes), nil
}
func DecodeFromCalldata(data []byte) (*DABatch, []*DAChunk, error) {
return nil, nil, nil
}
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
const CalldataNonZeroByteGas = 16
// GetKeccak256Gas calculates the gas cost for computing the keccak256 hash of a given size.
func GetKeccak256Gas(size uint64) uint64 {
return GetMemoryExpansionCost(size) + 30 + 6*((size+31)/32)
}
// GetMemoryExpansionCost calculates the cost of memory expansion for a given memoryByteSize.
func GetMemoryExpansionCost(memoryByteSize uint64) uint64 {
memorySizeWord := (memoryByteSize + 31) / 32
memoryCost := (memorySizeWord*memorySizeWord)/512 + (3 * memorySizeWord)
return memoryCost
}
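
A quick numeric sanity check of the two helpers above (a standalone sketch; the values are mine): hashing one 58-byte block context costs 6 gas of memory expansion plus 30 + 6*2 keccak gas, i.e. 48 gas, and hashing a 121-byte batch header (89 bytes plus one 32-byte bitmap word) costs 66 gas.

```go
package main

import "fmt"

// Copies of the cost helpers above, reproduced so the example runs standalone.
func GetKeccak256Gas(size uint64) uint64 {
	return GetMemoryExpansionCost(size) + 30 + 6*((size+31)/32)
}

func GetMemoryExpansionCost(memoryByteSize uint64) uint64 {
	memorySizeWord := (memoryByteSize + 31) / 32
	return (memorySizeWord*memorySizeWord)/512 + 3*memorySizeWord
}

func main() {
	fmt.Println(GetMemoryExpansionCost(58)) // 6: two 32-byte words
	fmt.Println(GetKeccak256Gas(58))        // 48 = 6 + 30 + 6*2, one block-context hash
	fmt.Println(GetKeccak256Gas(89 + 32))   // 66, a batch header with a one-word skipped bitmap
}
```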
// EstimateBlockL1CommitCalldataSize calculates the L1 commit calldata size for this block approximately.
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
// This needs to be adjusted in the future.
func EstimateBlockL1CommitCalldataSize(w *encoding.Block) uint64 {
var size uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
size += 4 // 4 bytes payload length
size += getTxPayloadLength(txData)
}
size += 60 // 60 bytes BlockContext
return size
}
// EstimateBlockL1CommitGas calculates the total L1 commit gas for this block approximately.
func EstimateBlockL1CommitGas(w *encoding.Block) uint64 {
var total uint64
var numL1Messages uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
numL1Messages++
continue
}
txPayloadLength := getTxPayloadLength(txData)
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
total += CalldataNonZeroByteGas * 4 // 4 bytes payload length
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
}
// 60 bytes BlockContext calldata
total += CalldataNonZeroByteGas * 60
// sload
total += 2100 * numL1Messages // numL1Messages times cold sload in L1MessageQueue
// staticcall
total += 100 * numL1Messages // numL1Messages times call to L1MessageQueue
total += 100 * numL1Messages // numL1Messages times warm address access to L1MessageQueue
total += GetMemoryExpansionCost(36) * numL1Messages // staticcall to proxy
total += 100 * numL1Messages // read admin in proxy
total += 100 * numL1Messages // read impl in proxy
total += 100 * numL1Messages // access impl
total += GetMemoryExpansionCost(36) * numL1Messages // delegatecall to impl
return total
}
func EstimateChunkL1CommitCalldataSize(c *encoding.Chunk) uint64 {
var totalL1CommitCalldataSize uint64
for _, block := range c.Blocks {
// totalL2TxGas += block.Header.GasUsed
// totalL2TxNum += block.NumL2Transactions()
totalL1CommitCalldataSize += EstimateBlockL1CommitCalldataSize(block)
}
return totalL1CommitCalldataSize
}
func getTxPayloadLength(txData *types.TransactionData) uint64 {
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(txData)
if err != nil {
log.Crit("convertTxDataToRLPEncoding failed, which should not happen", "hash", txData.TxHash, "err", err)
return 0
}
txPayloadLength := uint64(len(rlpTxData))
return txPayloadLength
}
// EstimateChunkL1CommitGas calculates the total L1 commit gas for this chunk approximately.
func EstimateChunkL1CommitGas(c *encoding.Chunk) uint64 {
var totalTxNum uint64
var totalL1CommitGas uint64
for _, block := range c.Blocks {
totalTxNum += uint64(len(block.Transactions))
totalL1CommitGas += EstimateBlockL1CommitGas(block)
}
numBlocks := uint64(len(c.Blocks))
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
return totalL1CommitGas
}
// EstimateBatchL1CommitGas calculates the total L1 commit gas for this batch approximately.
func EstimateBatchL1CommitGas(b *encoding.Batch) uint64 {
var totalL1CommitGas uint64
// Add extra gas costs
totalL1CommitGas += 100000 // constant to account for ops like _getAdmin, _implementation, _requireNotPaused, etc
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += 21000 // base fee for tx
totalL1CommitGas += CalldataNonZeroByteGas // version in calldata
// adjusting gas:
// add 1 time cold sload (2100 gas) for L1MessageQueue
// add 1 time cold address access (2600 gas) for L1MessageQueue
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
totalL1CommitGas += (2100 + 2600 - 100 - 100)
// TODO: handle parent batch
// totalL1CommitGas += GetKeccak256Gas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
// totalL1CommitGas += CalldataNonZeroByteGas * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
// adjust batch data hash gas cost
totalL1CommitGas += GetKeccak256Gas(uint64(32 * len(b.Chunks)))
totalL1MessagePoppedBefore := b.TotalL1MessagePoppedBefore
for _, chunk := range b.Chunks {
totalL1CommitGas += EstimateChunkL1CommitGas(chunk)
totalL1MessagePoppedInChunk := chunk.NumL1Messages(totalL1MessagePoppedBefore)
totalL1MessagePoppedBefore += totalL1MessagePoppedInChunk
totalL1CommitGas += CalldataNonZeroByteGas * (32 * (totalL1MessagePoppedInChunk + 255) / 256)
totalL1CommitGas += GetKeccak256Gas(89 + 32*(totalL1MessagePoppedInChunk+255)/256)
totalL1CommitCalldataSize := EstimateChunkL1CommitCalldataSize(chunk)
totalL1CommitGas += GetMemoryExpansionCost(uint64(totalL1CommitCalldataSize))
}
return totalL1CommitGas
}
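
For orientation, a hedged sketch of how a caller might drive the codecv0 API above. The import paths are inferred from the package names shown in the diff, the encoding.Batch is assumed to be assembled elsewhere from L2 traces, and the helper name is invented; this is not part of the change itself.

```go
package example

import (
	"github.com/scroll-tech/go-ethereum/common"

	"scroll-tech/common/types/encoding"
	"scroll-tech/common/types/encoding/codecv0" // assumed location of the package above
)

// commitPayloadV0 is a hypothetical helper: it derives the DA batch, its calldata
// encoding and its hash, plus a rough gas estimate for the L1 commit transaction.
func commitPayloadV0(batch *encoding.Batch) (common.Hash, []byte, uint64, error) {
	daBatch, err := codecv0.NewDABatch(batch, batch.TotalL1MessagePoppedBefore)
	if err != nil {
		return common.Hash{}, nil, 0, err
	}
	encoded, err := daBatch.Encode() // 89-byte header followed by the skipped-message bitmap
	if err != nil {
		return common.Hash{}, nil, 0, err
	}
	batchHash, err := daBatch.Hash() // keccak256 over the encoded header
	if err != nil {
		return common.Hash{}, nil, 0, err
	}
	gasEstimate := codecv0.EstimateBatchL1CommitGas(batch)
	return batchHash, encoded, gasEstimate, nil
}
```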

View File

@@ -0,0 +1,329 @@
package codecv1
import (
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"math/big"
"strings"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"scroll-tech/common/types/encoding"
)
const CodecV1Version = 1
type DABlock struct {
BlockNumber uint64
Timestamp uint64
BaseFee *big.Int
GasLimit uint64
NumTransactions uint16
NumL1Messages uint16
}
type DAChunk struct {
Blocks []*DABlock
Transactions [][]*types.TransactionData
}
type DABatch struct {
// header
Version uint8
BatchIndex uint64
L1MessagePopped uint64
TotalL1MessagePopped uint64
DataHash common.Hash
BlobVersionedHash common.Hash
ParentBatchHash common.Hash
SkippedL1MessageBitmap []byte
// blob payload
sidecar *types.BlobTxSidecar
}
func NewDABlock(block *encoding.Block, totalL1MessagePoppedBefore uint64) (*DABlock, error) {
if !block.Header.Number.IsUint64() {
return nil, errors.New("block number is not uint64")
}
// note: numL1Messages includes skipped messages
numL1Messages := block.NumL1Messages(totalL1MessagePoppedBefore)
if numL1Messages > math.MaxUint16 {
return nil, errors.New("number of L1 messages exceeds max uint16")
}
// note: numTransactions includes skipped messages
numL2Transactions := block.NumL2Transactions()
numTransactions := numL1Messages + numL2Transactions
if numTransactions > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
daBlock := DABlock{
BlockNumber: block.Header.Number.Uint64(),
Timestamp: block.Header.Time,
BaseFee: block.Header.BaseFee,
GasLimit: block.Header.GasLimit,
NumTransactions: uint16(numTransactions),
NumL1Messages: uint16(numL1Messages),
}
return &daBlock, nil
}
func (b *DABlock) Encode() ([]byte, error) {
bytes := make([]byte, 60)
binary.BigEndian.PutUint64(bytes[0:], b.BlockNumber)
binary.BigEndian.PutUint64(bytes[8:], b.Timestamp)
// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
binary.BigEndian.PutUint64(bytes[48:], b.GasLimit)
binary.BigEndian.PutUint16(bytes[56:], b.NumTransactions)
binary.BigEndian.PutUint16(bytes[58:], b.NumL1Messages)
return bytes, nil
}
func NewDAChunk(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64) (*DAChunk, error) {
var blocks []*DABlock
var txs [][]*types.TransactionData
for _, block := range chunk.Blocks {
b, err := NewDABlock(block, totalL1MessagePoppedBefore)
if err != nil {
return nil, err
}
blocks = append(blocks, b)
totalL1MessagePoppedBefore += block.NumL1Messages(totalL1MessagePoppedBefore)
txs = append(txs, block.Transactions)
}
daChunk := DAChunk{
Blocks: blocks,
Transactions: txs,
}
return &daChunk, nil
}
func (c *DAChunk) Encode() ([]byte, error) {
var chunkBytes []byte
chunkBytes = append(chunkBytes, byte(len(c.Blocks)))
for _, block := range c.Blocks {
blockBytes, err := block.Encode()
if err != nil {
return nil, err
}
chunkBytes = append(chunkBytes, blockBytes...)
}
return chunkBytes, nil
}
func (c *DAChunk) Hash() (common.Hash, error) {
chunkBytes, err := c.Encode()
if err != nil {
return common.Hash{}, err
}
numBlocks := chunkBytes[0]
// concatenate block contexts
var dataBytes []byte
for i := 0; i < int(numBlocks); i++ {
// only the first 58 bytes of each BlockContext are needed for the hashing process
dataBytes = append(dataBytes, chunkBytes[1+60*i:60*i+59]...)
}
// concatenate l1 tx hashes
for _, blockTxs := range c.Transactions {
for _, txData := range blockTxs {
txHash := strings.TrimPrefix(txData.TxHash, "0x")
hashBytes, err := hex.DecodeString(txHash)
if err != nil {
return common.Hash{}, err
}
if txData.Type == types.L1MessageTxType {
dataBytes = append(dataBytes, hashBytes...)
}
}
}
hash := crypto.Keccak256Hash(dataBytes)
return hash, nil
}
func NewDABatch(batch *encoding.Batch, totalL1MessagePoppedBefore uint64) (*DABatch, error) {
// buffer for storing chunk hashes in order to compute the batch data hash
var dataBytes []byte
// skipped L1 message bitmap, an array of 256-bit bitmaps
var skippedBitmap []*big.Int
// the first queue index that belongs to this batch
baseIndex := batch.TotalL1MessagePoppedBefore
// the next queue index that we need to process
nextIndex := batch.TotalL1MessagePoppedBefore
// this encoding can only support up to 15 chunks per batch
if len(batch.Chunks) > 15 {
return nil, fmt.Errorf("too many chunks in batch")
}
for chunkID, chunk := range batch.Chunks {
// build data hash
totalL1MessagePoppedBeforeChunk := nextIndex
daChunk, err := NewDAChunk(chunk, totalL1MessagePoppedBeforeChunk)
if err != nil {
return nil, err
}
chunkHash, err := daChunk.Hash()
if err != nil {
return nil, err
}
dataBytes = append(dataBytes, chunkHash.Bytes()...)
// build skip bitmap
for blockID, block := range chunk.Blocks {
for _, tx := range block.Transactions {
currentIndex := tx.Nonce
if currentIndex < nextIndex {
return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d. Batch index: %d, chunk index in batch: %d, block index in chunk: %d, block hash: %v, transaction hash: %v", nextIndex, currentIndex, batch.Index, chunkID, blockID, block.Header.Hash(), tx.TxHash)
}
// mark skipped messages
for skippedIndex := nextIndex; skippedIndex < currentIndex; skippedIndex++ {
quo := int((skippedIndex - baseIndex) / 256)
rem := int((skippedIndex - baseIndex) % 256)
for len(skippedBitmap) <= quo {
bitmap := big.NewInt(0)
skippedBitmap = append(skippedBitmap, bitmap)
}
skippedBitmap[quo].SetBit(skippedBitmap[quo], rem, 1)
}
// process included message
quo := int((currentIndex - baseIndex) / 256)
for len(skippedBitmap) <= quo {
bitmap := big.NewInt(0)
skippedBitmap = append(skippedBitmap, bitmap)
}
nextIndex = currentIndex + 1
}
}
}
// compute data hash
dataHash := crypto.Keccak256Hash(dataBytes)
// compute skipped bitmap
bitmapBytes := make([]byte, len(skippedBitmap)*32)
for ii, num := range skippedBitmap {
bytes := num.Bytes()
padding := 32 - len(bytes)
copy(bitmapBytes[32*ii+padding:], bytes)
}
// encode blob payload
blobPayload := make([]byte, 31)
// metadata: n_chunks
blobPayload[0] = byte(len(batch.Chunks))
for chunkID, chunk := range batch.Chunks {
var chunkBlobPayload []byte
for _, block := range chunk.Blocks {
for _, tx := range block.Transactions {
// encode L2 txs into blob payload
if tx.Type != types.L1MessageTxType {
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
if err != nil {
return nil, err
}
chunkBlobPayload = append(chunkBlobPayload, rlpTxData...)
continue
}
}
}
blobPayload = append(blobPayload, chunkBlobPayload...)
// metadata: chunki_size
size := uint16(len(chunkBlobPayload)) // TODO: update to u32
binary.BigEndian.PutUint16(blobPayload[1+2*chunkID:], size)
}
// blob contains 131072 bytes but we can only utilize 31/32 of these
if len(blobPayload) > 126976 {
return nil, fmt.Errorf("oversized batch payload")
}
// encode into blob by prepending every 31 bytes with 1 zero byte
var blob kzg4844.Blob
index := 0
for from := 0; from < len(blobPayload); from += 31 {
to := from + 31
if to > len(blobPayload) {
to = len(blobPayload)
}
copy(blob[index+1:], blobPayload[from:to])
index += 32
}
// create sidecar
c, err := kzg4844.BlobToCommitment(blob)
if err != nil {
return nil, fmt.Errorf("failed to create blob commitment: %w", err)
}
p, err := kzg4844.ComputeBlobProof(blob, c)
if err != nil {
return nil, fmt.Errorf("failed to compute blob proof: %w", err)
}
sidecar := &types.BlobTxSidecar{
Blobs: []kzg4844.Blob{blob},
Commitments: []kzg4844.Commitment{c},
Proofs: []kzg4844.Proof{p},
}
hasher := sha256.New()
blobVersionedHash := kzg4844.CalcBlobHashV1(hasher, &c)
daBatch := DABatch{
Version: CodecV1Version,
BatchIndex: batch.Index,
L1MessagePopped: nextIndex - totalL1MessagePoppedBefore,
TotalL1MessagePopped: nextIndex,
DataHash: dataHash,
BlobVersionedHash: blobVersionedHash,
ParentBatchHash: batch.ParentBatchHash,
SkippedL1MessageBitmap: bitmapBytes,
sidecar: sidecar,
}
return &daBatch, nil
}
func (b *DABatch) Encode() ([]byte, error) {
batchBytes := make([]byte, 121+len(b.SkippedL1MessageBitmap))
batchBytes[0] = b.Version
binary.BigEndian.PutUint64(batchBytes[1:], b.BatchIndex)
binary.BigEndian.PutUint64(batchBytes[9:], b.L1MessagePopped)
binary.BigEndian.PutUint64(batchBytes[17:], b.TotalL1MessagePopped)
copy(batchBytes[25:], b.DataHash[:])
copy(batchBytes[57:], b.BlobVersionedHash[:])
copy(batchBytes[89:], b.ParentBatchHash[:])
copy(batchBytes[121:], b.SkippedL1MessageBitmap[:])
return batchBytes, nil
}
func (b *DABatch) Hash() (common.Hash, error) {
bytes, err := b.Encode()
if err != nil {
return common.Hash{}, err
}
return crypto.Keccak256Hash(bytes), nil
}
func DecodeFromCalldata(data []byte) (*DABatch, []*DAChunk, error) {
return nil, nil, nil
}
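
A standalone sketch of the blob packing used by codecv1's NewDABatch above (my own helper, not part of the diff): the payload begins with a 31-byte metadata section, one byte for n_chunks plus fifteen 2-byte chunk sizes, which is why the codec caps a batch at 15 chunks; the payload is then spread over the blob's 4096 32-byte field elements with every 32nd byte left zero so each field element stays canonical, giving the 4096 * 31 = 126976 byte limit.

```go
package main

import "fmt"

const (
	blobSize   = 131072 // 4096 field elements of 32 bytes
	maxPayload = 126976 // only 31 of every 32 bytes carry payload
)

// packIntoBlob writes 31 payload bytes after every zero byte, mirroring the
// packing loop in NewDABatch above.
func packIntoBlob(payload []byte) ([blobSize]byte, error) {
	var blob [blobSize]byte
	if len(payload) > maxPayload {
		return blob, fmt.Errorf("oversized batch payload: %d bytes", len(payload))
	}
	index := 0
	for from := 0; from < len(payload); from += 31 {
		to := from + 31
		if to > len(payload) {
			to = len(payload)
		}
		copy(blob[index+1:], payload[from:to]) // blob[index] stays zero
		index += 32
	}
	return blob, nil
}

func main() {
	payload := make([]byte, 100) // e.g. 31 bytes of metadata plus 69 bytes of RLP-encoded L2 txs
	for i := range payload {
		payload[i] = byte(i)
	}
	blob, _ := packIntoBlob(payload)
	fmt.Println(blob[0], blob[1], blob[31], blob[32], blob[33]) // 0 0 30 0 31
}
```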

View File

@@ -0,0 +1,92 @@
package encoding
import (
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
)
type Block struct {
Header *types.Header
Transactions []*types.TransactionData
}
type Chunk struct {
Blocks []*Block `json:"blocks"`
}
type Batch struct {
Index uint64
TotalL1MessagePoppedBefore uint64
ParentBatchHash common.Hash
Chunks []*Chunk
}
// NumL1Messages returns the number of L1 messages in this block.
// This number is the sum of included and skipped L1 messages.
func (w *Block) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
var lastQueueIndex *uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
lastQueueIndex = &txData.Nonce
}
}
if lastQueueIndex == nil {
return 0
}
// note: last queue index included before this block is totalL1MessagePoppedBefore - 1
// TODO: cache results
return *lastQueueIndex - totalL1MessagePoppedBefore + 1
}
// NumL2Transactions returns the number of L2 transactions in this block.
func (w *Block) NumL2Transactions() uint64 {
var count uint64
for _, txData := range w.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
}
return count
}
// NumL1Messages returns the number of L1 messages in this chunk.
// This number is the sum of included and skipped L1 messages.
func (c *Chunk) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
var numL1Messages uint64
for _, block := range c.Blocks {
numL1MessagesInBlock := block.NumL1Messages(totalL1MessagePoppedBefore)
numL1Messages += numL1MessagesInBlock
totalL1MessagePoppedBefore += numL1MessagesInBlock
}
// TODO: cache results
return numL1Messages
}
func ConvertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
data, err := hexutil.Decode(txData.Data)
if err != nil {
return nil, fmt.Errorf("failed to decode txData.Data: %s, err: %w", txData.Data, err)
}
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, err := tx.MarshalBinary()
if err != nil {
return nil, fmt.Errorf("failed to marshal binary of the tx: %+v, err: %w", tx, err)
}
return rlpTxData, nil
}
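
To make the queue-index arithmetic of Block.NumL1Messages above concrete, a minimal sketch assumed to sit inside the same encoding package (the transaction values are invented): with 5 messages popped before the block (queue indices 0 through 4) and included L1 messages at nonces 7 and 9, the block accounts for 9 - 5 + 1 = 5 messages, the two included ones plus the skipped indices 5, 6 and 8.

```go
package encoding

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/core/types"
)

// ExampleBlockCounts is a hypothetical illustration of the counting rules above.
func ExampleBlockCounts() {
	block := &Block{
		Transactions: []*types.TransactionData{
			{Type: types.L1MessageTxType, Nonce: 7}, // queue indices 5 and 6 were skipped
			{Type: types.L1MessageTxType, Nonce: 9}, // queue index 8 was skipped
			{Type: types.LegacyTxType},              // an ordinary L2 transaction
		},
	}
	fmt.Println(block.NumL1Messages(5))    // 5 = last nonce 9 - 5 popped before + 1
	fmt.Println(block.NumL2Transactions()) // 1
}
```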

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.3.65"
var tag = "v4.3.67"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -442,7 +442,7 @@ Emitted when the ERC1155 NFT is deposited to gateway on layer 1.
event FinalizeBatchWithdrawERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds, uint256[] _amounts)
```
Emitted when the ERC1155 NFT is batch transfered to recipient on layer 1.
Emitted when the ERC1155 NFT is batch transferred to recipient on layer 1.
@@ -463,7 +463,7 @@ Emitted when the ERC1155 NFT is batch transfered to recipient on layer 1.
event FinalizeWithdrawERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId, uint256 _amount)
```
Emitted when the ERC1155 NFT is transfered to recipient on layer 1.
Emitted when the ERC1155 NFT is transferred to recipient on layer 1.

View File

@@ -384,7 +384,7 @@ Emitted when the ERC721 NFT is deposited to gateway on layer 1.
event FinalizeBatchWithdrawERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds)
```
Emitted when the ERC721 NFT is batch transfered to recipient on layer 1.
Emitted when the ERC721 NFT is batch transferred to recipient on layer 1.
@@ -404,7 +404,7 @@ Emitted when the ERC721 NFT is batch transfered to recipient on layer 1.
event FinalizeWithdrawERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId)
```
Emitted when the ERC721 NFT is transfered to recipient on layer 1.
Emitted when the ERC721 NFT is transferred to recipient on layer 1.

View File

@@ -6,7 +6,7 @@
The `L1WETHGateway` contract is used to deposit `WETH` token on layer 1 and finalize withdraw `WETH` from layer 2.
*The deposited WETH tokens are not held in the gateway. It will first be unwrapped as Ether and then the Ether will be sent to the `L1ScrollMessenger` contract. On finalizing withdraw, the Ether will be transfered from `L1ScrollMessenger`, then wrapped as WETH and finally transfer to recipient.*
*The deposited WETH tokens are not held in the gateway. It will first be unwrapped as Ether and then the Ether will be sent to the `L1ScrollMessenger` contract. On finalizing withdraw, the Ether will be transferred from `L1ScrollMessenger`, then wrapped as WETH and finally transfer to recipient.*
## Methods

View File

@@ -6,7 +6,7 @@
The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs on layer 2 and finalize deposit the NFTs from layer 1.
*The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding NFT will be minted and transfered to the recipient. This will be changed if we have more specific scenarios.*
*The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding NFT will be minted and transferred to the recipient. This will be changed if we have more specific scenarios.*
## Methods
@@ -365,7 +365,7 @@ Withdraw some ERC1155 NFT to caller&#39;s account on layer 1.
event BatchWithdrawERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds, uint256[] amounts)
```
Emitted when the ERC1155 NFT is batch transfered to gateway on layer 2.
Emitted when the ERC1155 NFT is batch transferred to gateway on layer 2.
@@ -386,7 +386,7 @@ Emitted when the ERC1155 NFT is batch transfered to gateway on layer 2.
event FinalizeBatchDepositERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds, uint256[] amounts)
```
Emitted when the ERC1155 NFT is batch transfered to recipient on layer 2.
Emitted when the ERC1155 NFT is batch transferred to recipient on layer 2.
@@ -407,7 +407,7 @@ Emitted when the ERC1155 NFT is batch transfered to recipient on layer 2.
event FinalizeDepositERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId, uint256 amount)
```
Emitted when the ERC1155 NFT is transfered to recipient on layer 2.
Emitted when the ERC1155 NFT is transferred to recipient on layer 2.
@@ -479,7 +479,7 @@ Emitted when token mapping for ERC1155 token is updated.
event WithdrawERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId, uint256 amount)
```
Emitted when the ERC1155 NFT is transfered to gateway on layer 2.
Emitted when the ERC1155 NFT is transferred to gateway on layer 2.

View File

@@ -6,7 +6,7 @@
The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs on layer 2 and finalize deposit the NFTs from layer 1.
*The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding NFT will be minted and transfered to the recipient. This will be changed if we have more specific scenarios.*
*The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding NFT will be minted and transferred to the recipient. This will be changed if we have more specific scenarios.*
## Methods
@@ -310,7 +310,7 @@ Withdraw some ERC721 NFT to caller&#39;s account on layer 1.
event BatchWithdrawERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds)
```
Emitted when the ERC721 NFT is batch transfered to gateway on layer 2.
Emitted when the ERC721 NFT is batch transferred to gateway on layer 2.
@@ -330,7 +330,7 @@ Emitted when the ERC721 NFT is batch transfered to gateway on layer 2.
event FinalizeBatchDepositERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds)
```
Emitted when the ERC721 NFT is batch transfered to recipient on layer 2.
Emitted when the ERC721 NFT is batch transferred to recipient on layer 2.
@@ -350,7 +350,7 @@ Emitted when the ERC721 NFT is batch transfered to recipient on layer 2.
event FinalizeDepositERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId)
```
Emitted when the ERC721 NFT is transfered to recipient on layer 2.
Emitted when the ERC721 NFT is transferred to recipient on layer 2.
@@ -421,7 +421,7 @@ Emitted when token mapping for ERC721 token is updated.
event WithdrawERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId)
```
Emitted when the ERC721 NFT is transfered to gateway on layer 2.
Emitted when the ERC721 NFT is transferred to gateway on layer 2.

View File

@@ -6,7 +6,7 @@
The `L2StandardERC20Gateway` is used to withdraw standard ERC20 tokens on layer 2 and finalize deposit the tokens from layer 1.
*The withdrawn ERC20 tokens will be burned directly. On finalizing deposit, the corresponding token will be minted and transfered to the recipient. Any ERC20 that requires non-standard functionality should use a separate gateway.*
*The withdrawn ERC20 tokens will be burned directly. On finalizing deposit, the corresponding token will be minted and transferred to the recipient. Any ERC20 that requires non-standard functionality should use a separate gateway.*
## Methods

View File

@@ -6,7 +6,7 @@
The `L2WETHGateway` contract is used to withdraw `WETH` token on layer 2 and finalize deposit `WETH` from layer 1.
*The WETH tokens are not held in the gateway. It will first be unwrapped as Ether and then the Ether will be sent to the `L2ScrollMessenger` contract. On finalizing deposit, the Ether will be transfered from `L2ScrollMessenger`, then wrapped as WETH and finally transfer to recipient.*
*The WETH tokens are not held in the gateway. It will first be unwrapped as Ether and then the Ether will be sent to the `L2ScrollMessenger` contract. On finalizing deposit, the Ether will be transferred from `L2ScrollMessenger`, then wrapped as WETH and finally transfer to recipient.*
## Methods

View File

@@ -8,7 +8,7 @@ interface IL1ERC1155Gateway {
* Events *
**********/
/// @notice Emitted when the ERC1155 NFT is transfered to recipient on layer 1.
/// @notice Emitted when the ERC1155 NFT is transferred to recipient on layer 1.
/// @param _l1Token The address of ERC1155 NFT on layer 1.
/// @param _l2Token The address of ERC1155 NFT on layer 2.
/// @param _from The address of sender on layer 2.
@@ -24,7 +24,7 @@ interface IL1ERC1155Gateway {
uint256 _amount
);
/// @notice Emitted when the ERC1155 NFT is batch transfered to recipient on layer 1.
/// @notice Emitted when the ERC1155 NFT is batch transferred to recipient on layer 1.
/// @param _l1Token The address of ERC1155 NFT on layer 1.
/// @param _l2Token The address of ERC1155 NFT on layer 2.
/// @param _from The address of sender on layer 2.

View File

@@ -8,7 +8,7 @@ interface IL1ERC721Gateway {
* Events *
**********/
/// @notice Emitted when the ERC721 NFT is transfered to recipient on layer 1.
/// @notice Emitted when the ERC721 NFT is transferred to recipient on layer 1.
/// @param _l1Token The address of ERC721 NFT on layer 1.
/// @param _l2Token The address of ERC721 NFT on layer 2.
/// @param _from The address of sender on layer 2.
@@ -22,7 +22,7 @@ interface IL1ERC721Gateway {
uint256 _tokenId
);
/// @notice Emitted when the ERC721 NFT is batch transfered to recipient on layer 1.
/// @notice Emitted when the ERC721 NFT is batch transferred to recipient on layer 1.
/// @param _l1Token The address of ERC721 NFT on layer 1.
/// @param _l2Token The address of ERC721 NFT on layer 2.
/// @param _from The address of sender on layer 2.

View File

@@ -15,7 +15,7 @@ import {L1ERC20Gateway} from "./L1ERC20Gateway.sol";
/// finalize withdraw `WETH` from layer 2.
/// @dev The deposited WETH tokens are not held in the gateway. It will first be unwrapped
/// as Ether and then the Ether will be sent to the `L1ScrollMessenger` contract.
/// On finalizing withdraw, the Ether will be transfered from `L1ScrollMessenger`, then
/// On finalizing withdraw, the Ether will be transferred from `L1ScrollMessenger`, then
/// wrapped as WETH and finally transfer to recipient.
contract L1WETHGateway is L1ERC20Gateway {
/*************

View File

@@ -8,7 +8,7 @@ interface IL2ERC1155Gateway {
* Events *
**********/
/// @notice Emitted when the ERC1155 NFT is transfered to recipient on layer 2.
/// @notice Emitted when the ERC1155 NFT is transferred to recipient on layer 2.
/// @param l1Token The address of ERC1155 NFT on layer 1.
/// @param l2Token The address of ERC1155 NFT on layer 2.
/// @param from The address of sender on layer 1.
@@ -24,7 +24,7 @@ interface IL2ERC1155Gateway {
uint256 amount
);
/// @notice Emitted when the ERC1155 NFT is batch transfered to recipient on layer 2.
/// @notice Emitted when the ERC1155 NFT is batch transferred to recipient on layer 2.
/// @param l1Token The address of ERC1155 NFT on layer 1.
/// @param l2Token The address of ERC1155 NFT on layer 2.
/// @param from The address of sender on layer 1.
@@ -40,7 +40,7 @@ interface IL2ERC1155Gateway {
uint256[] amounts
);
/// @notice Emitted when the ERC1155 NFT is transfered to gateway on layer 2.
/// @notice Emitted when the ERC1155 NFT is transferred to gateway on layer 2.
/// @param l1Token The address of ERC1155 NFT on layer 1.
/// @param l2Token The address of ERC1155 NFT on layer 2.
/// @param from The address of sender on layer 2.
@@ -56,7 +56,7 @@ interface IL2ERC1155Gateway {
uint256 amount
);
/// @notice Emitted when the ERC1155 NFT is batch transfered to gateway on layer 2.
/// @notice Emitted when the ERC1155 NFT is batch transferred to gateway on layer 2.
/// @param l1Token The address of ERC1155 NFT on layer 1.
/// @param l2Token The address of ERC1155 NFT on layer 2.
/// @param from The address of sender on layer 2.

View File

@@ -8,7 +8,7 @@ interface IL2ERC721Gateway {
* Events *
**********/
/// @notice Emitted when the ERC721 NFT is transfered to recipient on layer 2.
/// @notice Emitted when the ERC721 NFT is transferred to recipient on layer 2.
/// @param l1Token The address of ERC721 NFT on layer 1.
/// @param l2Token The address of ERC721 NFT on layer 2.
/// @param from The address of sender on layer 1.
@@ -22,7 +22,7 @@ interface IL2ERC721Gateway {
uint256 tokenId
);
/// @notice Emitted when the ERC721 NFT is batch transfered to recipient on layer 2.
/// @notice Emitted when the ERC721 NFT is batch transferred to recipient on layer 2.
/// @param l1Token The address of ERC721 NFT on layer 1.
/// @param l2Token The address of ERC721 NFT on layer 2.
/// @param from The address of sender on layer 1.
@@ -36,7 +36,7 @@ interface IL2ERC721Gateway {
uint256[] tokenIds
);
/// @notice Emitted when the ERC721 NFT is transfered to gateway on layer 2.
/// @notice Emitted when the ERC721 NFT is transferred to gateway on layer 2.
/// @param l1Token The address of ERC721 NFT on layer 1.
/// @param l2Token The address of ERC721 NFT on layer 2.
/// @param from The address of sender on layer 2.
@@ -50,7 +50,7 @@ interface IL2ERC721Gateway {
uint256 tokenId
);
/// @notice Emitted when the ERC721 NFT is batch transfered to gateway on layer 2.
/// @notice Emitted when the ERC721 NFT is batch transferred to gateway on layer 2.
/// @param l1Token The address of ERC721 NFT on layer 1.
/// @param l2Token The address of ERC721 NFT on layer 2.
/// @param from The address of sender on layer 2.

View File

@@ -12,7 +12,7 @@ import {IScrollERC20Upgradeable} from "../../libraries/token/IScrollERC20Upgrade
/// @notice The `L2ERC20Gateway` is used to withdraw custom ERC20 compatible tokens on layer 2 and
/// finalize deposit the tokens from layer 1.
/// @dev The withdrawn tokens tokens will be burned directly. On finalizing deposit, the corresponding
/// tokens will be minted and transfered to the recipient.
/// tokens will be minted and transferred to the recipient.
contract L2CustomERC20Gateway is L2ERC20Gateway {
/**********
* Events *

View File

@@ -14,7 +14,7 @@ import {IScrollERC1155} from "../../libraries/token/IScrollERC1155.sol";
/// @notice The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs on layer 2 and
/// finalize deposit the NFTs from layer 1.
/// @dev The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding
/// NFT will be minted and transfered to the recipient.
/// NFT will be minted and transferred to the recipient.
///
/// This will be changed if we have more specific scenarios.
contract L2ERC1155Gateway is ERC1155HolderUpgradeable, ScrollGatewayBase, IL2ERC1155Gateway {

View File

@@ -14,7 +14,7 @@ import {IScrollERC721} from "../../libraries/token/IScrollERC721.sol";
/// @notice The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs on layer 2 and
/// finalize deposit the NFTs from layer 1.
/// @dev The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding
/// NFT will be minted and transfered to the recipient.
/// NFT will be minted and transferred to the recipient.
///
/// This will be changed if we have more specific scenarios.
contract L2ERC721Gateway is ERC721HolderUpgradeable, ScrollGatewayBase, IL2ERC721Gateway {

View File

@@ -12,7 +12,7 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// @notice The `L2ETHGateway` contract is used to withdraw ETH token on layer 2 and
/// finalize deposit ETH from layer 1.
/// @dev The ETH are not held in the gateway. The ETH will be sent to the `L2ScrollMessenger` contract.
/// On finalizing deposit, the Ether will be transfered from `L2ScrollMessenger`, then transfer to recipient.
/// On finalizing deposit, the Ether will be transferred from `L2ScrollMessenger`, then transfer to recipient.
contract L2ETHGateway is ScrollGatewayBase, IL2ETHGateway {
/***************
* Constructor *

View File

@@ -16,7 +16,7 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// @notice The `L2StandardERC20Gateway` is used to withdraw standard ERC20 tokens on layer 2 and
/// finalize deposit the tokens from layer 1.
/// @dev The withdrawn ERC20 tokens will be burned directly. On finalizing deposit, the corresponding
/// token will be minted and transfered to the recipient. Any ERC20 that requires non-standard functionality
/// token will be minted and transferred to the recipient. Any ERC20 that requires non-standard functionality
/// should use a separate gateway.
contract L2StandardERC20Gateway is L2ERC20Gateway {
using AddressUpgradeable for address;

View File

@@ -16,7 +16,7 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// finalize deposit `WETH` from layer 1.
/// @dev The WETH tokens are not held in the gateway. It will first be unwrapped as Ether and
/// then the Ether will be sent to the `L2ScrollMessenger` contract.
/// On finalizing deposit, the Ether will be transfered from `L2ScrollMessenger`, then
/// On finalizing deposit, the Ether will be transferred from `L2ScrollMessenger`, then
/// wrapped as WETH and finally transfer to recipient.
contract L2WETHGateway is L2ERC20Gateway {
using SafeERC20Upgradeable for IERC20Upgradeable;

View File

@@ -653,7 +653,7 @@ contract L1LidoGatewayTest is L1GatewayTestBase {
gasLimit,
feePerGas
);
assertEq(balance - feePerGas, address(this).balance); // extra value is transfered back
assertEq(balance - feePerGas, address(this).balance); // extra value is transferred back
assertGt(l1Messenger.messageSendTimestamp(keccak256(xDomainCalldata)), 0);
assertEq(thisBalance - amount, l1Token.balanceOf(address(this)));
assertEq(feeVaultBalance + feePerGas, address(feeVault).balance);

View File

@@ -232,7 +232,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bits-and-blooms/bitset v1.5.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo=
@@ -322,7 +321,6 @@ github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 h1:6IrxszG5G+O
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI=
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 h1:DuBDHVjgGMPki7bAyh91+3cF1Vh34sAEdH8JQgbc2R0=
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI=
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
@@ -333,6 +331,7 @@ github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhr
github.com/dchest/blake512 v1.0.0 h1:oDFEQFIqFSeuA34xLtXZ/rWxCXdSjirjzPhey5EUvmA=
github.com/dchest/blake512 v1.0.0/go.mod h1:FV1x7xPPLWukZlpDpWQ88rF/SFwZ5qbskrzhLMB92JI=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
@@ -466,6 +465,7 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72 h1:b+9H1GAsx5
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -904,9 +904,7 @@ github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTS
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1 h1:jhDmAqPyebOsVDOCICJoINoLb/AnLBaUw58nFzxWS2w=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5 h1:8Q0qkMVC/MmWkpIdlvZgcv2o2jrlF6zqVOh7W5YHdMA=
@@ -1264,6 +1262,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
@@ -1663,6 +1663,5 @@ rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@@ -67,27 +67,15 @@ func action(ctx *cli.Context) error {
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
}
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
go utils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
log.Error("Failed to fetch bridge contract", "err", loopErr)
}
})
// Start l2 watcher process
go utils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finish start all l2 functions
log.Info("Start event-watcher successfully")
// Catch CTRL-C to ensure a graceful shutdown.

View File

@@ -88,8 +88,7 @@ func action(ctx *cli.Context) error {
log.Crit("failed to create batchProposer", "config file", cfgFile, "error", err)
}
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
// Watcher loop to fetch missing blocks
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {

View File

@@ -27,7 +27,6 @@
"l2_config": {
"confirmations": "0x1",
"endpoint": "https://rpc.scroll.io",
"l2_messenger_address": "0x0000000000000000000000000000000000000000",
"l2_message_queue_address": "0x0000000000000000000000000000000000000000",
"relayer_config": {
"rollup_contract_address": "0x0000000000000000000000000000000000000000",

View File

@@ -12,8 +12,6 @@ type L2Config struct {
Confirmations rpc.BlockNumber `json:"confirmations"`
// l2geth node url.
Endpoint string `json:"endpoint"`
// The messenger contract address deployed on layer 2 chain.
L2MessengerAddress common.Address `json:"l2_messenger_address"`
// The L2MessageQueue contract address deployed on layer 2 chain.
L2MessageQueueAddress common.Address `json:"l2_message_queue_address"`
// The WithdrawTrieRootSlot in L2MessageQueue contract.

View File

@@ -120,6 +120,9 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
if types.GasOracleStatus(block.GasOracleStatus) == types.GasOraclePending {
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
if r.lastGasPrice > 0 && expectedDelta == 0 {
expectedDelta = 1
}
// last price is undefined or (block.BaseFee >= minGasPrice && diff exceeded)
if r.lastGasPrice == 0 || (block.BaseFee >= r.minGasPrice && (block.BaseFee >= r.lastGasPrice+expectedDelta || block.BaseFee <= r.lastGasPrice-expectedDelta)) {
baseFee := big.NewInt(int64(block.BaseFee))

View File

@@ -294,6 +294,9 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
}
suggestGasPriceUint64 := uint64(suggestGasPrice.Int64())
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
if r.lastGasPrice > 0 && expectedDelta == 0 {
expectedDelta = 1
}
// last price is undefined or (suggestGasPriceUint64 >= minGasPrice && diff exceeded)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64 <= r.lastGasPrice-expectedDelta)) {
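
A small standalone sketch of the 1-wei clamp added in both relayers above (the constants and helper are illustrative, not the actual configuration): if the configured relative diff rounds down to zero wei at low prices, the threshold check would otherwise accept an unchanged price and the oracle would re-submit every cycle.

```go
package main

import "fmt"

const gasPriceDiffPrecision = 1000000 // assumed precision, for illustration only

func shouldUpdate(lastGasPrice, newGasPrice, minGasPrice, gasPriceDiff uint64) bool {
	expectedDelta := lastGasPrice * gasPriceDiff / gasPriceDiffPrecision
	if lastGasPrice > 0 && expectedDelta == 0 {
		expectedDelta = 1 // require at least 1 wei of movement
	}
	return lastGasPrice == 0 ||
		(newGasPrice >= minGasPrice &&
			(newGasPrice >= lastGasPrice+expectedDelta || newGasPrice <= lastGasPrice-expectedDelta))
}

func main() {
	fmt.Println(shouldUpdate(100, 100, 1, 50)) // false: an identical price no longer triggers an update
	fmt.Println(shouldUpdate(100, 101, 1, 50)) // true: 1 wei of movement is enough
}
```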

View File

@@ -1,11 +1,3 @@
package watcher
import "github.com/scroll-tech/go-ethereum/common"
const contractEventsBlocksFetchLimit = int64(10)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}

View File

@@ -6,7 +6,6 @@ import (
"math/big"
"github.com/prometheus/client_golang/prometheus"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
@@ -21,7 +20,6 @@ import (
bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)
// L2WatcherClient provide APIs which support others to subscribe to various event from l2geth
@@ -31,65 +29,33 @@ type L2WatcherClient struct {
*ethclient.Client
l2BlockOrm *orm.L2Block
l1MessageOrm *orm.L1Message
l2BlockOrm *orm.L2Block
confirmations rpc.BlockNumber
messengerAddress common.Address
messengerABI *abi.ABI
messageQueueAddress common.Address
messageQueueABI *abi.ABI
withdrawTrieRootSlot common.Hash
// The height of the block that the watcher has retrieved event logs
processedMsgHeight uint64
stopped uint64
metrics *l2WatcherMetrics
}
// NewL2WatcherClient take a l2geth instance to generate a l2watcherclient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
l1MessageOrm := orm.NewL1Message(db)
var savedHeight uint64
l1msg, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
if err != nil || l1msg == nil {
log.Warn("fetch height from db failed", "err", err)
savedHeight = 0
} else {
receipt, err := client.TransactionReceipt(ctx, common.HexToHash(l1msg.Layer2Hash))
if err != nil || receipt == nil {
log.Warn("get tx from l2 failed", "err", err)
savedHeight = 0
} else {
savedHeight = receipt.BlockNumber.Uint64()
}
}
w := L2WatcherClient{
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
return &L2WatcherClient{
ctx: ctx,
Client: client,
l2BlockOrm: orm.NewL2Block(db),
l1MessageOrm: orm.NewL1Message(db),
processedMsgHeight: savedHeight,
confirmations: confirmations,
l2BlockOrm: orm.NewL2Block(db),
messengerAddress: messengerAddress,
messengerABI: bridgeAbi.L2ScrollMessengerABI,
confirmations: confirmations,
messageQueueAddress: messageQueueAddress,
messageQueueABI: bridgeAbi.L2MessageQueueABI,
withdrawTrieRootSlot: withdrawTrieRootSlot,
stopped: 0,
metrics: initL2WatcherMetrics(reg),
}
return &w
}
const blockTracesFetchLimit = uint64(10)
@@ -190,124 +156,3 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
return nil
}
// FetchContractEvent pull latest event logs from given contract address and save in DB
func (w *L2WatcherClient) FetchContractEvent() {
defer func() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
w.metrics.fetchContractEventTotal.Inc()
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
return
}
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight)
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
to := from + contractEventsBlocksFetchLimit - 1
if to > toBlock {
to = toBlock
}
// warning: uint int conversion...
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.messageQueueAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 4)
query.Topics[0][0] = bridgeAbi.L2SentMessageEventSignature
query.Topics[0][1] = bridgeAbi.L2RelayedMessageEventSignature
query.Topics[0][2] = bridgeAbi.L2FailedRelayedMessageEventSignature
query.Topics[0][3] = bridgeAbi.L2AppendMessageEventSignature
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("failed to get event logs", "err", err)
return
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
w.metrics.fetchContractEventHeight.Set(float64(to))
continue
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return
}
relayedMessageCount := int64(len(relayedMessageEvents))
w.metrics.rollupL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)
// Update relayed message first to make sure we don't forget to update submited message.
// Since, we always start sync from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
var msgStatus types.MsgStatus
if msg.isSuccessful {
msgStatus = types.MsgConfirmed
} else {
msgStatus = types.MsgFailed
}
if err = w.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return
}
}
w.processedMsgHeight = uint64(to)
w.metrics.fetchContractEventHeight.Set(float64(to))
}
}
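The removed FetchContractEvent walked the confirmed block range in fixed-size windows. The same inclusive [from, to] windowing, pulled out into a standalone helper purely for illustration (splitBlockRange is a made-up name, not part of the package):

	// splitBlockRange cuts [fromBlock, toBlock] into inclusive windows of at
	// most limit blocks, mirroring the loop in FetchContractEvent above.
	func splitBlockRange(fromBlock, toBlock, limit int64) [][2]int64 {
		var windows [][2]int64
		for from := fromBlock; from <= toBlock; from += limit {
			to := from + limit - 1
			if to > toBlock {
				to = toBlock
			}
			windows = append(windows, [2]int64{from, to})
		}
		return windows
	}

	// Example: splitBlockRange(101, 350, 100) returns [101 200] [201 300] [301 350].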
func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]relayedMessage, error) {
// Need to use the contract ABI to parse event logs
// Can only be tested after we have our contracts set up
var relayedMessages []relayedMessage
for _, vLog := range logs {
switch vLog.Topics[0] {
case bridgeAbi.L2RelayedMessageEventSignature:
event := bridgeAbi.L2RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err)
return relayedMessages, err
}
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: true,
})
case bridgeAbi.L2FailedRelayedMessageEventSignature:
event := bridgeAbi.L2FailedRelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err)
return relayedMessages, err
}
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: false,
})
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
}
}
return relayedMessages, nil
}
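The topic filter in the removed FetchContractEvent above relies on go-ethereum's positional topic semantics: entries within Topics[0] are OR-ed, so FilterLogs returns any log whose first topic equals one of the four signatures, restricted to the two listed contract addresses. A compact sketch of the same filter shape (messengerAddr and messageQueueAddr are placeholders):

	// Logs match if their first topic is any of the listed event signatures.
	query := geth.FilterQuery{
		FromBlock: big.NewInt(100), // inclusive
		ToBlock:   big.NewInt(199), // inclusive
		Addresses: []common.Address{messengerAddr, messageQueueAddr},
		Topics: [][]common.Hash{{
			bridgeAbi.L2SentMessageEventSignature,
			bridgeAbi.L2RelayedMessageEventSignature,
			bridgeAbi.L2FailedRelayedMessageEventSignature,
			bridgeAbi.L2AppendMessageEventSignature,
		}},
	}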

View File

@@ -10,9 +10,6 @@ import (
type l2WatcherMetrics struct {
fetchRunningMissingBlocksTotal prometheus.Counter
fetchRunningMissingBlocksHeight prometheus.Gauge
fetchContractEventTotal prometheus.Counter
fetchContractEventHeight prometheus.Gauge
rollupL2MsgsRelayedEventsTotal prometheus.Counter
rollupL2BlocksFetchedGap prometheus.Gauge
rollupL2BlockL1CommitCalldataSize prometheus.Gauge
}
@@ -33,18 +30,6 @@ func initL2WatcherMetrics(reg prometheus.Registerer) *l2WatcherMetrics {
Name: "rollup_l2_watcher_fetch_running_missing_blocks_height",
Help: "The total number of l2 watcher fetch running missing blocks height",
}),
fetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_fetch_contract_events_total",
Help: "The total number of l2 watcher fetch contract events",
}),
fetchContractEventHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_watcher_fetch_contract_height",
Help: "The total number of l2 watcher fetch contract height",
}),
rollupL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_msg_relayed_events_total",
Help: "The total number of l2 watcher msg relayed event",
}),
rollupL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_watcher_blocks_fetched_gap",
Help: "The gap of l2 fetch",

View File

@@ -3,72 +3,31 @@ package watcher
import (
"context"
"crypto/ecdsa"
"errors"
"math/big"
"strconv"
"testing"
"time"
"gorm.io/gorm"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types"
cutils "scroll-tech/common/utils"
bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/controller/sender"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
"scroll-tech/rollup/mock_bridge"
)
func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
db := setupDB(t)
l2cfg := cfg.L2Config
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress,
l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db, nil)
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db, nil)
return watcher, db
}
func testCreateNewWatcherAndStop(t *testing.T) {
wc, db := setupL2Watcher(t)
subCtx, cancel := context.WithCancel(context.Background())
defer func() {
cancel()
defer database.CloseDB(db)
}()
loopToFetchEvent(subCtx, wc)
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.GasOracleSenderPrivateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
// Create several transactions and commit to block
numTransactions := 3
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
for i := 0; i < numTransactions; i++ {
_, err = newSender.SendTransaction(strconv.Itoa(1000+i), &toAddress, big.NewInt(1000000000), nil, 0)
assert.NoError(t, err)
<-newSender.ConfirmChan()
}
blockNum, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
}
func testFetchRunningMissingBlocks(t *testing.T) {
_, db := setupL2Watcher(t)
defer database.CloseDB(db)
@@ -97,7 +56,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB, contractAddr common.Address) *L2WatcherClient {
confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db, nil)
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, common.Hash{}, db, nil)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
@@ -110,87 +69,3 @@ func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.Privat
auth.GasLimit = 500000
return auth
}
func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
go cutils.Loop(subCtx, 2*time.Second, watcher.FetchContractEvent)
}
func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t)
defer database.CloseDB(db)
logs := []gethTypes.Log{
{
Topics: []common.Hash{bridgeAbi.L2RelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack RelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog RelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr
})
defer patchGuard.Reset()
relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, relayedMessages)
})
convey.Convey("L2RelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridgeAbi.L2RelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()
relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}
func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t)
defer database.CloseDB(db)
logs := []gethTypes.Log{
{
Topics: []common.Hash{bridgeAbi.L2FailedRelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog FailedRelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr
})
defer patchGuard.Reset()
relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, relayedMessages)
})
convey.Convey("L2FailedRelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridgeAbi.L2FailedRelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()
relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}

View File

@@ -110,10 +110,7 @@ func TestFunction(t *testing.T) {
t.Run("TestParseBridgeEventLogsL1FinalizeBatchEventSignature", testParseBridgeEventLogsL1FinalizeBatchEventSignature)
// Run l2 watcher test cases.
t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)
t.Run("TestParseBridgeEventLogsL2RelayedMessageEventSignature", testParseBridgeEventLogsL2RelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2FailedRelayedMessageEventSignature", testParseBridgeEventLogsL2FailedRelayedMessageEventSignature)
// Run chunk proposer test cases.
t.Run("TestChunkProposerLimits", testChunkProposerLimits)

View File

@@ -7,8 +7,6 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L1Message is the structure of a stored layer1 bridge message
@@ -58,48 +56,6 @@ func (m *L1Message) GetLayer1LatestWatchedHeight() (int64, error) {
return -1, nil
}
// GetLayer1LatestMessageWithLayer2Hash returns the latest L1 message that has a layer2 hash
func (m *L1Message) GetLayer1LatestMessageWithLayer2Hash() (*L1Message, error) {
var msg *L1Message
err := m.db.Where("layer2_hash IS NOT NULL").Order("queue_index DESC").First(&msg).Error
if err != nil {
return nil, err
}
return msg, nil
}
// GetL1MessagesByStatus fetches a list of unprocessed messages with the given status
func (m *L1Message) GetL1MessagesByStatus(status types.MsgStatus, limit uint64) ([]L1Message, error) {
var msgs []L1Message
err := m.db.Where("status", int(status)).Order("queue_index ASC").Limit(int(limit)).Find(&msgs).Error
if err != nil {
return nil, err
}
return msgs, nil
}
// GetL1MessageByQueueIndex fetches a message by queue_index
// for unit tests only
func (m *L1Message) GetL1MessageByQueueIndex(queueIndex uint64) (*L1Message, error) {
var msg L1Message
err := m.db.Where("queue_index", queueIndex).First(&msg).Error
if err != nil {
return nil, err
}
return &msg, nil
}
// GetL1MessageByMsgHash fetches a message by msg_hash
// for unit tests only
func (m *L1Message) GetL1MessageByMsgHash(msgHash string) (*L1Message, error) {
var msg L1Message
err := m.db.Where("msg_hash", msgHash).First(&msg).Error
if err != nil {
return nil, err
}
return &msg, nil
}
// SaveL1Messages batch-saves a list of layer1 messages
func (m *L1Message) SaveL1Messages(ctx context.Context, messages []*L1Message) error {
if len(messages) == 0 {
@@ -118,23 +74,3 @@ func (m *L1Message) SaveL1Messages(ctx context.Context, messages []*L1Message) e
}
return err
}
// UpdateLayer1Status updates the message status, given the message hash
func (m *L1Message) UpdateLayer1Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
if err := m.db.Model(&L1Message{}).WithContext(ctx).Where("msg_hash", msgHash).Update("status", int(status)).Error; err != nil {
return err
}
return nil
}
// UpdateLayer1StatusAndLayer2Hash updates message status and layer2 transaction hash, given message hash
func (m *L1Message) UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer2Hash string) error {
updateFields := map[string]interface{}{
"status": int(status),
"layer2_hash": layer2Hash,
}
if err := m.db.Model(&L1Message{}).WithContext(ctx).Where("msg_hash", msgHash).Updates(updateFields).Error; err != nil {
return err
}
return nil
}