fix(rollup-relayer): sanity checks (#1720)

colin
2025-08-12 14:57:02 +08:00
committed by GitHub
parent c012f7132d
commit ae791a0714
4 changed files with 198 additions and 22 deletions


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.39"
var tag = "v4.5.40"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {


@@ -5,6 +5,7 @@ import (
"math/big"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
@@ -16,17 +17,20 @@ import (
// transaction data (calldata and blobs) by parsing them and comparing against database records.
// This ensures the constructed transaction data is correct and consistent with the database state.
func (r *Layer2Relayer) sanityChecksCommitBatchCodecV7CalldataAndBlobs(calldata []byte, blobs []*kzg4844.Blob) error {
calldataInfo, err := r.parseCommitBatchesCalldata(calldata)
if r.l1RollupABI == nil {
return fmt.Errorf("l1RollupABI is nil: cannot parse commitBatches calldata")
}
calldataInfo, err := parseCommitBatchesCalldata(r.l1RollupABI, calldata)
if err != nil {
return fmt.Errorf("failed to parse calldata: %w", err)
}
batchesToValidate, err := r.getBatchesFromCalldata(calldataInfo)
batchesToValidate, l1MessagesWithBlockNumbers, err := r.getBatchesFromCalldata(calldataInfo)
if err != nil {
return fmt.Errorf("failed to get batches from database: %w", err)
}
if err := r.validateCalldataAndBlobsAgainstDatabase(calldataInfo, blobs, batchesToValidate); err != nil {
if err := r.validateCalldataAndBlobsAgainstDatabase(calldataInfo, blobs, batchesToValidate, l1MessagesWithBlockNumbers); err != nil {
return fmt.Errorf("calldata and blobs validation failed: %w", err)
}
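The calldata parsing step invoked above skips the 4-byte function selector before ABI-unpacking the arguments. A minimal sketch of that decode in isolation, with an explicit length guard; the unpackCommitBatches helper name is illustrative, not part of this commit:

package relayer

import (
    "fmt"

    "github.com/scroll-tech/go-ethereum/accounts/abi"
)

// unpackCommitBatches decodes commitBatches calldata: the first 4 bytes are
// the function selector, the remainder is the ABI-encoded input.
func unpackCommitBatches(scrollChainABI *abi.ABI, calldata []byte) ([]interface{}, error) {
    method, ok := scrollChainABI.Methods["commitBatches"]
    if !ok {
        return nil, fmt.Errorf("commitBatches method not found in ABI")
    }
    if len(calldata) < 4 {
        return nil, fmt.Errorf("calldata too short: %d bytes", len(calldata))
    }
    return method.Inputs.Unpack(calldata[4:])
}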
@@ -45,8 +49,8 @@ type CalldataInfo struct {
}
// parseCommitBatchesCalldata parses the commitBatches calldata and extracts key information
func (r *Layer2Relayer) parseCommitBatchesCalldata(calldata []byte) (*CalldataInfo, error) {
method := r.l1RollupABI.Methods["commitBatches"]
func parseCommitBatchesCalldata(abi *abi.ABI, calldata []byte) (*CalldataInfo, error) {
method := abi.Methods["commitBatches"]
decoded, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return nil, fmt.Errorf("failed to unpack commitBatches calldata: %w", err)
@@ -81,17 +85,17 @@ func (r *Layer2Relayer) parseCommitBatchesCalldata(calldata []byte) (*CalldataIn
}
// getBatchesFromCalldata retrieves the relevant batches from database based on calldata information
func (r *Layer2Relayer) getBatchesFromCalldata(info *CalldataInfo) ([]*dbBatchWithChunks, error) {
func (r *Layer2Relayer) getBatchesFromCalldata(info *CalldataInfo) ([]*dbBatchWithChunks, map[uint64][]*types.TransactionData, error) {
// Get the parent batch to determine the starting point
parentBatch, err := r.batchOrm.GetBatchByHash(r.ctx, info.ParentBatchHash.Hex())
if err != nil {
return nil, fmt.Errorf("failed to get parent batch by hash %s: %w", info.ParentBatchHash.Hex(), err)
return nil, nil, fmt.Errorf("failed to get parent batch by hash %s: %w", info.ParentBatchHash.Hex(), err)
}
// Get the last batch to determine the ending point
lastBatch, err := r.batchOrm.GetBatchByHash(r.ctx, info.LastBatchHash.Hex())
if err != nil {
return nil, fmt.Errorf("failed to get last batch by hash %s: %w", info.LastBatchHash.Hex(), err)
return nil, nil, fmt.Errorf("failed to get last batch by hash %s: %w", info.LastBatchHash.Hex(), err)
}
// Get all batches in the range (parent+1 to last)
@@ -100,29 +104,59 @@ func (r *Layer2Relayer) getBatchesFromCalldata(info *CalldataInfo) ([]*dbBatchWi
// Check if the range is valid
if firstBatchIndex > lastBatchIndex {
return nil, fmt.Errorf("no batches found in range: first index %d, last index %d", firstBatchIndex, lastBatchIndex)
return nil, nil, fmt.Errorf("no batches found in range: first index %d, last index %d", firstBatchIndex, lastBatchIndex)
}
var batchesToValidate []*dbBatchWithChunks
l1MessagesWithBlockNumbers := make(map[uint64][]*types.TransactionData)
for batchIndex := firstBatchIndex; batchIndex <= lastBatchIndex; batchIndex++ {
dbBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex)
if err != nil {
return nil, fmt.Errorf("failed to get batch by index %d: %w", batchIndex, err)
return nil, nil, fmt.Errorf("failed to get batch by index %d: %w", batchIndex, err)
}
// Get chunks for this batch
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
if err != nil {
return nil, fmt.Errorf("failed to get chunks for batch %d: %w", batchIndex, err)
return nil, nil, fmt.Errorf("failed to get chunks for batch %d: %w", batchIndex, err)
}
batchesToValidate = append(batchesToValidate, &dbBatchWithChunks{
Batch: dbBatch,
Chunks: dbChunks,
})
}
return batchesToValidate, nil
// If the batch contains L1 messages, retrieve them together with their block numbers
for _, chunk := range dbChunks {
if chunk.TotalL1MessagesPoppedInChunk > 0 {
blockWithL1Messages, err := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, chunk.StartBlockNumber, chunk.EndBlockNumber)
if err != nil {
return nil, nil, fmt.Errorf("failed to get L2 blocks for chunk %d: %w", chunk.Index, err)
}
var l1MessagesCount uint64
for _, block := range blockWithL1Messages {
bn := block.Header.Number.Uint64()
seenL2 := false
for _, tx := range block.Transactions {
if tx.Type == types.L1MessageTxType {
if seenL2 {
// Invariant violated: found an L1 message after an L2 transaction in the same block.
return nil, nil, fmt.Errorf("L1 message after L2 transaction in block %d", bn)
}
l1MessagesWithBlockNumbers[bn] = append(l1MessagesWithBlockNumbers[bn], tx)
l1MessagesCount++
} else {
seenL2 = true
}
}
}
if chunk.TotalL1MessagesPoppedInChunk != l1MessagesCount {
return nil, nil, fmt.Errorf("chunk %d has inconsistent L1 messages count: expected %d, got %d", chunk.Index, chunk.TotalL1MessagesPoppedInChunk, l1MessagesCount)
}
}
}
}
return batchesToValidate, l1MessagesWithBlockNumbers, nil
}
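The per-block ordering rule enforced above can be exercised on its own. A self-contained sketch of the same invariant, using a bare transaction-type slice in place of the real types.TransactionData records: every L1 message must precede every L2 transaction within a block, and the running total is what gets compared against TotalL1MessagesPoppedInChunk.

package main

import "fmt"

// countLeadingL1Messages walks a block's transactions in order and enforces
// the [L1 messages][L2 transactions] layout: once an L2 transaction is seen,
// any further L1 message violates the invariant.
func countLeadingL1Messages(txTypes []uint8) (uint64, error) {
    const l1MessageTxType = 0x7E // value of types.L1MessageTxType in the Scroll go-ethereum fork
    var count uint64
    seenL2 := false
    for i, txType := range txTypes {
        if txType == l1MessageTxType {
            if seenL2 {
                return 0, fmt.Errorf("L1 message at index %d after an L2 transaction", i)
            }
            count++
        } else {
            seenL2 = true
        }
    }
    return count, nil
}

func main() {
    if n, err := countLeadingL1Messages([]uint8{0x7E, 0x7E, 0x02}); err == nil {
        fmt.Println("L1 messages:", n) // L1 messages: 2
    }
    if _, err := countLeadingL1Messages([]uint8{0x02, 0x7E}); err != nil {
        fmt.Println(err) // L1 message at index 1 after an L2 transaction
    }
}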
// validateDatabaseConsistency performs comprehensive validation of database records
@@ -299,7 +333,7 @@ func (r *Layer2Relayer) validateSingleChunkConsistency(chunk *orm.Chunk, prevChu
}
// validateCalldataAndBlobsAgainstDatabase validates calldata and blobs against database records
func (r *Layer2Relayer) validateCalldataAndBlobsAgainstDatabase(calldataInfo *CalldataInfo, blobs []*kzg4844.Blob, batchesToValidate []*dbBatchWithChunks) error {
func (r *Layer2Relayer) validateCalldataAndBlobsAgainstDatabase(calldataInfo *CalldataInfo, blobs []*kzg4844.Blob, batchesToValidate []*dbBatchWithChunks, l1MessagesWithBlockNumbers map[uint64][]*types.TransactionData) error {
// Validate blobs
if len(blobs) == 0 {
return fmt.Errorf("no blobs provided")
@@ -338,7 +372,7 @@ func (r *Layer2Relayer) validateCalldataAndBlobsAgainstDatabase(calldataInfo *Ca
// Validate each blob against its corresponding batch
for i, blob := range blobs {
dbBatch := batchesToValidate[i].Batch
if err := r.validateSingleBlobAgainstBatch(blob, dbBatch, codec); err != nil {
if err := r.validateSingleBlobAgainstBatch(blob, dbBatch, codec, l1MessagesWithBlockNumbers); err != nil {
return fmt.Errorf("blob validation failed for batch %d: %w", dbBatch.Index, err)
}
}
@@ -347,7 +381,7 @@ func (r *Layer2Relayer) validateCalldataAndBlobsAgainstDatabase(calldataInfo *Ca
}
// validateSingleBlobAgainstBatch validates a single blob against its batch data
func (r *Layer2Relayer) validateSingleBlobAgainstBatch(blob *kzg4844.Blob, dbBatch *orm.Batch, codec encoding.Codec) error {
func (r *Layer2Relayer) validateSingleBlobAgainstBatch(blob *kzg4844.Blob, dbBatch *orm.Batch, codec encoding.Codec, l1MessagesWithBlockNumbers map[uint64][]*types.TransactionData) error {
// Decode blob payload
payload, err := codec.DecodeBlob(blob)
if err != nil {
@@ -355,7 +389,7 @@ func (r *Layer2Relayer) validateSingleBlobAgainstBatch(blob *kzg4844.Blob, dbBat
}
// Validate batch hash
daBatch, err := assembleDABatchFromPayload(payload, dbBatch, codec)
daBatch, err := assembleDABatchFromPayload(payload, dbBatch, codec, l1MessagesWithBlockNumbers)
if err != nil {
return fmt.Errorf("failed to assemble batch from payload: %w", err)
}
@@ -401,8 +435,8 @@ func (r *Layer2Relayer) validateMessageQueueConsistency(batchIndex uint64, chunk
return nil
}
func assembleDABatchFromPayload(payload encoding.DABlobPayload, dbBatch *orm.Batch, codec encoding.Codec) (encoding.DABatch, error) {
blocks, err := assembleBlocksFromPayload(payload)
func assembleDABatchFromPayload(payload encoding.DABlobPayload, dbBatch *orm.Batch, codec encoding.Codec, l1MessagesWithBlockNumbers map[uint64][]*types.TransactionData) (encoding.DABatch, error) {
blocks, err := assembleBlocksFromPayload(payload, l1MessagesWithBlockNumbers)
if err != nil {
return nil, fmt.Errorf("failed to assemble blocks from payload batch_index=%d codec_version=%d parent_batch_hash=%s: %w", dbBatch.Index, dbBatch.CodecVersion, dbBatch.ParentBatchHash, err)
}
@@ -427,7 +461,7 @@ func assembleDABatchFromPayload(payload encoding.DABlobPayload, dbBatch *orm.Bat
return daBatch, nil
}
func assembleBlocksFromPayload(payload encoding.DABlobPayload) ([]*encoding.Block, error) {
func assembleBlocksFromPayload(payload encoding.DABlobPayload, l1MessagesWithBlockNumbers map[uint64][]*types.TransactionData) ([]*encoding.Block, error) {
daBlocks := payload.Blocks()
txns := payload.Transactions()
if len(daBlocks) != len(txns) {
@@ -442,8 +476,12 @@ func assembleBlocksFromPayload(payload encoding.DABlobPayload) ([]*encoding.Bloc
BaseFee: daBlocks[i].BaseFee(),
GasLimit: daBlocks[i].GasLimit(),
},
Transactions: encoding.TxsToTxsData(txns[i]),
}
// Ensure per-block ordering: [L1 messages][L2 transactions]. Prepend L1 messages (if any), then append L2 transactions.
if l1Messages, ok := l1MessagesWithBlockNumbers[daBlocks[i].Number()]; ok {
blocks[i].Transactions = l1Messages
}
blocks[i].Transactions = append(blocks[i].Transactions, encoding.TxsToTxsData(txns[i])...)
}
return blocks, nil
}
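The block assembly above prepends database-recovered L1 messages before the L2 transactions decoded from the blob payload. A small standalone sketch of that merge, using plain string labels instead of *types.TransactionData:

package main

import "fmt"

// mergeBlockTxs rebuilds a block's transaction list in codec order:
// L1 messages recovered from the database first, then the L2 transactions
// decoded from the blob.
func mergeBlockTxs(l1ByBlock map[uint64][]string, blockNumber uint64, l2FromBlob []string) []string {
    var txs []string
    // copy instead of aliasing, so the append never mutates the map's backing array
    txs = append(txs, l1ByBlock[blockNumber]...)
    return append(txs, l2FromBlob...)
}

func main() {
    l1 := map[uint64][]string{11488527: {"l1-msg-1072515"}}
    fmt.Println(mergeBlockTxs(l1, 11488527, []string{"l2-a", "l2-b"})) // [l1-msg-1072515 l2-a l2-b]
    fmt.Println(mergeBlockTxs(l1, 11488528, []string{"l2-c"}))         // [l2-c]
}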


@@ -0,0 +1,131 @@
package relayer
import (
"encoding/json"
"fmt"
"math/big"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
bridgeabi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/orm"
)
func TestAssembleDABatch(t *testing.T) {
calldataHex := "0x9bbaa2ba0000000000000000000000000000000000000000000000000000000000000008146793a7d71663cd87ec9713f72242a3798d5e801050130a3e16efaa09fb803e58af2593dadc8b9fff75a2d27199cb97ec115bade109b8d691a512608ef180eb"
blobsPath := filepath.Join("../../../testdata", "commit_batches_blobs.json")
calldata, err := hexutil.Decode(strings.TrimSpace(calldataHex))
assert.NoErrorf(t, err, "failed to decode calldata: %s", calldataHex)
blobs, err := loadBlobsFromJSON(blobsPath)
assert.NoErrorf(t, err, "failed to read blobs: %s", blobsPath)
assert.NotEmpty(t, blobs, "no blobs provided")
info, err := parseCommitBatchesCalldata(bridgeabi.ScrollChainABI, calldata)
assert.NoError(t, err)
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(info.Version))
assert.NoErrorf(t, err, "failed to get codec from version %d", info.Version)
parentBatchHash := info.ParentBatchHash
index := uint64(113571)
t.Logf("calldata parsed: version=%d parentBatchHash=%s lastBatchHash=%s blobs=%d", info.Version, info.ParentBatchHash.Hex(), info.LastBatchHash.Hex(), len(blobs))
fromAddr := common.HexToAddress("0x61d8d3e7f7c656493d1d76aaa1a836cedfcbc27b")
toAddr := common.HexToAddress("0xba50f5340fb9f3bd074bd638c9be13ecb36e603d")
l1MessagesWithBlockNumbers := map[uint64][]*types.TransactionData{
11488527: {
&types.TransactionData{
Type: types.L1MessageTxType,
Nonce: 1072515,
Gas: 340000,
To: &toAddr,
Value: (*hexutil.Big)(big.NewInt(0)),
Data: "0x8ef1332e00000000000000000000000081f3843af1fbab046b771f0d440c04ebb2b7513f000000000000000000000000cec03800074d0ac0854bf1f34153cc4c8baeeb1e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000105d8300000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000084f03efa3700000000000000000000000000000000000000000000000000000000000024730000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000171bdb6e3062daaee1845ba4cb1902169feb5a9b9555a882d45637d3bd29eb83500000000000000000000000000000000000000000000000000000000",
From: fromAddr,
},
},
11488622: {
&types.TransactionData{
Type: types.L1MessageTxType,
Nonce: 1072516,
Gas: 340000,
To: &toAddr,
Value: (*hexutil.Big)(big.NewInt(0)),
Data: "0x8ef1332e00000000000000000000000081f3843af1fbab046b771f0d440c04ebb2b7513f000000000000000000000000cec03800074d0ac0854bf1f34153cc4c8baeeb1e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000105d8400000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000084f03efa370000000000000000000000000000000000000000000000000000000000002474000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000012aeb01535c1845b689bfce22e53029ec59ec75ea20f660d7c5fcd99f55b75b6900000000000000000000000000000000000000000000000000000000",
From: fromAddr,
},
},
11489190: {
&types.TransactionData{
Type: types.L1MessageTxType,
Nonce: 1072517,
Gas: 168000,
To: &toAddr,
Value: (*hexutil.Big)(big.NewInt(0)),
Data: "0x8ef1332e0000000000000000000000003b1399523f819ea4c4d3e76dddefaf4226c6ba570000000000000000000000003b1399523f819ea4c4d3e76dddefaf4226c6ba5700000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000105d8500000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000",
From: fromAddr,
},
},
}
for i, blob := range blobs {
payload, decErr := codec.DecodeBlob(blob)
assert.NoErrorf(t, decErr, "blob[%d] decode failed", i)
if decErr != nil {
continue
}
dbBatch := &orm.Batch{
Index: index,
ParentBatchHash: parentBatchHash.Hex(),
}
daBatch, asmErr := assembleDABatchFromPayload(payload, dbBatch, codec, l1MessagesWithBlockNumbers)
assert.NoErrorf(t, asmErr, "blob[%d] assemble failed", i)
if asmErr != nil {
// skip hash chaining on failure: daBatch is nil here and daBatch.Hash() would panic
continue
}
t.Logf("blob[%d] DABatch hash=%s", i, daBatch.Hash().Hex())
index++
parentBatchHash = daBatch.Hash()
}
}
func loadBlobsFromJSON(path string) ([]*kzg4844.Blob, error) {
raw, err := os.ReadFile(path)
if err != nil {
return nil, err
}
var arr []hexutil.Bytes
if err := json.Unmarshal(raw, &arr); err != nil {
return nil, fmt.Errorf("invalid JSON; expect [\"0x...\"] array: %w", err)
}
out := make([]*kzg4844.Blob, 0, len(arr))
var empty kzg4844.Blob
want := len(empty)
for i, b := range arr {
if len(b) != want {
return nil, fmt.Errorf("blob[%d] length mismatch: got %d, want %d", i, len(b), want)
}
blob := new(kzg4844.Blob)
copy(blob[:], b)
out = append(out, blob)
}
return out, nil
}
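For reference, loadBlobsFromJSON expects the fixture to be a flat JSON array of 0x-prefixed hex strings, each exactly one blob (131072 bytes) long. A sketch of producing such a file; writeBlobsToJSON and the output path are illustrative, not part of this commit:

package main

import (
    "encoding/json"
    "os"

    "github.com/scroll-tech/go-ethereum/common/hexutil"
    "github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// writeBlobsToJSON serializes blobs in the ["0x..."] layout that
// loadBlobsFromJSON parses back into []*kzg4844.Blob.
func writeBlobsToJSON(path string, blobs []*kzg4844.Blob) error {
    arr := make([]hexutil.Bytes, 0, len(blobs))
    for _, b := range blobs {
        arr = append(arr, hexutil.Bytes(b[:]))
    }
    raw, err := json.MarshalIndent(arr, "", "  ")
    if err != nil {
        return err
    }
    return os.WriteFile(path, raw, 0o644)
}

func main() {
    var blob kzg4844.Blob // zero-valued blob, just to show the round-trip shape
    if err := writeBlobsToJSON("commit_batches_blobs.json", []*kzg4844.Blob{&blob}); err != nil {
        panic(err)
    }
}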

File diff suppressed because one or more lines are too long