Compare commits

21 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| HAOYUatHZ | b7a037c18b | Merge commit 'e6c32ee6' into HEAD | 2024-04-03 11:35:21 +08:00 |
| Xi Lin | e6c32ee636 | fix(contracts): fix number of non-skipped l1 messages (#1232) (Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>) | 2024-04-03 11:26:37 +08:00 |
| HAOYUatHZ | 40aca8558b | skip finalization sequence check | 2024-04-03 11:21:44 +08:00 |
| HAOYUatHZ | bd26d2d86c | Merge commit 'c1afa61d' into 4844-env1-add-append | 2024-04-01 15:51:49 +08:00 |
| HAOYUatHZ | 66ad90b547 | fix | 2024-03-28 22:51:39 +08:00 |
| HAOYUatHZ | 669456f8cc | add queueTransaction to import message hashes directly | 2024-03-27 16:04:46 +08:00 |
| Péter Garamvölgyi | c1afa61da8 | skip batch proof for local testing | 2024-03-21 16:42:57 +01:00 |
| Péter Garamvölgyi | 122c6f0577 | fix batch proof for local testing | 2024-03-21 15:40:19 +01:00 |
| zimpha | cccec61b7e | merge with develop branch | 2024-03-19 17:01:15 +08:00 |
| zimpha | c3e8a3ffa6 | fix solc version | 2024-03-19 12:23:50 +08:00 |
| zimpha | 722b77c269 | fix ci for contracts | 2024-03-19 12:07:31 +08:00 |
| zimpha | 800f3c2674 | add decorator in codec | 2024-03-19 11:51:26 +08:00 |
| zimpha | 055b70ebc9 | use version and batch index to choose verifier | 2024-03-18 12:27:22 +08:00 |
| zimpha | 7764f3ae7a | fix comments | 2024-03-18 10:13:33 +08:00 |
| zimpha | 25f32b5967 | fix comments | 2024-03-15 15:23:18 +08:00 |
| zimpha | 3bb7c63621 | Merge remote-tracking branch 'origin/develop' into feat/4844_contract_v1_codec | 2024-03-15 15:19:38 +08:00 |
| zimpha | febec99f79 | add blob tests | 2024-03-15 15:19:17 +08:00 |
| zimpha | cdb061aa0c | use ethers-v6 and fix unit tests | 2024-03-15 00:16:53 +08:00 |
| zimpha | 45de8e491c | integrate 4844 encoding | 2024-03-14 00:35:58 +08:00 |
| zimpha | 6490b70893 | Merge remote-tracking branch 'origin/develop' into feat/4844_contract_v1_codec | 2024-03-12 17:47:15 +08:00 |
| zimpha | b876b5649d | add v1 codec | 2024-03-06 15:01:04 +08:00 |
41 changed files with 1552 additions and 2690 deletions

View File

@@ -19,7 +19,7 @@ CAPELLA_FORK_VERSION: 0x20000092
MAX_WITHDRAWALS_PER_PAYLOAD: 16
# Deneb
DENEB_FORK_EPOCH: 0
DENEB_FORK_EPOCH: 1
DENEB_FORK_VERSION: 0x20000093
# Time parameters

View File

@@ -19,7 +19,7 @@ services:
command:
- testnet
- generate-genesis
- --fork=deneb
- --fork=capella
- --num-validators=64
- --genesis-time-delay=3
- --output-ssz=/data/consensus/genesis.ssz

View File

@@ -15,7 +15,7 @@
"archimedesBlock": 0,
"shanghaiBlock": 0,
"clique": {
"period": 1,
"period": 3,
"epoch": 30000
},
"scroll": {

View File

@@ -26,7 +26,6 @@ func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]
config.ArrowGlacierBlock,
config.ArchimedesBlock,
config.ShanghaiBlock,
config.BanachBlock,
} {
if fork == nil {
continue

View File

@@ -442,9 +442,9 @@ func EstimateBatchL1CommitGas(b *encoding.Batch) (uint64, error) {
}
// EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately.
func EstimateBatchL1CommitCalldataSize(b *encoding.Batch) (uint64, error) {
func EstimateBatchL1CommitCalldataSize(c *encoding.Batch) (uint64, error) {
var totalL1CommitCalldataSize uint64
for _, chunk := range b.Chunks {
for _, chunk := range c.Chunks {
chunkL1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk)
if err != nil {
return 0, err

View File

@@ -443,55 +443,8 @@ func (b *DABatch) BlobDataProof() ([]byte, error) {
return BlobDataProofArgs.Pack(values...)
}
// Blob returns the blob of the batch.
func (b *DABatch) Blob() *kzg4844.Blob {
return b.blob
}
// DecodeFromCalldata attempts to decode a DABatch and an array of DAChunks from the provided calldata byte slice.
func DecodeFromCalldata(data []byte) (*DABatch, []*DAChunk, error) {
// TODO: implement this function.
return nil, nil, nil
}
// EstimateChunkL1CommitBlobSize estimates the size of the L1 commit blob for a single chunk.
func EstimateChunkL1CommitBlobSize(c *encoding.Chunk) (uint64, error) {
metadataSize := uint64(2 + 4*MaxNumChunks) // over-estimate: adding metadata length
chunkDataSize, err := chunkL1CommitBlobDataSize(c)
if err != nil {
return 0, err
}
paddedSize := ((metadataSize + chunkDataSize + 30) / 31) * 32
return paddedSize, nil
}
// EstimateBatchL1CommitBlobSize estimates the total size of the L1 commit blob for a batch.
func EstimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, error) {
metadataSize := uint64(2 + 4*MaxNumChunks)
var batchDataSize uint64
for _, c := range b.Chunks {
chunkDataSize, err := chunkL1CommitBlobDataSize(c)
if err != nil {
return 0, err
}
batchDataSize += chunkDataSize
}
paddedSize := ((metadataSize + batchDataSize + 30) / 31) * 32
return paddedSize, nil
}
func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
var dataSize uint64
for _, block := range c.Blocks {
for _, tx := range block.Transactions {
if tx.Type != types.L1MessageTxType {
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
if err != nil {
return 0, err
}
dataSize += uint64(len(rlpTxData))
}
}
}
return dataSize, nil
}
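
The estimation above encodes the blob packing rule: payload bytes are stored 31 per field element (the top byte of each 32-byte element is left zero so it remains a canonical BLS12-381 scalar), so the estimate rounds `metadataSize + dataSize` up to a multiple of 31 and charges 32 bytes per group. A runnable sketch of the arithmetic follows; `maxNumChunks = 15` is an assumption for illustration, the real constant lives in this codec package:

```go
package main

import "fmt"

// maxNumChunks is assumed to be 15 here for illustration only.
const maxNumChunks = 15

// estimateBlobSize mirrors the padding formula above: round the metadata plus
// payload bytes up to a multiple of 31, then charge one 32-byte field element
// per 31-byte group.
func estimateBlobSize(dataSize uint64) uint64 {
	metadataSize := uint64(2 + 4*maxNumChunks) // chunk count plus per-chunk lengths
	return ((metadataSize + dataSize + 30) / 31) * 32
}

func main() {
	for _, size := range []uint64{0, 1, 31, 249, 5000} {
		fmt.Printf("payload %4d bytes -> estimated blob size %5d bytes\n", size, estimateBlobSize(size))
	}
}
```

The tests in the next file pin this rounding down to concrete values (320, 5952, and 128 bytes for the three sample traces).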

View File

@@ -687,51 +687,6 @@ func TestCodecV1BatchSkipBitmap(t *testing.T) {
assert.Equal(t, 42, int(batch.TotalL1MessagePopped))
}
func TestCodecV1ChunkAndBatchBlobSizeEstimation(t *testing.T) {
trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
chunk2BlobSize, err := EstimateChunkL1CommitBlobSize(chunk2)
assert.NoError(t, err)
assert.Equal(t, uint64(320), chunk2BlobSize)
batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
batch2BlobSize, err := EstimateBatchL1CommitBlobSize(batch2)
assert.NoError(t, err)
assert.Equal(t, uint64(320), batch2BlobSize)
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
chunk3BlobSize, err := EstimateChunkL1CommitBlobSize(chunk3)
assert.NoError(t, err)
assert.Equal(t, uint64(5952), chunk3BlobSize)
batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
batch3BlobSize, err := EstimateBatchL1CommitBlobSize(batch3)
assert.NoError(t, err)
assert.Equal(t, uint64(5952), batch3BlobSize)
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk4BlobSize, err := EstimateChunkL1CommitBlobSize(chunk4)
assert.NoError(t, err)
assert.Equal(t, uint64(128), chunk4BlobSize)
batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
batch4BlobSize, err := EstimateBatchL1CommitBlobSize(batch4)
assert.NoError(t, err)
assert.Equal(t, uint64(128), batch4BlobSize)
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
chunk5BlobSize, err := EstimateChunkL1CommitBlobSize(chunk5)
assert.NoError(t, err)
assert.Equal(t, uint64(6176), chunk5BlobSize)
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk6BlobSize, err := EstimateChunkL1CommitBlobSize(chunk6)
assert.NoError(t, err)
assert.Equal(t, uint64(128), chunk6BlobSize)
batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
batch5BlobSize, err := EstimateBatchL1CommitBlobSize(batch5)
assert.NoError(t, err)
assert.Equal(t, uint64(6208), batch5BlobSize)
}
func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
data, err := os.ReadFile(filename)
assert.NoError(t, err)

View File

@@ -8,17 +8,6 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
)
// CodecVersion defines the version of encoder and decoder.
type CodecVersion int
const (
// CodecV0 represents the version 0 of the encoder and decoder.
CodecV0 CodecVersion = iota
// CodecV1 represents the version 1 of the encoder and decoder.
CodecV1
)
// Block represents an L2 block.
type Block struct {
Header *types.Header
@@ -220,3 +209,8 @@ func (b *Batch) WithdrawRoot() common.Hash {
lastChunkBlockNum := len(b.Chunks[numChunks-1].Blocks)
return b.Chunks[len(b.Chunks)-1].Blocks[lastChunkBlockNum-1].WithdrawRoot
}
// NumChunks gets the number of chunks of the batch.
func (b *Batch) NumChunks() uint64 {
return uint64(len(b.Chunks))
}

View File

@@ -75,6 +75,7 @@ func TestUtilFunctions(t *testing.T) {
assert.Equal(t, uint64(240000), chunk3.L2GasUsed())
// Test Batch methods
assert.Equal(t, uint64(3), batch.NumChunks())
assert.Equal(t, block6.Header.Root, batch.StateRoot())
assert.Equal(t, block6.WithdrawRoot, batch.WithdrawRoot())
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.3.75"
var tag = "v4.3.74"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -83,6 +83,45 @@ Return the batch hash of a committed batch.
|---|---|---|
| _0 | bytes32 | undefined |
### finalizeBatch
```solidity
function finalizeBatch(bytes _batchHeader, bytes32 _prevStateRoot, bytes32 _postStateRoot, bytes32 _withdrawRoot) external nonpayable
```
Finalize a committed batch on layer 1 without providing proof.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _batchHeader | bytes | undefined |
| _prevStateRoot | bytes32 | undefined |
| _postStateRoot | bytes32 | undefined |
| _withdrawRoot | bytes32 | undefined |
### finalizeBatch4844
```solidity
function finalizeBatch4844(bytes _batchHeader, bytes32 _prevStateRoot, bytes32 _postStateRoot, bytes32 _withdrawRoot, bytes _blobDataProof) external nonpayable
```
Finalize a committed batch (with blob) on layer 1 without providing proof.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _batchHeader | bytes | undefined |
| _prevStateRoot | bytes32 | undefined |
| _postStateRoot | bytes32 | undefined |
| _withdrawRoot | bytes32 | undefined |
| _blobDataProof | bytes | undefined |
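
These two entries document the proof-less finalization paths. As a usage illustration, calldata for `finalizeBatch` can be packed with the go-ethereum `abi` package, much as the relayer does further down; this is a sketch only, with a hand-written ABI fragment and placeholder arguments rather than the generated bindings:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
)

// Hand-written ABI fragment for illustration; real callers use the generated
// ScrollChain bindings or the full contract ABI.
const finalizeBatchABI = `[{"type":"function","name":"finalizeBatch","inputs":[
  {"name":"_batchHeader","type":"bytes"},
  {"name":"_prevStateRoot","type":"bytes32"},
  {"name":"_postStateRoot","type":"bytes32"},
  {"name":"_withdrawRoot","type":"bytes32"}],"outputs":[]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(finalizeBatchABI))
	if err != nil {
		panic(err)
	}
	// Placeholder values; a real caller passes the committed batch header and
	// the parent/current state roots plus the withdraw trie root.
	calldata, err := parsed.Pack("finalizeBatch",
		[]byte{0x00},
		common.HexToHash("0x01"),
		common.HexToHash("0x02"),
		common.HexToHash("0x03"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("finalizeBatch calldata: 0x%x\n", calldata)
}
```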
### finalizeBatchWithProof
```solidity

View File

@@ -99,6 +99,18 @@ interface IScrollChain {
bytes calldata aggrProof
) external;
/// @notice Finalize a committed batch on layer 1 without providing proof.
/// @param batchHeader The header of current batch, see the encoding in comments of `commitBatch`.
/// @param prevStateRoot The state root of parent batch.
/// @param postStateRoot The state root of current batch.
/// @param withdrawRoot The withdraw trie root of current batch.
function finalizeBatch(
bytes calldata batchHeader,
bytes32 prevStateRoot,
bytes32 postStateRoot,
bytes32 withdrawRoot
) external;
/// @notice Finalize a committed batch (with blob) on layer 1.
///
/// @dev Memory layout of `blobDataProof`:
@@ -120,4 +132,18 @@ interface IScrollChain {
bytes calldata blobDataProof,
bytes calldata aggrProof
) external;
/// @notice Finalize a committed batch (with blob) on layer 1 without providing proof.
/// @param batchHeader The header of current batch, see the encoding in comments of `commitBatch`.
/// @param prevStateRoot The state root of parent batch.
/// @param postStateRoot The state root of current batch.
/// @param withdrawRoot The withdraw trie root of current batch.
/// @param blobDataProof The proof for blob data.
function finalizeBatch4844(
bytes calldata batchHeader,
bytes32 prevStateRoot,
bytes32 postStateRoot,
bytes32 withdrawRoot,
bytes calldata blobDataProof
) external;
}
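
The `blobDataProof` argument above is forwarded to the EIP-4844 point evaluation precompile, whose input is the 192-byte concatenation `versioned_hash (32) | z (32) | y (32) | commitment (48) | proof (48)`. A minimal Go sketch of that layout, assuming `blobDataProof` carries the last four fields in that order (inferred from `abi.encodePacked(_blobVersionedHash, _blobDataProof)` in ScrollChain below):

```go
package main

import "fmt"

// pointEvaluationInput assembles the 192-byte input of the EIP-4844 point
// evaluation precompile: versioned_hash | z | y | commitment | proof.
// The internal layout of blobDataProof is an assumption inferred from how
// ScrollChain concatenates it directly after the versioned hash.
func pointEvaluationInput(versionedHash, z, y [32]byte, commitment, proof [48]byte) []byte {
	input := make([]byte, 0, 192)
	input = append(input, versionedHash[:]...)
	input = append(input, z[:]...)
	input = append(input, y[:]...)
	input = append(input, commitment[:]...)
	input = append(input, proof[:]...)
	return input
}

func main() {
	var vh, z, y [32]byte
	var commitment, proof [48]byte
	fmt.Println("precompile input length:", len(pointEvaluationInput(vh, z, y, commitment, proof)))
}
```

On success the precompile returns two 32-byte words, FIELD_ELEMENTS_PER_BLOB and BLS_MODULUS, which is why the contract code below decodes two uint256 values and checks the second against BLS_MODULUS.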

View File

@@ -402,6 +402,13 @@ contract L1MessageQueue is OwnableUpgradeable, IL1MessageQueue {
emit UpdateMaxGasLimit(_oldMaxGasLimit, _newMaxGasLimit);
}
function appendHashes(uint256 _fromQueueIndex, bytes32[] memory _hashes) external {
require(_fromQueueIndex == messageQueue.length, "messageQueue index mismatch");
for (uint256 i = 0; i < _hashes.length; i++) {
messageQueue.push(_hashes[i]);
}
}
/**********************
* Internal Functions *
**********************/

View File

@@ -392,8 +392,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
bytes32 _dataHash = BatchHeaderV0Codec.getDataHash(memPtr);
// verify previous state root.
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// // verify previous state root.
// if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
@@ -408,8 +408,47 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
// check and update lastFinalizedBatchIndex
unchecked {
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
lastFinalizedBatchIndex = _batchIndex;
// if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
if (_batchIndex > lastFinalizedBatchIndex) lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
finalizedStateRoots[_batchIndex] = _postStateRoot;
withdrawRoots[_batchIndex] = _withdrawRoot;
// // Pop finalized and non-skipped message from L1MessageQueue.
// _popL1Messages(
// BatchHeaderV0Codec.getSkippedBitmapPtr(memPtr),
// BatchHeaderV0Codec.getTotalL1MessagePopped(memPtr),
// BatchHeaderV0Codec.getL1MessagePopped(memPtr)
// );
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
/// @inheritdoc IScrollChain
function finalizeBatch(
bytes calldata _batchHeader,
bytes32 _prevStateRoot,
bytes32 _postStateRoot,
bytes32 _withdrawRoot
) external override OnlyProver whenNotPaused {
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
// compute batch hash and verify
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
// // verify previous state root.
// if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
// check and update lastFinalizedBatchIndex
unchecked {
// if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
if (_batchIndex > lastFinalizedBatchIndex) lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
@@ -417,11 +456,11 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
withdrawRoots[_batchIndex] = _withdrawRoot;
// Pop finalized and non-skipped message from L1MessageQueue.
_popL1Messages(
BatchHeaderV0Codec.getSkippedBitmapPtr(memPtr),
BatchHeaderV0Codec.getTotalL1MessagePopped(memPtr),
BatchHeaderV0Codec.getL1MessagePopped(memPtr)
);
// _popL1Messages(
// BatchHeaderV0Codec.getSkippedBitmapPtr(memPtr),
// BatchHeaderV0Codec.getTotalL1MessagePopped(memPtr),
// BatchHeaderV0Codec.getL1MessagePopped(memPtr)
// );
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
@@ -461,8 +500,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput();
}
// verify previous state root.
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// // verify previous state root.
// if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
@@ -489,20 +528,73 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
// check and update lastFinalizedBatchIndex
unchecked {
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
lastFinalizedBatchIndex = _batchIndex;
// if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
if (_batchIndex > lastFinalizedBatchIndex) lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
finalizedStateRoots[_batchIndex] = _postStateRoot;
withdrawRoots[_batchIndex] = _withdrawRoot;
// Pop finalized and non-skipped message from L1MessageQueue.
_popL1Messages(
BatchHeaderV1Codec.getSkippedBitmapPtr(memPtr),
BatchHeaderV1Codec.getTotalL1MessagePopped(memPtr),
BatchHeaderV1Codec.getL1MessagePopped(memPtr)
);
// // Pop finalized and non-skipped message from L1MessageQueue.
// _popL1Messages(
// BatchHeaderV1Codec.getSkippedBitmapPtr(memPtr),
// BatchHeaderV1Codec.getTotalL1MessagePopped(memPtr),
// BatchHeaderV1Codec.getL1MessagePopped(memPtr)
// );
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
/// @inheritdoc IScrollChain
function finalizeBatch4844(
bytes calldata _batchHeader,
bytes32 _prevStateRoot,
bytes32 _postStateRoot,
bytes32 _withdrawRoot,
bytes calldata _blobDataProof
) external override OnlyProver whenNotPaused {
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
// compute batch hash and verify
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(memPtr);
// Calls the point evaluation precompile and verifies the output
{
(bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(
abi.encodePacked(_blobVersionedHash, _blobDataProof)
);
// We verify that the point evaluation precompile call was successful by checking that the last 32 bytes of the
// response equal BLS_MODULUS, as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
if (!success) revert ErrorCallPointEvaluationPrecompileFailed();
(, uint256 result) = abi.decode(data, (uint256, uint256));
if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput();
}
// // verify previous state root.
// if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
// check and update lastFinalizedBatchIndex
unchecked {
// if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
if (_batchIndex > lastFinalizedBatchIndex) lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
finalizedStateRoots[_batchIndex] = _postStateRoot;
withdrawRoots[_batchIndex] = _withdrawRoot;
// // Pop finalized and non-skipped message from L1MessageQueue.
// _popL1Messages(
// BatchHeaderV1Codec.getSkippedBitmapPtr(memPtr),
// BatchHeaderV1Codec.getTotalL1MessagePopped(memPtr),
// BatchHeaderV1Codec.getL1MessagePopped(memPtr)
// );
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
@@ -877,7 +969,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr);
if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
unchecked {
_totalTransactionsInChunk += dataPtr - startPtr; // number of non-skipped l1 messages
_totalTransactionsInChunk += (dataPtr - startPtr) / 32; // number of non-skipped l1 messages
_totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs
_totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
_totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
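
The last hunk is the one-line counting fix from #1232: `dataPtr - startPtr` is a byte distance, and each non-skipped L1 message occupies exactly one 32-byte hash in that region, so the message count is the distance divided by 32. A minimal illustration:

```go
package main

import "fmt"

// numNonSkippedL1Messages shows the corrected arithmetic: every non-skipped
// L1 message contributes one 32-byte hash, so the count is the byte span
// divided by 32, not the raw span.
func numNonSkippedL1Messages(startPtr, dataPtr uint64) uint64 {
	return (dataPtr - startPtr) / 32
}

func main() {
	// Three 32-byte hashes were written, so a span of 96 bytes means 3 messages.
	fmt.Println(numNonSkippedL1Messages(0x80, 0x80+96))
}
```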

View File

@@ -159,6 +159,22 @@ func (o *Batch) GetProvingStatusByHash(ctx context.Context, hash string) (types.ProvingStatus, error) {
return types.ProvingStatus(batch.ProvingStatus), nil
}
// GetLatestBatch retrieves the latest batch from the database.
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Order("index desc")
var latestBatch Batch
if err := db.First(&latestBatch).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
}
return &latestBatch, nil
}
// GetAttemptsByHash gets batch attempts by hash. Used by unit tests
func (o *Batch) GetAttemptsByHash(ctx context.Context, hash string) (int16, int16, error) {
db := o.db.WithContext(ctx)
@@ -171,19 +171,6 @@ func (o *Batch) GetAttemptsByHash(ctx context.Context, hash string) (int16, int16, error) {
return batch.ActiveAttempts, batch.TotalAttempts, nil
}
// GetBatchByIndex retrieves the batch by the given index.
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("index = ?", index)
var batch Batch
if err := db.First(&batch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index)
}
return &batch, nil
}
// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...*gorm.DB) (*Batch, error) {
if batch == nil {
@@ -204,13 +207,17 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...*gorm.DB) (*Batch, error) {
}
var startChunkIndex uint64
if batch.Index > 0 {
parentBatch, getErr := o.GetBatchByIndex(ctx, batch.Index-1)
if getErr != nil {
log.Error("failed to get batch by index", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", getErr)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", getErr)
}
parentBatch, err := o.GetLatestBatch(ctx)
if err != nil {
log.Error("failed to get latest batch", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
// if parentBatch==nil, no batch record exists in the db yet (GetLatestBatch maps
// gorm.ErrRecordNotFound to nil, nil), so we use default empty values for the new batch;
// if parentBatch!=nil, we fill the parentBatch-related data into the new batch
if parentBatch != nil {
startChunkIndex = parentBatch.EndChunkIndex + 1
}

View File

@@ -161,6 +161,9 @@ func (o *Chunk) getLatestChunk(ctx context.Context) (*Chunk, error) {
var latestChunk Chunk
if err := db.First(&latestChunk).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("Chunk.getLatestChunk error: %w", err)
}
return &latestChunk, nil
@@ -231,7 +234,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
var parentChunkHash string
var parentChunkStateRoot string
parentChunk, err := o.getLatestChunk(ctx)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
if err != nil {
log.Error("failed to get latest chunk", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
@@ -248,7 +251,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to create DA chunk", "err", err)
log.Error("failed to initialize new DA chunk", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
@@ -258,6 +261,18 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(chunk)
if err != nil {
log.Error("failed to estimate chunk L1 commit calldata size", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(chunk)
if err != nil {
log.Error("failed to estimate chunk L1 commit gas", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
numBlocks := len(chunk.Blocks)
newChunk := Chunk{
Index: chunkIndex,
@@ -266,6 +281,10 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: chunk.L2GasUsed(),
TotalL2TxNum: chunk.NumL2Transactions(),
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
TotalL1CommitGas: totalL1CommitGas,
StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),

File diff suppressed because one or more lines are too long

View File

@@ -10,7 +10,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
@@ -84,7 +83,7 @@ func action(ctx *cli.Context) error {
if err != nil {
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false /* initGenesis */, relayer.ServiceTypeL2GasOracle, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, relayer.ServiceTypeL2GasOracle, registry)
if err != nil {
log.Crit("failed to create new l2 relayer", "config file", cfgFile, "error", err)
}

View File

@@ -72,18 +72,18 @@ func action(ctx *cli.Context) error {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
if err != nil {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}
genesisPath := ctx.String(utils.Genesis.Name)
genesis, err := config.ReadGenesis(genesisPath)
if err != nil {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
if err != nil {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, genesis.Config, db, registry)
if err != nil {
log.Crit("failed to create chunkProposer", "config file", cfgFile, "error", err)

View File

@@ -10,10 +10,11 @@
"sender_config": {
"endpoint": "https://rpc.scroll.io",
"escalate_blocks": 1,
"confirmations": "0x0",
"confirmations": "0x1",
"escalate_multiple_num": 2,
"escalate_multiple_den": 1,
"max_gas_price": 1000000000000,
"max_blob_gas_price": 10000000000000,
"tx_type": "LegacyTx",
"check_pending_time": 1
},
@@ -34,7 +35,7 @@
"sender_config": {
"endpoint": "https://rpc.ankr.com/eth",
"escalate_blocks": 1,
"confirmations": "0x0",
"confirmations": "0x6",
"escalate_multiple_num": 2,
"escalate_multiple_den": 1,
"max_gas_price": 1000000000000,

View File

@@ -3,7 +3,6 @@ package relayer
import (
"context"
"fmt"
"math"
"math/big"
"sort"
"time"
@@ -14,17 +13,13 @@ import (
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/common/types/encoding/codecv1"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
bridgeAbi "scroll-tech/rollup/abi"
@@ -66,12 +61,10 @@ type Layer2Relayer struct {
chainMonitorClient *resty.Client
metrics *l2RelayerMetrics
banachForkHeight uint64
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
var gasOracleSender, commitSender, finalizeSender *sender.Sender
var err error
@@ -143,15 +136,6 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
cfg: cfg,
}
// If BanachBlock is not set in chain's genesis config, banachForkHeight is inf,
// which means rollup-relayer uses codecv0 by default.
// TODO: Must change it to real fork name.
if chainCfg.BanachBlock != nil {
layer2Relayer.banachForkHeight = chainCfg.BanachBlock.Uint64()
} else {
layer2Relayer.banachForkHeight = math.MaxUint64
}
// chain_monitor client
if cfg.ChainMonitor.Enabled {
layer2Relayer.chainMonitorClient = resty.New()
@@ -205,7 +189,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
err = r.db.Transaction(func(dbTX *gorm.DB) error {
var dbChunk *orm.Chunk
dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, encoding.CodecV0, dbTX)
dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, dbTX)
if err != nil {
return fmt.Errorf("failed to insert chunk: %v", err)
}
@@ -222,7 +206,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
}
var dbBatch *orm.Batch
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, dbTX)
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
@@ -255,9 +239,9 @@ func (r *Layer2Relayer) initializeGenesis() error {
func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
// encode "importGenesisBatch" transaction calldata
calldata, packErr := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if packErr != nil {
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
calldata, err := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if err != nil {
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, err)
}
// submit genesis batch to L1 rollup contract
@@ -300,8 +284,8 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
func (r *Layer2Relayer) ProcessGasPriceOracle() {
r.metrics.rollupL2RelayerGasPriceOraclerRunTotal.Inc()
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
if err != nil {
log.Error("Failed to GetLatestBatch", "err", err)
if batch == nil || err != nil {
log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
return
}
@@ -346,78 +330,95 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
func (r *Layer2Relayer) ProcessPendingBatches() {
// get pending batches from database in ascending order by their index.
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, 5)
batches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, 5)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
}
for _, dbBatch := range dbBatches {
for _, batch := range batches {
r.metrics.rollupL2RelayerProcessPendingBatchTotal.Inc()
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
// get current header and parent header.
daBatch, err := codecv0.NewDABatchFromBytes(batch.BatchHeader)
if err != nil {
log.Error("failed to get chunks in range", "err", err)
log.Error("Failed to initialize new DA batch from bytes", "index", batch.Index, "hash", batch.Hash, "err", err)
return
}
parentBatch := &orm.Batch{}
if batch.Index > 0 {
parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
if err != nil {
log.Error("Failed to get parent batch header", "index", batch.Index-1, "error", err)
return
}
if types.RollupStatus(parentBatch.RollupStatus) == types.RollupCommitFailed {
log.Error("Previous batch commit failed, halting further committing",
"index", parentBatch.Index, "tx hash", parentBatch.CommitTxHash)
return
}
}
// get the metadata of chunks for the batch
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, batch.StartChunkIndex, batch.EndChunkIndex)
if err != nil {
log.Error("Failed to fetch chunks",
"start index", batch.StartChunkIndex,
"end index", batch.EndChunkIndex, "error", err)
return
}
chunks := make([]*encoding.Chunk, len(dbChunks))
encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks {
blocks, getErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if getErr != nil {
log.Error("failed to get blocks in range", "err", getErr)
var blocks []*encoding.Block
blocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil {
log.Error("Failed to fetch blocks", "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return
}
chunks[i] = &encoding.Chunk{Blocks: blocks}
chunk := &encoding.Chunk{
Blocks: blocks,
}
var daChunk *codecv0.DAChunk
daChunk, err = codecv0.NewDAChunk(chunk, c.TotalL1MessagesPoppedBefore)
if err != nil {
log.Error("Failed to initialize new DA chunk", "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return
}
var daChunkBytes []byte
daChunkBytes, err = daChunk.Encode()
if err != nil {
log.Error("Failed to encode DA chunk", "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return
}
encodedChunks[i] = daChunkBytes
}
if dbBatch.Index == 0 {
log.Error("invalid args: batch index is 0, should only happen in committing genesis batch")
calldata, err := r.l1RollupABI.Pack("commitBatch", daBatch.Version, parentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
if err != nil {
log.Error("Failed to pack commitBatch", "index", batch.Index, "error", err)
return
}
dbParentBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, dbBatch.Index-1)
if getErr != nil {
log.Error("failed to get parent batch header", "err", getErr)
return
}
var calldata []byte
var blob *kzg4844.Blob
if dbChunks[0].StartBlockNumber < r.banachForkHeight { // codecv0
calldata, err = r.constructCommitBatchPayloadCodecV0(dbBatch, dbParentBatch, dbChunks, chunks)
if err != nil {
log.Error("failed to construct commitBatch payload codecv0", "index", dbBatch.Index, "err", err)
return
}
} else { // codecv1
calldata, blob, err = r.constructCommitBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks)
if err != nil {
log.Error("failed to construct commitBatch payload codecv1", "index", dbBatch.Index, "err", err)
return
}
}
// fallbackGasLimit is non-zero only when sending non-blob transactions.
fallbackGasLimit := uint64(float64(dbBatch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
if types.RollupStatus(dbBatch.RollupStatus) == types.RollupCommitFailed {
// use eth_estimateGas if this batch has been committed and failed at least once.
// send transaction
fallbackGasLimit := uint64(float64(batch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
if types.RollupStatus(batch.RollupStatus) == types.RollupCommitFailed {
// use eth_estimateGas if this batch has failed to commit at least once.
fallbackGasLimit = 0
log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", dbBatch.Hash)
log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", batch.Hash)
}
txHash, err := r.commitSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, blob, fallbackGasLimit)
txHash, err := r.commitSender.SendTransaction(batch.Hash, &r.cfg.RollupContractAddress, calldata, nil, fallbackGasLimit)
if err != nil {
log.Error(
"Failed to send commitBatch tx to layer1",
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"err", err,
)
log.Debug(
"Failed to send commitBatch tx to layer1",
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(calldata),
"err", err,
@@ -425,13 +426,13 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupCommitting)
err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batch.Hash, txHash.String(), types.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", dbBatch.Hash, "index", dbBatch.Index, "err", err)
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
return
}
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Inc()
log.Info("Sent the commitBatch tx to layer1", "batch index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
}
}
@@ -500,98 +501,103 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
}
func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error {
func (r *Layer2Relayer) finalizeBatch(batch *orm.Batch, withProof bool) error {
// Check batch status before send `finalizeBatch` tx.
if r.cfg.ChainMonitor.Enabled {
var batchStatus bool
batchStatus, err := r.getBatchStatusByIndex(dbBatch)
batchStatus, err := r.getBatchStatusByIndex(batch)
if err != nil {
r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", dbBatch.Index, "err", err)
log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
return err
}
if !batchStatus {
r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", dbBatch.Index)
log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
return err
}
}
if dbBatch.Index == 0 {
return fmt.Errorf("invalid args: batch index is 0, should only happen in finalizing genesis batch")
var parentBatchStateRoot string
if batch.Index > 0 {
var parentBatch *orm.Batch
parentBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
// handle unexpected db error
if err != nil {
log.Error("Failed to get batch", "index", batch.Index-1, "err", err)
return err
}
parentBatchStateRoot = parentBatch.StateRoot
}
dbParentBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, dbBatch.Index-1)
if getErr != nil {
return fmt.Errorf("failed to get batch, index: %d, err: %w", dbBatch.Index-1, getErr)
}
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
if err != nil {
return fmt.Errorf("failed to fetch chunks: %w", err)
}
var aggProof *message.BatchProof
var txCalldata []byte
if withProof {
aggProof, getErr = r.batchOrm.GetVerifiedProofByHash(r.ctx, dbBatch.Hash)
if getErr != nil {
return fmt.Errorf("failed to get verified proof by hash, index: %d, err: %w", dbBatch.Index, getErr)
aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, batch.Hash)
if err != nil {
log.Error("get verified proof by hash failed", "hash", batch.Hash, "err", err)
return err
}
if err = aggProof.SanityCheck(); err != nil {
return fmt.Errorf("failed to check agg_proof sanity, index: %d, err: %w", dbBatch.Index, err)
log.Error("agg_proof sanity check fails", "hash", batch.Hash, "error", err)
return err
}
txCalldata, err = r.l1RollupABI.Pack(
"finalizeBatchWithProof",
batch.BatchHeader,
common.HexToHash(parentBatchStateRoot),
common.HexToHash(batch.StateRoot),
common.HexToHash(batch.WithdrawRoot),
aggProof.Proof,
)
if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err)
return err
}
} else {
var err error
txCalldata, err = r.l1RollupABI.Pack(
"finalizeBatch",
batch.BatchHeader,
common.HexToHash(parentBatchStateRoot),
common.HexToHash(batch.StateRoot),
common.HexToHash(batch.WithdrawRoot),
)
if err != nil {
log.Error("Pack finalizeBatch failed", "err", err)
return err
}
}
var calldata []byte
if dbChunks[0].StartBlockNumber < r.banachForkHeight { // codecv0
calldata, err = r.constructFinalizeBatchPayloadCodecV0(dbBatch, dbParentBatch, aggProof)
if err != nil {
return fmt.Errorf("failed to construct commitBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err)
}
} else { // codecv1
chunks := make([]*encoding.Chunk, len(dbChunks))
for i, c := range dbChunks {
blocks, dbErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if dbErr != nil {
return fmt.Errorf("failed to fetch blocks: %w", dbErr)
}
chunks[i] = &encoding.Chunk{Blocks: blocks}
}
calldata, err = r.constructFinalizeBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks, aggProof)
if err != nil {
return fmt.Errorf("failed to construct commitBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err)
}
}
txHash, err := r.finalizeSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0)
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.finalizeSender.SendTransaction(batch.Hash, &r.cfg.RollupContractAddress, txCalldata, nil, 0)
finalizeTxHash := &txHash
if err != nil {
log.Error(
"finalizeBatch in layer1 failed",
"with proof", withProof,
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"err", err,
)
log.Debug(
"finalizeBatch in layer1 failed",
"with proof", withProof,
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(calldata),
"calldata", common.Bytes2Hex(txCalldata),
"err", err,
)
return err
}
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash)
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", batch.Index, "batch hash", batch.Hash, "tx hash", batch.Hash)
// record and sync with db, @todo handle db error
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupFinalizing); err != nil {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err)
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch.Hash, finalizeTxHash.String(), types.RollupFinalizing); err != nil {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", batch.Index, "batch hash", batch.Hash, "tx hash", finalizeTxHash.String(), "err", err)
return err
}
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
@@ -723,140 +729,6 @@ func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
}
}
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, error) {
daBatch, err := codecv0.NewDABatchFromBytes(dbBatch.BatchHeader)
if err != nil {
return nil, fmt.Errorf("failed to create DA batch from bytes: %w", err)
}
encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks {
daChunk, createErr := codecv0.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
if createErr != nil {
return nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
}
daChunkBytes, encodeErr := daChunk.Encode()
if encodeErr != nil {
return nil, fmt.Errorf("failed to encode DA chunk: %w", encodeErr)
}
encodedChunks[i] = daChunkBytes
}
calldata, packErr := r.l1RollupABI.Pack("commitBatch", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
if packErr != nil {
return nil, fmt.Errorf("failed to pack commitBatch: %w", packErr)
}
return calldata, nil
}
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV1(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
batch := &encoding.Batch{
Index: dbBatch.Index,
TotalL1MessagePoppedBefore: dbChunks[len(dbChunks)-1].TotalL1MessagesPoppedBefore + dbChunks[len(dbChunks)-1].TotalL1MessagesPoppedInChunk,
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
Chunks: chunks,
}
daBatch, createErr := codecv1.NewDABatch(batch)
if createErr != nil {
return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr)
}
encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks {
daChunk, createErr := codecv1.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
if createErr != nil {
return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
}
encodedChunks[i] = daChunk.Encode()
}
calldata, packErr := r.l1RollupABI.Pack("commitBatch", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
if packErr != nil {
return nil, nil, fmt.Errorf("failed to pack commitBatch: %w", packErr)
}
return calldata, daBatch.Blob(), nil
}
func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, aggProof *message.BatchProof) ([]byte, error) {
if aggProof != nil { // finalizeBatch with proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBatchWithProof",
dbBatch.BatchHeader,
common.HexToHash(dbParentBatch.StateRoot),
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
aggProof.Proof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBatchWithProof: %w", packErr)
}
return calldata, nil
}
// finalizeBatch without proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBatch",
dbBatch.BatchHeader,
common.HexToHash(dbParentBatch.StateRoot),
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBatch: %w", packErr)
}
return calldata, nil
}
func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV1(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk, aggProof *message.BatchProof) ([]byte, error) {
batch := &encoding.Batch{
Index: dbBatch.Index,
TotalL1MessagePoppedBefore: dbChunks[len(dbChunks)-1].TotalL1MessagesPoppedBefore + dbChunks[len(dbChunks)-1].TotalL1MessagesPoppedInChunk,
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
Chunks: chunks,
}
daBatch, createErr := codecv1.NewDABatch(batch)
if createErr != nil {
return nil, fmt.Errorf("failed to create DA batch: %w", createErr)
}
blobDataProof, getErr := daBatch.BlobDataProof()
if getErr != nil {
return nil, fmt.Errorf("failed to get blob data proof: %w", getErr)
}
if aggProof != nil { // finalizeBatch4844 with proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBatchWithProof4844",
dbBatch.BatchHeader,
common.HexToHash(dbParentBatch.StateRoot),
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
blobDataProof,
aggProof.Proof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBatchWithProof4844: %w", packErr)
}
return calldata, nil
}
// finalizeBatch4844 without proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBatch4844", // Assuming the "to be implemented" bypass function name is finalizeBatch4844.
dbBatch.BatchHeader,
common.HexToHash(dbParentBatch.StateRoot),
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
blobDataProof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBatch4844: %w", packErr)
}
return calldata, nil
}
// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
// for unit test
func (r *Layer2Relayer) StopSenders() {
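
One side of this diff carries the fork-gated codec selection: with `BanachBlock` unset in the chain config, `banachForkHeight` defaults to `math.MaxUint64`, so every chunk takes the codecv0 (calldata) path; a chunk whose start block has reached the fork height takes the codecv1 (blob) path instead. A runnable sketch of that rule:

```go
package main

import (
	"fmt"
	"math"
	"math/big"
)

// banachForkHeight mirrors the relayer logic above: a nil BanachBlock means
// the fork is effectively infinitely far away, so codecv0 is always chosen.
func banachForkHeight(banachBlock *big.Int) uint64 {
	if banachBlock == nil {
		return math.MaxUint64
	}
	return banachBlock.Uint64()
}

func main() {
	forkHeight := banachForkHeight(big.NewInt(100))
	for _, startBlock := range []uint64{0, 99, 100, 101} {
		codec := "codecv0 (calldata)"
		if startBlock >= forkHeight {
			codec = "codecv1 (blob)"
		}
		fmt.Printf("chunk starting at block %3d -> %s\n", startBlock, codec)
	}
}
```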

View File

@@ -11,9 +11,7 @@ import (
"github.com/agiledragon/gomonkey/v2"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/params"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
@@ -42,173 +40,128 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
}
func testL2RelayerProcessPendingBatches(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
for _, codecVersion := range codecVersions {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
chainConfig := &params.ChainConfig{}
if codecVersion == encoding.CodecV0 {
chainConfig.BanachBlock = big.NewInt(0)
}
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer relayer.StopSenders()
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
patchGuard := gomonkey.ApplyMethodFunc(l2Cli, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
return nil
})
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)
relayer.ProcessPendingBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitting, statuses[0])
relayer.StopSenders()
patchGuard.Reset()
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
relayer.ProcessPendingBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitting, statuses[0])
}
func testL2RelayerProcessCommittedBatches(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
for _, codecVersion := range codecVersions {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
chainConfig := &params.ChainConfig{}
if codecVersion == encoding.CodecV0 {
chainConfig.BanachBlock = big.NewInt(0)
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer relayer.StopSenders()
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupCommitted, statuses[0])
proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), dbBatch.Hash, proof, 100)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
relayer.StopSenders()
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupCommitted, statuses[0])
proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), dbBatch.Hash, proof, 100)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
}
func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
for _, codecVersion := range codecVersions {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
chainConfig := &params.ChainConfig{}
if codecVersion == encoding.CodecV0 {
chainConfig.BanachBlock = big.NewInt(0)
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
l2Cfg := cfg.L2Config
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer relayer.StopSenders()
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
})
assert.True(t, ok)
relayer.StopSenders()
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
})
assert.True(t, ok)
}
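// utils.TryTimes, used above, is treated as a bounded poll: retry a predicate
// until it reports success or the attempt budget is exhausted. A minimal
// sketch of the assumed semantics (the real helper's retry interval may
// differ):
func tryTimesSketch(n int, fn func() bool) bool {
	for i := 0; i < n; i++ {
		if fn() {
			return true
		}
		time.Sleep(time.Second) // assumed fixed backoff between attempts
	}
	return false
}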
func testL2RelayerCommitConfirm(t *testing.T) {
@@ -219,7 +172,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -229,13 +182,13 @@ func testL2RelayerCommitConfirm(t *testing.T) {
batchHashes := make([]string, len(isSuccessful))
for i := range batchHashes {
batch := &encoding.Batch{
Index: uint64(i + 1),
Index: uint64(i),
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
batchHashes[i] = dbBatch.Hash
}
@@ -275,7 +228,7 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -285,13 +238,13 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
batchHashes := make([]string, len(isSuccessful))
for i := range batchHashes {
batch := &encoding.Batch{
Index: uint64(i + 1),
Index: uint64(i),
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
batchHashes[i] = dbBatch.Hash
}
@@ -335,7 +288,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
}
batchOrm := orm.NewBatch(db)
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV0)
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1)
assert.NoError(t, err)
batch2 := &encoding.Batch{
@@ -345,14 +298,14 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
Chunks: []*encoding.Chunk{chunk2},
}
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV0)
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2)
assert.NoError(t, err)
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -392,7 +345,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
@@ -487,32 +440,32 @@ func testGetBatchStatusByIndex(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV0)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV0)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 1,
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
status, err := relayer.getBatchStatusByIndex(dbBatch)
assert.NoError(t, err)
assert.Equal(t, true, status)

View File

@@ -15,7 +15,6 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/docker"
dockercompose "scroll-tech/common/docker-compose/l1"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
@@ -26,8 +25,7 @@ var (
// config
cfg *config.Config
base *docker.App
posL1TestEnv *dockercompose.PoSL1TestEnv
base *docker.App
// l2geth client
l2Cli *ethclient.Client
@@ -53,10 +51,9 @@ func setupEnv(t *testing.T) {
cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err)
base.RunL2Geth(t)
base.RunDBImage(t)
base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = &database.Config{
DSN: base.DBConfig.DSN,
@@ -98,19 +95,10 @@ func setupEnv(t *testing.T) {
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
base.Free()
var err error
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
if err != nil {
log.Crit("failed to create PoS L1 test environment", "err", err)
}
if err := posL1TestEnv.Start(); err != nil {
log.Crit("failed to start PoS L1 test environment", "err", err)
}
defer posL1TestEnv.Stop()
m.Run()
base.Free()
}
func TestFunctions(t *testing.T) {

View File

@@ -37,6 +37,9 @@ const (
// DynamicFeeTxType type for DynamicFeeTx
DynamicFeeTxType = "DynamicFeeTx"
// BlobTxType type for BlobTx
BlobTxType = "BlobTx"
)
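// With this addition the sender recognizes three config.TxType values:
// "LegacyTx", "DynamicFeeTx", and "BlobTx". The string is matched by name in
// getFeeData and createAndSendTx below.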
// Confirmation struct used to indicate transaction confirmation details
@@ -160,9 +163,8 @@ func (s *Sender) getFeeData(target *common.Address, data []byte, sidecar *gethTy
case LegacyTxType:
return s.estimateLegacyGas(target, data, fallbackGasLimit)
case DynamicFeeTxType:
if sidecar == nil {
return s.estimateDynamicGas(target, data, baseFee, fallbackGasLimit)
}
return s.estimateDynamicGas(target, data, baseFee, fallbackGasLimit)
case BlobTxType:
return s.estimateBlobGas(target, data, sidecar, baseFee, blobBaseFee, fallbackGasLimit)
default:
return nil, fmt.Errorf("unsupported transaction type: %s", s.config.TxType)
@@ -179,7 +181,7 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
err error
)
if blob != nil {
if s.config.TxType == BlobTxType {
sidecar, err = makeSidecar(blob)
if err != nil {
log.Error("failed to make sidecar for blob transaction", "error", err)
@@ -233,36 +235,39 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, data
Data: data,
}
case DynamicFeeTxType:
if sidecar == nil {
txData = &gethTypes.DynamicFeeTx{
Nonce: nonce,
To: target,
Data: data,
Gas: feeData.gasLimit,
AccessList: feeData.accessList,
ChainID: s.chainID,
GasTipCap: feeData.gasTipCap,
GasFeeCap: feeData.gasFeeCap,
}
} else {
if target == nil {
log.Error("blob transaction to address cannot be nil", "address", s.auth.From.String(), "chainID", s.chainID.Uint64(), "nonce", s.auth.Nonce.Uint64())
return nil, errors.New("blob transaction to address cannot be nil")
}
txData = &gethTypes.DynamicFeeTx{
Nonce: nonce,
To: target,
Data: data,
Gas: feeData.gasLimit,
AccessList: feeData.accessList,
ChainID: s.chainID,
GasTipCap: feeData.gasTipCap,
GasFeeCap: feeData.gasFeeCap,
}
case BlobTxType:
if target == nil {
log.Error("blob transaction to address cannot be nil", "address", s.auth.From.String(), "chainID", s.chainID.Uint64(), "nonce", s.auth.Nonce.Uint64())
return nil, errors.New("blob transaction to address cannot be nil")
}
txData = &gethTypes.BlobTx{
ChainID: uint256.MustFromBig(s.chainID),
Nonce: nonce,
GasTipCap: uint256.MustFromBig(feeData.gasTipCap),
GasFeeCap: uint256.MustFromBig(feeData.gasFeeCap),
Gas: feeData.gasLimit,
To: *target,
Data: data,
AccessList: feeData.accessList,
BlobFeeCap: uint256.MustFromBig(feeData.blobGasFeeCap),
BlobHashes: sidecar.BlobHashes(),
Sidecar: sidecar,
}
if sidecar == nil {
log.Error("blob transaction sidecar cannot be nil", "address", s.auth.From.String(), "chainID", s.chainID.Uint64(), "nonce", s.auth.Nonce.Uint64())
return nil, errors.New("blob transaction sidecar cannot be nil")
}
txData = &gethTypes.BlobTx{
ChainID: uint256.MustFromBig(s.chainID),
Nonce: nonce,
GasTipCap: uint256.MustFromBig(feeData.gasTipCap),
GasFeeCap: uint256.MustFromBig(feeData.gasFeeCap),
Gas: feeData.gasLimit,
To: *target,
Data: data,
AccessList: feeData.accessList,
BlobFeeCap: uint256.MustFromBig(feeData.blobGasFeeCap),
BlobHashes: sidecar.BlobHashes(),
Sidecar: sidecar,
}
}
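// A minimal sketch of what makeSidecar (called from SendTransaction above)
// plausibly does, assuming go-ethereum's crypto/kzg4844 helpers; the exact
// signatures depend on the pinned geth fork, so treat this as illustrative
// rather than the repo's implementation:
func makeSidecarSketch(blob *kzg4844.Blob) (*gethTypes.BlobTxSidecar, error) {
	commitment, err := kzg4844.BlobToCommitment(*blob) // KZG commitment to the blob
	if err != nil {
		return nil, err
	}
	proof, err := kzg4844.ComputeBlobProof(*blob, commitment) // proof binding blob and commitment
	if err != nil {
		return nil, err
	}
	return &gethTypes.BlobTxSidecar{
		Blobs:       []kzg4844.Blob{*blob},
		Commitments: []kzg4844.Commitment{commitment},
		Proofs:      []kzg4844.Proof{proof},
	}, nil
}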
@@ -352,95 +357,94 @@ func (s *Sender) resubmitTransaction(tx *gethTypes.Transaction, baseFee, blobBas
txInfo["adjusted_gas_price"] = gasPrice.Uint64()
case DynamicFeeTxType:
if tx.BlobTxSidecar() == nil {
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
gasTipCap := new(big.Int).Mul(originalGasTipCap, escalateMultipleNum)
gasTipCap = new(big.Int).Div(gasTipCap, escalateMultipleDen)
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, escalateMultipleNum)
gasFeeCap = new(big.Int).Div(gasFeeCap, escalateMultipleDen)
// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}
// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}
// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}
if originalGasTipCap.Cmp(gasTipCap) == 0 {
log.Warn("gas tip cap bump corner case, add 1 wei", "original", originalGasTipCap.Uint64(), "adjusted", gasTipCap.Uint64())
gasTipCap = new(big.Int).Add(gasTipCap, big.NewInt(1))
}
if originalGasFeeCap.Cmp(gasFeeCap) == 0 {
log.Warn("gas fee cap bump corner case, add 1 wei", "original", originalGasFeeCap.Uint64(), "adjusted", gasFeeCap.Uint64())
gasFeeCap = new(big.Int).Add(gasFeeCap, big.NewInt(1))
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
} else {
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
originalBlobGasFeeCap := tx.BlobGasFeeCap()
// bumping at least 100%
gasTipCap := new(big.Int).Mul(originalGasTipCap, big.NewInt(2))
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, big.NewInt(2))
blobGasFeeCap := new(big.Int).Mul(originalBlobGasFeeCap, big.NewInt(2))
// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}
// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}
// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}
// adjust for rising blobbasefee
currentBlobGasFeeCap := getBlobGasFeeCap(new(big.Int).SetUint64(blobBaseFee))
if blobGasFeeCap.Cmp(currentBlobGasFeeCap) < 0 {
blobGasFeeCap = currentBlobGasFeeCap
}
// but don't exceed maxBlobGasPrice
if blobGasFeeCap.Cmp(maxBlobGasPrice) > 0 {
blobGasFeeCap = maxBlobGasPrice
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
feeData.blobGasFeeCap = blobGasFeeCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
txInfo["original_blob_gas_fee_cap"] = originalBlobGasFeeCap.Uint64()
txInfo["adjusted_blob_gas_fee_cap"] = blobGasFeeCap.Uint64()
// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}
// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}
// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}
if originalGasTipCap.Cmp(gasTipCap) == 0 {
log.Warn("gas tip cap bump corner case, add 1 wei", "original", originalGasTipCap.Uint64(), "adjusted", gasTipCap.Uint64())
gasTipCap = new(big.Int).Add(gasTipCap, big.NewInt(1))
}
if originalGasFeeCap.Cmp(gasFeeCap) == 0 {
log.Warn("gas fee cap bump corner case, add 1 wei", "original", originalGasFeeCap.Uint64(), "adjusted", gasFeeCap.Uint64())
gasFeeCap = new(big.Int).Add(gasFeeCap, big.NewInt(1))
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
case BlobTxType:
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
originalBlobGasFeeCap := tx.BlobGasFeeCap()
// bumping at least 100%
gasTipCap := new(big.Int).Mul(originalGasTipCap, big.NewInt(2))
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, big.NewInt(2))
blobGasFeeCap := new(big.Int).Mul(originalBlobGasFeeCap, big.NewInt(2))
// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}
// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}
// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}
// adjust for rising blobbasefee
currentBlobGasFeeCap := getBlobGasFeeCap(new(big.Int).SetUint64(blobBaseFee))
if blobGasFeeCap.Cmp(currentBlobGasFeeCap) < 0 {
blobGasFeeCap = currentBlobGasFeeCap
}
// but don't exceed maxBlobGasPrice
if blobGasFeeCap.Cmp(maxBlobGasPrice) > 0 {
blobGasFeeCap = maxBlobGasPrice
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
feeData.blobGasFeeCap = blobGasFeeCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
txInfo["original_blob_gas_fee_cap"] = originalBlobGasFeeCap.Uint64()
txInfo["adjusted_blob_gas_fee_cap"] = blobGasFeeCap.Uint64()
default:
return nil, fmt.Errorf("unsupported transaction type: %s", s.config.TxType)
}
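// A worked example of the two escalation rules above. Non-blob resubmissions
// bump each cap by EscalateMultipleNum/EscalateMultipleDen (110/100 in the
// tests below, matching geth's default 10% replacement threshold), nudging by
// 1 wei when integer division leaves a cap unchanged; blob resubmissions
// double all three caps. Illustrative only; the names are not the repo's API.
func bumpCapSketch(original *big.Int, num, den int64) *big.Int {
	bumped := new(big.Int).Mul(original, big.NewInt(num))
	bumped.Div(bumped, big.NewInt(den))
	if bumped.Cmp(original) == 0 {
		bumped.Add(bumped, big.NewInt(1)) // corner case: force a strictly higher cap
	}
	return bumped
}
// bumpCapSketch(big.NewInt(1_000_000_000), 110, 100) -> 1_100_000_000
// bumpCapSketch(big.NewInt(5), 110, 100)             -> 6 (5*110/100 rounds back to 5)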

View File

@@ -45,8 +45,7 @@ var (
cfg *config.Config
base *docker.App
posL1TestEnv *dockercompose.PoSL1TestEnv
txTypes = []string{"LegacyTx", "DynamicFeeTx", "DynamicFeeTx"}
txBlob = []*kzg4844.Blob{nil, nil, randBlob()}
txTypes = []string{"LegacyTx", "DynamicFeeTx", "BlobTx"}
txUint8Types = []uint8{0, 2, 3}
db *gorm.DB
testContractsAddress common.Address
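// txUint8Types mirrors the canonical transaction type bytes:
// 0x00 legacy, 0x02 dynamic fee (EIP-1559), 0x03 blob (EIP-4844).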
@@ -81,7 +80,7 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err)
privateKey = priv
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
base.RunDBImage(t)
db, err = database.InitDB(
@@ -111,7 +110,7 @@ func setupEnv(t *testing.T) {
testContractsAddress = crypto.CreateAddress(auth.From, nonce)
tx := gethTypes.NewContractCreation(nonce, big.NewInt(0), 10000000, big.NewInt(10000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
tx := gethTypes.NewContractCreation(nonce, big.NewInt(0), 1000000, big.NewInt(10000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
signedTx, err := auth.Signer(auth.From, tx)
assert.NoError(t, err)
err = l1Client.SendTransaction(context.Background(), signedTx)
@@ -159,14 +158,14 @@ func testNewSender(t *testing.T) {
assert.NoError(t, migrate.ResetDB(sqlDB))
// exit by Stop()
cfgCopy1 := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy1.TxType = txType
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
newSender1.Stop()
// exit by ctx.Done()
cfgCopy2 := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy2.TxType = txType
subCtx, cancel := context.WithCancel(context.Background())
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
@@ -181,12 +180,12 @@ func testSendAndRetrieveTransaction(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
hash, err := s.SendTransaction("0", &common.Address{}, nil, txBlob[i], 0)
hash, err := s.SendTransaction("0", &common.Address{}, nil, randBlob(), 0)
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
assert.NoError(t, err)
@@ -211,12 +210,12 @@ func testSendAndRetrieveTransaction(t *testing.T) {
}
func testFallbackGasLimit(t *testing.T) {
for i, txType := range txTypes {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
@@ -226,7 +225,7 @@ func testFallbackGasLimit(t *testing.T) {
assert.NoError(t, err)
// FallbackGasLimit = 0
txHash0, err := s.SendTransaction("0", &common.Address{}, nil, txBlob[i], 0)
txHash0, err := s.SendTransaction("0", &common.Address{}, nil, randBlob(), 0)
assert.NoError(t, err)
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
assert.NoError(t, err)
@@ -246,7 +245,7 @@ func testFallbackGasLimit(t *testing.T) {
},
)
txHash1, err := s.SendTransaction("1", &common.Address{}, nil, txBlob[i], 100000)
txHash1, err := s.SendTransaction("1", &common.Address{}, nil, randBlob(), 100000)
assert.NoError(t, err)
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
assert.NoError(t, err)
@@ -264,8 +263,8 @@ func testFallbackGasLimit(t *testing.T) {
}
func testResubmitZeroGasPriceTransaction(t *testing.T) {
for i, txType := range txTypes {
if txBlob[i] != nil {
for _, txType := range txTypes {
if txType == BlobTxType {
continue
}
@@ -273,7 +272,7 @@ func testResubmitZeroGasPriceTransaction(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
@@ -313,7 +312,7 @@ func testAccessListTransactionGasLimit(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
@@ -324,20 +323,16 @@ func testAccessListTransactionGasLimit(t *testing.T) {
data, err := l2GasOracleABI.Pack("setL2BaseFee", big.NewInt(int64(i+1)))
assert.NoError(t, err)
var sidecar *gethTypes.BlobTxSidecar
if txBlob[i] != nil {
sidecar, err = makeSidecar(txBlob[i])
assert.NoError(t, err)
}
sidecar, err := makeSidecar(randBlob())
assert.NoError(t, err)
gasLimit, accessList, err := s.estimateGasLimit(&testContractsAddress, data, sidecar, nil, big.NewInt(1000000000), big.NewInt(1000000000), big.NewInt(1000000000))
assert.NoError(t, err)
if txType == LegacyTxType { // Legacy transactions can not have an access list.
assert.Equal(t, uint64(43956), gasLimit)
assert.Equal(t, uint64(43935), gasLimit)
assert.Nil(t, accessList)
} else { // Dynamic fee and blob transactions can have an access list.
assert.Equal(t, uint64(43479), gasLimit)
assert.Equal(t, uint64(43458), gasLimit)
assert.NotNil(t, accessList)
}
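// Context for the branch above: access lists were introduced by EIP-2930
// (transaction type 0x01), so type-0 legacy transactions cannot carry one,
// while dynamic-fee and blob transactions can; pre-declaring the accessed
// storage is why the access-list estimate here comes out slightly lower.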
@@ -346,12 +341,12 @@ func testAccessListTransactionGasLimit(t *testing.T) {
}
func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
for i, txType := range txTypes {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
// Bump the gas price, gas tip cap and gas fee cap to just touch geth's default minimum replacement threshold of 10%.
cfgCopy.EscalateMultipleNum = 110
cfgCopy.EscalateMultipleDen = 100
@@ -365,11 +360,8 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
blobGasFeeCap: big.NewInt(1000000000),
gasLimit: 50000,
}
var sidecar *gethTypes.BlobTxSidecar
if txBlob[i] != nil {
sidecar, err = makeSidecar(txBlob[i])
assert.NoError(t, err)
}
sidecar, err := makeSidecar(randBlob())
assert.NoError(t, err)
tx, err := s.createAndSendTx(feeData, &common.Address{}, nil, sidecar, nil)
assert.NoError(t, err)
assert.NotNil(t, tx)
@@ -391,8 +383,8 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
}
func testResubmitUnderpricedTransaction(t *testing.T) {
for i, txType := range txTypes {
if txBlob[i] != nil {
for _, txType := range txTypes {
if txType == BlobTxType {
continue
}
@@ -400,7 +392,7 @@ func testResubmitUnderpricedTransaction(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
// Bump the gas price, gas tip cap and gas fee cap by less than 10%, below geth's default replacement threshold.
cfgCopy.EscalateMultipleNum = 109
cfgCopy.EscalateMultipleDen = 100
@@ -439,7 +431,7 @@ func testResubmitDynamicFeeTransactionWithRisingBaseFee(t *testing.T) {
assert.NoError(t, migrate.ResetDB(sqlDB))
txType := "DynamicFeeTx"
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
@@ -481,8 +473,9 @@ func testResubmitBlobTransactionWithRisingBaseFeeAndBlobBaseFee(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = DynamicFeeTxType
txType := "BlobTx"
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
@@ -538,7 +531,7 @@ func testCheckPendingTransactionTxConfirmed(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeCommitBatch, db, nil)
assert.NoError(t, err)
@@ -579,7 +572,7 @@ func testCheckPendingTransactionResubmitTxConfirmed(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.EscalateBlocks = 0
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeFinalizeBatch, db, nil)
@@ -639,7 +632,7 @@ func testCheckPendingTransactionReplacedTxConfirmed(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.EscalateBlocks = 0
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeL1GasOracle, db, nil)
@@ -709,7 +702,7 @@ func testCheckPendingTransactionTxMultipleTimesWithOnlyOneTxPending(t *testing.T
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.EscalateBlocks = 0
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeCommitBatch, db, nil)
@@ -784,8 +777,8 @@ func testBlobTransactionWithBlobhashOpContractCall(t *testing.T) {
)
assert.NoError(t, err)
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = DynamicFeeTxType
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = BlobTxType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeL1GasOracle, db, nil)
assert.NoError(t, err)
defer s.Stop()

View File

@@ -3,7 +3,6 @@ package watcher
import (
"context"
"fmt"
"math"
"time"
"github.com/prometheus/client_golang/prometheus"
@@ -15,10 +14,10 @@ import (
"scroll-tech/common/forks"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)
// BatchProposer proposes batches based on available unbatched chunks.
@@ -36,7 +35,6 @@ type BatchProposer struct {
batchTimeoutSec uint64
gasCostIncreaseMultiplier float64
forkMap map[uint64]bool
banachForkHeight uint64
batchProposerCircleTotal prometheus.Counter
proposeBatchFailureTotal prometheus.Counter
@@ -44,7 +42,6 @@ type BatchProposer struct {
proposeBatchUpdateInfoFailureTotal prometheus.Counter
totalL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalL1CommitBlobSize prometheus.Gauge
batchChunksNum prometheus.Gauge
batchFirstBlockTimeoutReached prometheus.Counter
batchChunksProposeNotEnoughTotal prometheus.Counter
@@ -61,7 +58,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"forkHeights", forkHeights)
p := &BatchProposer{
return &BatchProposer{
ctx: ctx,
db: db,
batchOrm: orm.NewBatch(db),
@@ -98,10 +95,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
Name: "rollup_propose_batch_total_l1_call_data_size",
Help: "The total l1 call data size",
}),
totalL1CommitBlobSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_total_l1_commit_blob_size",
Help: "The total l1 commit blob size",
}),
batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_chunks_number",
Help: "The number of chunks in the batch",
@@ -115,31 +108,22 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
Help: "Total number of batch chunk propose not enough",
}),
}
// If BanachBlock is not set in the chain's genesis config, banachForkHeight is effectively infinite,
// which means the batch-proposer uses codecv0 by default.
// TODO: change this to the real fork name.
if chainCfg.BanachBlock != nil {
p.banachForkHeight = chainCfg.BanachBlock.Uint64()
} else {
p.banachForkHeight = math.MaxUint64
}
return p
}
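// A sketch of the fork-gated codec choice that banachForkHeight encodes:
// chunks whose first block is at or past the fork height are committed with
// the blob-carrying CodecV1, anything earlier with calldata-based CodecV0.
// The helper name is illustrative, not the repo's API.
func chooseCodecSketch(startBlockNumber, banachForkHeight uint64) encoding.CodecVersion {
	if startBlockNumber >= banachForkHeight {
		return encoding.CodecV1 // EIP-4844 blob encoding
	}
	return encoding.CodecV0 // legacy calldata encoding
}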
// TryProposeBatch tries to propose a new batch.
func (p *BatchProposer) TryProposeBatch() {
p.batchProposerCircleTotal.Inc()
if err := p.proposeBatch(); err != nil {
batch, err := p.proposeBatch()
if err != nil {
p.proposeBatchFailureTotal.Inc()
log.Error("proposeBatchChunks failed", "err", err)
return
}
}
func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion encoding.CodecVersion) error {
err := p.db.Transaction(func(dbTX *gorm.DB) error {
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, codecVersion, dbTX)
if batch == nil {
return
}
err = p.db.Transaction(func(dbTX *gorm.DB) error {
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure",
"start chunk index", batch.StartChunkIndex, "end chunk index", batch.EndChunkIndex, "error", dbErr)
@@ -156,23 +140,22 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
p.proposeBatchUpdateInfoFailureTotal.Inc()
log.Error("update batch info in db failed", "err", err)
}
return nil
}
func (p *BatchProposer) proposeBatch() error {
func (p *BatchProposer) proposeBatch() (*encoding.Batch, error) {
unbatchedChunkIndex, err := p.batchOrm.GetFirstUnbatchedChunkIndex(p.ctx)
if err != nil {
return err
return nil, err
}
// select at most p.maxChunkNumPerBatch chunks
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, unbatchedChunkIndex, int(p.maxChunkNumPerBatch))
if err != nil {
return err
return nil, err
}
if len(dbChunks) == 0 {
return nil
return nil, nil
}
maxChunksThisBatch := p.maxChunkNumPerBatch
@@ -187,88 +170,124 @@ func (p *BatchProposer) proposeBatch() error {
}
}
codecVersion := encoding.CodecV0
if dbChunks[0].StartBlockNumber >= p.banachForkHeight {
codecVersion = encoding.CodecV1
}
daChunks, err := p.getDAChunks(dbChunks)
if err != nil {
return err
return nil, err
}
dbParentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
parentDBBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
if err != nil {
return err
return nil, err
}
var batch encoding.Batch
batch.Index = dbParentBatch.Index + 1
batch.ParentBatchHash = common.HexToHash(dbParentBatch.Hash)
parentBatchEndBlockNumber := daChunks[0].Blocks[0].Header.Number.Uint64() - 1
parentBatchCodecVersion := encoding.CodecV0
if dbParentBatch.Index > 0 && parentBatchEndBlockNumber >= p.banachForkHeight {
parentBatchCodecVersion = encoding.CodecV1
}
batch.TotalL1MessagePoppedBefore, err = utils.GetTotalL1MessagePoppedBeforeBatch(dbParentBatch.BatchHeader, parentBatchCodecVersion)
if err != nil {
return err
if parentDBBatch != nil {
batch.Index = parentDBBatch.Index + 1
parentDABatch, err := codecv0.NewDABatchFromBytes(parentDBBatch.BatchHeader)
if err != nil {
return nil, err
}
batch.TotalL1MessagePoppedBefore = parentDABatch.TotalL1MessagePopped
batch.ParentBatchHash = common.HexToHash(parentDBBatch.Hash)
}
for i, chunk := range daChunks {
batch.Chunks = append(batch.Chunks, chunk)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(&batch)
if err != nil {
return nil, err
}
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
if metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
metrics.L1CommitBlobSize > maxBlobSize {
totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(&batch)
if err != nil {
return nil, err
}
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
// Check if the first chunk breaks hard limits.
// If so, it indicates a bug in the chunk-proposer; a manual fix is needed.
if i == 0 {
// The first chunk exceeds hard limits, which indicates a bug in the chunk-proposer; a manual fix is needed.
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v",
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, p.maxChunkNumPerBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize)
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
}
log.Debug("breaking limit condition in batching",
"currentL1CommitCalldataSize", metrics.L1CommitCalldataSize,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerBatch", p.maxL1CommitCalldataSizePerBatch,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)
batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]
metrics, err := utils.CalculateBatchMetrics(&batch, codecVersion)
totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(&batch)
if err != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", err)
return nil, err
}
p.recordBatchMetrics(metrics)
return p.updateDBBatchInfo(&batch, codecVersion)
totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(&batch)
if err != nil {
return nil, err
}
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.batchChunksNum.Set(float64(batch.NumChunks()))
return &batch, nil
}
}
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}
currentTimeSec := uint64(time.Now().Unix())
if metrics.FirstBlockTimestamp+p.batchTimeoutSec < currentTimeSec || metrics.NumChunks == maxChunksThisBatch {
log.Info("reached maximum number of chunks in batch or first block timeout",
"chunk count", metrics.NumChunks,
"start block number", dbChunks[0].StartBlockNumber,
"start block timestamp", dbChunks[0].StartBlockTime,
"current time", currentTimeSec)
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec ||
batch.NumChunks() == maxChunksThisBatch {
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"start block number", dbChunks[0].StartBlockNumber,
"start block timestamp", dbChunks[0].StartBlockTime,
"current time", currentTimeSec,
)
} else {
log.Info("reached maximum number of chunks in batch",
"chunk count", batch.NumChunks(),
)
}
totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(&batch)
if err != nil {
return nil, err
}
totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(&batch)
if err != nil {
return nil, err
}
p.batchFirstBlockTimeoutReached.Inc()
p.recordBatchMetrics(metrics)
return p.updateDBBatchInfo(&batch, codecVersion)
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.batchChunksNum.Set(float64(batch.NumChunks()))
return &batch, nil
}
log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
p.batchChunksProposeNotEnoughTotal.Inc()
return nil
return nil, nil
}
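// A worked example of the over-estimation guard used above: with
// gasCostIncreaseMultiplier = 1.2 (the value the tests configure), a batch
// whose estimated commit cost is 200_000 gas is compared against
// maxL1CommitGasPerBatch as uint64(1.2 * float64(200_000)), i.e. roughly
// 240_000 gas, keeping a 20% safety margin against underestimating the real
// L1 commit cost.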
func (p *BatchProposer) getDAChunks(dbChunks []*orm.Chunk) ([]*encoding.Chunk, error) {
@@ -285,10 +304,3 @@ func (p *BatchProposer) getDAChunks(dbChunks []*orm.Chunk) ([]*encoding.Chunk, e
}
return chunks, nil
}
func (p *BatchProposer) recordBatchMetrics(metrics *utils.BatchMetrics) {
p.totalL1CommitGas.Set(float64(metrics.L1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(metrics.L1CommitCalldataSize))
p.batchChunksNum.Set(float64(metrics.NumChunks))
p.totalL1CommitBlobSize.Set(float64(metrics.L1CommitBlobSize))
}

View File

@@ -2,12 +2,9 @@ package watcher
import (
"context"
"math"
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/assert"
@@ -19,7 +16,7 @@ import (
"scroll-tech/rollup/internal/orm"
)
func testBatchProposerCodecv0Limits(t *testing.T) {
func testBatchProposerLimits(t *testing.T) {
tests := []struct {
name string
maxChunkNum uint64
@@ -107,31 +104,8 @@ func testBatchProposerCodecv0Limits(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -148,7 +122,8 @@ func testBatchProposerCodecv0Limits(t *testing.T) {
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1)
assert.NoError(t, err)
assert.Equal(t, uint64(6042), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(298), chunks[0].TotalL1CommitCalldataSize)
@@ -166,142 +141,17 @@ func testBatchProposerCodecv0Limits(t *testing.T) {
}, db, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, tt.expectedBatchesLen+1)
batches = batches[1:]
if tt.expectedBatchesLen > 0 {
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
assert.NoError(t, err)
assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
for _, chunk := range dbChunks {
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}
}
})
}
}
func testBatchProposerCodecv1Limits(t *testing.T) {
tests := []struct {
name string
maxChunkNum uint64
batchTimeoutSec uint64
forkBlock *big.Int
expectedBatchesLen int
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
}{
{
name: "NoLimitReached",
maxChunkNum: 10,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "Timeout",
maxChunkNum: 10,
batchTimeoutSec: 0,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 2,
},
{
name: "MaxChunkNumPerBatchIs1",
maxChunkNum: 1,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "ForkBlockReached",
maxChunkNum: 10,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
forkBlock: big.NewInt(3),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV1)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV1)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BanachBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(0), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(0), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(0), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(0), chunks[1].TotalL1CommitCalldataSize)
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: tt.maxChunkNum,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
BatchTimeoutSec: tt.batchTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BanachBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
}, db, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, tt.expectedBatchesLen+1)
batches = batches[1:]
assert.Len(t, batches, tt.expectedBatchesLen)
if tt.expectedBatchesLen > 0 {
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
assert.Equal(t, tt.expectedChunksInFirstBatch-1, batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, tt.expectedChunksInFirstBatch-1)
assert.NoError(t, err)
assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
for _, chunk := range dbChunks {
@@ -317,31 +167,8 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -356,7 +183,8 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1)
assert.NoError(t, err)
assert.Equal(t, uint64(6042), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(298), chunks[0].TotalL1CommitCalldataSize)
@@ -372,16 +200,16 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
}, &params.ChainConfig{}, db, nil)
bp.TryProposeBatch()
batchOrm := orm.NewBatch(db)
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 2)
batches = batches[1:]
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
assert.Len(t, batches, 1)
assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
assert.Equal(t, uint64(1), batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1)
assert.NoError(t, err)
assert.Len(t, dbChunks, 2)
for _, chunk := range dbChunks {
@@ -392,76 +220,3 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
assert.Equal(t, uint64(258383), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(6035), batches[0].TotalL1CommitCalldataSize)
}
func testBatchProposerBlobSizeLimit(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV1)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV1)
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
}, &params.ChainConfig{BanachBlock: big.NewInt(0)}, db, nil)
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for total := int64(0); total < 7; total++ {
for i := int64(0); i < 10; i++ {
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = big.NewInt(total*10 + i + 1)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}
cp.TryProposeChunk()
}
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: math.MaxUint64,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
BatchTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
}, &params.ChainConfig{BanachBlock: big.NewInt(0)}, db, nil)
for i := 0; i < 10; i++ {
bp.TryProposeBatch()
}
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
batches = batches[1:]
assert.NoError(t, err)
assert.Len(t, batches, 4)
for i, batch := range batches {
expected := uint64(2 * (i + 1))
if expected > 7 {
expected = 7
}
assert.Equal(t, expected, batch.EndChunkIndex)
}
}

View File

@@ -3,7 +3,6 @@ package watcher
import (
"context"
"fmt"
"math"
"time"
"github.com/prometheus/client_golang/prometheus"
@@ -14,14 +13,12 @@ import (
"scroll-tech/common/forks"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)
const maxBlobSize = uint64(131072)
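// For reference: 131072 bytes = 4096 field elements of 32 bytes each, the
// fixed size of one EIP-4844 blob.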
// ChunkProposer proposes chunks based on available unchunked blocks.
type ChunkProposer struct {
ctx context.Context
@@ -38,7 +35,6 @@ type ChunkProposer struct {
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
forkHeights []uint64
banachForkHeight uint64
chunkProposerCircleTotal prometheus.Counter
proposeChunkFailureTotal prometheus.Counter
@@ -47,7 +43,6 @@ type ChunkProposer struct {
chunkTxNum prometheus.Gauge
chunkEstimateL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalL1CommitBlobSize prometheus.Gauge
maxTxConsumption prometheus.Gauge
chunkBlocksNum prometheus.Gauge
chunkFirstBlockTimeoutReached prometheus.Counter
@@ -66,7 +61,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"forkHeights", forkHeights)
p := &ChunkProposer{
return &ChunkProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
@@ -108,10 +103,6 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
Name: "rollup_propose_chunk_total_l1_commit_call_data_size",
Help: "The total l1 commit call data size",
}),
totalL1CommitBlobSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_total_l1_commit_blob_size",
Help: "The total l1 commit blob size",
}),
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_max_tx_consumption",
Help: "The max tx consumption",
@@ -129,36 +120,32 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
Help: "Total number of chunk block propose not enough",
}),
}
// If BanachBlock is not set in the chain's genesis config, banachForkHeight is effectively infinite,
// which means the chunk-proposer uses codecv0 by default.
// TODO: change this to the real fork name.
if chainCfg.BanachBlock != nil {
p.banachForkHeight = chainCfg.BanachBlock.Uint64()
} else {
p.banachForkHeight = math.MaxUint64
}
return p
}
// TryProposeChunk tries to propose a new chunk.
func (p *ChunkProposer) TryProposeChunk() {
p.chunkProposerCircleTotal.Inc()
if err := p.proposeChunk(); err != nil {
proposedChunk, err := p.proposeChunk()
if err != nil {
p.proposeChunkFailureTotal.Inc()
log.Error("propose new chunk failed", "err", err)
return
}
if err := p.updateChunkInfoInDB(proposedChunk); err != nil {
p.proposeChunkUpdateInfoFailureTotal.Inc()
log.Error("update chunk info in orm failed", "err", err)
}
}
func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion encoding.CodecVersion) error {
func (p *ChunkProposer) updateChunkInfoInDB(chunk *encoding.Chunk) error {
if chunk == nil {
return nil
}
p.proposeChunkUpdateInfoTotal.Inc()
err := p.db.Transaction(func(dbTX *gorm.DB) error {
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, codecVersion, dbTX)
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX)
if err != nil {
log.Warn("ChunkProposer.InsertChunk failed", "err", err)
return err
@@ -169,18 +156,13 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
}
return nil
})
if err != nil {
p.proposeChunkUpdateInfoFailureTotal.Inc()
log.Error("update chunk info in orm failed", "err", err)
return err
}
return nil
return err
}
func (p *ChunkProposer) proposeChunk() error {
func (p *ChunkProposer) proposeChunk() (*encoding.Chunk, error) {
unchunkedBlockHeight, err := p.chunkOrm.GetUnchunkedBlockHeight(p.ctx)
if err != nil {
return err
return nil, err
}
maxBlocksThisChunk := p.maxBlockNumPerChunk
@@ -192,91 +174,157 @@ func (p *ChunkProposer) proposeChunk() error {
// select at most maxBlocksThisChunk blocks
blocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, unchunkedBlockHeight, int(maxBlocksThisChunk))
if err != nil {
return err
return nil, err
}
if len(blocks) == 0 {
return nil
}
codecVersion := encoding.CodecV0
if blocks[0].Header.Number.Uint64() >= p.banachForkHeight {
codecVersion = encoding.CodecV1
return nil, nil
}
var chunk encoding.Chunk
for i, block := range blocks {
chunk.Blocks = append(chunk.Blocks, block)
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
crcMax, err := chunk.CrcMax()
if err != nil {
return nil, fmt.Errorf("failed to get crc max: %w", err)
}
overEstimatedL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
if metrics.TxNum > p.maxTxNumPerChunk ||
metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
overEstimatedL1CommitGas > p.maxL1CommitGasPerChunk ||
metrics.CrcMax > p.maxRowConsumptionPerChunk ||
metrics.L1CommitBlobSize > maxBlobSize {
totalTxNum := chunk.NumTransactions()
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(&chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size: %w", err)
}
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(&chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas: %w", err)
}
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if totalTxNum > p.maxTxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
crcMax > p.maxRowConsumptionPerChunk {
// Check if the first block breaks hard limits.
// If so, it indicates a bug in the sequencer; a manual fix is needed.
if i == 0 {
// The first block exceeds hard limits, which indicates a bug in the sequencer; a manual fix is needed.
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxTxNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxRowConsumption: %v, maxBlobSize: %v",
block.Header.Number, metrics, p.maxTxNumPerChunk, p.maxL1CommitCalldataSizePerChunk, p.maxL1CommitGasPerChunk, p.maxRowConsumptionPerChunk, maxBlobSize)
if totalTxNum > p.maxTxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
block.Header.Number,
totalTxNum,
p.maxTxNumPerChunk,
)
}
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
block.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
block.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
if crcMax > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, crc max: %v, limit: %v",
block.Header.Number,
crcMax,
p.maxRowConsumptionPerChunk,
)
}
}
log.Debug("breaking limit condition in chunking",
"txNum", metrics.TxNum,
"maxTxNum", p.maxTxNumPerChunk,
"l1CommitCalldataSize", metrics.L1CommitCalldataSize,
"maxL1CommitCalldataSize", p.maxL1CommitCalldataSizePerChunk,
"overEstimatedL1CommitGas", overEstimatedL1CommitGas,
"maxL1CommitGas", p.maxL1CommitGasPerChunk,
"rowConsumption", metrics.CrcMax,
"maxRowConsumption", p.maxRowConsumptionPerChunk,
"maxBlobSize", maxBlobSize)
"totalTxNum", totalTxNum,
"maxTxNumPerChunk", p.maxTxNumPerChunk,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
"chunkRowConsumptionMax", crcMax,
"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1]
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
crcMax, err := chunk.CrcMax()
if err != nil {
return nil, fmt.Errorf("failed to get crc max: %w", err)
}
p.recordChunkMetrics(metrics)
return p.updateDBChunkInfo(&chunk, codecVersion)
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(&chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size: %w", err)
}
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(&chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas: %w", err)
}
p.chunkTxNum.Set(float64(chunk.NumTransactions()))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.maxTxConsumption.Set(float64(crcMax))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
}
}
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
}
currentTimeSec := uint64(time.Now().Unix())
if metrics.FirstBlockTimestamp+p.chunkTimeoutSec < currentTimeSec || metrics.NumBlocks == maxBlocksThisChunk {
log.Info("reached maximum number of blocks in chunk or first block timeout",
"start block number", chunk.Blocks[0].Header.Number,
"block count", len(chunk.Blocks),
"block number", chunk.Blocks[0].Header.Number,
"block timestamp", metrics.FirstBlockTimestamp,
"current time", currentTimeSec)
if chunk.Blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec ||
uint64(len(chunk.Blocks)) == maxBlocksThisChunk {
if chunk.Blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"block number", chunk.Blocks[0].Header.Number,
"block timestamp", chunk.Blocks[0].Header.Time,
"current time", currentTimeSec,
)
} else {
log.Info("reached maximum number of blocks in chunk",
"start block number", chunk.Blocks[0].Header.Number,
"block count", len(chunk.Blocks),
)
}
crcMax, err := chunk.CrcMax()
if err != nil {
return nil, fmt.Errorf("failed to get crc max: %w", err)
}
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(&chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size: %w", err)
}
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(&chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas: %w", err)
}
p.chunkFirstBlockTimeoutReached.Inc()
p.recordChunkMetrics(metrics)
return p.updateDBChunkInfo(&chunk, codecVersion)
p.chunkTxNum.Set(float64(chunk.NumTransactions()))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.maxTxConsumption.Set(float64(crcMax))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
}
log.Debug("pending blocks do not reach one of the constraints or contain a timeout block")
p.chunkBlocksProposeNotEnoughTotal.Inc()
return nil
}
func (p *ChunkProposer) recordChunkMetrics(metrics *utils.ChunkMetrics) {
p.chunkTxNum.Set(float64(metrics.TxNum))
p.maxTxConsumption.Set(float64(metrics.CrcMax))
p.chunkBlocksNum.Set(float64(metrics.NumBlocks))
p.totalL1CommitCalldataSize.Set(float64(metrics.L1CommitCalldataSize))
p.chunkEstimateL1CommitGas.Set(float64(metrics.L1CommitGas))
p.totalL1CommitBlobSize.Set(float64(metrics.L1CommitBlobSize))
return nil, nil
}
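For reference, after this restructuring proposeChunk only selects and returns a chunk, while persistence stays in updateDBChunkInfo. A minimal sketch of how the TryProposeChunk entry point (exercised by the tests below) can wire the two together, assuming updateDBChunkInfo now takes only the chunk; this is illustrative, not the exact source:

func (p *ChunkProposer) TryProposeChunk() {
	chunk, err := p.proposeChunk()
	if err != nil {
		log.Error("propose chunk failed", "err", err)
		return
	}
	if chunk == nil {
		// no constraint reached and no timeout yet; wait for more pending blocks
		return
	}
	if err := p.updateDBChunkInfo(chunk); err != nil {
		log.Error("update chunk info in orm failed", "err", err)
	}
}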

View File

@@ -5,7 +5,6 @@ import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common/math"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/assert"
@@ -16,7 +15,7 @@ import (
"scroll-tech/rollup/internal/orm"
)
func testChunkProposerCodecv0Limits(t *testing.T) {
func testChunkProposerLimits(t *testing.T) {
tests := []struct {
name string
maxBlockNum uint64
@@ -199,164 +198,3 @@ func testChunkProposerCodecv0Limits(t *testing.T) {
})
}
}
func testChunkProposerCodecv1Limits(t *testing.T) {
tests := []struct {
name string
maxBlockNum uint64
maxTxNum uint64
maxRowConsumption uint64
chunkTimeoutSec uint64
forkBlock *big.Int
expectedChunksLen int
expectedBlocksInFirstChunk int // only checked when expectedChunksLen > 0
}{
{
name: "NoLimitReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "Timeout",
maxBlockNum: 100,
maxTxNum: 10000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 0,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 2,
},
{
name: "MaxTxNumPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 0,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxRowConsumptionPerChunkIs0",
maxBlockNum: 100,
maxTxNum: 10000,
maxRowConsumption: 0,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxBlockNumPerChunkIs1",
maxBlockNum: 1,
maxTxNum: 10000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxTxNumPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 2,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxRowConsumptionPerChunkIs1",
maxBlockNum: 10,
maxTxNum: 10000,
maxRowConsumption: 1,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "ForkBlockReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
forkBlock: big.NewInt(2),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: tt.maxBlockNum,
MaxTxNumPerChunk: tt.maxTxNum,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
ChunkTimeoutSec: tt.chunkTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{BanachBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock}, db, nil)
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, tt.expectedChunksLen)
if len(chunks) > 0 {
blockOrm := orm.NewL2Block(db)
chunkHashes, err := blockOrm.GetChunkHashes(context.Background(), tt.expectedBlocksInFirstChunk)
assert.NoError(t, err)
assert.Len(t, chunkHashes, tt.expectedBlocksInFirstChunk)
firstChunkHash := chunks[0].Hash
for _, chunkHash := range chunkHashes {
assert.Equal(t, firstChunkHash, chunkHash)
}
}
})
}
}
func testChunkProposerCodecv1BlobSizeLimit(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
block := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
for i := int64(0); i < 2000; i++ {
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = big.NewInt(i + 1)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
}, &params.ChainConfig{BanachBlock: big.NewInt(0)}, db, nil)
for i := 0; i < 10; i++ {
cp.TryProposeChunk()
}
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 4)
for i, chunk := range chunks {
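// with this block trace, the blob size limit caps each chunk at 551 blocks, so chunk end blocks advance by 551 until the 2000-block backlog is exhausted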
expected := uint64(551 * (i + 1))
if expected > 2000 {
expected = 2000
}
assert.Equal(t, expected, chunk.EndBlockNumber)
}
}

View File

@@ -59,7 +59,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
}
}
const blocksFetchLimit = uint64(10)
const blockTracesFetchLimit = uint64(10)
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
@@ -71,8 +71,8 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
}
// Fetch and store block traces for missing blocks
for from := heightInDB + 1; from <= blockHeight; from += blocksFetchLimit {
to := from + blocksFetchLimit - 1
for from := heightInDB + 1; from <= blockHeight; from += blockTracesFetchLimit {
to := from + blockTracesFetchLimit - 1
if to > blockHeight {
to = blockHeight
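The loop above walks the missing range in fixed-size windows and clamps the last window to the chain head. The same pattern as a self-contained helper (illustrative; the real method also fetches the traces for each window and stores them via the ORM):

// fetchInWindows visits [start, end] in windows of at most `limit` blocks.
func fetchInWindows(start, end, limit uint64, fetch func(from, to uint64) error) error {
	for from := start; from <= end; from += limit {
		to := from + limit - 1
		if to > end {
			to = end // clamp the final window to the upper bound
		}
		if err := fetch(from, to); err != nil {
			return err
		}
	}
	return nil
}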

View File

@@ -57,9 +57,25 @@ func setupEnv(t *testing.T) (err error) {
l2Cli, err = base.L2Client()
assert.NoError(t, err)
block1 = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
block2 = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
if err != nil {
return err
}
// unmarshal blockTrace
block1 = &encoding.Block{}
if err = json.Unmarshal(templateBlockTrace1, block1); err != nil {
return err
}
templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
if err != nil {
return err
}
// unmarshal blockTrace
block2 = &encoding.Block{}
if err = json.Unmarshal(templateBlockTrace2, block2); err != nil {
return err
}
return err
}
@@ -97,22 +113,9 @@ func TestFunction(t *testing.T) {
t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)
// Run chunk proposer test cases.
t.Run("TestChunkProposerCodecv0Limits", testChunkProposerCodecv0Limits)
t.Run("TestChunkProposerCodecv1Limits", testChunkProposerCodecv1Limits)
t.Run("TestChunkProposerCodecv1BlobSizeLimit", testChunkProposerCodecv1BlobSizeLimit)
t.Run("TestChunkProposerLimits", testChunkProposerLimits)
// Run batch proposer test cases.
t.Run("TestBatchProposerCodecv0Limits", testBatchProposerCodecv0Limits)
t.Run("TestBatchProposerCodecv1Limits", testBatchProposerCodecv1Limits)
t.Run("TestBatchProposerLimits", testBatchProposerLimits)
t.Run("TestBatchCommitGasAndCalldataSizeEstimation", testBatchCommitGasAndCalldataSizeEstimation)
t.Run("TestBatchProposerBlobSizeLimit", testBatchProposerBlobSizeLimit)
}
func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
data, err := os.ReadFile(filename)
assert.NoError(t, err)
block := &encoding.Block{}
assert.NoError(t, json.Unmarshal(data, block))
return block
}

View File

@@ -7,15 +7,14 @@ import (
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
rutils "scroll-tech/rollup/internal/utils"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
)
// Batch represents a batch of chunks.
@@ -137,6 +136,9 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
var latestBatch Batch
if err := db.First(&latestBatch).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
}
return &latestBatch, nil
@@ -147,7 +149,12 @@ func (o *Batch) GetFirstUnbatchedChunkIndex(ctx context.Context) (uint64, error)
// Get the latest batch
latestBatch, err := o.GetLatestBatch(ctx)
if err != nil {
return 0, fmt.Errorf("Batch.GetFirstUnbatchedChunkIndex error: %w", err)
return 0, fmt.Errorf("Chunk.GetChunkedBlockHeight error: %w", err)
}
// if parentBatch==nil then err==gorm.ErrRecordNotFound,
// which means there is no batched chunk yet, so return 0
if latestBatch == nil {
return 0, nil
}
return latestBatch.EndChunkIndex + 1, nil
}
@@ -219,7 +226,7 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
}
// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Batch, error) {
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...*gorm.DB) (*Batch, error) {
if batch == nil {
return nil, errors.New("invalid args: batch is nil")
}
@@ -229,48 +236,94 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
return nil, errors.New("invalid args: batch contains 0 chunk")
}
metrics, err := rutils.CalculateBatchMetrics(batch, codecVersion)
daBatch, err := codecv0.NewDABatch(batch)
if err != nil {
log.Error("failed to calculate batch metrics",
log.Error("failed to create new DA batch",
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, err
}
totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(batch)
if err != nil {
log.Error("failed to estimate batch L1 commit gas",
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", len(batch.Chunks), "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(batch)
if err != nil {
log.Error("failed to estimate batch L1 commit calldata size",
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", len(batch.Chunks), "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
var startChunkIndex uint64
if batch.Index > 0 {
parentBatch, getErr := o.GetBatchByIndex(ctx, batch.Index-1)
if getErr != nil {
log.Error("failed to get batch by index", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", getErr)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", getErr)
}
parentBatch, err := o.GetLatestBatch(ctx)
if err != nil {
log.Error("failed to get latest batch", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
// if parentBatch==nil then err==gorm.ErrRecordNotFound, which means there's
// no batch record in the db, so we use default empty values for the new batch;
// if parentBatch!=nil then err==nil, and we fill the parentBatch-related data into the new batch
if parentBatch != nil {
startChunkIndex = parentBatch.EndChunkIndex + 1
}
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion)
startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to get batch metadata", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
log.Error("failed to create start DA chunk", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
startDAChunkHash, err := startDAChunk.Hash()
if err != nil {
log.Error("failed to get start DA chunk hash", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
totalL1MessagePoppedBeforeEndDAChunk := batch.TotalL1MessagePoppedBefore
for i := uint64(0); i < numChunks-1; i++ {
totalL1MessagePoppedBeforeEndDAChunk += batch.Chunks[i].NumL1Messages(totalL1MessagePoppedBeforeEndDAChunk)
}
endDAChunk, err := codecv0.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
log.Error("failed to create end DA chunk", "index", batch.Index, "total l1 message popped before", totalL1MessagePoppedBeforeEndDAChunk,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
endDAChunkHash, err := endDAChunk.Hash()
if err != nil {
log.Error("failed to get end DA chunk hash", "index", batch.Index, "total l1 message popped before", totalL1MessagePoppedBeforeEndDAChunk,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
newBatch := Batch{
Index: batch.Index,
Hash: batchMeta.BatchHash.Hex(),
StartChunkHash: batchMeta.StartChunkHash.Hex(),
Hash: daBatch.Hash().Hex(),
StartChunkHash: startDAChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: batchMeta.EndChunkHash.Hex(),
EndChunkHash: endDAChunkHash.Hex(),
EndChunkIndex: startChunkIndex + numChunks - 1,
StateRoot: batch.StateRoot().Hex(),
WithdrawRoot: batch.WithdrawRoot().Hex(),
ParentBatchHash: batch.ParentBatchHash.Hex(),
BatchHeader: batchMeta.BatchBytes,
BatchHeader: daBatch.Encode(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
TotalL1CommitGas: metrics.L1CommitGas,
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
TotalL1CommitGas: totalL1CommitGas,
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
}
db := o.db

View File

@@ -8,8 +8,7 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/rollup/internal/utils"
"scroll-tech/common/types/encoding/codecv0"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
@@ -111,7 +110,7 @@ func (o *Chunk) GetUnchunkedBlockHeight(ctx context.Context) (uint64, error) {
// Get the latest chunk
latestChunk, err := o.getLatestChunk(ctx)
if err != nil {
return 0, fmt.Errorf("Chunk.GetUnchunkedBlockHeight error: %w", err)
return 0, fmt.Errorf("Chunk.GetChunkedBlockHeight error: %w", err)
}
if latestChunk == nil {
// if there is no chunk, return block number 1,
@@ -141,7 +140,7 @@ func (o *Chunk) GetChunksGEIndex(ctx context.Context, index uint64, limit int) (
}
// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Chunk, error) {
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
return nil, errors.New("invalid args")
}
@@ -166,30 +165,42 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
parentChunkStateRoot = parentChunk.StateRoot
}
metrics, err := utils.CalculateChunkMetrics(chunk, codecVersion)
daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to calculate chunk metrics", "err", err)
log.Error("failed to initialize new DA chunk", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
chunkHash, err := utils.GetChunkHash(chunk, totalL1MessagePoppedBefore, codecVersion)
daChunkHash, err := daChunk.Hash()
if err != nil {
log.Error("failed to get chunk hash", "err", err)
log.Error("failed to get DA chunk hash", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(chunk)
if err != nil {
log.Error("failed to estimate chunk L1 commit calldata size", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(chunk)
if err != nil {
log.Error("failed to estimate chunk L1 commit gas", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
numBlocks := len(chunk.Blocks)
newChunk := Chunk{
Index: chunkIndex,
Hash: chunkHash.Hex(),
Hash: daChunkHash.Hex(),
StartBlockNumber: chunk.Blocks[0].Header.Number.Uint64(),
StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: chunk.L2GasUsed(),
TotalL2TxNum: chunk.NumL2Transactions(),
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
TotalL1CommitGas: metrics.L1CommitGas,
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
TotalL1CommitGas: totalL1CommitGas,
StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),

View File

@@ -17,7 +17,6 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/common/types/encoding/codecv1"
"scroll-tech/database/migrate"
)
@@ -30,8 +29,12 @@ var (
batchOrm *Batch
pendingTransactionOrm *PendingTransaction
block1 *encoding.Block
block2 *encoding.Block
block1 *encoding.Block
block2 *encoding.Block
chunk1 *encoding.Chunk
chunk2 *encoding.Chunk
chunkHash1 common.Hash
chunkHash2 common.Hash
)
func TestMain(m *testing.M) {
@@ -74,6 +77,18 @@ func setupEnv(t *testing.T) {
block2 = &encoding.Block{}
err = json.Unmarshal(templateBlockTrace, block2)
assert.NoError(t, err)
chunk1 = &encoding.Chunk{Blocks: []*encoding.Block{block1}}
daChunk1, err := codecv0.NewDAChunk(chunk1, 0)
assert.NoError(t, err)
chunkHash1, err = daChunk1.Hash()
assert.NoError(t, err)
chunk2 = &encoding.Chunk{Blocks: []*encoding.Block{block2}}
daChunk2, err := codecv0.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
assert.NoError(t, err)
chunkHash2, err = daChunk2.Hash()
assert.NoError(t, err)
}
func tearDownEnv(t *testing.T) {
@@ -165,196 +180,147 @@ func TestL2BlockOrm(t *testing.T) {
}
func TestChunkOrm(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
for _, codecVersion := range codecVersions {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
var chunkHash1 common.Hash
var chunkHash2 common.Hash
if codecVersion == encoding.CodecV0 {
daChunk1, createErr := codecv0.NewDAChunk(chunk1, 0)
assert.NoError(t, createErr)
chunkHash1, err = daChunk1.Hash()
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
daChunk2, createErr := codecv0.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
assert.NoError(t, createErr)
chunkHash2, err = daChunk2.Hash()
assert.NoError(t, err)
} else {
daChunk1, createErr := codecv1.NewDAChunk(chunk1, 0)
assert.NoError(t, createErr)
chunkHash1, err = daChunk1.Hash()
assert.NoError(t, err)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
daChunk2, createErr := codecv1.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
assert.NoError(t, createErr)
chunkHash2, err = daChunk2.Hash()
assert.NoError(t, err)
}
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, "", chunks[0].BatchHash)
assert.Equal(t, "", chunks[1].BatchHash)
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash1.Hex(), types.ProvingTaskVerified)
assert.NoError(t, err)
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash2.Hex(), types.ProvingTaskAssigned)
assert.NoError(t, err)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, "", chunks[0].BatchHash)
assert.Equal(t, "", chunks[1].BatchHash)
chunks, err = chunkOrm.GetChunksInRange(context.Background(), 0, 1)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(chunks[0].ProvingStatus))
assert.Equal(t, types.ProvingTaskAssigned, types.ProvingStatus(chunks[1].ProvingStatus))
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash1.Hex(), types.ProvingTaskVerified)
assert.NoError(t, err)
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash2.Hex(), types.ProvingTaskAssigned)
assert.NoError(t, err)
chunks, err = chunkOrm.GetChunksInRange(context.Background(), 0, 1)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(chunks[0].ProvingStatus))
assert.Equal(t, types.ProvingTaskAssigned, types.ProvingStatus(chunks[1].ProvingStatus))
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash")
assert.NoError(t, err)
chunks, err = chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, "test hash", chunks[0].BatchHash)
assert.Equal(t, "", chunks[1].BatchHash)
}
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash")
assert.NoError(t, err)
chunks, err = chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, "test hash", chunks[0].BatchHash)
assert.Equal(t, "", chunks[1].BatchHash)
}
func TestBatchOrm(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
for _, codecVersion := range codecVersions {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1},
}
batch1, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)
hash1 := batch1.Hash
batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
assert.NoError(t, err)
var batchHash1 string
if codecVersion == encoding.CodecV0 {
daBatch1, createErr := codecv0.NewDABatchFromBytes(batch1.BatchHeader)
assert.NoError(t, createErr)
batchHash1 = daBatch1.Hash().Hex()
} else {
daBatch1, createErr := codecv1.NewDABatchFromBytes(batch1.BatchHeader)
assert.NoError(t, createErr)
batchHash1 = daBatch1.Hash().Hex()
}
assert.Equal(t, hash1, batchHash1)
batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk2},
}
batch2, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)
hash2 := batch2.Hash
batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err)
var batchHash2 string
if codecVersion == encoding.CodecV0 {
daBatch2, createErr := codecv0.NewDABatchFromBytes(batch2.BatchHeader)
assert.NoError(t, createErr)
batchHash2 = daBatch2.Hash().Hex()
} else {
daBatch2, createErr := codecv1.NewDABatchFromBytes(batch2.BatchHeader)
assert.NoError(t, createErr)
batchHash2 = daBatch2.Hash().Hex()
}
assert.Equal(t, hash2, batchHash2)
count, err := batchOrm.GetBatchCount(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(2), count)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitFailed)
assert.NoError(t, err)
pendingBatches, err := batchOrm.GetFailedAndPendingBatches(context.Background(), 100)
assert.NoError(t, err)
assert.Equal(t, 2, len(pendingBatches))
rollupStatus, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash1, batchHash2})
assert.NoError(t, err)
assert.Equal(t, 2, len(rollupStatus))
assert.Equal(t, types.RollupCommitFailed, rollupStatus[0])
assert.Equal(t, types.RollupPending, rollupStatus[1])
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
assert.Error(t, err)
assert.Nil(t, dbProof)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupFinalized)
assert.NoError(t, err)
err = batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(context.Background(), batchHash2, types.GasOracleImported, "oracleTxHash")
assert.NoError(t, err)
updatedBatch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus))
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus))
assert.Equal(t, "oracleTxHash", updatedBatch.OracleTxHash)
err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash2, "commitTxHash", types.RollupCommitted)
assert.NoError(t, err)
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
assert.Equal(t, types.RollupCommitted, types.RollupStatus(updatedBatch.RollupStatus))
err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed)
assert.NoError(t, err)
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash)
assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus))
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1},
}
batch1, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
hash1 := batch1.Hash
batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
assert.NoError(t, err)
daBatch1, err := codecv0.NewDABatchFromBytes(batch1.BatchHeader)
assert.NoError(t, err)
batchHash1 := daBatch1.Hash().Hex()
assert.Equal(t, hash1, batchHash1)
batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1},
}
batch2, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
hash2 := batch2.Hash
batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err)
daBatch2, err := codecv0.NewDABatchFromBytes(batch2.BatchHeader)
assert.NoError(t, err)
batchHash2 := daBatch2.Hash().Hex()
assert.Equal(t, hash2, batchHash2)
count, err := batchOrm.GetBatchCount(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(2), count)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitFailed)
assert.NoError(t, err)
pendingBatches, err := batchOrm.GetFailedAndPendingBatches(context.Background(), 100)
assert.NoError(t, err)
assert.Equal(t, 2, len(pendingBatches))
rollupStatus, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash1, batchHash2})
assert.NoError(t, err)
assert.Equal(t, 2, len(rollupStatus))
assert.Equal(t, types.RollupCommitFailed, rollupStatus[0])
assert.Equal(t, types.RollupPending, rollupStatus[1])
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
assert.Error(t, err)
assert.Nil(t, dbProof)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupFinalized)
assert.NoError(t, err)
err = batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(context.Background(), batchHash2, types.GasOracleImported, "oracleTxHash")
assert.NoError(t, err)
updatedBatch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus))
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus))
assert.Equal(t, "oracleTxHash", updatedBatch.OracleTxHash)
err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash2, "commitTxHash", types.RollupCommitted)
assert.NoError(t, err)
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
assert.Equal(t, types.RollupCommitted, types.RollupStatus(updatedBatch.RollupStatus))
err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed)
assert.NoError(t, err)
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash)
assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus))
}
func TestPendingTransactionOrm(t *testing.T) {
func TestTransactionOrm(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

View File

@@ -9,10 +9,6 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/common/types/encoding/codecv1"
bridgeAbi "scroll-tech/rollup/abi"
)
@@ -67,228 +63,3 @@ func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
// ChunkMetrics indicates the metrics for proposing a chunk.
type ChunkMetrics struct {
// common metrics
NumBlocks uint64
TxNum uint64
CrcMax uint64
FirstBlockTimestamp uint64
// codecv0 metrics, default 0 for codecv1
L1CommitCalldataSize uint64
L1CommitGas uint64
// codecv1 metrics, default 0 for codecv0
L1CommitBlobSize uint64
}
// CalculateChunkMetrics calculates chunk metrics.
func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVersion) (*ChunkMetrics, error) {
var err error
metrics := &ChunkMetrics{
TxNum: chunk.NumTransactions(),
NumBlocks: uint64(len(chunk.Blocks)),
FirstBlockTimestamp: chunk.Blocks[0].Header.Time,
}
metrics.CrcMax, err = chunk.CrcMax()
if err != nil {
return nil, fmt.Errorf("failed to get crc max: %w", err)
}
switch codecVersion {
case encoding.CodecV0:
metrics.L1CommitCalldataSize, err = codecv0.EstimateChunkL1CommitCalldataSize(chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size: %w", err)
}
metrics.L1CommitGas, err = codecv0.EstimateChunkL1CommitGas(chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas: %w", err)
}
return metrics, nil
case encoding.CodecV1:
metrics.L1CommitBlobSize, err = codecv1.EstimateChunkL1CommitBlobSize(chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit blob size: %w", err)
}
return metrics, nil
default:
return nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
}
}
// BatchMetrics indicates the metrics for proposing a batch.
type BatchMetrics struct {
// common metrics
NumChunks uint64
FirstBlockTimestamp uint64
// codecv0 metrics, default 0 for codecv1
L1CommitCalldataSize uint64
L1CommitGas uint64
// codecv1 metrics, default 0 for codecv0
L1CommitBlobSize uint64
}
// CalculateBatchMetrics calculates batch metrics.
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetrics, error) {
var err error
metrics := &BatchMetrics{
NumChunks: uint64(len(batch.Chunks)),
FirstBlockTimestamp: batch.Chunks[0].Blocks[0].Header.Time,
}
switch codecVersion {
case encoding.CodecV0:
metrics.L1CommitGas, err = codecv0.EstimateBatchL1CommitGas(batch)
if err != nil {
return nil, fmt.Errorf("failed to estimate batch L1 commit gas: %w", err)
}
metrics.L1CommitCalldataSize, err = codecv0.EstimateBatchL1CommitCalldataSize(batch)
if err != nil {
return nil, fmt.Errorf("failed to estimate batch L1 commit calldata size: %w", err)
}
return metrics, nil
case encoding.CodecV1:
metrics.L1CommitBlobSize, err = codecv1.EstimateBatchL1CommitBlobSize(batch)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit blob size: %w", err)
}
return metrics, nil
default:
return nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
}
}
// GetChunkHash retrieves the hash of a chunk.
func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, codecVersion encoding.CodecVersion) (common.Hash, error) {
switch codecVersion {
case encoding.CodecV0:
daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to create codecv0 DA chunk: %w", err)
}
chunkHash, err := daChunk.Hash()
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get codecv0 DA chunk hash: %w", err)
}
return chunkHash, nil
case encoding.CodecV1:
daChunk, err := codecv1.NewDAChunk(chunk, totalL1MessagePoppedBefore)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to create codecv1 DA chunk: %w", err)
}
chunkHash, err := daChunk.Hash()
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get codecv1 DA chunk hash: %w", err)
}
return chunkHash, nil
default:
return common.Hash{}, fmt.Errorf("unsupported codec version: %v", codecVersion)
}
}
// BatchMetadata represents the metadata of a batch.
type BatchMetadata struct {
BatchHash common.Hash
BatchBytes []byte
StartChunkHash common.Hash
EndChunkHash common.Hash
}
// GetBatchMetadata retrieves the metadata of a batch.
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetadata, error) {
numChunks := len(batch.Chunks)
totalL1MessagePoppedBeforeEndDAChunk := batch.TotalL1MessagePoppedBefore
for i := 0; i < numChunks-1; i++ {
totalL1MessagePoppedBeforeEndDAChunk += batch.Chunks[i].NumL1Messages(totalL1MessagePoppedBeforeEndDAChunk)
}
switch codecVersion {
case encoding.CodecV0:
daBatch, err := codecv0.NewDABatch(batch)
if err != nil {
return nil, fmt.Errorf("failed to create codecv0 DA batch: %w", err)
}
batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchBytes: daBatch.Encode(),
}
startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, fmt.Errorf("failed to create codecv0 start DA chunk: %w", err)
}
batchMeta.StartChunkHash, err = startDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv0 start DA chunk hash: %w", err)
}
endDAChunk, err := codecv0.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
return nil, fmt.Errorf("failed to create codecv0 end DA chunk: %w", err)
}
batchMeta.EndChunkHash, err = endDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv0 end DA chunk hash: %w", err)
}
return batchMeta, nil
case encoding.CodecV1:
daBatch, err := codecv1.NewDABatch(batch)
if err != nil {
return nil, fmt.Errorf("failed to create codecv1 DA batch: %w", err)
}
batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchBytes: daBatch.Encode(),
}
startDAChunk, err := codecv1.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, fmt.Errorf("failed to create codecv1 start DA chunk: %w", err)
}
batchMeta.StartChunkHash, err = startDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv1 start DA chunk hash: %w", err)
}
endDAChunk, err := codecv1.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
return nil, fmt.Errorf("failed to create codecv1 end DA chunk: %w", err)
}
batchMeta.EndChunkHash, err = endDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv1 end DA chunk hash: %w", err)
}
return batchMeta, nil
default:
return nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
}
}
// GetTotalL1MessagePoppedBeforeBatch retrieves the total L1 messages popped before the batch.
func GetTotalL1MessagePoppedBeforeBatch(parentBatchBytes []byte, codecVersion encoding.CodecVersion) (uint64, error) {
switch codecVersion {
case encoding.CodecV0:
parentDABatch, err := codecv0.NewDABatchFromBytes(parentBatchBytes)
if err != nil {
return 0, fmt.Errorf("failed to create parent DA batch from bytes using codecv0, err: %w", err)
}
return parentDABatch.TotalL1MessagePopped, nil
case encoding.CodecV1:
parentDABatch, err := codecv1.NewDABatchFromBytes(parentBatchBytes)
if err != nil {
return 0, fmt.Errorf("failed to create parent DA batch from bytes using codecv1, err: %w", err)
}
return parentDABatch.TotalL1MessagePopped, nil
default:
return 0, fmt.Errorf("unsupported codec version: %v", codecVersion)
}
}
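Taken together, the helpers removed above gave callers a single entry point that dispatches on the codec version. A usage sketch based only on the signatures shown; summarizeChunk is a hypothetical caller, not part of the source:

// summarizeChunk derives both the metrics and the hash of a chunk for a given codec version.
func summarizeChunk(chunk *encoding.Chunk, poppedBefore uint64, v encoding.CodecVersion) (common.Hash, *ChunkMetrics, error) {
	metrics, err := CalculateChunkMetrics(chunk, v)
	if err != nil {
		return common.Hash{}, nil, err
	}
	hash, err := GetChunkHash(chunk, poppedBefore, v)
	if err != nil {
		return common.Hash{}, nil, err
	}
	// CodecV0 fills the calldata/gas fields; CodecV1 fills L1CommitBlobSize.
	return hash, metrics, nil
}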

View File

@@ -1,607 +1,346 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity =0.8.24;
import {BatchHeaderV0Codec} from "../../../contracts/src/libraries/codec/BatchHeaderV0Codec.sol";
import {BatchHeaderV1Codec} from "../../../contracts/src/libraries/codec/BatchHeaderV1Codec.sol";
import {ChunkCodecV0} from "../../../contracts/src/libraries/codec/ChunkCodecV0.sol";
import {ChunkCodecV1} from "../../../contracts/src/libraries/codec/ChunkCodecV1.sol";
import {BatchHeaderV0Codec} from "../../contracts/src/libraries/codec/BatchHeaderV0Codec.sol";
import {ChunkCodecV0} from "../../contracts/src/libraries/codec/ChunkCodecV0.sol";
import {IL1MessageQueue} from "../../contracts/src/L1/rollup/IL1MessageQueue.sol";
contract MockBridge {
/// @dev Thrown when committing a committed batch.
error ErrorBatchIsAlreadyCommitted();
/******************************
* Events from L1MessageQueue *
******************************/
/// @dev Thrown when finalizing a verified batch.
error ErrorBatchIsAlreadyVerified();
/// @notice Emitted when a new L1 => L2 transaction is appended to the queue.
/// @param sender The address of the account that initiates the transaction.
/// @param target The address of the account that will receive the transaction.
/// @param value The value passed with the transaction.
/// @param queueIndex The index of this transaction in the queue.
/// @param gasLimit Gas limit required to complete the message relay on L2.
/// @param data The calldata of the transaction.
event QueueTransaction(
address indexed sender,
address indexed target,
uint256 value,
uint64 queueIndex,
uint256 gasLimit,
bytes data
);
/// @dev Thrown when committing empty batch (batch without chunks)
error ErrorBatchIsEmpty();
/*********************************
* Events from L1ScrollMessenger *
*********************************/
/// @dev Thrown when call precompile failed.
error ErrorCallPointEvaluationPrecompileFailed();
/// @notice Emitted when a cross domain message is sent.
/// @param sender The address of the sender who initiates the message.
/// @param target The address of target contract to call.
/// @param value The amount of value passed to the target contract.
/// @param messageNonce The nonce of the message.
/// @param gasLimit The optional gas limit passed to L1 or L2.
/// @param message The calldata passed to the target contract.
event SentMessage(
address indexed sender,
address indexed target,
uint256 value,
uint256 messageNonce,
uint256 gasLimit,
bytes message
);
/// @dev Thrown when the transaction has multiple blobs.
error ErrorFoundMultipleBlob();
/// @notice Emitted when a cross domain message is relayed successfully.
/// @param messageHash The hash of the message.
event RelayedMessage(bytes32 indexed messageHash);
/// @dev Thrown when some fields are not zero in genesis batch.
error ErrorGenesisBatchHasNonZeroField();
/***************************
* Events from ScrollChain *
***************************/
/// @dev Thrown when importing genesis batch twice.
error ErrorGenesisBatchImported();
/// @notice Emitted when a new batch is committed.
/// @param batchHash The hash of the batch.
event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash);
/// @dev Thrown when data hash in genesis batch is zero.
error ErrorGenesisDataHashIsZero();
/// @notice Emitted when a batch is finalized.
/// @param batchHash The hash of the batch
/// @param stateRoot The state root on layer 2 after this batch.
/// @param withdrawRoot The withdraw merkle root on layer 2 after this batch.
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
/// @dev Thrown when the parent batch hash in genesis batch is zero.
error ErrorGenesisParentBatchHashIsNonZero();
/***********
* Structs *
***********/
/// @dev Thrown when the l2 transaction is incomplete.
error ErrorIncompleteL2TransactionData();
struct L2MessageProof {
// The index of the batch where the message belongs to.
uint256 batchIndex;
// Concatenation of merkle proof for withdraw merkle trie.
bytes merkleProof;
}
/// @dev Thrown when the batch hash is incorrect.
error ErrorIncorrectBatchHash();
/*************
* Variables *
*************/
/// @dev Thrown when the batch index is incorrect.
error ErrorIncorrectBatchIndex();
uint256 public messageNonce;
mapping(uint256 => bytes32) public committedBatches;
uint256 public l2BaseFee;
/// @dev Thrown when the previous state root doesn't match stored one.
error ErrorIncorrectPreviousStateRoot();
/***********************************
* Functions from L2GasPriceOracle *
***********************************/
/// @dev Thrown when the batch header version is invalid.
error ErrorInvalidBatchHeaderVersion();
function setL2BaseFee(uint256 _newL2BaseFee) external {
l2BaseFee = _newL2BaseFee;
}
/// @dev Thrown when no blob found in the transaction.
error ErrorNoBlobFound();
/************************************
* Functions from L1ScrollMessenger *
************************************/
/// @dev Thrown when the number of transactions is less than the number of L1 messages in one block.
error ErrorNumTxsLessThanNumL1Msgs();
/// @dev Thrown when the given previous state is zero.
error ErrorPreviousStateRootIsZero();
/// @dev Thrown when the given state root is zero.
error ErrorStateRootIsZero();
/// @dev Thrown when a chunk contains too many transactions.
error ErrorTooManyTxsInOneChunk();
/// @dev Thrown when the precompile output is incorrect.
error ErrorUnexpectedPointEvaluationPrecompileOutput();
event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash);
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
struct L2MessageProof {
uint256 batchIndex;
bytes merkleProof;
}
/// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
/// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
/// point evaluation precompile
uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
uint256 public l2BaseFee;
uint256 public lastFinalizedBatchIndex;
mapping(uint256 => bytes32) public committedBatches;
mapping(uint256 => bytes32) public finalizedStateRoots;
mapping(uint256 => bytes32) public withdrawRoots;
function setL2BaseFee(uint256 _newL2BaseFee) external {
l2BaseFee = _newL2BaseFee;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Import layer 2 genesis block
function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
// check genesis batch header length
if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero();
// check whether the genesis batch is imported
if (finalizedStateRoots[0] != bytes32(0)) revert ErrorGenesisBatchImported();
(uint256 memPtr, bytes32 _batchHash, , ) = _loadBatchHeader(_batchHeader);
// check all fields except `dataHash` and `lastBlockHash` are zero
unchecked {
uint256 sum = BatchHeaderV0Codec.getVersion(memPtr) +
BatchHeaderV0Codec.getBatchIndex(memPtr) +
BatchHeaderV0Codec.getL1MessagePopped(memPtr) +
BatchHeaderV0Codec.getTotalL1MessagePopped(memPtr);
if (sum != 0) revert ErrorGenesisBatchHasNonZeroField();
}
if (BatchHeaderV0Codec.getDataHash(memPtr) == bytes32(0)) revert ErrorGenesisDataHashIsZero();
if (BatchHeaderV0Codec.getParentBatchHash(memPtr) != bytes32(0)) revert ErrorGenesisParentBatchHashIsNonZero();
committedBatches[0] = _batchHash;
finalizedStateRoots[0] = _stateRoot;
emit CommitBatch(0, _batchHash);
emit FinalizeBatch(0, _batchHash, _stateRoot, bytes32(0));
}
function commitBatch(
uint8 _version,
bytes calldata _parentBatchHeader,
bytes[] memory _chunks,
bytes calldata
) external {
// check whether the batch is empty
if (_chunks.length == 0) revert ErrorBatchIsEmpty();
(, bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _loadBatchHeader(
_parentBatchHeader
);
unchecked {
_batchIndex += 1;
}
if (committedBatches[_batchIndex] != 0) revert ErrorBatchIsAlreadyCommitted();
bytes32 _batchHash;
uint256 batchPtr;
bytes32 _dataHash;
uint256 _totalL1MessagesPoppedInBatch;
if (_version == 0) {
(_dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV0(
_totalL1MessagesPoppedOverall,
_chunks
);
assembly {
batchPtr := mload(0x40)
_totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
}
// store entries, the order matters
BatchHeaderV0Codec.storeVersion(batchPtr, 0);
BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex);
BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash);
BatchHeaderV0Codec.storeParentBatchHash(batchPtr, _parentBatchHash);
// compute batch hash
_batchHash = BatchHeaderV0Codec.computeBatchHash(
batchPtr,
BatchHeaderV0Codec.BATCH_HEADER_FIXED_LENGTH
);
} else if (_version == 1) {
bytes32 blobVersionedHash;
(blobVersionedHash, _dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1(
_totalL1MessagesPoppedOverall,
_chunks
);
assembly {
batchPtr := mload(0x40)
_totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
}
// store entries, the order matters
BatchHeaderV1Codec.storeVersion(batchPtr, 1);
BatchHeaderV1Codec.storeBatchIndex(batchPtr, _batchIndex);
BatchHeaderV1Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
BatchHeaderV1Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
BatchHeaderV1Codec.storeDataHash(batchPtr, _dataHash);
BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, blobVersionedHash);
BatchHeaderV1Codec.storeParentBatchHash(batchPtr, _parentBatchHash);
// compute batch hash
_batchHash = BatchHeaderV1Codec.computeBatchHash(
batchPtr,
BatchHeaderV1Codec.BATCH_HEADER_FIXED_LENGTH
);
} else {
revert ErrorInvalidBatchHeaderVersion();
}
committedBatches[_batchIndex] = _batchHash;
emit CommitBatch(_batchIndex, _batchHash);
}
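As the "store entries, the order matters" comments indicate, the batch hash is a keccak over a fixed header layout. A Go sketch of the V0 encoding, assuming the standard 89-byte fixed layout (1-byte version, three 8-byte big-endian counters, two 32-byte hashes) and omitting the skipped-L1-message bitmap that follows it; the helper names are illustrative:

import (
	"encoding/binary"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// encodeV0Header mirrors the store* call order above: version, batchIndex,
// l1MessagePopped, totalL1MessagePopped, dataHash, parentBatchHash.
func encodeV0Header(batchIndex, popped, totalPopped uint64, dataHash, parentHash common.Hash) []byte {
	buf := make([]byte, 89)
	buf[0] = 0 // version
	binary.BigEndian.PutUint64(buf[1:9], batchIndex)
	binary.BigEndian.PutUint64(buf[9:17], popped)
	binary.BigEndian.PutUint64(buf[17:25], totalPopped)
	copy(buf[25:57], dataHash[:])
	copy(buf[57:89], parentHash[:])
	return buf
}

// the batch hash is then keccak256 over the encoded header
func v0BatchHash(header []byte) common.Hash {
	return crypto.Keccak256Hash(header)
}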
/// @dev We keep this function to upgrade to 4844 more smoothly.
function finalizeBatchWithProof(
bytes calldata _batchHeader,
bytes32 _prevStateRoot,
bytes32 _postStateRoot,
bytes32 _withdrawRoot,
bytes calldata
) external {
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
// compute batch hash and verify
(, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
// verify previous state root.
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
// check and update lastFinalizedBatchIndex
unchecked {
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
finalizedStateRoots[_batchIndex] = _postStateRoot;
withdrawRoots[_batchIndex] = _withdrawRoot;
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
/// @dev Memory layout of `_blobDataProof`:
/// ```text
/// | z | y | kzg_commitment | kzg_proof |
/// |---------|---------|----------------|-----------|
/// | bytes32 | bytes32 | bytes48 | bytes48 |
/// ```
function finalizeBatchWithProof4844(
bytes calldata _batchHeader,
bytes32 _prevStateRoot,
bytes32 _postStateRoot,
bytes32 _withdrawRoot,
bytes calldata _blobDataProof,
bytes calldata
) external {
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
// compute batch hash and verify
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(memPtr);
// Calls the point evaluation precompile and verifies the output
{
(bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(
abi.encodePacked(_blobVersionedHash, _blobDataProof)
);
// We verify that the point evaluation precompile call was successful by checking that the latter 32 bytes of the
// response equal BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
if (!success) revert ErrorCallPointEvaluationPrecompileFailed();
(, uint256 result) = abi.decode(data, (uint256, uint256));
if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput();
}
// verify previous state root.
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
// check and update lastFinalizedBatchIndex
unchecked {
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
finalizedStateRoots[_batchIndex] = _postStateRoot;
withdrawRoots[_batchIndex] = _withdrawRoot;
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
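For reference, the caller is expected to pack `_blobDataProof` off-chain exactly as the layout above describes: 32-byte `z`, 32-byte `y`, then the 48-byte KZG commitment and proof. A minimal Go sketch of that packing (the helper name is illustrative, not part of this repo):

```go
package sketch

// buildBlobDataProof packs (z, y, kzg_commitment, kzg_proof) into the
// 160-byte layout expected by finalizeBatchWithProof4844:
// | z (32) | y (32) | kzg_commitment (48) | kzg_proof (48) |
func buildBlobDataProof(z, y [32]byte, commitment, proof [48]byte) []byte {
	buf := make([]byte, 0, 160)
	buf = append(buf, z[:]...)
	buf = append(buf, y[:]...)
	buf = append(buf, commitment[:]...)
	buf = append(buf, proof[:]...)
	return buf
}
```

Anything shorter or longer than 160 bytes will make the point evaluation precompile call above fail.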
/**********************
* Internal Functions *
**********************/
/// @dev Internal function to commit chunks with version 0
/// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks.
/// @param _chunks The list of chunks to commit.
/// @return _batchDataHash The computed data hash for the list of chunks.
/// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped ones.
function _commitChunksV0(
uint256 _totalL1MessagesPoppedOverall,
bytes[] memory _chunks
) internal pure returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) {
uint256 _chunksLength = _chunks.length;
// load `batchDataHashPtr` and reserve the memory region for chunk data hashes
uint256 batchDataHashPtr;
assembly {
batchDataHashPtr := mload(0x40)
mstore(0x40, add(batchDataHashPtr, mul(_chunksLength, 32)))
}
// compute the data hash for each chunk
for (uint256 i = 0; i < _chunksLength; i++) {
uint256 _totalNumL1MessagesInChunk;
bytes32 _chunkDataHash;
(_chunkDataHash, _totalNumL1MessagesInChunk) = _commitChunkV0(
_chunks[i],
_totalL1MessagesPoppedInBatch,
_totalL1MessagesPoppedOverall
);
unchecked {
_totalL1MessagesPoppedInBatch += _totalNumL1MessagesInChunk;
_totalL1MessagesPoppedOverall += _totalNumL1MessagesInChunk;
}
assembly {
mstore(batchDataHashPtr, _chunkDataHash)
batchDataHashPtr := add(batchDataHashPtr, 0x20)
}
}
assembly {
let dataLen := mul(_chunksLength, 0x20)
_batchDataHash := keccak256(sub(batchDataHashPtr, dataLen), dataLen)
}
}
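The same batch data hash can be reproduced off-chain by hashing the concatenation of the per-chunk data hashes, mirroring the assembly above. A sketch using the fork's Keccak-256 helper (the function name is illustrative):

```go
package sketch

import (
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// batchDataHash mirrors the assembly above: keccak256 over the
// concatenated 32-byte chunk data hashes, in order.
func batchDataHash(chunkHashes []common.Hash) common.Hash {
	data := make([]byte, 0, len(chunkHashes)*32)
	for _, h := range chunkHashes {
		data = append(data, h.Bytes()...)
	}
	return common.BytesToHash(crypto.Keccak256(data))
}
```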
/// @dev Internal function to commit chunks with version 1
/// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks.
/// @param _chunks The list of chunks to commit.
/// @return _blobVersionedHash The blob versioned hash for the blob carried in this transaction.
/// @return _batchDataHash The computed data hash for the list of chunks.
/// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped ones.
function _commitChunksV1(
uint256 _totalL1MessagesPoppedOverall,
bytes[] memory _chunks
)
internal
view
returns (
bytes32 _blobVersionedHash,
bytes32 _batchDataHash,
uint256 _totalL1MessagesPoppedInBatch
)
{
{
bytes32 _secondBlob;
// Get blob's versioned hash
assembly {
_blobVersionedHash := blobhash(0)
_secondBlob := blobhash(1)
}
if (_blobVersionedHash == bytes32(0)) revert ErrorNoBlobFound();
if (_secondBlob != bytes32(0)) revert ErrorFoundMultipleBlob();
}
uint256 _chunksLength = _chunks.length;
// load `batchDataHashPtr` and reserve the memory region for chunk data hashes
uint256 batchDataHashPtr;
assembly {
batchDataHashPtr := mload(0x40)
mstore(0x40, add(batchDataHashPtr, mul(_chunksLength, 32)))
}
// compute the data hash for each chunk
for (uint256 i = 0; i < _chunksLength; i++) {
uint256 _totalNumL1MessagesInChunk;
bytes32 _chunkDataHash;
(_chunkDataHash, _totalNumL1MessagesInChunk) = _commitChunkV1(
_chunks[i],
_totalL1MessagesPoppedInBatch,
_totalL1MessagesPoppedOverall
);
unchecked {
_totalL1MessagesPoppedInBatch += _totalNumL1MessagesInChunk;
_totalL1MessagesPoppedOverall += _totalNumL1MessagesInChunk;
}
assembly {
mstore(batchDataHashPtr, _chunkDataHash)
batchDataHashPtr := add(batchDataHashPtr, 0x20)
}
}
// compute the data hash for current batch
assembly {
let dataLen := mul(_chunksLength, 0x20)
_batchDataHash := keccak256(sub(batchDataHashPtr, dataLen), dataLen)
}
}
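`blobhash(0)` above returns the EIP-4844 versioned hash of the first blob, defined as `0x01` followed by `sha256(kzg_commitment)[1:]`. A hedged Go sketch of checking a commitment against such a hash (the helper name is illustrative):

```go
package sketch

import (
	"bytes"
	"crypto/sha256"
)

// verifyVersionedHash checks the EIP-4844 rule: version byte 0x01
// followed by the last 31 bytes of sha256(commitment).
func verifyVersionedHash(commitment [48]byte, versionedHash [32]byte) bool {
	h := sha256.Sum256(commitment[:])
	h[0] = 0x01 // VERSIONED_HASH_VERSION_KZG
	return bytes.Equal(h[:], versionedHash[:])
}
```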
/// @dev Internal function to load batch header from calldata to memory.
/// @param _batchHeader The batch header in calldata.
/// @return batchPtr The start memory offset of loaded batch header.
/// @return _batchHash The hash of the loaded batch header.
/// @return _batchIndex The index of this batch.
/// @return _totalL1MessagesPoppedOverall The total number of L1 messages popped after this batch.
function _loadBatchHeader(bytes calldata _batchHeader)
internal
view
returns (
uint256 batchPtr,
bytes32 _batchHash,
uint256 _batchIndex,
uint256 _totalL1MessagesPoppedOverall
)
{
// load version from batch header, it is always the first byte.
uint256 version;
assembly {
version := shr(248, calldataload(_batchHeader.offset))
}
// the version should always be 0 or 1 in the current code
uint256 _length;
if (version == 0) {
(batchPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader);
_batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length);
_batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr);
} else if (version == 1) {
(batchPtr, _length) = BatchHeaderV1Codec.loadAndValidate(_batchHeader);
_batchHash = BatchHeaderV1Codec.computeBatchHash(batchPtr, _length);
_batchIndex = BatchHeaderV1Codec.getBatchIndex(batchPtr);
} else {
revert ErrorInvalidBatchHeaderVersion();
}
// only check when genesis is imported
if (committedBatches[_batchIndex] != _batchHash && finalizedStateRoots[0] != bytes32(0)) {
revert ErrorIncorrectBatchHash();
}
_totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr);
}
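Since byte 0 of every header is the codec version and bytes 1..8 hold the big-endian batch index (the `shr(192, ...)` pattern above), an off-chain reader can dispatch on the same prefix. A minimal sketch, with an illustrative helper name:

```go
package sketch

import (
	"encoding/binary"
	"fmt"
)

// parseBatchHeaderPrefix reads the fields shared by the V0 and V1 headers:
// byte 0 is the codec version, bytes [1, 9) are the big-endian batch index.
func parseBatchHeaderPrefix(header []byte) (version uint8, batchIndex uint64, err error) {
	if len(header) < 9 {
		return 0, 0, fmt.Errorf("header too short: %d bytes", len(header))
	}
	version = header[0]
	batchIndex = binary.BigEndian.Uint64(header[1:9])
	return version, batchIndex, nil
}
```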
function sendMessage(
address target,
uint256 value,
bytes calldata message,
uint256 gasLimit
) external payable {
bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, target, value, messageNonce, message);
address _sender = applyL1ToL2Alias(address(this));
emit QueueTransaction(_sender, target, 0, uint64(messageNonce), gasLimit, _xDomainCalldata);
emit SentMessage(msg.sender, target, value, messageNonce, gasLimit, message);
messageNonce += 1;
}
function relayMessageWithProof(
address _from,
address _to,
uint256 _value,
uint256 _nonce,
bytes memory _message,
L2MessageProof memory
) external {
bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _nonce, _message);
bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);
emit RelayedMessage(_xDomainCalldataHash);
}
/******************************
* Functions from ScrollChain *
******************************/
/// @notice Import layer 2 genesis block
function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
}
function commitBatch(
uint8 /*version*/,
bytes calldata _parentBatchHeader,
bytes[] memory chunks,
bytes calldata /*skippedL1MessageBitmap*/
) external {
// check whether the batch is empty
uint256 _chunksLength = chunks.length;
require(_chunksLength > 0, "batch is empty");
// decode batch index
uint256 headerLength = _parentBatchHeader.length;
uint256 parentBatchPtr;
uint256 parentBatchIndex;
assembly {
parentBatchPtr := mload(0x40)
calldatacopy(parentBatchPtr, _parentBatchHeader.offset, headerLength)
mstore(0x40, add(parentBatchPtr, headerLength))
parentBatchIndex := shr(192, mload(add(parentBatchPtr, 1)))
}
uint256 dataPtr;
assembly {
dataPtr := mload(0x40)
mstore(0x40, add(dataPtr, mul(_chunksLength, 32)))
}
for (uint256 i = 0; i < _chunksLength; i++) {
_commitChunk(dataPtr, chunks[i]);
unchecked {
dataPtr += 32;
}
}
bytes32 _dataHash;
assembly {
let dataLen := mul(_chunksLength, 0x20)
_dataHash := keccak256(sub(dataPtr, dataLen), dataLen)
}
bytes memory paddedData = new bytes(89);
assembly {
mstore(add(paddedData, 57), _dataHash)
}
uint256 batchPtr;
assembly {
batchPtr := add(paddedData, 32)
}
bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, 89);
committedBatches[0] = _batchHash;
emit CommitBatch(parentBatchIndex + 1, _batchHash);
}
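The 89-byte padded header built here follows the V0 layout, `version(1) | batchIndex(8) | l1MessagePopped(8) | totalL1MessagePopped(8) | dataHash(32) | parentBatchHash(32)`, which is why the data hash is written at memory offset 57 = 32 (the array length prefix) + 25. A sketch of constructing the same mock header off-chain (the helper name is illustrative):

```go
package sketch

import "github.com/scroll-tech/go-ethereum/common"

// mockBatchHeaderV0 mirrors the 89-byte padded header built above: all
// fields stay zero except the data hash, which occupies bytes [25, 57)
// of the V0 layout.
func mockBatchHeaderV0(dataHash common.Hash) []byte {
	header := make([]byte, 89)
	copy(header[25:57], dataHash.Bytes())
	return header
}
```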
/// @dev Internal function to commit a chunk with version 0.
/// @param _chunk The encoded chunk to commit.
/// @param _totalL1MessagesPoppedInBatch The total number of L1 messages popped in the current batch before this chunk.
/// @param _totalL1MessagesPoppedOverall The total number of L1 messages popped in all batches including the current batch, before this chunk.
/// @return _dataHash The computed data hash for this chunk.
/// @return _totalNumL1MessagesInChunk The total number of L1 messages popped in this chunk.
function _commitChunkV0(
bytes memory _chunk,
uint256 _totalL1MessagesPoppedInBatch,
uint256 _totalL1MessagesPoppedOverall
) internal pure returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) {
uint256 chunkPtr;
uint256 startDataPtr;
uint256 dataPtr;
assembly {
dataPtr := mload(0x40)
startDataPtr := dataPtr
chunkPtr := add(_chunk, 0x20) // skip chunkLength
}
uint256 _numBlocks = ChunkCodecV0.validateChunkLength(chunkPtr, _chunk.length);
// concatenate block contexts, use scope to avoid stack too deep
{
uint256 _totalTransactionsInChunk;
for (uint256 i = 0; i < _numBlocks; i++) {
dataPtr = ChunkCodecV0.copyBlockContext(chunkPtr, dataPtr, i);
uint256 blockPtr = chunkPtr + 1 + i * ChunkCodecV0.BLOCK_CONTEXT_LENGTH;
uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(blockPtr);
unchecked {
_totalTransactionsInChunk += _numTransactionsInBlock;
}
}
assembly {
mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes
}
}
// concatenate tx hashes
uint256 l2TxPtr = ChunkCodecV0.getL2TxPtr(chunkPtr, _numBlocks);
chunkPtr += 1;
while (_numBlocks > 0) {
// concatenate l1 message hashes
uint256 _numL1MessagesInBlock = ChunkCodecV0.getNumL1Messages(chunkPtr);
// concatenate l2 transaction hashes
uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(chunkPtr);
if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
for (uint256 j = _numL1MessagesInBlock; j < _numTransactionsInBlock; j++) {
bytes32 txHash;
(txHash, l2TxPtr) = ChunkCodecV0.loadL2TxHash(l2TxPtr);
assembly {
mstore(dataPtr, txHash)
dataPtr := add(dataPtr, 0x20)
}
}
unchecked {
_totalNumL1MessagesInChunk += _numL1MessagesInBlock;
_totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
_totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
_numBlocks -= 1;
chunkPtr += ChunkCodecV0.BLOCK_CONTEXT_LENGTH;
}
}
assembly {
chunkPtr := add(_chunk, 0x20)
}
// check chunk has correct length
if (l2TxPtr - chunkPtr != _chunk.length) revert ErrorIncompleteL2TransactionData();
// compute data hash and store to memory
assembly {
_dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
}
}
/// @dev Internal function to commit a chunk with version 1.
/// @param _chunk The encoded chunk to commit.
/// @param _totalL1MessagesPoppedInBatch The total number of L1 messages popped in the current batch before this chunk.
/// @param _totalL1MessagesPoppedOverall The total number of L1 messages popped in all batches including the current batch, before this chunk.
/// @return _dataHash The computed data hash for this chunk.
/// @return _totalNumL1MessagesInChunk The total number of L1 messages popped in this chunk.
function _commitChunkV1(
bytes memory _chunk,
uint256 _totalL1MessagesPoppedInBatch,
uint256 _totalL1MessagesPoppedOverall
) internal pure returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) {
uint256 chunkPtr;
uint256 startDataPtr;
uint256 dataPtr;
assembly {
dataPtr := mload(0x40)
startDataPtr := dataPtr
chunkPtr := add(_chunk, 0x20) // skip chunkLength
}
uint256 _numBlocks = ChunkCodecV1.validateChunkLength(chunkPtr, _chunk.length);
// concatenate block contexts
for (uint256 i = 0; i < _numBlocks; i++) {
dataPtr = ChunkCodecV1.copyBlockContext(chunkPtr, dataPtr, i);
uint256 blockPtr = chunkPtr + 1 + i * ChunkCodecV1.BLOCK_CONTEXT_LENGTH;
uint256 _numL1MessagesInBlock = ChunkCodecV1.getNumL1Messages(blockPtr);
unchecked {
_totalNumL1MessagesInChunk += _numL1MessagesInBlock;
}
}
assembly {
mstore(0x40, add(dataPtr, mul(_totalNumL1MessagesInChunk, 0x20))) // reserve memory for l1 message hashes
chunkPtr := add(chunkPtr, 1)
}
// the number of actual transactions in one chunk: non-skipped l1 messages + l2 txs
uint256 _totalTransactionsInChunk;
// concatenate tx hashes
while (_numBlocks > 0) {
// concatenate l1 message hashes
uint256 _numL1MessagesInBlock = ChunkCodecV1.getNumL1Messages(chunkPtr);
uint256 startPtr = dataPtr;
uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr);
if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
unchecked {
_totalTransactionsInChunk += (dataPtr - startPtr) / 32; // number of non-skipped l1 messages
_totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs
_totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
_totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
_numBlocks -= 1;
chunkPtr += ChunkCodecV1.BLOCK_CONTEXT_LENGTH;
}
}
// compute data hash and store to memory
assembly {
_dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
}
}
function finalizeBatchWithProof(
bytes calldata batchHeader,
bytes32 /*prevStateRoot*/,
bytes32 postStateRoot,
bytes32 withdrawRoot,
bytes calldata /*aggrProof*/
) external {
// decode batch index
uint256 headerLength = batchHeader.length;
uint256 batchPtr;
uint256 batchIndex;
assembly {
batchPtr := mload(0x40)
calldatacopy(batchPtr, batchHeader.offset, headerLength)
mstore(0x40, add(batchPtr, headerLength))
batchIndex := shr(192, mload(add(batchPtr, 1)))
}
bytes32 _batchHash = committedBatches[0];
emit FinalizeBatch(batchIndex, _batchHash, postStateRoot, withdrawRoot);
}
/**********************
* Internal Functions *
**********************/
/// @dev Internal function to generate the correct cross domain calldata for a message.
/// @param _sender Message sender address.
/// @param _target Target contract address.
/// @param _value The amount of ETH pass to the target.
/// @param _messageNonce Nonce for the provided message.
/// @param _message Message to send to the target.
/// @return ABI encoded cross domain calldata.
function _encodeXDomainCalldata(
address _sender,
address _target,
uint256 _value,
uint256 _messageNonce,
bytes memory _message
) internal pure returns (bytes memory) {
return
abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
_sender,
_target,
_value,
_messageNonce,
_message
);
}
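In Go this encoding would normally go through the accounts/abi package; hand-rolled, it is the 4-byte selector, five 32-byte head slots (the fifth holding the byte offset 0xa0 of the dynamic `message`), then the length-prefixed, right-padded message. A sketch under those assumptions (the helper name is illustrative):

```go
package sketch

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// encodeXDomainCalldata hand-rolls the ABI encoding produced by
// abi.encodeWithSignature above; value and nonce must be non-nil.
func encodeXDomainCalldata(sender, target common.Address, value, nonce *big.Int, message []byte) []byte {
	pad := func(b []byte) []byte { // left-pad to one 32-byte slot
		out := make([]byte, 32)
		copy(out[32-len(b):], b)
		return out
	}
	data := crypto.Keccak256([]byte("relayMessage(address,address,uint256,uint256,bytes)"))[:4]
	data = append(data, pad(sender.Bytes())...)
	data = append(data, pad(target.Bytes())...)
	data = append(data, pad(value.Bytes())...)
	data = append(data, pad(nonce.Bytes())...)
	data = append(data, pad(big.NewInt(5*32).Bytes())...) // offset of the dynamic `message` head
	data = append(data, pad(big.NewInt(int64(len(message))).Bytes())...)
	data = append(data, message...)
	if rem := len(message) % 32; rem != 0 { // right-pad to a 32-byte boundary
		data = append(data, make([]byte, 32-rem)...)
	}
	return data
}
```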
/// @notice Utility function that converts the address in the L1 that submitted a tx to
/// the inbox to the msg.sender viewed in the L2
/// @param l1Address the address in the L1 that triggered the tx to L2
/// @return l2Address L2 address as viewed in msg.sender
function applyL1ToL2Alias(address l1Address) internal pure returns (address l2Address) {
uint160 offset = uint160(0x1111000000000000000000000000000000001111);
unchecked {
l2Address = address(uint160(l1Address) + offset);
}
}
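The aliasing is plain modular arithmetic: add the offset to the L1 address and wrap at 2^160, which is exactly what the unchecked uint160 addition does. An equivalent Go sketch (the helper name mirrors the Solidity one for readability):

```go
package sketch

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
)

// applyL1ToL2Alias reproduces the on-chain aliasing: the fixed offset is
// added to the L1 address modulo 2^160, matching the unchecked overflow.
func applyL1ToL2Alias(l1Address common.Address) common.Address {
	offset, _ := new(big.Int).SetString("1111000000000000000000000000000000001111", 16)
	sum := new(big.Int).Add(new(big.Int).SetBytes(l1Address.Bytes()), offset)
	sum.Mod(sum, new(big.Int).Lsh(big.NewInt(1), 160)) // wrap at 2^160
	return common.BigToAddress(sum)
}
```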
function _commitChunk(
uint256 memPtr,
bytes memory _chunk
) internal pure {
uint256 chunkPtr;
uint256 startDataPtr;
uint256 dataPtr;
uint256 blockPtr;
assembly {
dataPtr := mload(0x40)
startDataPtr := dataPtr
chunkPtr := add(_chunk, 0x20) // skip chunkLength
blockPtr := add(chunkPtr, 1) // skip numBlocks
}
uint256 _numBlocks = ChunkCodecV0.validateChunkLength(chunkPtr, _chunk.length);
// concatenate block contexts
uint256 _totalTransactionsInChunk;
for (uint256 i = 0; i < _numBlocks; i++) {
dataPtr = ChunkCodecV0.copyBlockContext(chunkPtr, dataPtr, i);
uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(blockPtr);
unchecked {
_totalTransactionsInChunk += _numTransactionsInBlock;
blockPtr += ChunkCodecV0.BLOCK_CONTEXT_LENGTH;
}
}
assembly {
mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes
blockPtr := add(chunkPtr, 1) // reset block ptr
}
// concatenate tx hashes
uint256 l2TxPtr = ChunkCodecV0.getL2TxPtr(chunkPtr, _numBlocks);
while (_numBlocks > 0) {
// concatenate l2 transaction hashes
uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(blockPtr);
for (uint256 j = 0; j < _numTransactionsInBlock; j++) {
bytes32 txHash;
(txHash, l2TxPtr) = ChunkCodecV0.loadL2TxHash(l2TxPtr);
assembly {
mstore(dataPtr, txHash)
dataPtr := add(dataPtr, 0x20)
}
}
unchecked {
_numBlocks -= 1;
blockPtr += ChunkCodecV0.BLOCK_CONTEXT_LENGTH;
}
}
// check chunk has correct length
require(l2TxPtr - chunkPtr == _chunk.length, "incomplete l2 transaction data");
// compute data hash and store to memory
assembly {
let dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
mstore(memPtr, dataHash)
}
}
address private constant POINT_EVALUATION_PRECOMPILE_ADDRESS = 0x000000000000000000000000000000000000000A;
uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
function verifyProof(
bytes32 claim,
bytes memory commitment,
bytes memory proof
) external view {
require(commitment.length == 48, "Commitment must be 48 bytes");
require(proof.length == 48, "Proof must be 48 bytes");
bytes32 versionedHash = blobhash(0);
// Compute random challenge point.
uint256 point = uint256(keccak256(abi.encodePacked(versionedHash))) % BLS_MODULUS;
bytes memory pointEvaluationCalldata = abi.encodePacked(
versionedHash,
point,
claim,
commitment,
proof
);
(bool success,) = POINT_EVALUATION_PRECOMPILE_ADDRESS.staticcall(pointEvaluationCalldata);
if (!success) {
revert("Proof verification failed");
}
}
}
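The challenge point in `verifyProof` is derived deterministically from the versioned hash, so it can be reproduced off-chain with nothing more than Keccak-256 and a reduction mod the BLS12-381 scalar field modulus. A sketch (the helper name is illustrative):

```go
package sketch

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/crypto"
)

// blsModulus is the BLS12-381 scalar field modulus, as in EIP-4844.
var blsModulus, _ = new(big.Int).SetString("52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)

// challengePoint derives the same pseudo-random evaluation point as
// verifyProof above: keccak256(versionedHash) reduced mod BLS_MODULUS.
func challengePoint(versionedHash [32]byte) *big.Int {
	h := new(big.Int).SetBytes(crypto.Keccak256(versionedHash[:]))
	return h.Mod(h, blsModulus)
}
```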

View File

@@ -135,7 +135,7 @@ func prepareContracts(t *testing.T) {
nonce, err := l1Client.PendingNonceAt(context.Background(), l1Auth.From)
assert.NoError(t, err)
scrollChainAddress := crypto.CreateAddress(l1Auth.From, nonce)
tx := types.NewContractCreation(nonce, big.NewInt(0), 10000000, big.NewInt(1000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
tx := types.NewContractCreation(nonce, big.NewInt(0), 1000000, big.NewInt(1000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
signedTx, err := l1Auth.Signer(l1Auth.From, tx)
assert.NoError(t, err)
err = l1Client.SendTransaction(context.Background(), signedTx)
@@ -174,8 +174,6 @@ func TestFunction(t *testing.T) {
// l1 rollup and watch rollup events
t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch)
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
t.Run("TestCommitBatchAndFinalizeBatch4844", testCommitBatchAndFinalizeBatch4844)
t.Run("TestCommitBatchAndFinalizeBatchBeforeAndPost4844", testCommitBatchAndFinalizeBatchBeforeAndPost4844)
// l1/l2 gas oracle
t.Run("TestImportL1GasPrice", testImportL1GasPrice)

View File

@@ -7,7 +7,6 @@ import (
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
@@ -70,7 +69,7 @@ func testImportL2GasPrice(t *testing.T) {
prepareContracts(t)
l2Cfg := rollupApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, false, relayer.ServiceTypeL2GasOracle, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, relayer.ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -98,7 +97,7 @@ func testImportL2GasPrice(t *testing.T) {
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
_, err = batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
// check db status

View File

@@ -29,7 +29,7 @@ func testCommitAndFinalizeGenesisBatch(t *testing.T) {
prepareContracts(t)
l2Cfg := rollupApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, relayer.ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, l2Relayer)
defer l2Relayer.StopSenders()
@@ -59,7 +59,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create L2Relayer
l2Cfg := rollupApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, relayer.ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, relayer.ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -69,18 +69,17 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// add some blocks to db
var blocks []*encoding.Block
for i := int64(0); i < 10; i++ {
for i := 0; i < 10; i++ {
header := gethTypes.Header{
Number: big.NewInt(i + 1),
Number: big.NewInt(int64(i)),
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
Root: common.HexToHash("0x1"),
}
blocks = append(blocks, &encoding.Block{
Header: &header,
Transactions: nil,
WithdrawRoot: common.HexToHash("0x2"),
WithdrawRoot: common.Hash{},
RowConsumption: &gethTypes.RowConsumption{},
})
}
@@ -97,14 +96,6 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, &params.ChainConfig{}, db, nil)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
}, &params.ChainConfig{}, db, nil)
cp.TryProposeChunk()
batchOrm := orm.NewBatch(db)
@@ -116,6 +107,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NoError(t, err)
assert.Len(t, chunks, 1)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
}, &params.ChainConfig{}, db, nil)
bp.TryProposeBatch()
l2Relayer.ProcessPendingBatches()
@@ -178,256 +175,3 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
}, 30*time.Second, time.Second)
}
func testCommitBatchAndFinalizeBatch4844(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
prepareContracts(t)
// Create L2Relayer
l2Cfg := rollupApp.Config.L2Config
chainConfig := &params.ChainConfig{BanachBlock: big.NewInt(0)}
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
// Create L1Watcher
l1Cfg := rollupApp.Config.L1Config
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
// add some blocks to db
var blocks []*encoding.Block
for i := int64(0); i < 10; i++ {
header := gethTypes.Header{
Number: big.NewInt(i + 1),
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
Root: common.HexToHash("0x1"),
}
blocks = append(blocks, &encoding.Block{
Header: &header,
Transactions: nil,
WithdrawRoot: common.HexToHash("0x2"),
RowConsumption: &gethTypes.RowConsumption{},
})
}
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks)
assert.NoError(t, err)
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, chainConfig, db, nil)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
BatchTimeoutSec: 300,
}, chainConfig, db, nil)
cp.TryProposeChunk()
batchOrm := orm.NewBatch(db)
unbatchedChunkIndex, err := batchOrm.GetFirstUnbatchedChunkIndex(context.Background())
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), unbatchedChunkIndex, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 1)
bp.TryProposeBatch()
l2Relayer.ProcessPendingBatches()
batch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)
// fetch rollup events
assert.Eventually(t, func() bool {
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
var statuses []types.RollupStatus
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
}, 30*time.Second, time.Second)
assert.Eventually(t, func() bool {
batch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.NotEmpty(t, batch.CommitTxHash)
var receipt *gethTypes.Receipt
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
}, 30*time.Second, time.Second)
// add dummy proof
proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
// fetch rollup events
assert.Eventually(t, func() bool {
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
var statuses []types.RollupStatus
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
}, 30*time.Second, time.Second)
assert.Eventually(t, func() bool {
batch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.NotEmpty(t, batch.FinalizeTxHash)
var receipt *gethTypes.Receipt
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
}, 30*time.Second, time.Second)
}
func testCommitBatchAndFinalizeBatchBeforeAndPost4844(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
prepareContracts(t)
// Create L2Relayer
l2Cfg := rollupApp.Config.L2Config
chainConfig := &params.ChainConfig{BanachBlock: big.NewInt(5)}
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
// Create L1Watcher
l1Cfg := rollupApp.Config.L1Config
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
// add some blocks to db
var blocks []*encoding.Block
for i := int64(0); i < 10; i++ {
header := gethTypes.Header{
Number: big.NewInt(i + 1),
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
Root: common.HexToHash("0x1"),
}
blocks = append(blocks, &encoding.Block{
Header: &header,
Transactions: nil,
WithdrawRoot: common.HexToHash("0x2"),
RowConsumption: &gethTypes.RowConsumption{},
})
}
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks)
assert.NoError(t, err)
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, chainConfig, db, nil)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
}, chainConfig, db, nil)
cp.TryProposeChunk()
cp.TryProposeChunk()
bp.TryProposeBatch()
bp.TryProposeBatch()
for i := uint64(0); i < 2; i++ {
l2Relayer.ProcessPendingBatches()
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.GetBatchByIndex(context.Background(), i+1)
assert.NoError(t, err)
assert.NotNil(t, batch)
// fetch rollup events
assert.Eventually(t, func() bool {
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
var statuses []types.RollupStatus
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
}, 30*time.Second, time.Second)
assert.Eventually(t, func() bool {
batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1)
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.NotEmpty(t, batch.CommitTxHash)
var receipt *gethTypes.Receipt
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
}, 30*time.Second, time.Second)
// add dummy proof
proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
// fetch rollup events
assert.Eventually(t, func() bool {
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
var statuses []types.RollupStatus
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
}, 30*time.Second, time.Second)
assert.Eventually(t, func() bool {
batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1)
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.NotEmpty(t, batch.FinalizeTxHash)
var receipt *gethTypes.Receipt
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
}, 30*time.Second, time.Second)
}
}

View File

@@ -65,17 +65,20 @@ func (*Batch) TableName() string {
return "batch"
}
// GetBatchByIndex retrieves the batch by the given index.
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
// GetLatestBatch retrieves the latest batch from the database.
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("index = ?", index)
db = db.Order("index desc")
var batch Batch
if err := db.First(&batch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index)
var latestBatch Batch
if err := db.First(&latestBatch).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
}
return &batch, nil
return &latestBatch, nil
}
// InsertBatch inserts a new batch into the database.
@@ -95,17 +98,21 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
log.Error("failed to create new DA batch",
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
return nil, err
}
var startChunkIndex uint64
if batch.Index > 0 {
parentBatch, getErr := o.GetBatchByIndex(ctx, batch.Index-1)
if getErr != nil {
log.Error("failed to get batch by index", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", getErr)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", getErr)
}
parentBatch, err := o.GetLatestBatch(ctx)
if err != nil {
log.Error("failed to get latest batch", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
// if parentBatch is nil (and err is nil), there is no batch record in the db yet,
// so we keep the default zero values for the new batch;
// otherwise we fill the parentBatch-related fields into the new batch
if parentBatch != nil {
startChunkIndex = parentBatch.EndChunkIndex + 1
}

View File

@@ -108,7 +108,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to create DA chunk", "err", err)
log.Error("failed to initialize new DA chunk", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
@@ -118,6 +118,18 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(chunk)
if err != nil {
log.Error("failed to estimate chunk L1 commit calldata size", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(chunk)
if err != nil {
log.Error("failed to estimate chunk L1 commit gas", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
numBlocks := len(chunk.Blocks)
newChunk := Chunk{
Index: chunkIndex,
@@ -126,6 +138,10 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: chunk.L2GasUsed(),
TotalL2TxNum: chunk.NumL2Transactions(),
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
TotalL1CommitGas: totalL1CommitGas,
StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),