mirror of https://github.com/scroll-tech/scroll.git
synced 2026-01-12 07:28:08 -05:00

Compare commits: v4.3.51...feat/enfor (4 commits)

- fc3dc8d225
- 317ba26206
- 7a1af5913e
- 221a06ecf2
@@ -35,7 +35,7 @@ func (c *HistoryController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context)
 		return
 	}
 
-	resultData := &types.ResultData{Result: pagedTxs, Total: total}
+	resultData := &types.ResultData{Results: pagedTxs, Total: total}
 	types.RenderSuccess(ctx, resultData)
 }
 
@@ -53,7 +53,7 @@ func (c *HistoryController) GetL2WithdrawalsByAddress(ctx *gin.Context) {
 		return
 	}
 
-	resultData := &types.ResultData{Result: pagedTxs, Total: total}
+	resultData := &types.ResultData{Results: pagedTxs, Total: total}
 	types.RenderSuccess(ctx, resultData)
 }
 
@@ -71,7 +71,7 @@ func (c *HistoryController) GetTxsByAddress(ctx *gin.Context) {
 		return
 	}
 
-	resultData := &types.ResultData{Result: pagedTxs, Total: total}
+	resultData := &types.ResultData{Results: pagedTxs, Total: total}
 	types.RenderSuccess(ctx, resultData)
 }
 
@@ -89,6 +89,6 @@ func (c *HistoryController) PostQueryTxsByHashes(ctx *gin.Context) {
 		return
 	}
 
-	resultData := &types.ResultData{Result: results, Total: uint64(len(results))}
+	resultData := &types.ResultData{Results: results, Total: uint64(len(results))}
 	types.RenderSuccess(ctx, resultData)
 }
@@ -16,6 +16,7 @@ import (
 
 	"scroll-tech/bridge-history-api/internal/orm"
 	"scroll-tech/bridge-history-api/internal/types"
+	"scroll-tech/bridge-history-api/internal/utils"
 )
 
 const (
@@ -261,40 +262,44 @@ func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, txHashes []string) ([
 
 func getTxHistoryInfo(message *orm.CrossMessage) *types.TxHistoryInfo {
 	txHistory := &types.TxHistoryInfo{
-		MsgHash:  message.MessageHash,
-		Amount:   message.TokenAmounts,
-		L1Token:  message.L1TokenAddress,
-		L2Token:  message.L2TokenAddress,
-		IsL1:     orm.MessageType(message.MessageType) == orm.MessageTypeL1SentMessage,
-		TxStatus: message.TxStatus,
+		MessageHash:    message.MessageHash,
+		TokenType:      orm.TokenType(message.TokenType),
+		TokenIDs:       utils.ConvertStringToStringArray(message.TokenIDs),
+		TokenAmounts:   utils.ConvertStringToStringArray(message.TokenAmounts),
+		L1TokenAddress: message.L1TokenAddress,
+		L2TokenAddress: message.L2TokenAddress,
+		MessageType:    orm.MessageType(message.MessageType),
+		TxStatus:       orm.TxStatusType(message.TxStatus),
 		BlockTimestamp: message.BlockTimestamp,
 	}
-	if txHistory.IsL1 {
+	if txHistory.MessageType == orm.MessageTypeL1SentMessage {
 		txHistory.Hash = message.L1TxHash
 		txHistory.ReplayTxHash = message.L1ReplayTxHash
 		txHistory.RefundTxHash = message.L1RefundTxHash
 		txHistory.BlockNumber = message.L1BlockNumber
-		txHistory.FinalizeTx = &types.Finalized{
+		txHistory.CounterpartChainTx = &types.CounterpartChainTx{
 			Hash:        message.L2TxHash,
 			BlockNumber: message.L2BlockNumber,
 		}
 	} else {
 		txHistory.Hash = message.L2TxHash
 		txHistory.BlockNumber = message.L2BlockNumber
-		txHistory.FinalizeTx = &types.Finalized{
+		txHistory.CounterpartChainTx = &types.CounterpartChainTx{
 			Hash:        message.L1TxHash,
 			BlockNumber: message.L1BlockNumber,
 		}
		if orm.RollupStatusType(message.RollupStatus) == orm.RollupStatusTypeFinalized {
-			txHistory.ClaimInfo = &types.UserClaimInfo{
-				From:       message.MessageFrom,
-				To:         message.MessageTo,
-				Value:      message.MessageValue,
-				Nonce:      strconv.FormatUint(message.MessageNonce, 10),
-				Message:    message.MessageData,
-				Proof:      "0x" + common.Bytes2Hex(message.MerkleProof),
-				BatchIndex: strconv.FormatUint(message.BatchIndex, 10),
-				Claimable:  true,
+			txHistory.ClaimInfo = &types.ClaimInfo{
+				From:    message.MessageFrom,
+				To:      message.MessageTo,
+				Value:   message.MessageValue,
+				Nonce:   strconv.FormatUint(message.MessageNonce, 10),
+				Message: message.MessageData,
+				Proof: types.L2MessageProof{
+					BatchIndex:  strconv.FormatUint(message.BatchIndex, 10),
+					MerkleProof: "0x" + common.Bytes2Hex(message.MerkleProof),
+				},
+				Claimable: true,
 			}
 		}
 	}
@@ -96,7 +96,7 @@ func NewL1FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.C
 }
 
 func (f *L1FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, []*types.Block, error) {
-	blocks, err := utils.GetL1BlocksInRange(ctx, f.client, from, to)
+	blocks, err := utils.GetBlocksInRange(ctx, f.client, from, to)
 	if err != nil {
 		log.Error("failed to get L1 blocks in range", "from", from, "to", to, "err", err)
 		return false, 0, common.Hash{}, nil, err
@@ -90,8 +90,8 @@ func NewL2FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.C
 	return f
 }
 
-func (f *L2FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, []*types.BlockWithRowConsumption, error) {
-	blocks, err := utils.GetL2BlocksInRange(ctx, f.client, from, to)
+func (f *L2FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, []*types.Block, error) {
+	blocks, err := utils.GetBlocksInRange(ctx, f.client, from, to)
 	if err != nil {
 		log.Error("failed to get L2 blocks in range", "from", from, "to", to, "err", err)
 		return false, 0, common.Hash{}, nil, err
@@ -117,7 +117,7 @@ func (f *L2FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to u
 	return false, 0, lastBlockHash, blocks, nil
 }
 
-func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.BlockWithRowConsumption) (map[uint64]uint64, []*orm.CrossMessage, []*orm.CrossMessage, error) {
+func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, []*orm.CrossMessage, error) {
 	var l2RevertedUserTxs []*orm.CrossMessage
 	var l2RevertedRelayedMessageTxs []*orm.CrossMessage
 	blockTimestampsMap := make(map[uint64]uint64)
@@ -4,6 +4,8 @@ import (
 	"net/http"
 
 	"github.com/gin-gonic/gin"
+
+	"scroll-tech/bridge-history-api/internal/orm"
 )
 
 const (
@@ -37,8 +39,8 @@ type QueryByHashRequest struct {
 
 // ResultData contains return txs and total
 type ResultData struct {
-	Result []*TxHistoryInfo `json:"result"`
-	Total  uint64           `json:"total"`
+	Results []*TxHistoryInfo `json:"results"`
+	Total   uint64           `json:"total"`
 }
 
 // Response the response schema
@@ -48,39 +50,46 @@ type Response struct {
 	Data    interface{} `json:"data"`
 }
 
-// Finalized the schema of tx finalized infos
-type Finalized struct {
+// CounterpartChainTx is the schema of counterpart chain tx info
+type CounterpartChainTx struct {
 	Hash        string `json:"hash"`
-	BlockNumber uint64 `json:"blockNumber"`
+	BlockNumber uint64 `json:"block_number"`
 }
 
-// UserClaimInfo the schema of tx claim infos
-type UserClaimInfo struct {
-	From       string `json:"from"`
-	To         string `json:"to"`
-	Value      string `json:"value"`
-	Nonce      string `json:"nonce"`
-	Message    string `json:"message"`
-	Proof      string `json:"proof"`
-	BatchIndex string `json:"batch_index"`
-	Claimable  bool   `json:"claimable"`
+// ClaimInfo is the schema of tx claim info
+type ClaimInfo struct {
+	From      string         `json:"from"`
+	To        string         `json:"to"`
+	Value     string         `json:"value"`
+	Nonce     string         `json:"nonce"`
+	Message   string         `json:"message"`
+	Proof     L2MessageProof `json:"proof"`
+	Claimable bool           `json:"claimable"`
+}
+
+// L2MessageProof is the schema of L2 message proof
+type L2MessageProof struct {
+	BatchIndex  string `json:"batch_index"`
+	MerkleProof string `json:"merkle_proof"`
 }
 
 // TxHistoryInfo the schema of tx history infos
 type TxHistoryInfo struct {
-	Hash           string         `json:"hash"`
-	ReplayTxHash   string         `json:"replayTxHash"`
-	RefundTxHash   string         `json:"refundTxHash"`
-	MsgHash        string         `json:"msgHash"`
-	Amount         string         `json:"amount"`
-	IsL1           bool           `json:"isL1"`
-	L1Token        string         `json:"l1Token"`
-	L2Token        string         `json:"l2Token"`
-	BlockNumber    uint64         `json:"blockNumber"`
-	TxStatus       int            `json:"txStatus"`
-	FinalizeTx     *Finalized     `json:"finalizeTx"`
-	ClaimInfo      *UserClaimInfo `json:"claimInfo"`
-	BlockTimestamp uint64         `json:"blockTimestamp"`
+	Hash               string              `json:"hash"`
+	ReplayTxHash       string              `json:"replay_tx_hash"`
+	RefundTxHash       string              `json:"refund_tx_hash"`
+	MessageHash        string              `json:"message_hash"`
+	TokenType          orm.TokenType       `json:"token_type"`    // 0: unknown, 1: eth, 2: erc20, 3: erc721, 4: erc1155
+	TokenIDs           []string            `json:"token_ids"`     // only for erc721 and erc1155
+	TokenAmounts       []string            `json:"token_amounts"` // for eth and erc20, the length is 1, for erc721 and erc1155, the length could be > 1
+	MessageType        orm.MessageType     `json:"message_type"`  // 0: unknown, 1: layer 1 message, 2: layer 2 message
+	L1TokenAddress     string              `json:"l1_token_address"`
+	L2TokenAddress     string              `json:"l2_token_address"`
+	BlockNumber        uint64              `json:"block_number"`
+	TxStatus           orm.TxStatusType    `json:"tx_status"` // 0: sent, 1: sent failed, 2: relayed, 3: failed relayed, 4: relayed reverted, 5: skipped, 6: dropped
+	CounterpartChainTx *CounterpartChainTx `json:"counterpart_chain_tx"`
+	ClaimInfo          *ClaimInfo          `json:"claim_info"`
+	BlockTimestamp     uint64              `json:"block_timestamp"`
 }
 
 // RenderJSON renders response with json
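Net effect of the schema changes in this file: the top-level list key is now `results` (was `result`), all field names are snake_case, and the flat `proof`/`batch_index` pair moved into a nested `claim_info.proof` object. A small self-contained Go sketch of the new payload shape, using stand-in structs that mirror the definitions above (the literal values are invented for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in types mirroring the new snake_case schema above.
type L2MessageProof struct {
	BatchIndex  string `json:"batch_index"`
	MerkleProof string `json:"merkle_proof"`
}

type ClaimInfo struct {
	From      string         `json:"from"`
	Value     string         `json:"value"`
	Proof     L2MessageProof `json:"proof"`
	Claimable bool           `json:"claimable"`
}

type TxHistoryInfo struct {
	Hash      string     `json:"hash"`
	TxStatus  int        `json:"tx_status"`
	ClaimInfo *ClaimInfo `json:"claim_info"`
}

type ResultData struct {
	Results []*TxHistoryInfo `json:"results"`
	Total   uint64           `json:"total"`
}

func main() {
	data := ResultData{
		Results: []*TxHistoryInfo{{
			Hash:     "0xabc...",
			TxStatus: 2, // relayed
			ClaimInfo: &ClaimInfo{
				From:      "0xdef...",
				Value:     "1000000000000000000",
				Proof:     L2MessageProof{BatchIndex: "42", MerkleProof: "0x..."},
				Claimable: true,
			},
		}},
		Total: 1,
	}
	out, _ := json.MarshalIndent(data, "", "  ")
	// Top-level key is now "results" (was "result"), and the proof sits
	// nested under claim_info.proof as {batch_index, merkle_proof}.
	fmt.Println(string(out))
}
```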
@@ -14,7 +14,6 @@ import (
 	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
-	"github.com/scroll-tech/go-ethereum/rpc"
 	"golang.org/x/sync/errgroup"
 
 	backendabi "scroll-tech/bridge-history-api/abi"
@@ -116,8 +115,8 @@ func GetBatchRangeFromCalldata(calldata []byte) (uint64, uint64, error) {
 	return startBlock, finishBlock, err
 }
 
-// GetL1BlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
-func GetL1BlocksInRange(ctx context.Context, cli *ethclient.Client, start, end uint64) ([]*types.Block, error) {
+// GetBlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
+func GetBlocksInRange(ctx context.Context, cli *ethclient.Client, start, end uint64) ([]*types.Block, error) {
 	var (
 		eg     errgroup.Group
 		blocks = make([]*types.Block, end-start+1)
@@ -148,38 +147,6 @@ func GetL1BlocksInRange(ctx context.Context, cli *ethclient.Client, start, end u
 	return blocks, nil
 }
 
-// GetL2BlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
-func GetL2BlocksInRange(ctx context.Context, cli *ethclient.Client, start, end uint64) ([]*types.BlockWithRowConsumption, error) {
-	var (
-		eg          errgroup.Group
-		blocks      = make([]*types.BlockWithRowConsumption, end-start+1)
-		concurrency = 32
-		sem         = make(chan struct{}, concurrency)
-	)
-
-	for i := start; i <= end; i++ {
-		sem <- struct{}{} // Acquire a slot in the semaphore
-		blockNum := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(i))
-		index := i - start
-		eg.Go(func() error {
-			defer func() { <-sem }() // Release the slot when done
-			block, err := cli.GetBlockByNumberOrHash(ctx, blockNum)
-			if err != nil {
-				log.Error("Failed to fetch block number", "number", blockNum, "error", err)
-				return err
-			}
-			blocks[index] = block
-			return nil
-		})
-	}
-
-	if err := eg.Wait(); err != nil {
-		log.Error("Error waiting for block fetching routines", "error", err)
-		return nil, err
-	}
-	return blocks, nil
-}
-
 // ConvertBigIntArrayToString convert the big int array to string
 func ConvertBigIntArrayToString(array []*big.Int) string {
 	stringArray := make([]string, len(array))
@@ -191,7 +158,19 @@ func ConvertBigIntArrayToString(array []*big.Int) string {
 	return result
 }
 
-// GetSkippedQueueIndices get the skipped queue indices
+// ConvertStringToStringArray takes a string with values separated by commas and returns a slice of strings
+func ConvertStringToStringArray(s string) []string {
+	if s == "" {
+		return []string{}
+	}
+	stringParts := strings.Split(s, ",")
+	for i, part := range stringParts {
+		stringParts[i] = strings.TrimSpace(part)
+	}
+	return stringParts
+}
+
+// GetSkippedQueueIndices gets the skipped queue indices
 func GetSkippedQueueIndices(startIndex uint64, skippedBitmap *big.Int) []uint64 {
 	var indices []uint64
 	for i := 0; i < 256; i++ {
@@ -1,6 +1,7 @@
 package utils
 
 import (
+	"math/big"
 	"testing"
 
 	"github.com/scroll-tech/go-ethereum/common"
@@ -36,3 +37,55 @@ func TestGetBatchRangeFromCalldata(t *testing.T) {
 	assert.Equal(t, start, uint64(0))
 	assert.Equal(t, finish, uint64(0))
 }
+
+// TestConvertBigIntArrayToString tests the ConvertBigIntArrayToString function
+func TestConvertBigIntArrayToString(t *testing.T) {
+	tests := []struct {
+		array    []*big.Int
+		expected string
+	}{
+		{[]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, "1, 2, 3"},
+		{[]*big.Int{big.NewInt(0), big.NewInt(-1)}, "0, -1"},
+		{[]*big.Int{}, ""},
+	}
+
+	for _, test := range tests {
+		got := ConvertBigIntArrayToString(test.array)
+		assert.Equal(t, test.expected, got)
+	}
+}
+
+// TestConvertStringToStringArray tests the ConvertStringToStringArray function
+func TestConvertStringToStringArray(t *testing.T) {
+	tests := []struct {
+		s        string
+		expected []string
+	}{
+		{"1, 2, 3", []string{"1", "2", "3"}},
+		{" 4 , 5 , 6 ", []string{"4", "5", "6"}},
+		{"", []string{}},
+	}
+
+	for _, test := range tests {
+		got := ConvertStringToStringArray(test.s)
+		assert.Equal(t, test.expected, got)
+	}
+}
+
+// TestGetSkippedQueueIndices tests the GetSkippedQueueIndices function
+func TestGetSkippedQueueIndices(t *testing.T) {
+	tests := []struct {
+		startIndex uint64
+		bitmap     *big.Int
+		expected   []uint64
+	}{
+		{0, big.NewInt(0b101), []uint64{0, 2}},
+		{10, big.NewInt(0b110), []uint64{11, 12}},
+		{0, big.NewInt(0), nil}, // No bits set
+	}
+
+	for _, test := range tests {
+		got := GetSkippedQueueIndices(test.startIndex, test.bitmap)
+		assert.Equal(t, test.expected, got)
+	}
+}
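For context on what the last test checks: bit `i` of the 256-bit `skippedBitmap` marks queue index `startIndex+i` as skipped. A standalone sketch that reproduces the tested behavior (my own reimplementation for illustration, not necessarily the repo's code):

```go
package main

import (
	"fmt"
	"math/big"
)

// skippedQueueIndices returns startIndex+i for every set bit i in the low
// 256 bits of skippedBitmap, matching the expectations in
// TestGetSkippedQueueIndices (e.g. bitmap 0b110 at start 10 -> [11 12]).
func skippedQueueIndices(startIndex uint64, skippedBitmap *big.Int) []uint64 {
	var indices []uint64
	for i := 0; i < 256; i++ {
		if skippedBitmap.Bit(i) == 1 {
			indices = append(indices, startIndex+uint64(i))
		}
	}
	return indices
}

func main() {
	fmt.Println(skippedQueueIndices(0, big.NewInt(0b101)))  // [0 2]
	fmt.Println(skippedQueueIndices(10, big.NewInt(0b110))) // [11 12]
	fmt.Println(skippedQueueIndices(0, big.NewInt(0)))      // []
}
```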
@@ -48,7 +48,7 @@ type App struct {
 	Timestamp int
 }
 
-// NewDockerApp returns new instance of dokerApp struct
+// NewDockerApp returns new instance of dockerApp struct
 func NewDockerApp() *App {
 	timestamp := time.Now().Nanosecond()
 	app := &App{
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.3.51"
+var tag = "v4.3.53"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
@@ -42,6 +42,28 @@ Add an account to the sequencer list.
 |---|---|---|
 | _account | address | The address of account to add. |
 
+### commitAndFinalizeBatchEnforced
+
+```solidity
+function commitAndFinalizeBatchEnforced(bytes _parentBatchHeader, bytes[] _chunks, bytes _skippedL1MessageBitmap, bytes32 _postStateRoot, bytes32 _withdrawRoot, bytes _withdrawRootProof, bytes _aggrProof) external nonpayable
+```
+
+Commit and finalize enforced batch.
+
+*This function can be used to commit and finalize a new batch in a single step if all previous batches are finalized. It can also be used to finalize the earliest pending batch; in that case, the provided batch should match the pending batch. If the user chooses to finalize a pending batch, the batch hash of the current header should match `committedBatches[currentIndex]`. Otherwise, `committedBatches[currentIndex]` should be `bytes32(0)`.*
+
+#### Parameters
+
+| Name | Type | Description |
+|---|---|---|
+| _parentBatchHeader | bytes | undefined |
+| _chunks | bytes[] | undefined |
+| _skippedL1MessageBitmap | bytes | undefined |
+| _postStateRoot | bytes32 | undefined |
+| _withdrawRoot | bytes32 | undefined |
+| _withdrawRootProof | bytes | undefined |
+| _aggrProof | bytes | undefined |
 ### commitBatch
 
 ```solidity
@@ -86,7 +108,7 @@ Return the batch hash of a committed batch.
 ### finalizeBatchWithProof
 
 ```solidity
-function finalizeBatchWithProof(bytes _batchHeader, bytes32 _prevStateRoot, bytes32 _postStateRoot, bytes32 _withdrawRoot, bytes _aggrProof) external nonpayable
+function finalizeBatchWithProof(bytes _batchHeader, bytes32, bytes32 _postStateRoot, bytes32 _withdrawRoot, bytes _aggrProof) external nonpayable
 ```
 
 Finalize a committed batch on layer 1.
@@ -98,7 +120,7 @@ Finalize a committed batch on layer 1.
 | Name | Type | Description |
 |---|---|---|
 | _batchHeader | bytes | undefined |
-| _prevStateRoot | bytes32 | undefined |
+| _1 | bytes32 | undefined |
 | _postStateRoot | bytes32 | undefined |
 | _withdrawRoot | bytes32 | undefined |
 | _aggrProof | bytes | undefined |
@@ -160,6 +182,22 @@ Initialize the storage of ScrollChain.
 | _verifier | address | The address of zkevm verifier contract. |
 | _maxNumTxInChunk | uint256 | The maximum number of transactions allowed in each chunk. |
 
+### initializeV2
+
+```solidity
+function initializeV2(address _zkTrieVerifier) external nonpayable
+```
+
+#### Parameters
+
+| Name | Type | Description |
+|---|---|---|
+| _zkTrieVerifier | address | undefined |
+
 ### isBatchFinalized
 
 ```solidity
@@ -260,6 +298,23 @@ The chain id of the corresponding layer 2 chain.
 |---|---|---|
 | _0 | uint64 | undefined |
 
+### maxFinalizationDelay
+
+```solidity
+function maxFinalizationDelay() external view returns (uint256)
+```
+
+The maximum finalization delay in seconds before entering the enforced mode.
+
+#### Returns
+
+| Name | Type | Description |
+|---|---|---|
+| _0 | uint256 | undefined |
+
 ### maxNumTxInChunk
 
 ```solidity
@@ -475,6 +530,23 @@ Return the message root of a committed batch.
 |---|---|---|
 | _0 | bytes32 | undefined |
 
+### zkTrieVerifier
+
+```solidity
+function zkTrieVerifier() external view returns (address)
+```
+
+The address of zk trie verifier.
+
+#### Returns
+
+| Name | Type | Description |
+|---|---|---|
+| _0 | address | undefined |
+
 
 ## Events
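One practical consequence of the new `maxFinalizationDelay` view documented above: together with the timestamp of the last finalization, it determines when `commitAndFinalizeBatchEnforced` stops reverting with "not allowed". A minimal Go sketch of that gate for an off-chain watcher; the helper name and inputs are illustrative, but the rule itself mirrors the contract's check (`finalizationState.timestamp + maxFinalizationDelay < block.timestamp`), shown later in this diff:

```go
package main

import (
	"fmt"
	"time"
)

// canEnterEnforcedMode mirrors the on-chain gate:
// finalizationState.timestamp + maxFinalizationDelay < block.timestamp.
// lastFinalization is the block timestamp of the last finalized batch;
// maxDelay is the contract's maxFinalizationDelay (initializeV2 sets 1 week).
func canEnterEnforcedMode(lastFinalization, maxDelay, now uint64) bool {
	return lastFinalization+maxDelay < now
}

func main() {
	lastFinalization := uint64(time.Now().Add(-8 * 24 * time.Hour).Unix())
	maxDelay := uint64(7 * 24 * 60 * 60) // 1 week, the initializeV2 default
	now := uint64(time.Now().Unix())
	fmt.Println(canEnterEnforcedMode(lastFinalization, maxDelay, now)) // true: 8 days > 1 week
}
```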
@@ -33,7 +33,7 @@ env CONTRACT_NAME=L2ERC1155Gateway npx hardhat run --network $layer2 scripts/dep
 env CONTRACT_NAME=L2ETHGateway npx hardhat run --network $layer2 scripts/deploy_proxy_contract.ts
 env CONTRACT_NAME=L2WETHGateway npx hardhat run --network $layer2 scripts/deploy_proxy_contract.ts
 
-# initalize contracts in layer 1, should set proper bash env variables first
+# initialize contracts in layer 1, should set proper bash env variables first
 npx hardhat --network $layer1 run scripts/initialize_l1_erc20_gateway.ts
 npx hardhat --network $layer1 run scripts/initialize_l1_gateway_router.ts
 npx hardhat --network $layer1 run scripts/initialize_scroll_chain.ts
@@ -42,7 +42,7 @@ npx hardhat --network $layer1 run scripts/initialize_l1_custom_erc20_gateway.ts
 npx hardhat --network $layer1 run scripts/initialize_l1_erc1155_gateway.ts
 npx hardhat --network $layer1 run scripts/initialize_l1_erc721_gateway.ts
 
-# initalize contracts in layer 2, should set proper bash env variables first
+# initialize contracts in layer 2, should set proper bash env variables first
 npx hardhat --network $layer2 run scripts/initialize_l2_erc20_gateway.ts
 npx hardhat --network $layer2 run scripts/initialize_l2_gateway_router.ts
 npx hardhat --network $layer2 run scripts/initialize_l2_custom_erc20_gateway.ts
@@ -90,4 +90,23 @@ interface IScrollChain {
         bytes32 withdrawRoot,
         bytes calldata aggrProof
     ) external;
+
+    /// @notice Commit and finalize enforced batch.
+    ///
+    /// @param parentBatchHeader The header of parent batch, see the comments of `BatchHeaderV0Codec`.
+    /// @param chunks The list of encoded chunks, see the comments of `ChunkCodec`.
+    /// @param skippedL1MessageBitmap The bitmap indicates whether each L1 message is skipped or not.
+    /// @param postStateRoot The state root of current batch.
+    /// @param withdrawRoot The withdraw trie root of current batch.
+    /// @param withdrawRootProof The proof used to verify the correctness of `withdrawRoot`.
+    /// @param aggrProof The aggregation proof for current batch.
+    function commitAndFinalizeBatchEnforced(
+        bytes calldata parentBatchHeader,
+        bytes[] memory chunks,
+        bytes calldata skippedL1MessageBitmap,
+        bytes32 postStateRoot,
+        bytes32 withdrawRoot,
+        bytes calldata withdrawRootProof,
+        bytes calldata aggrProof
+    ) external;
 }
@@ -10,6 +10,7 @@ import {IScrollChain} from "./IScrollChain.sol";
 import {BatchHeaderV0Codec} from "../../libraries/codec/BatchHeaderV0Codec.sol";
 import {ChunkCodec} from "../../libraries/codec/ChunkCodec.sol";
 import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
+import {IZkTrieVerifier} from "../../libraries/verifier/IZkTrieVerifier.sol";
 
 // solhint-disable no-inline-assembly
 // solhint-disable reason-string
@@ -36,6 +37,16 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     /// @param newMaxNumTxInChunk The new value of `maxNumTxInChunk`.
     event UpdateMaxNumTxInChunk(uint256 oldMaxNumTxInChunk, uint256 newMaxNumTxInChunk);
 
+    /// @notice Emitted when the value of `zkTrieVerifier` is updated.
+    /// @param oldZkTrieVerifier The old value of `zkTrieVerifier`.
+    /// @param newZkTrieVerifier The new value of `zkTrieVerifier`.
+    event UpdateZkTrieVerifier(address indexed oldZkTrieVerifier, address indexed newZkTrieVerifier);
+
+    /// @notice Emitted when the value of `maxFinalizationDelay` is updated.
+    /// @param oldMaxFinalizationDelay The old value of `maxFinalizationDelay`.
+    /// @param newMaxFinalizationDelay The new value of `maxFinalizationDelay`.
+    event UpdateMaxFinalizationDelay(uint256 oldMaxFinalizationDelay, uint256 newMaxFinalizationDelay);
+
     /*************
      * Constants *
      *************/
@@ -49,6 +60,19 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     /// @notice The address of RollupVerifier.
     address public immutable verifier;
 
+    /***********
+     * Structs *
+     ***********/
+
+    /// @param lastIndex The index of latest finalized batch
+    /// @param timestamp The block timestamp of last finalization
+    /// @param mode The current status, 1 means enforced mode, 0 means not.
+    struct FinalizationState {
+        uint128 lastIndex;
+        uint64 timestamp;
+        uint8 mode;
+    }
+
     /*************
      * Variables *
      *************/
@@ -68,8 +92,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     /// @notice Whether an account is a prover.
     mapping(address => bool) public isProver;
 
-    /// @inheritdoc IScrollChain
-    uint256 public override lastFinalizedBatchIndex;
+    /// @dev The storage slot used as lastFinalizedBatchIndex, which is deprecated now.
+    uint256 private __lastFinalizedBatchIndex;
 
     /// @inheritdoc IScrollChain
     mapping(uint256 => bytes32) public override committedBatches;
@@ -80,6 +104,14 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     /// @inheritdoc IScrollChain
     mapping(uint256 => bytes32) public override withdrawRoots;
 
+    FinalizationState internal finalizationState;
+
+    /// @notice The maximum finalization delay in seconds before entering the enforced mode.
+    uint256 public maxFinalizationDelay;
+
+    /// @notice The address of zk trie verifier.
+    address public zkTrieVerifier;
+
     /**********************
      * Function Modifiers *
     **********************/
@@ -141,13 +173,25 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
         emit UpdateMaxNumTxInChunk(0, _maxNumTxInChunk);
     }
 
+    function initializeV2(address _zkTrieVerifier) external reinitializer(2) {
+        finalizationState = FinalizationState(uint128(__lastFinalizedBatchIndex), uint64(block.timestamp), 0);
+
+        _updateZkTrieVerifier(_zkTrieVerifier);
+        _updateMaxFinalizationDelay(1 weeks);
+    }
+
     /*************************
      * Public View Functions *
      *************************/
 
+    /// @inheritdoc IScrollChain
+    function lastFinalizedBatchIndex() public view override returns (uint256) {
+        return finalizationState.lastIndex;
+    }
+
     /// @inheritdoc IScrollChain
     function isBatchFinalized(uint256 _batchIndex) external view override returns (bool) {
-        return _batchIndex <= lastFinalizedBatchIndex;
+        return _batchIndex <= lastFinalizedBatchIndex();
     }
 
     /*****************************
@@ -162,7 +206,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
         // check whether the genesis batch is imported
         require(finalizedStateRoots[0] == bytes32(0), "Genesis batch imported");
 
-        (uint256 memPtr, bytes32 _batchHash) = _loadBatchHeader(_batchHeader);
+        (uint256 memPtr, bytes32 _batchHash) = _loadBatchHeaderCalldata(_batchHeader);
 
         // check all fields except `dataHash` and `lastBlockHash` are zero
         unchecked {
@@ -189,100 +233,25 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
         bytes[] memory _chunks,
         bytes calldata _skippedL1MessageBitmap
     ) external override OnlySequencer whenNotPaused {
+        // if we are in enforced mode, exit from it.
+        if (finalizationState.mode == 1) finalizationState.mode = 0;
+
         require(_version == 0, "invalid version");
 
-        // check whether the batch is empty
-        uint256 _chunksLength = _chunks.length;
-        require(_chunksLength > 0, "batch is empty");
-
-        // The overall memory layout in this function is organized as follows
-        // +---------------------+-------------------+------------------+
-        // | parent batch header | chunk data hashes | new batch header |
-        // +---------------------+-------------------+------------------+
-        // ^                     ^                   ^
-        // batchPtr              dataPtr             newBatchPtr (re-use var batchPtr)
-        //
-        // 1. We copy the parent batch header from calldata to memory starting at batchPtr
-        // 2. We store `_chunksLength` number of Keccak hashes starting at `dataPtr`. Each Keccak
-        //    hash corresponds to the data hash of a chunk. So we reserve the memory region from
-        //    `dataPtr` to `dataPtr + _chunkLength * 32` for the chunk data hashes.
-        // 3. The memory starting at `newBatchPtr` is used to store the new batch header and compute
-        //    the batch hash.
-
-        // the variable `batchPtr` will be reused later for the current batch
-        (uint256 batchPtr, bytes32 _parentBatchHash) = _loadBatchHeader(_parentBatchHeader);
-
-        uint256 _batchIndex = BatchHeaderV0Codec.batchIndex(batchPtr);
-        uint256 _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.totalL1MessagePopped(batchPtr);
-        require(committedBatches[_batchIndex] == _parentBatchHash, "incorrect parent batch hash");
-        require(committedBatches[_batchIndex + 1] == 0, "batch already committed");
-
-        // load `dataPtr` and reserve the memory region for chunk data hashes
-        uint256 dataPtr;
-        assembly {
-            dataPtr := mload(0x40)
-            mstore(0x40, add(dataPtr, mul(_chunksLength, 32)))
-        }
-
-        // compute the data hash for each chunk
-        uint256 _totalL1MessagesPoppedInBatch;
-        for (uint256 i = 0; i < _chunksLength; i++) {
-            uint256 _totalNumL1MessagesInChunk = _commitChunk(
-                dataPtr,
-                _chunks[i],
-                _totalL1MessagesPoppedInBatch,
-                _totalL1MessagesPoppedOverall,
-                _skippedL1MessageBitmap
-            );
-
-            unchecked {
-                _totalL1MessagesPoppedInBatch += _totalNumL1MessagesInChunk;
-                _totalL1MessagesPoppedOverall += _totalNumL1MessagesInChunk;
-                dataPtr += 32;
-            }
-        }
-
-        // check the length of bitmap
-        unchecked {
-            require(
-                ((_totalL1MessagesPoppedInBatch + 255) / 256) * 32 == _skippedL1MessageBitmap.length,
-                "wrong bitmap length"
-            );
-        }
-
-        // compute the data hash for current batch
-        bytes32 _dataHash;
-        assembly {
-            let dataLen := mul(_chunksLength, 0x20)
-            _dataHash := keccak256(sub(dataPtr, dataLen), dataLen)
-
-            batchPtr := mload(0x40) // reset batchPtr
-            _batchIndex := add(_batchIndex, 1) // increase batch index
-        }
-
-        // store entries, the order matters
-        BatchHeaderV0Codec.storeVersion(batchPtr, _version);
-        BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex);
-        BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
-        BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
-        BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash);
-        BatchHeaderV0Codec.storeParentBatchHash(batchPtr, _parentBatchHash);
-        BatchHeaderV0Codec.storeSkippedBitmap(batchPtr, _skippedL1MessageBitmap);
-
-        // compute batch hash
-        bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, 89 + _skippedL1MessageBitmap.length);
-
-        committedBatches[_batchIndex] = _batchHash;
-        emit CommitBatch(_batchIndex, _batchHash);
+        _commitBatch(_parentBatchHeader, _chunks, _skippedL1MessageBitmap);
     }
 
     /// @inheritdoc IScrollChain
     /// @dev If the owner wants to revert a sequence of batches by sending multiple transactions,
     /// make sure to revert recent batches first.
-    function revertBatch(bytes calldata _batchHeader, uint256 _count) external onlyOwner {
+    function revertBatch(bytes calldata _batchHeader, uint256 _count) external {
+        // if we are not in enforced mode, only owner can revert batches.
+        // if we are in enforced mode, allow any users to revert batches.
+        if (finalizationState.mode == 0) _checkOwner();
+
         require(_count > 0, "count must be nonzero");
 
-        (uint256 memPtr, bytes32 _batchHash) = _loadBatchHeader(_batchHeader);
+        (uint256 memPtr, bytes32 _batchHash) = _loadBatchHeaderCalldata(_batchHeader);
 
         // check batch hash
         uint256 _batchIndex = BatchHeaderV0Codec.batchIndex(memPtr);
@@ -291,7 +260,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
         require(committedBatches[_batchIndex + _count] == bytes32(0), "reverting must start from the ending");
 
         // check finalization
-        require(_batchIndex > lastFinalizedBatchIndex, "can only revert unfinalized batch");
+        require(_batchIndex > lastFinalizedBatchIndex(), "can only revert unfinalized batch");
 
         while (_count > 0) {
             committedBatches[_batchIndex] = bytes32(0);
@@ -311,68 +280,57 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     /// @inheritdoc IScrollChain
     function finalizeBatchWithProof(
         bytes calldata _batchHeader,
-        bytes32 _prevStateRoot,
+        bytes32,
         bytes32 _postStateRoot,
         bytes32 _withdrawRoot,
         bytes calldata _aggrProof
     ) external override OnlyProver whenNotPaused {
-        require(_prevStateRoot != bytes32(0), "previous state root is zero");
-        require(_postStateRoot != bytes32(0), "new state root is zero");
-
         // compute batch hash and verify
-        (uint256 memPtr, bytes32 _batchHash) = _loadBatchHeader(_batchHeader);
+        (uint256 memPtr, bytes32 _batchHash) = _loadBatchHeaderCalldata(_batchHeader);
 
-        bytes32 _dataHash = BatchHeaderV0Codec.dataHash(memPtr);
-        uint256 _batchIndex = BatchHeaderV0Codec.batchIndex(memPtr);
-        require(committedBatches[_batchIndex] == _batchHash, "incorrect batch hash");
-
-        // verify previous state root.
-        require(finalizedStateRoots[_batchIndex - 1] == _prevStateRoot, "incorrect previous state root");
-
-        // avoid duplicated verification
-        require(finalizedStateRoots[_batchIndex] == bytes32(0), "batch already verified");
-
-        // compute public input hash
-        bytes32 _publicInputHash = keccak256(
-            abi.encodePacked(layer2ChainId, _prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash)
-        );
-
-        // verify batch
-        IRollupVerifier(verifier).verifyAggregateProof(_batchIndex, _aggrProof, _publicInputHash);
-
-        // check and update lastFinalizedBatchIndex
-        unchecked {
-            require(lastFinalizedBatchIndex + 1 == _batchIndex, "incorrect batch index");
-            lastFinalizedBatchIndex = _batchIndex;
-        }
-
-        // record state root and withdraw root
-        finalizedStateRoots[_batchIndex] = _postStateRoot;
-        withdrawRoots[_batchIndex] = _withdrawRoot;
-
-        // Pop finalized and non-skipped message from L1MessageQueue.
-        uint256 _l1MessagePopped = BatchHeaderV0Codec.l1MessagePopped(memPtr);
-        if (_l1MessagePopped > 0) {
-            IL1MessageQueue _queue = IL1MessageQueue(messageQueue);
-
-            unchecked {
-                uint256 _startIndex = BatchHeaderV0Codec.totalL1MessagePopped(memPtr) - _l1MessagePopped;
-
-                for (uint256 i = 0; i < _l1MessagePopped; i += 256) {
-                    uint256 _count = 256;
-                    if (_l1MessagePopped - i < _count) {
-                        _count = _l1MessagePopped - i;
-                    }
-                    uint256 _skippedBitmap = BatchHeaderV0Codec.skippedBitmap(memPtr, i / 256);
-
-                    _queue.popCrossDomainMessage(_startIndex, _count, _skippedBitmap);
-
-                    _startIndex += 256;
-                }
-            }
-        }
-
-        emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
+        // finalize batch
+        _finalizeBatch(memPtr, _batchHash, _postStateRoot, _withdrawRoot, _aggrProof);
     }
 
+    /// @inheritdoc IScrollChain
+    ///
+    /// @dev This function can be used to commit and finalize a new batch in a
+    /// single step if all previous batches are finalized. It can also be used
+    /// to finalize the earliest pending batch. In this case, the provided batch
+    /// should match the pending batch.
+    ///
+    /// If the user chooses to finalize a pending batch, the batch hash of the current
+    /// header should match with `committedBatches[currentIndex]`.
+    /// Otherwise, `committedBatches[currentIndex]` should be `bytes32(0)`.
+    function commitAndFinalizeBatchEnforced(
+        bytes calldata _parentBatchHeader,
+        bytes[] memory _chunks,
+        bytes calldata _skippedL1MessageBitmap,
+        bytes32 _postStateRoot,
+        bytes32 _withdrawRoot,
+        bytes calldata _withdrawRootProof,
+        bytes calldata _aggrProof
+    ) external {
+        // check and enable enforced mode.
+        if (finalizationState.mode == 0) {
+            if (finalizationState.timestamp + maxFinalizationDelay < block.timestamp) {
+                finalizationState.mode = 1;
+            } else {
+                revert("not allowed");
+            }
+        }
+
+        (uint256 memPtr, bytes32 _batchHash) = _commitBatch(_parentBatchHeader, _chunks, _skippedL1MessageBitmap);
+
+        (bytes32 stateRoot, bytes32 storageValue) = IZkTrieVerifier(zkTrieVerifier).verifyZkTrieProof(
+            0x5300000000000000000000000000000000000000,
+            bytes32(0),
+            _withdrawRootProof
+        );
+        require(stateRoot == _postStateRoot, "state root mismatch");
+        require(storageValue == _withdrawRoot, "withdraw root mismatch");
+
+        _finalizeBatch(memPtr, _batchHash, _postStateRoot, _withdrawRoot, _aggrProof);
+    }
+
     /************************
@@ -437,15 +395,41 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
         }
     }
 
+    function updateZkTrieVerifier(address _newZkTrieVerifier) external onlyOwner {
+        _updateZkTrieVerifier(_newZkTrieVerifier);
+    }
+
+    function updateMaxFinalizationDelay(uint256 _newMaxFinalizationDelay) external onlyOwner {
+        _updateMaxFinalizationDelay(_newMaxFinalizationDelay);
+    }
+
     /**********************
      * Internal Functions *
      **********************/
 
+    function _updateZkTrieVerifier(address _newZkTrieVerifier) internal {
+        address _oldZkTrieVerifier = zkTrieVerifier;
+        zkTrieVerifier = _newZkTrieVerifier;
+
+        emit UpdateZkTrieVerifier(_oldZkTrieVerifier, _newZkTrieVerifier);
+    }
+
+    function _updateMaxFinalizationDelay(uint256 _newMaxFinalizationDelay) internal {
+        uint256 _oldMaxFinalizationDelay = maxFinalizationDelay;
+        maxFinalizationDelay = _newMaxFinalizationDelay;
+
+        emit UpdateMaxFinalizationDelay(_oldMaxFinalizationDelay, _newMaxFinalizationDelay);
+    }
+
     /// @dev Internal function to load batch header from calldata to memory.
     /// @param _batchHeader The batch header in calldata.
     /// @return memPtr The start memory offset of loaded batch header.
     /// @return _batchHash The hash of the loaded batch header.
-    function _loadBatchHeader(bytes calldata _batchHeader) internal pure returns (uint256 memPtr, bytes32 _batchHash) {
+    function _loadBatchHeaderCalldata(bytes calldata _batchHeader)
+        internal
+        pure
+        returns (uint256 memPtr, bytes32 _batchHash)
+    {
         // load to memory
         uint256 _length;
         (memPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader);
@@ -605,4 +589,185 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
 
         return _ptr;
     }
+
+    function _commitBatch(
+        bytes calldata _parentBatchHeader,
+        bytes[] memory _chunks,
+        bytes calldata _skippedL1MessageBitmap
+    ) internal returns (uint256 batchPtr, bytes32 batchHash) {
+        // check whether the batch is empty
+        require(_chunks.length > 0, "batch is empty");
+
+        // The overall memory layout in this function is organized as follows
+        // +---------------------+-------------------+------------------+
+        // | parent batch header | chunk data hashes | new batch header |
+        // +---------------------+-------------------+------------------+
+        // ^                     ^                   ^
+        // batchPtr              dataPtr             newBatchPtr (re-use var batchPtr)
+        //
+        // 1. We copy the parent batch header from calldata to memory starting at batchPtr
+        // 2. We store `_chunks.length` number of Keccak hashes starting at `dataPtr`. Each Keccak
+        //    hash corresponds to the data hash of a chunk. So we reserve the memory region from
+        //    `dataPtr` to `dataPtr + _chunkLength * 32` for the chunk data hashes.
+        // 3. The memory starting at `newBatchPtr` is used to store the new batch header and compute
+        //    the batch hash.
+
+        // the variable `batchPtr` will be reused later for the current batch
+        bytes32 _parentBatchHash;
+        (batchPtr, _parentBatchHash) = _loadBatchHeaderCalldata(_parentBatchHeader);
+
+        uint256 _batchIndex = BatchHeaderV0Codec.batchIndex(batchPtr);
+        uint256 _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.totalL1MessagePopped(batchPtr);
+        require(committedBatches[_batchIndex] == _parentBatchHash, "incorrect parent batch hash");
+        require(committedBatches[_batchIndex + 1] == 0, "batch already committed");
+
+        // compute the data hash for chunks
+        (bytes32 _dataHash, uint256 _totalL1MessagesPoppedInBatch) = _commitChunks(
+            _chunks,
+            _totalL1MessagesPoppedOverall,
+            _skippedL1MessageBitmap
+        );
+        _totalL1MessagesPoppedOverall += _totalL1MessagesPoppedInBatch;
+
+        // reset `batchPtr` for current batch and reserve memory
+        assembly {
+            batchPtr := mload(0x40) // reset batchPtr
+            mstore(0x40, add(batchPtr, add(89, _skippedL1MessageBitmap.length)))
+            _batchIndex := add(_batchIndex, 1) // increase batch index
+        }
+
+        // store entries, the order matters
+        BatchHeaderV0Codec.storeVersion(batchPtr, 0);
+        BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex);
+        BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
+        BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
+        BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash);
+        BatchHeaderV0Codec.storeParentBatchHash(batchPtr, _parentBatchHash);
+        BatchHeaderV0Codec.storeSkippedBitmap(batchPtr, _skippedL1MessageBitmap);
+
+        // compute batch hash
+        batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, 89 + _skippedL1MessageBitmap.length);
+
+        bytes32 storedBatchHash = committedBatches[_batchIndex];
+        if (finalizationState.mode == 1) {
+            require(storedBatchHash == bytes32(0) || storedBatchHash == batchHash, "batch hash mismatch");
+        } else {
+            require(storedBatchHash == bytes32(0), "batch already committed");
+        }
+        if (storedBatchHash == bytes32(0)) {
+            committedBatches[_batchIndex] = batchHash;
+            emit CommitBatch(_batchIndex, batchHash);
+        }
+    }
+
+    function _commitChunks(
+        bytes[] memory _chunks,
+        uint256 _totalL1MessagesPoppedOverall,
+        bytes calldata _skippedL1MessageBitmap
+    ) internal view returns (bytes32 dataHash, uint256 _totalL1MessagesPoppedInBatch) {
+        uint256 _chunksLength = _chunks.length;
+        // load `dataPtr` and reserve the memory region for chunk data hashes
+        uint256 dataPtr;
+        assembly {
+            dataPtr := mload(0x40)
+            mstore(0x40, add(dataPtr, mul(_chunksLength, 32)))
+        }
+
+        // compute the data hash for each chunk
+        unchecked {
+            for (uint256 i = 0; i < _chunksLength; i++) {
+                uint256 _totalNumL1MessagesInChunk = _commitChunk(
+                    dataPtr,
+                    _chunks[i],
+                    _totalL1MessagesPoppedInBatch,
+                    _totalL1MessagesPoppedOverall,
+                    _skippedL1MessageBitmap
+                );
+                _totalL1MessagesPoppedInBatch += _totalNumL1MessagesInChunk;
+                _totalL1MessagesPoppedOverall += _totalNumL1MessagesInChunk;
+                dataPtr += 32;
+            }
+
+            // check the length of bitmap
+            require(
+                ((_totalL1MessagesPoppedInBatch + 255) / 256) * 32 == _skippedL1MessageBitmap.length,
+                "wrong bitmap length"
+            );
+        }
+
+        assembly {
+            let dataLen := mul(_chunksLength, 0x20)
+            dataHash := keccak256(sub(dataPtr, dataLen), dataLen)
+        }
+    }
+
+    function _finalizeBatch(
+        uint256 memPtr,
+        bytes32 _batchHash,
+        bytes32 _postStateRoot,
+        bytes32 _withdrawRoot,
+        bytes calldata _aggrProof
+    ) internal {
+        require(_postStateRoot != bytes32(0), "new state root is zero");
+
+        bytes32 _dataHash = BatchHeaderV0Codec.dataHash(memPtr);
+        uint256 _batchIndex = BatchHeaderV0Codec.batchIndex(memPtr);
+        require(committedBatches[_batchIndex] == _batchHash, "incorrect batch hash");
+
+        // fetch previous state root from storage.
+        bytes32 _prevStateRoot = finalizedStateRoots[_batchIndex - 1];
+
+        // avoid duplicated verification
+        require(finalizedStateRoots[_batchIndex] == bytes32(0), "batch already verified");
+
+        // compute public input hash
+        bytes32 _publicInputHash = keccak256(
+            abi.encodePacked(layer2ChainId, _prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash)
+        );
+
+        // verify batch
+        IRollupVerifier(verifier).verifyAggregateProof(_batchIndex, _aggrProof, _publicInputHash);
+
+        // check and update lastFinalizedBatchIndex
+        unchecked {
+            FinalizationState memory cachedFinalizationState = finalizationState;
+            require(uint256(cachedFinalizationState.lastIndex + 1) == _batchIndex, "incorrect batch index");
+            cachedFinalizationState.lastIndex = uint128(_batchIndex);
+            cachedFinalizationState.timestamp = uint64(block.timestamp);
+            finalizationState = cachedFinalizationState;
+        }
+
+        // record state root and withdraw root
+        finalizedStateRoots[_batchIndex] = _postStateRoot;
+        withdrawRoots[_batchIndex] = _withdrawRoot;
+
+        // Pop finalized and non-skipped message from L1MessageQueue.
+        _popL1Messages(memPtr);
+
+        emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
+    }
+
+    function _popL1Messages(uint256 memPtr) internal {
+        uint256 _l1MessagePopped = BatchHeaderV0Codec.l1MessagePopped(memPtr);
+        if (_l1MessagePopped > 0) {
+            IL1MessageQueue _queue = IL1MessageQueue(messageQueue);
+
+            unchecked {
+                uint256 _startIndex = BatchHeaderV0Codec.totalL1MessagePopped(memPtr) - _l1MessagePopped;
+
+                for (uint256 i = 0; i < _l1MessagePopped; i += 256) {
+                    uint256 _count = 256;
+                    if (_l1MessagePopped - i < _count) {
+                        _count = _l1MessagePopped - i;
+                    }
+                    uint256 _skippedBitmap = BatchHeaderV0Codec.skippedBitmap(memPtr, i / 256);
+
+                    _queue.popCrossDomainMessage(_startIndex, _count, _skippedBitmap);
+
+                    _startIndex += 256;
+                }
+            }
+        }
+    }
 }
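`_finalizeBatch` binds the aggregation proof to a public input hash: keccak256 over the packed encoding of `(layer2ChainId, _prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash)`. A sketch of recomputing that hash off-chain in Go; it assumes `layer2ChainId` is a `uint64` (as the generated docs above state), which `abi.encodePacked` packs as 8 big-endian bytes:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// publicInputHash mirrors the contract's
// keccak256(abi.encodePacked(layer2ChainId, _prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash)).
// abi.encodePacked packs a uint64 as 8 big-endian bytes and bytes32 values
// verbatim, so the preimage is 8 + 4*32 = 136 bytes.
func publicInputHash(layer2ChainID uint64, prevStateRoot, postStateRoot, withdrawRoot, dataHash [32]byte) [32]byte {
	buf := make([]byte, 0, 8+4*32)
	buf = binary.BigEndian.AppendUint64(buf, layer2ChainID)
	buf = append(buf, prevStateRoot[:]...)
	buf = append(buf, postStateRoot[:]...)
	buf = append(buf, withdrawRoot[:]...)
	buf = append(buf, dataHash[:]...)

	h := sha3.NewLegacyKeccak256()
	h.Write(buf)
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

func main() {
	// Zero roots and a placeholder chain id, for illustration only.
	var prev, post, withdraw, data [32]byte
	fmt.Printf("%x\n", publicInputHash(534352, prev, post, withdraw, data))
}
```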
contracts/src/libraries/verifier/IZkTrieVerifier.sol (new file, 23 lines)
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: MIT
+
+pragma solidity ^0.8.16;
+
+interface IZkTrieVerifier {
+    /// @notice Validates a proof from eth_getProof.
+    /// @param account The address of the contract.
+    /// @param storageKey The storage slot to verify.
+    /// @param proof The rlp encoding result of eth_getProof.
+    /// @return stateRoot The computed state root. Must be checked by the caller.
+    /// @return storageValue The value of `storageKey`.
+    ///
+    /// The encoding order of `proof` is
+    /// ```text
+    /// |        1 byte        |      ...      |        1 byte        |      ...      |
+    /// | account proof length | account proof | storage proof length | storage proof |
+    /// ```
+    function verifyZkTrieProof(
+        address account,
+        bytes32 storageKey,
+        bytes calldata proof
+    ) external view returns (bytes32 stateRoot, bytes32 storageValue);
+}
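The layout comment reads as: a one-byte account-proof length, the account proof, a one-byte storage-proof length, then the storage proof, all concatenated into a single byte string. A hedged Go sketch of packing `eth_getProof` output into that shape; treating each length byte as a node count and concatenating nodes verbatim is my assumption here, not something the interface specifies:

```go
package main

import "fmt"

// packZkTrieProof assembles the proof blob per the layout documented in
// IZkTrieVerifier: | 1 byte len | account proof | 1 byte len | storage proof |.
// ASSUMPTION: each length byte counts proof nodes, and nodes are concatenated
// verbatim; the exact node encoding is not pinned down by the interface.
func packZkTrieProof(accountProof, storageProof [][]byte) []byte {
	var out []byte
	out = append(out, byte(len(accountProof)))
	for _, node := range accountProof {
		out = append(out, node...)
	}
	out = append(out, byte(len(storageProof)))
	for _, node := range storageProof {
		out = append(out, node...)
	}
	return out
}

func main() {
	// Dummy single-node proofs, for illustration only.
	blob := packZkTrieProof([][]byte{{0x01, 0x02}}, [][]byte{{0x03}})
	fmt.Printf("%x\n", blob) // 0101020103
}
```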
@@ -8,6 +8,6 @@ contract MockScrollChain is ScrollChain {
     constructor(address _messageQueue, address _verifier) ScrollChain(0, _messageQueue, _verifier) {}
 
     function setLastFinalizedBatchIndex(uint256 _lastFinalizedBatchIndex) external {
-        lastFinalizedBatchIndex = _lastFinalizedBatchIndex;
+        finalizationState.lastIndex = uint128(_lastFinalizedBatchIndex);
     }
 }