Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 15:38:18 -05:00)

Compare commits

5 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 8d667f9353 | |
| | 57a058c516 | |
| | 55612a0dbb | |
| | a8b2706752 | |
| | d9ae117548 | |
@@ -1,5 +1,7 @@
 # Scroll Monorepo

 [](https://codecov.io/gh/scroll-tech/scroll)

 ## Prerequisites
+ Go 1.19
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
@@ -5,6 +5,7 @@ import (
 	"os"

 	"github.com/ethereum/go-ethereum/log"
+	"github.com/iris-contrib/middleware/cors"
 	"github.com/kataras/iris/v12"
 	"github.com/kataras/iris/v12/mvc"
 	"github.com/urfave/cli/v2"
@@ -60,6 +61,11 @@ func init() {
 }

 func action(ctx *cli.Context) error {
+	corsOptions := cors.New(cors.Options{
+		AllowedOrigins:   []string{"*"},
+		AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE"},
+		AllowCredentials: true,
+	})
 	// Load config file.
 	cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
 	cfg, err := config.NewConfig(cfgFile)
@@ -72,6 +78,7 @@ func action(ctx *cli.Context) error {
 	}
 	defer database.Close()
 	bridgeApp := iris.New()
+	bridgeApp.UseRouter(corsOptions)
 	bridgeApp.Get("/ping", pong).Describe("healthcheck")

 	mvc.Configure(bridgeApp.Party("/api/txs"), setupQueryByAddressHandler)
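For context, a minimal standalone sketch of how this CORS wiring fits together, assuming the iris and cors APIs imported above; the route and handler here are illustrative, not the service's own:

```go
package main

import (
	"github.com/iris-contrib/middleware/cors"
	"github.com/kataras/iris/v12"
)

func main() {
	app := iris.New()
	// UseRouter registers the handler ahead of the router,
	// so the CORS headers apply to every request.
	app.UseRouter(cors.New(cors.Options{
		AllowedOrigins:   []string{"*"},
		AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE"},
		AllowCredentials: true,
	}))
	app.Get("/ping", func(ctx iris.Context) { ctx.WriteString("pong") })
	app.Listen(":8080")
}
```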
@@ -22,7 +22,7 @@ func NewL2CrossMsgOrm(db *sqlx.DB) L2CrossMsgOrm {

 func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error) {
 	result := &CrossMsg{}
-	row := l.db.QueryRowx(`SELECT * FROM l2_cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
+	row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
 	if err := row.StructScan(result); err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
 			return nil, nil
@@ -4,6 +4,7 @@ go 1.19

 require (
 	github.com/ethereum/go-ethereum v1.12.0
+	github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458
 	github.com/jmoiron/sqlx v1.3.5
 	github.com/kataras/iris/v12 v12.2.0
 	github.com/lib/pq v1.10.7
@@ -242,6 +242,8 @@ github.com/iris-contrib/go.uuid v2.0.0+incompatible h1:XZubAYg61/JwnJNbZilGjf3b3
 github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
 github.com/iris-contrib/httpexpect/v2 v2.12.1 h1:3cTZSyBBen/kfjCtgNFoUKi1u0FVXNaAjyRJOo6AVS4=
 github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
+github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458 h1:V60rHQJc6DieKV1BqHIGclraPdO4kinuFAZIrPGHN7s=
+github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458/go.mod h1:7eVziAp1yUwFB/ZMg71n84VWQH+7wukvxcHuF2e7cbg=
 github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
 github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
 github.com/iris-contrib/schema v0.0.6 h1:CPSBLyx2e91H2yJzPuhGuifVRnZBBJ3pCOMbOvPZaTw=
common/testdata/blockTrace_05.json (vendored, new file, 3710 lines)
File diff suppressed because it is too large.

common/testdata/blockTrace_06.json (vendored, new file, 3678 lines)
File diff suppressed because it is too large.

common/testdata/blockTrace_07.json (vendored, new file, 3662 lines)
File diff suppressed because it is too large.
@@ -2,6 +2,7 @@ package types

 import (
 	"encoding/binary"
+	"fmt"
 	"math/big"

 	"github.com/scroll-tech/go-ethereum/common"
@@ -17,48 +18,99 @@ type BatchHeader struct {
 	totalL1MessagePopped uint64
 	dataHash             common.Hash
 	parentBatchHash      common.Hash
-	skippedL1MessageBitmap []*big.Int // LSB is the first L1 message
+	skippedL1MessageBitmap []byte
 }

 // NewBatchHeader creates a new BatchHeader
 func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64, parentBatchHash common.Hash, chunks []*Chunk) (*BatchHeader, error) {
-	// TODO calculate `l1MessagePopped`, `totalL1MessagePopped`, and `skippedL1MessageBitmap` based on `chunks`
 	// buffer for storing chunk hashes in order to compute the batch data hash
 	var dataBytes []byte
-	totalL1MessagePoppedBeforeChunk := totalL1MessagePoppedBefore
+
+	// skipped L1 message bitmap, an array of 256-bit bitmaps
+	var skippedBitmap []*big.Int
+
+	// the first queue index that belongs to this batch
+	baseIndex := totalL1MessagePoppedBefore
+
+	// the next queue index that we need to process
+	nextIndex := totalL1MessagePoppedBefore

 	for _, chunk := range chunks {
-		// Build dataHash
+		// build data hash
+		totalL1MessagePoppedBeforeChunk := nextIndex
 		chunkBytes, err := chunk.Hash(totalL1MessagePoppedBeforeChunk)
 		if err != nil {
 			return nil, err
 		}
-		totalL1MessagePoppedBeforeChunk += chunk.NumL1Messages(totalL1MessagePoppedBeforeChunk)
 		dataBytes = append(dataBytes, chunkBytes...)
+
+		// build skip bitmap
+		for _, block := range chunk.Blocks {
+			for _, tx := range block.Transactions {
+				if tx.Type != 0x7E {
+					continue
+				}
+				currentIndex := tx.Nonce
+
+				if currentIndex < nextIndex {
+					return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d", nextIndex, currentIndex)
+				}
+
+				// mark skipped messages
+				for skippedIndex := nextIndex; skippedIndex < currentIndex; skippedIndex++ {
+					quo := int((skippedIndex - baseIndex) / 256)
+					rem := int((skippedIndex - baseIndex) % 256)
+					for len(skippedBitmap) <= quo {
+						bitmap := big.NewInt(0)
+						skippedBitmap = append(skippedBitmap, bitmap)
+					}
+					skippedBitmap[quo].SetBit(skippedBitmap[quo], rem, 1)
+				}
+
+				// process included message
+				quo := int((currentIndex - baseIndex) / 256)
+				for len(skippedBitmap) <= quo {
+					bitmap := big.NewInt(0)
+					skippedBitmap = append(skippedBitmap, bitmap)
+				}
+
+				nextIndex = currentIndex + 1
+			}
+		}
 	}

 	// compute data hash
 	dataHash := crypto.Keccak256Hash(dataBytes)

+	// compute skipped bitmap
+	bitmapBytes := make([]byte, len(skippedBitmap)*32)
+	for ii, num := range skippedBitmap {
+		bytes := num.Bytes()
+		padding := 32 - len(bytes)
+		copy(bitmapBytes[32*ii+padding:], bytes)
+	}
+
 	return &BatchHeader{
 		version:              version,
 		batchIndex:           batchIndex,
-		l1MessagePopped:      0,                          // TODO
-		totalL1MessagePopped: totalL1MessagePoppedBefore, // TODO
+		l1MessagePopped:      nextIndex - totalL1MessagePoppedBefore,
+		totalL1MessagePopped: nextIndex,
 		dataHash:             dataHash,
 		parentBatchHash:      parentBatchHash,
-		skippedL1MessageBitmap: nil, // TODO
+		skippedL1MessageBitmap: bitmapBytes,
 	}, nil
 }
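The quo/rem arithmetic above packs skipped queue indices, taken relative to the batch's first index, into 256-bit words with the least-significant bit first. A minimal self-contained sketch of that indexing (helper name is mine, not the package's):

```go
package main

import (
	"fmt"
	"math/big"
)

// markSkipped sets the bit for skippedIndex in a bitmap of 256-bit words,
// growing the bitmap on demand, exactly as the loop above does.
func markSkipped(bitmap []*big.Int, baseIndex, skippedIndex uint64) []*big.Int {
	quo := int((skippedIndex - baseIndex) / 256) // which 256-bit word
	rem := int((skippedIndex - baseIndex) % 256) // which bit in that word
	for len(bitmap) <= quo {
		bitmap = append(bitmap, big.NewInt(0))
	}
	bitmap[quo].SetBit(bitmap[quo], rem, 1)
	return bitmap
}

func main() {
	// Skipping queue indices 0..9 of a batch whose base index is 0
	// sets the ten lowest bits, matching the 0x3ff value asserted
	// in the tests further down.
	var bitmap []*big.Int
	for i := uint64(0); i < 10; i++ {
		bitmap = markSkipped(bitmap, 0, i)
	}
	fmt.Printf("%x\n", bitmap[0]) // 3ff
}
```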
 // Encode encodes the BatchHeader into RollupV2 BatchHeaderV0Codec Encoding.
 func (b *BatchHeader) Encode() []byte {
-	batchBytes := make([]byte, 89)
+	batchBytes := make([]byte, 89+len(b.skippedL1MessageBitmap))
 	batchBytes[0] = b.version
 	binary.BigEndian.PutUint64(batchBytes[1:], b.batchIndex)
 	binary.BigEndian.PutUint64(batchBytes[9:], b.l1MessagePopped)
 	binary.BigEndian.PutUint64(batchBytes[17:], b.totalL1MessagePopped)
 	copy(batchBytes[25:], b.dataHash[:])
 	copy(batchBytes[57:], b.parentBatchHash[:])
-	// TODO: encode skippedL1MessageBitmap
+	copy(batchBytes[89:], b.skippedL1MessageBitmap[:])
 	return batchBytes
 }
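The resulting layout is a fixed 89-byte prefix followed by the variable-length bitmap. A decoding sketch under that layout; `decodeHeader` and the `decoded` struct are hypothetical helpers for illustration, not part of the package:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// decoded mirrors the fields written by Encode (hypothetical, for illustration).
type decoded struct {
	version                uint8
	batchIndex             uint64
	l1MessagePopped        uint64
	totalL1MessagePopped   uint64
	dataHash               [32]byte
	parentBatchHash        [32]byte
	skippedL1MessageBitmap []byte
}

// decodeHeader reverses the byte layout used in Encode:
//   [0]     version
//   [1:9]   batchIndex           (big-endian)
//   [9:17]  l1MessagePopped      (big-endian)
//   [17:25] totalL1MessagePopped (big-endian)
//   [25:57] dataHash
//   [57:89] parentBatchHash
//   [89:]   skippedL1MessageBitmap (32 bytes per 256 L1 messages)
func decodeHeader(b []byte) (*decoded, error) {
	if len(b) < 89 {
		return nil, errors.New("batch header too short")
	}
	d := &decoded{
		version:                b[0],
		batchIndex:             binary.BigEndian.Uint64(b[1:9]),
		l1MessagePopped:        binary.BigEndian.Uint64(b[9:17]),
		totalL1MessagePopped:   binary.BigEndian.Uint64(b[17:25]),
		skippedL1MessageBitmap: b[89:],
	}
	copy(d.dataHash[:], b[25:57])
	copy(d.parentBatchHash[:], b[57:89])
	return d, nil
}

func main() {
	d, err := decodeHeader(make([]byte, 89+32)) // one empty bitmap word
	if err != nil {
		panic(err)
	}
	fmt.Println(d.version, d.batchIndex, len(d.skippedL1MessageBitmap)) // 0 0 32
}
```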
@@ -10,6 +10,7 @@ import (
 )

 func TestNewBatchHeader(t *testing.T) {
+	// Without L1 Msg
 	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
 	assert.NoError(t, err)

@@ -32,9 +33,100 @@ func TestNewBatchHeader(t *testing.T) {
 	batchHeader, err := NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
 	assert.NoError(t, err)
 	assert.NotNil(t, batchHeader)
+	assert.Equal(t, 0, len(batchHeader.skippedL1MessageBitmap))
+
+	// 1 L1 Msg in 1 bitmap
+	templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
+	assert.NoError(t, err)
+
+	wrappedBlock2 := &WrappedBlock{}
+	assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock2,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
+	expectedBitmap := "00000000000000000000000000000000000000000000000000000000000003ff" // skip first 10
+	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
+
+	// many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs
+	templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_05.json")
+	assert.NoError(t, err)
+
+	wrappedBlock3 := &WrappedBlock{}
+	assert.NoError(t, json.Unmarshal(templateBlockTrace3, wrappedBlock3))
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock3,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 37, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	assert.Equal(t, uint64(5), batchHeader.l1MessagePopped)
+	assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
+	expectedBitmap = "0000000000000000000000000000000000000000000000000000000000000000" // all messages are included, so none are skipped
+	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
+
+	// many consecutive L1 Msgs in 1 bitmap, with leading skipped msgs
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock3,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	assert.Equal(t, uint64(42), batchHeader.l1MessagePopped)
+	assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
+	expectedBitmap = "0000000000000000000000000000000000000000000000000000001fffffffff" // skipped the first 37 messages
+	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
+
+	// many sparse L1 Msgs in 1 bitmap
+	templateBlockTrace4, err := os.ReadFile("../testdata/blockTrace_06.json")
+	assert.NoError(t, err)
+
+	wrappedBlock4 := &WrappedBlock{}
+	assert.NoError(t, json.Unmarshal(templateBlockTrace4, wrappedBlock4))
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock4,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	assert.Equal(t, uint64(10), batchHeader.l1MessagePopped)
+	assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
+	expectedBitmap = "00000000000000000000000000000000000000000000000000000000000001dd" // 0111011101
+	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
+
+	// many L1 Msgs in each of 2 bitmaps
+	templateBlockTrace5, err := os.ReadFile("../testdata/blockTrace_07.json")
+	assert.NoError(t, err)
+
+	wrappedBlock5 := &WrappedBlock{}
+	assert.NoError(t, json.Unmarshal(templateBlockTrace5, wrappedBlock5))
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock5,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	assert.Equal(t, uint64(257), batchHeader.l1MessagePopped)
+	assert.Equal(t, 64, len(batchHeader.skippedL1MessageBitmap))
+	expectedBitmap = "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd0000000000000000000000000000000000000000000000000000000000000000"
+	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
 }

 func TestBatchHeaderEncode(t *testing.T) {
 	// Without L1 Msg
 	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
 	assert.NoError(t, err)
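A quick sanity check of the bitmap lengths asserted above: the bitmap is padded to whole 256-bit words, 32 bytes each. Standalone sketch; `wordCount` is my name, not the package's:

```go
package main

import "fmt"

// wordCount returns how many 256-bit words the skipped-message bitmap
// needs for n popped L1 messages; the encoding pads to whole words.
func wordCount(n uint64) uint64 { return (n + 255) / 256 }

func main() {
	fmt.Println(wordCount(42) * 32)  // 32: one word, as in the "leading skipped msgs" case
	fmt.Println(wordCount(257) * 32) // 64: message 256 spills into a second word
}
```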
@@ -60,9 +152,28 @@ func TestBatchHeaderEncode(t *testing.T) {
 	bytes := batchHeader.Encode()
 	assert.Equal(t, 89, len(bytes))
 	assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes))
+
+	// With L1 Msg
+	templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
+	assert.NoError(t, err)
+
+	wrappedBlock2 := &WrappedBlock{}
+	assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock2,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	bytes = batchHeader.Encode()
+	assert.Equal(t, 121, len(bytes))
+	assert.Equal(t, "010000000000000001000000000000000b000000000000000b457a9e90e8e51ba2de2f66c6b589540b88cf594dac7fa7d04b99cdcfecf24e384136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff", common.Bytes2Hex(bytes))
 }

 func TestBatchHeaderHash(t *testing.T) {
 	// Without L1 Msg
 	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
 	assert.NoError(t, err)

@@ -103,4 +214,21 @@ func TestBatchHeaderHash(t *testing.T) {
 	assert.NotNil(t, batchHeader2)
 	hash2 := batchHeader2.Hash()
 	assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes()))
+
+	// With L1 Msg
+	templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_04.json")
+	assert.NoError(t, err)
+
+	wrappedBlock3 := &WrappedBlock{}
+	assert.NoError(t, json.Unmarshal(templateBlockTrace3, wrappedBlock3))
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock3,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	hash = batchHeader.Hash()
+	assert.Equal(t, "0ec9547c6645d5f0c1254e121f49e93f54525cfda5bfb2236440fb3470f48902", common.Bytes2Hex(hash.Bytes()))
 }
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )

-var tag = "v3.3.5"
+var tag = "v3.3.7"

 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
contracts/src/L1/rollup/MultipleVersionRollupVerifier.sol (new file, 118 lines)

@@ -0,0 +1,118 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";

import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";

contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /**********
     * Events *
     **********/

    /// @notice Emitted when the address of the verifier is updated.
    /// @param startBatchIndex The start batch index from which the verifier will be used.
    /// @param verifier The address of the new verifier.
    event UpdateVerifier(uint256 startBatchIndex, address verifier);

    /***********
     * Structs *
     ***********/

    struct Verifier {
        // The start batch index for the verifier.
        uint64 startBatchIndex;
        // The address of the zkevm verifier.
        address verifier;
    }

    /*************
     * Variables *
     *************/

    /// @notice The list of legacy zkevm verifiers, sorted by startBatchIndex in increasing order.
    Verifier[] public legacyVerifiers;

    /// @notice The latest used zkevm verifier.
    Verifier public latestVerifier;

    /***************
     * Constructor *
     ***************/

    constructor(address _verifier) {
        require(_verifier != address(0), "zero verifier address");

        latestVerifier.verifier = _verifier;
    }

    /*************************
     * Public View Functions *
     *************************/

    /// @notice Return the number of legacy verifiers.
    function legacyVerifiersLength() external view returns (uint256) {
        return legacyVerifiers.length;
    }

    /// @notice Compute the verifier that should be used for a specific batch.
    /// @param _batchIndex The batch index to query.
    function getVerifier(uint256 _batchIndex) public view returns (address) {
        // Normally, we will use the latest verifier.
        Verifier memory _verifier = latestVerifier;

        if (_verifier.startBatchIndex > _batchIndex) {
            uint256 _length = legacyVerifiers.length;
            // In most cases, only the last few verifiers will be used by `ScrollChain`.
            // So we use a linear search instead of a binary search.
            unchecked {
                for (uint256 i = _length; i > 0; --i) {
                    _verifier = legacyVerifiers[i - 1];
                    if (_verifier.startBatchIndex <= _batchIndex) break;
                }
            }
        }

        return _verifier.verifier;
    }

    /*****************************
     * Public Mutating Functions *
     *****************************/

    /// @inheritdoc IRollupVerifier
    function verifyAggregateProof(
        uint256 _batchIndex,
        bytes calldata _aggrProof,
        bytes32 _publicInputHash
    ) external view override {
        address _verifier = getVerifier(_batchIndex);

        IZkEvmVerifier(_verifier).verify(_aggrProof, _publicInputHash);
    }

    /************************
     * Restricted Functions *
     ************************/

    /// @notice Update the address of the zkevm verifier.
    /// @param _startBatchIndex The start batch index from which the verifier will be used.
    /// @param _verifier The address of the new verifier.
    function updateVerifier(uint64 _startBatchIndex, address _verifier) external onlyOwner {
        Verifier memory _latestVerifier = latestVerifier;
        require(_startBatchIndex >= _latestVerifier.startBatchIndex, "start batch index too small");
        require(_verifier != address(0), "zero verifier address");

        if (_latestVerifier.startBatchIndex < _startBatchIndex) {
            legacyVerifiers.push(_latestVerifier);
            _latestVerifier.startBatchIndex = _startBatchIndex;
        }
        _latestVerifier.verifier = _verifier;

        latestVerifier = _latestVerifier;

        emit UpdateVerifier(_startBatchIndex, _verifier);
    }
}
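To make the lookup rule concrete, here is a small Go model of getVerifier's semantics (Go rather than Solidity, purely illustrative; the names are mine): the latest verifier handles a batch unless its startBatchIndex lies beyond that batch, in which case the legacy list is scanned backwards for the newest verifier whose startBatchIndex does not exceed the batch index.

```go
package main

import "fmt"

type verifier struct {
	startBatchIndex uint64
	addr            string
}

// getVerifier mirrors the contract's backwards linear scan: only the last
// few verifiers are expected to be queried, so scanning from the end beats
// a binary search in practice.
func getVerifier(latest verifier, legacy []verifier, batchIndex uint64) string {
	v := latest
	if v.startBatchIndex > batchIndex {
		for i := len(legacy); i > 0; i-- {
			v = legacy[i-1]
			if v.startBatchIndex <= batchIndex {
				break
			}
		}
	}
	return v.addr
}

func main() {
	// Matches the expectations in the Foundry test below:
	// v0 from batch 0, v1 from batch 100, v2 from batch 300.
	legacy := []verifier{{0, "v0"}, {100, "v1"}}
	latest := verifier{300, "v2"}
	fmt.Println(getVerifier(latest, legacy, 99))  // v0
	fmt.Println(getVerifier(latest, legacy, 100)) // v1
	fmt.Println(getVerifier(latest, legacy, 300)) // v2
}
```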
@@ -308,7 +308,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
     );

     // verify batch
-    IRollupVerifier(verifier).verifyAggregateProof(_aggrProof, _publicInputHash);
+    IRollupVerifier(verifier).verifyAggregateProof(_batchIndex, _aggrProof, _publicInputHash);

     // check and update lastFinalizedBatchIndex
     unchecked {
@@ -4,7 +4,12 @@ pragma solidity ^0.8.0;

 interface IRollupVerifier {
     /// @notice Verify aggregate zk proof.
+    /// @param batchIndex The batch index to verify.
     /// @param aggrProof The aggregated proof.
     /// @param publicInputHash The public input hash.
-    function verifyAggregateProof(bytes calldata aggrProof, bytes32 publicInputHash) external view;
+    function verifyAggregateProof(
+        uint256 batchIndex,
+        bytes calldata aggrProof,
+        bytes32 publicInputHash
+    ) external view;
 }
contracts/src/libraries/verifier/IZkEvmVerifier.sol (new file, 10 lines)

@@ -0,0 +1,10 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

interface IZkEvmVerifier {
    /// @notice Verify aggregate zk proof.
    /// @param aggrProof The aggregated proof.
    /// @param publicInputHash The public input hash.
    function verify(bytes calldata aggrProof, bytes32 publicInputHash) external view;
}
contracts/src/test/MultipleVersionRollupVerifier.t.sol (new file, 105 lines)

@@ -0,0 +1,105 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";

import {L1MessageQueue} from "../L1/rollup/L1MessageQueue.sol";
import {MultipleVersionRollupVerifier} from "../L1/rollup/MultipleVersionRollupVerifier.sol";

import {MockZkEvmVerifier} from "./mocks/MockZkEvmVerifier.sol";

contract MultipleVersionRollupVerifierTest is DSTestPlus {
    // from MultipleVersionRollupVerifier
    event UpdateVerifier(uint256 startBatchIndex, address verifier);

    MultipleVersionRollupVerifier private verifier;
    MockZkEvmVerifier private v0;
    MockZkEvmVerifier private v1;
    MockZkEvmVerifier private v2;

    function setUp() external {
        v0 = new MockZkEvmVerifier();
        v1 = new MockZkEvmVerifier();
        v2 = new MockZkEvmVerifier();

        verifier = new MultipleVersionRollupVerifier(address(v0));
    }

    function testUpdateVerifier(address _newVerifier) external {
        hevm.assume(_newVerifier != address(0));

        // set by non-owner, should revert
        hevm.startPrank(address(1));
        hevm.expectRevert("Ownable: caller is not the owner");
        verifier.updateVerifier(0, address(0));
        hevm.stopPrank();

        // zero verifier address, revert
        hevm.expectRevert("zero verifier address");
        verifier.updateVerifier(0, address(0));

        // change to random operator
        assertEq(verifier.legacyVerifiersLength(), 0);
        verifier.updateVerifier(uint64(100), _newVerifier);
        assertEq(verifier.legacyVerifiersLength(), 1);
        (uint64 _startBatchIndex, address _verifier) = verifier.latestVerifier();
        assertEq(_startBatchIndex, uint64(100));
        assertEq(_verifier, _newVerifier);
        (_startBatchIndex, _verifier) = verifier.legacyVerifiers(0);
        assertEq(_startBatchIndex, uint64(0));
        assertEq(_verifier, address(v0));

        // change to same batch index
        verifier.updateVerifier(uint64(100), address(v1));
        (_startBatchIndex, _verifier) = verifier.latestVerifier();
        assertEq(_startBatchIndex, uint64(100));
        assertEq(_verifier, address(v1));
        (_startBatchIndex, _verifier) = verifier.legacyVerifiers(0);
        assertEq(_startBatchIndex, uint64(0));
        assertEq(_verifier, address(v0));

        // start batch index too small, revert
        hevm.expectRevert("start batch index too small");
        verifier.updateVerifier(99, _newVerifier);
    }

    function testGetVerifier() external {
        verifier.updateVerifier(100, address(v1));
        verifier.updateVerifier(300, address(v2));

        assertEq(verifier.getVerifier(0), address(v0));
        assertEq(verifier.getVerifier(1), address(v0));
        assertEq(verifier.getVerifier(99), address(v0));
        assertEq(verifier.getVerifier(100), address(v1));
        assertEq(verifier.getVerifier(101), address(v1));
        assertEq(verifier.getVerifier(299), address(v1));
        assertEq(verifier.getVerifier(300), address(v2));
        assertEq(verifier.getVerifier(301), address(v2));
        assertEq(verifier.getVerifier(10000), address(v2));
    }

    function testVerifyAggregateProof() external {
        verifier.updateVerifier(100, address(v1));
        verifier.updateVerifier(300, address(v2));

        hevm.expectRevert(abi.encode(address(v0)));
        verifier.verifyAggregateProof(0, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v0)));
        verifier.verifyAggregateProof(1, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v0)));
        verifier.verifyAggregateProof(99, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v1)));
        verifier.verifyAggregateProof(100, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v1)));
        verifier.verifyAggregateProof(101, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v1)));
        verifier.verifyAggregateProof(299, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v2)));
        verifier.verifyAggregateProof(300, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v2)));
        verifier.verifyAggregateProof(301, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v2)));
        verifier.verifyAggregateProof(10000, new bytes(0), bytes32(0));
    }
}
@@ -6,5 +6,9 @@ import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";

 contract MockRollupVerifier is IRollupVerifier {
     /// @inheritdoc IRollupVerifier
-    function verifyAggregateProof(bytes calldata, bytes32) external view {}
+    function verifyAggregateProof(
+        uint256,
+        bytes calldata,
+        bytes32
+    ) external view {}
 }
contracts/src/test/mocks/MockZkEvmVerifier.sol (new file, 14 lines)

@@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";

contract MockZkEvmVerifier is IZkEvmVerifier {
    event Called(address);

    /// @inheritdoc IZkEvmVerifier
    function verify(bytes calldata, bytes32) external view {
        revert(string(abi.encode(address(this))));
    }
}