Compare commits

..

18 Commits

Author SHA1 Message Date
Lawliet-Chan
50305f3039 Merge branch 'develop' into manager_api 2023-06-16 21:31:39 +08:00
ChuhanJin
8d667f9353 fix(bridge-history-api): fix wrong table name (#584)
Co-authored-by: vincent <419436363@qq.com>
2023-06-16 21:23:38 +08:00
Lawliet-Chan
dfc9a44743 Merge branch 'develop' into manager_api 2023-06-16 20:55:36 +08:00
xinran chen
08c49d9b2c uber atomic pkg 2023-06-16 20:54:52 +08:00
xinran chen
ecb3f5a043 pause until 2023-06-16 20:49:18 +08:00
Xi Lin
57a058c516 feat(contracts): add multiple version for rollup verifier (#549)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-06-16 19:50:13 +08:00
ChuhanJin
55612a0dbb fix(bridge-history-api): Avoid cors issue in bridgehistoryapi-server (#583)
Co-authored-by: vincent <419436363@qq.com>
2023-06-16 19:42:12 +08:00
xinran chen
7d9e111e9c default pauseSendTask 2023-06-16 17:31:20 +08:00
xinran chen
25e43462c6 bump version 2023-06-16 17:26:35 +08:00
Lawliet-Chan
74e0960dc5 Merge branch 'develop' into manager_api 2023-06-16 17:13:33 +08:00
Richord
a8b2706752 feat(batch proposer): implemented l1msgtx fields in batch header (#567)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-06-16 10:54:24 +02:00
Lawliet-Chan
76cfb97f99 Update coordinator/manager.go
Co-authored-by: georgehao <haohongfan@gmail.com>
2023-06-16 16:35:37 +08:00
georgehao
d9ae117548 docs: update README.md (#578)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-06-16 15:21:16 +08:00
xinran chen
6880dd83da go mod tidy 2023-06-16 14:02:45 +08:00
xinran chen
9d6e53a120 fix 2023-06-16 13:59:51 +08:00
xinran chen
0940788143 use uber pkg instead 2023-06-16 13:57:54 +08:00
xinran chen
ad46a85a2d fix lint 2023-06-16 13:53:46 +08:00
xinran chen
9d29a95675 manager api 2023-06-16 13:46:18 +08:00
23 changed files with 11581 additions and 20 deletions

View File

@@ -1,5 +1,7 @@
# Scroll Monorepo
[![codecov](https://codecov.io/gh/scroll-tech/scroll/branch/develop/graph/badge.svg?token=VJVHNQWGGW)](https://codecov.io/gh/scroll-tech/scroll)
## Prerequisites
+ Go 1.19
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))

View File

@@ -5,6 +5,7 @@ import (
"os"
"github.com/ethereum/go-ethereum/log"
"github.com/iris-contrib/middleware/cors"
"github.com/kataras/iris/v12"
"github.com/kataras/iris/v12/mvc"
"github.com/urfave/cli/v2"
@@ -60,6 +61,11 @@ func init() {
}
func action(ctx *cli.Context) error {
corsOptions := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "POST", "PUT", "DELETE"},
AllowCredentials: true,
})
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
@@ -72,6 +78,7 @@ func action(ctx *cli.Context) error {
}
defer database.Close()
bridgeApp := iris.New()
bridgeApp.UseRouter(corsOptions)
bridgeApp.Get("/ping", pong).Describe("healthcheck")
mvc.Configure(bridgeApp.Party("/api/txs"), setupQueryByAddressHandler)

View File

@@ -22,7 +22,7 @@ func NewL2CrossMsgOrm(db *sqlx.DB) L2CrossMsgOrm {
func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error) {
result := &CrossMsg{}
row := l.db.QueryRowx(`SELECT * FROM l2_cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil

View File

@@ -4,6 +4,7 @@ go 1.19
require (
github.com/ethereum/go-ethereum v1.12.0
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458
github.com/jmoiron/sqlx v1.3.5
github.com/kataras/iris/v12 v12.2.0
github.com/lib/pq v1.10.7

View File

@@ -242,6 +242,8 @@ github.com/iris-contrib/go.uuid v2.0.0+incompatible h1:XZubAYg61/JwnJNbZilGjf3b3
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
github.com/iris-contrib/httpexpect/v2 v2.12.1 h1:3cTZSyBBen/kfjCtgNFoUKi1u0FVXNaAjyRJOo6AVS4=
github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458 h1:V60rHQJc6DieKV1BqHIGclraPdO4kinuFAZIrPGHN7s=
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458/go.mod h1:7eVziAp1yUwFB/ZMg71n84VWQH+7wukvxcHuF2e7cbg=
github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
github.com/iris-contrib/schema v0.0.6 h1:CPSBLyx2e91H2yJzPuhGuifVRnZBBJ3pCOMbOvPZaTw=

3710
common/testdata/blockTrace_05.json vendored Normal file

File diff suppressed because it is too large Load Diff

3678
common/testdata/blockTrace_06.json vendored Normal file

File diff suppressed because it is too large Load Diff

3662
common/testdata/blockTrace_07.json vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -2,6 +2,7 @@ package types
import (
"encoding/binary"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
@@ -17,48 +18,99 @@ type BatchHeader struct {
totalL1MessagePopped uint64
dataHash common.Hash
parentBatchHash common.Hash
skippedL1MessageBitmap []*big.Int // LSB is the first L1 message
skippedL1MessageBitmap []byte
}
// NewBatchHeader creates a new BatchHeader
func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64, parentBatchHash common.Hash, chunks []*Chunk) (*BatchHeader, error) {
// TODO calculate `l1MessagePopped`, `totalL1MessagePopped`, and `skippedL1MessageBitmap` based on `chunks`
// buffer for storing chunk hashes in order to compute the batch data hash
var dataBytes []byte
totalL1MessagePoppedBeforeChunk := totalL1MessagePoppedBefore
// skipped L1 message bitmap, an array of 256-bit bitmaps
var skippedBitmap []*big.Int
// the first queue index that belongs to this batch
baseIndex := totalL1MessagePoppedBefore
// the next queue index that we need to process
nextIndex := totalL1MessagePoppedBefore
for _, chunk := range chunks {
// Build dataHash
// build data hash
totalL1MessagePoppedBeforeChunk := nextIndex
chunkBytes, err := chunk.Hash(totalL1MessagePoppedBeforeChunk)
if err != nil {
return nil, err
}
totalL1MessagePoppedBeforeChunk += chunk.NumL1Messages(totalL1MessagePoppedBeforeChunk)
dataBytes = append(dataBytes, chunkBytes...)
// build skip bitmap
for _, block := range chunk.Blocks {
for _, tx := range block.Transactions {
if tx.Type != 0x7E {
continue
}
currentIndex := tx.Nonce
if currentIndex < nextIndex {
return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d", nextIndex, currentIndex)
}
// mark skipped messages
for skippedIndex := nextIndex; skippedIndex < currentIndex; skippedIndex++ {
quo := int((skippedIndex - baseIndex) / 256)
rem := int((skippedIndex - baseIndex) % 256)
for len(skippedBitmap) <= quo {
bitmap := big.NewInt(0)
skippedBitmap = append(skippedBitmap, bitmap)
}
skippedBitmap[quo].SetBit(skippedBitmap[quo], rem, 1)
}
// process included message
quo := int((currentIndex - baseIndex) / 256)
for len(skippedBitmap) <= quo {
bitmap := big.NewInt(0)
skippedBitmap = append(skippedBitmap, bitmap)
}
nextIndex = currentIndex + 1
}
}
}
// compute data hash
dataHash := crypto.Keccak256Hash(dataBytes)
// compute skipped bitmap
bitmapBytes := make([]byte, len(skippedBitmap)*32)
for ii, num := range skippedBitmap {
bytes := num.Bytes()
padding := 32 - len(bytes)
copy(bitmapBytes[32*ii+padding:], bytes)
}
return &BatchHeader{
version: version,
batchIndex: batchIndex,
l1MessagePopped: 0, // TODO
totalL1MessagePopped: totalL1MessagePoppedBefore, // TODO
l1MessagePopped: nextIndex - totalL1MessagePoppedBefore,
totalL1MessagePopped: nextIndex,
dataHash: dataHash,
parentBatchHash: parentBatchHash,
skippedL1MessageBitmap: nil, // TODO
skippedL1MessageBitmap: bitmapBytes,
}, nil
}
// Encode encodes the BatchHeader into RollupV2 BatchHeaderV0Codec Encoding.
func (b *BatchHeader) Encode() []byte {
batchBytes := make([]byte, 89)
batchBytes := make([]byte, 89+len(b.skippedL1MessageBitmap))
batchBytes[0] = b.version
binary.BigEndian.PutUint64(batchBytes[1:], b.batchIndex)
binary.BigEndian.PutUint64(batchBytes[9:], b.l1MessagePopped)
binary.BigEndian.PutUint64(batchBytes[17:], b.totalL1MessagePopped)
copy(batchBytes[25:], b.dataHash[:])
copy(batchBytes[57:], b.parentBatchHash[:])
// TODO: encode skippedL1MessageBitmap
copy(batchBytes[89:], b.skippedL1MessageBitmap[:])
return batchBytes
}

View File

@@ -10,6 +10,7 @@ import (
)
func TestNewBatchHeader(t *testing.T) {
// Without L1 Msg
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
@@ -32,9 +33,100 @@ func TestNewBatchHeader(t *testing.T) {
batchHeader, err := NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
assert.Equal(t, 0, len(batchHeader.skippedL1MessageBitmap))
// 1 L1 Msg in 1 bitmap
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
},
}
batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
expectedBitmap := "00000000000000000000000000000000000000000000000000000000000003ff" // skip first 10
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs
templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_05.json")
assert.NoError(t, err)
wrappedBlock3 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace3, wrappedBlock3))
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock3,
},
}
batchHeader, err = NewBatchHeader(1, 1, 37, parentBatchHeader.Hash(), []*Chunk{chunk})
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
assert.Equal(t, uint64(5), batchHeader.l1MessagePopped)
assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
expectedBitmap = "0000000000000000000000000000000000000000000000000000000000000000" // all bits are included, so none are skipped
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many consecutive L1 Msgs in 1 bitmap, with leading skipped msgs
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock3,
},
}
batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
assert.Equal(t, uint64(42), batchHeader.l1MessagePopped)
assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
expectedBitmap = "0000000000000000000000000000000000000000000000000000001fffffffff" // skipped the first 37 messages
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many sparse L1 Msgs in 1 bitmap
templateBlockTrace4, err := os.ReadFile("../testdata/blockTrace_06.json")
assert.NoError(t, err)
wrappedBlock4 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace4, wrappedBlock4))
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock4,
},
}
batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
assert.Equal(t, uint64(10), batchHeader.l1MessagePopped)
assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
expectedBitmap = "00000000000000000000000000000000000000000000000000000000000001dd" // 0111011101
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
// many L1 Msgs in each of 2 bitmaps
templateBlockTrace5, err := os.ReadFile("../testdata/blockTrace_07.json")
assert.NoError(t, err)
wrappedBlock5 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace5, wrappedBlock5))
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock5,
},
}
batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
assert.Equal(t, uint64(257), batchHeader.l1MessagePopped)
assert.Equal(t, 64, len(batchHeader.skippedL1MessageBitmap))
expectedBitmap = "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd0000000000000000000000000000000000000000000000000000000000000000"
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
}
func TestBatchHeaderEncode(t *testing.T) {
// Without L1 Msg
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
@@ -60,9 +152,28 @@ func TestBatchHeaderEncode(t *testing.T) {
bytes := batchHeader.Encode()
assert.Equal(t, 89, len(bytes))
assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes))
// With L1 Msg
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
},
}
batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
bytes = batchHeader.Encode()
assert.Equal(t, 121, len(bytes))
assert.Equal(t, "010000000000000001000000000000000b000000000000000b457a9e90e8e51ba2de2f66c6b589540b88cf594dac7fa7d04b99cdcfecf24e384136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff", common.Bytes2Hex(bytes))
}
func TestBatchHeaderHash(t *testing.T) {
// Without L1 Msg
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
@@ -103,4 +214,21 @@ func TestBatchHeaderHash(t *testing.T) {
assert.NotNil(t, batchHeader2)
hash2 := batchHeader2.Hash()
assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes()))
// With L1 Msg
templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err)
wrappedBlock3 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace3, wrappedBlock3))
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock3,
},
}
batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
hash = batchHeader.Hash()
assert.Equal(t, "0ec9547c6645d5f0c1254e121f49e93f54525cfda5bfb2236440fb3470f48902", common.Bytes2Hex(hash.Bytes()))
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v3.3.5"
var tag = "v3.3.7"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -0,0 +1,118 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";
/// @title MultipleVersionRollupVerifier
/// @notice Routes aggregate-proof verification to the zkevm verifier that is
/// active for a given batch index, keeping superseded verifiers in a history list.
contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/**********
* Events *
**********/
/// @notice Emitted when the address of verifier is updated.
/// @param startBatchIndex The start batch index when the verifier will be used.
/// @param verifier The address of new verifier.
event UpdateVerifier(uint256 startBatchIndex, address verifier);
/***********
* Structs *
***********/
/// @dev Pairs a verifier contract with the first batch index it applies to.
struct Verifier {
// The start batch index for the verifier.
uint64 startBatchIndex;
// The address of zkevm verifier.
address verifier;
}
/*************
* Variables *
*************/
/// @notice The list of legacy zkevm verifiers, sorted by startBatchIndex in increasing order.
Verifier[] public legacyVerifiers;
/// @notice The latest used zkevm verifier.
Verifier public latestVerifier;
/***************
* Constructor *
***************/
/// @param _verifier The initial zkevm verifier; `latestVerifier.startBatchIndex`
/// defaults to 0, so this verifier covers all batches until updated.
constructor(address _verifier) {
require(_verifier != address(0), "zero verifier address");
latestVerifier.verifier = _verifier;
}
/*************************
* Public View Functions *
*************************/
/// @notice Return the number of legacy verifiers.
function legacyVerifiersLength() external view returns (uint256) {
return legacyVerifiers.length;
}
/// @notice Compute the verifier that should be used for a specific batch.
/// @param _batchIndex The batch index to query.
/// @return The address of the verifier active at `_batchIndex`.
function getVerifier(uint256 _batchIndex) public view returns (address) {
// Normally, we will use the latest verifier.
Verifier memory _verifier = latestVerifier;
if (_verifier.startBatchIndex > _batchIndex) {
uint256 _length = legacyVerifiers.length;
// In most cases, only the last few verifiers will be used by `ScrollChain`.
// So, we use linear search instead of binary search.
unchecked {
for (uint256 i = _length; i > 0; --i) {
_verifier = legacyVerifiers[i - 1];
if (_verifier.startBatchIndex <= _batchIndex) break;
}
}
}
return _verifier.verifier;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IRollupVerifier
/// @dev Delegates to the verifier selected by `getVerifier(_batchIndex)`.
function verifyAggregateProof(
uint256 _batchIndex,
bytes calldata _aggrProof,
bytes32 _publicInputHash
) external view override {
address _verifier = getVerifier(_batchIndex);
IZkEvmVerifier(_verifier).verify(_aggrProof, _publicInputHash);
}
/************************
* Restricted Functions *
************************/
/// @notice Update the address of zkevm verifier.
/// @param _startBatchIndex The start batch index when the verifier will be used.
/// @param _verifier The address of new verifier.
/// @dev If `_startBatchIndex` equals the current latest start index the latest
/// verifier is replaced in place; if it is strictly greater, the previous latest
/// verifier is archived into `legacyVerifiers` first. Smaller indices revert.
function updateVerifier(uint64 _startBatchIndex, address _verifier) external onlyOwner {
Verifier memory _latestVerifier = latestVerifier;
require(_startBatchIndex >= _latestVerifier.startBatchIndex, "start batch index too small");
require(_verifier != address(0), "zero verifier address");
if (_latestVerifier.startBatchIndex < _startBatchIndex) {
legacyVerifiers.push(_latestVerifier);
_latestVerifier.startBatchIndex = _startBatchIndex;
}
_latestVerifier.verifier = _verifier;
latestVerifier = _latestVerifier;
emit UpdateVerifier(_startBatchIndex, _verifier);
}
}

View File

@@ -308,7 +308,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
);
// verify batch
IRollupVerifier(verifier).verifyAggregateProof(_aggrProof, _publicInputHash);
IRollupVerifier(verifier).verifyAggregateProof(_batchIndex, _aggrProof, _publicInputHash);
// check and update lastFinalizedBatchIndex
unchecked {

View File

@@ -4,7 +4,12 @@ pragma solidity ^0.8.0;
interface IRollupVerifier {
/// @notice Verify aggregate zk proof.
/// @param batchIndex The batch index to verify.
/// @param aggrProof The aggregated proof.
/// @param publicInputHash The public input hash.
function verifyAggregateProof(bytes calldata aggrProof, bytes32 publicInputHash) external view;
function verifyAggregateProof(
uint256 batchIndex,
bytes calldata aggrProof,
bytes32 publicInputHash
) external view;
}

View File

@@ -0,0 +1,10 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
/// @title IZkEvmVerifier
/// @notice Interface for a single-version zkevm aggregate-proof verifier.
/// @dev NOTE(review): implementations appear to signal failure by reverting
/// (the function returns nothing) — confirm against a concrete verifier.
interface IZkEvmVerifier {
/// @notice Verify aggregate zk proof.
/// @param aggrProof The aggregated proof.
/// @param publicInputHash The public input hash.
function verify(bytes calldata aggrProof, bytes32 publicInputHash) external view;
}

View File

@@ -0,0 +1,105 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
import {L1MessageQueue} from "../L1/rollup/L1MessageQueue.sol";
import {MultipleVersionRollupVerifier} from "../L1/rollup/MultipleVersionRollupVerifier.sol";
import {MockZkEvmVerifier} from "./mocks/MockZkEvmVerifier.sol";
/// @title MultipleVersionRollupVerifierTest
/// @notice Foundry/DSTestPlus tests for `MultipleVersionRollupVerifier`, using
/// `MockZkEvmVerifier` instances (which revert with their own address) to
/// observe which verifier is selected for a given batch index.
contract MultipleVersionRollupVerifierTest is DSTestPlus {
// from MultipleVersionRollupVerifier
event UpdateVerifier(uint256 startBatchIndex, address verifier);
// Contract under test.
MultipleVersionRollupVerifier private verifier;
// Mock verifiers installed at successive start batch indices.
MockZkEvmVerifier private v0;
MockZkEvmVerifier private v1;
MockZkEvmVerifier private v2;
/// @dev Deploys three mock verifiers and the verifier under test with `v0`
/// as the initial (batch index 0) verifier.
function setUp() external {
v0 = new MockZkEvmVerifier();
v1 = new MockZkEvmVerifier();
v2 = new MockZkEvmVerifier();
verifier = new MultipleVersionRollupVerifier(address(v0));
}
/// @dev Fuzz test: access control, zero-address rejection, archiving of the
/// previous verifier, in-place replacement at an equal start index, and
/// rejection of a smaller start index.
function testUpdateVerifier(address _newVerifier) external {
hevm.assume(_newVerifier != address(0));
// set by non-owner, should revert
hevm.startPrank(address(1));
hevm.expectRevert("Ownable: caller is not the owner");
verifier.updateVerifier(0, address(0));
hevm.stopPrank();
// zero verifier address, revert
hevm.expectRevert("zero verifier address");
verifier.updateVerifier(0, address(0));
// change to random operator
assertEq(verifier.legacyVerifiersLength(), 0);
verifier.updateVerifier(uint64(100), _newVerifier);
assertEq(verifier.legacyVerifiersLength(), 1);
(uint64 _startBatchIndex, address _verifier) = verifier.latestVerifier();
assertEq(_startBatchIndex, uint64(100));
assertEq(_verifier, _newVerifier);
(_startBatchIndex, _verifier) = verifier.legacyVerifiers(0);
assertEq(_startBatchIndex, uint64(0));
assertEq(_verifier, address(v0));
// change to same batch index
verifier.updateVerifier(uint64(100), address(v1));
(_startBatchIndex, _verifier) = verifier.latestVerifier();
assertEq(_startBatchIndex, uint64(100));
assertEq(_verifier, address(v1));
(_startBatchIndex, _verifier) = verifier.legacyVerifiers(0);
assertEq(_startBatchIndex, uint64(0));
assertEq(_verifier, address(v0));
// start batch index too small, revert
hevm.expectRevert("start batch index too small");
verifier.updateVerifier(99, _newVerifier);
}
/// @dev Checks verifier selection at the boundaries of each version range:
/// v0 for [0, 100), v1 for [100, 300), v2 for [300, ∞).
function testGetVerifier() external {
verifier.updateVerifier(100, address(v1));
verifier.updateVerifier(300, address(v2));
assertEq(verifier.getVerifier(0), address(v0));
assertEq(verifier.getVerifier(1), address(v0));
assertEq(verifier.getVerifier(99), address(v0));
assertEq(verifier.getVerifier(100), address(v1));
assertEq(verifier.getVerifier(101), address(v1));
assertEq(verifier.getVerifier(299), address(v1));
assertEq(verifier.getVerifier(300), address(v2));
assertEq(verifier.getVerifier(301), address(v2));
assertEq(verifier.getVerifier(10000), address(v2));
}
/// @dev Verifies dispatch end-to-end: each mock reverts with its own
/// ABI-encoded address, so the expected revert data identifies which
/// verifier `verifyAggregateProof` routed to.
function testVerifyAggregateProof() external {
verifier.updateVerifier(100, address(v1));
verifier.updateVerifier(300, address(v2));
hevm.expectRevert(abi.encode(address(v0)));
verifier.verifyAggregateProof(0, new bytes(0), bytes32(0));
hevm.expectRevert(abi.encode(address(v0)));
verifier.verifyAggregateProof(1, new bytes(0), bytes32(0));
hevm.expectRevert(abi.encode(address(v0)));
verifier.verifyAggregateProof(99, new bytes(0), bytes32(0));
hevm.expectRevert(abi.encode(address(v1)));
verifier.verifyAggregateProof(100, new bytes(0), bytes32(0));
hevm.expectRevert(abi.encode(address(v1)));
verifier.verifyAggregateProof(101, new bytes(0), bytes32(0));
hevm.expectRevert(abi.encode(address(v1)));
verifier.verifyAggregateProof(299, new bytes(0), bytes32(0));
hevm.expectRevert(abi.encode(address(v2)));
verifier.verifyAggregateProof(300, new bytes(0), bytes32(0));
hevm.expectRevert(abi.encode(address(v2)));
verifier.verifyAggregateProof(301, new bytes(0), bytes32(0));
hevm.expectRevert(abi.encode(address(v2)));
verifier.verifyAggregateProof(10000, new bytes(0), bytes32(0));
}
}

View File

@@ -6,5 +6,9 @@ import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
contract MockRollupVerifier is IRollupVerifier {
/// @inheritdoc IRollupVerifier
function verifyAggregateProof(bytes calldata, bytes32) external view {}
function verifyAggregateProof(
uint256,
bytes calldata,
bytes32
) external view {}
}

View File

@@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";
/// @title MockZkEvmVerifier
/// @notice Test double for `IZkEvmVerifier`: `verify` always reverts with the
/// mock's own address ABI-encoded, so tests can assert which verifier
/// instance a dispatcher selected by matching the revert data.
contract MockZkEvmVerifier is IZkEvmVerifier {
event Called(address);
/// @inheritdoc IZkEvmVerifier
/// @dev Reverts unconditionally with `abi.encode(address(this))` as the reason.
function verify(bytes calldata, bytes32) external view {
revert(string(abi.encode(address(this))));
}
}

View File

@@ -25,6 +25,13 @@ type RollerAPI interface {
SubmitProof(proof *message.ProofMsg) error
}
// AdminAPI for Coordinator in order to manage process.
type AdminAPI interface {
StartSend()
PauseSend()
PauseSendUntil(batchIdx uint64)
}
// RequestToken generates and sends back register token for roller
func (m *Manager) RequestToken(authMsg *message.AuthMsg) (string, error) {
if ok, err := authMsg.Verify(); !ok {
@@ -127,3 +134,18 @@ func (m *Manager) SubmitProof(proof *message.ProofMsg) error {
return nil
}
// StartSend starts to send basic tasks.
func (m *Manager) StartSend() {
m.StartSendTask()
}
// PauseSend pauses to send basic tasks.
func (m *Manager) PauseSend() {
m.PauseSendTask()
}
// PauseSendUntil pause to send basic tasks until batchIdx.
func (m *Manager) PauseSendUntil(batchIdx uint64) {
m.PauseSendTaskUntil(batchIdx)
}

View File

@@ -17,7 +17,9 @@ const (
// RollerManagerConfig loads sequencer configuration items.
type RollerManagerConfig struct {
CompressionLevel int `json:"compression_level,omitempty"`
PauseSendTask bool `json:"pause_send_task"`
PauseSendTaskUntil uint64 `json:"pause_send_task_until"`
CompressionLevel int `json:"compression_level,omitempty"`
// asc or desc (default: asc)
OrderSession string `json:"order_session,omitempty"`
// The amount of rollers to pick per proof generation session.

View File

@@ -9,6 +9,7 @@ require (
github.com/scroll-tech/go-ethereum v1.10.14-0.20230508165858-27a3830afa61
github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
go.uber.org/atomic v1.11.0
golang.org/x/exp v0.0.0-20230206171751-46f607a40771
golang.org/x/sync v0.1.0
)

View File

@@ -133,6 +133,8 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRT
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=

View File

@@ -15,6 +15,7 @@ import (
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
uatomic "go.uber.org/atomic"
"golang.org/x/exp/rand"
"scroll-tech/common/metrics"
@@ -75,7 +76,9 @@ type Manager struct {
cfg *config.RollerManagerConfig
// The indicator whether the backend is running or not.
running int32
running int32
sendTaskPaused *uatomic.Bool
pauseUntilBatchIdx *uatomic.Uint64
// A mutex guarding the boolean below.
mu sync.RWMutex
@@ -117,6 +120,8 @@ func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmF
return &Manager{
ctx: ctx,
cfg: cfg,
sendTaskPaused: uatomic.NewBool(cfg.PauseSendTask),
pauseUntilBatchIdx: uatomic.NewUint64(cfg.PauseSendTaskUntil),
rollerPool: cmap.New(),
sessions: make(map[string]*session),
failedSessionInfos: make(map[string]*SessionInfo),
@@ -201,7 +206,13 @@ func (m *Manager) Loop() {
}
}
// Select basic type roller and send message
for len(tasks) > 0 && m.StartBasicProofGenerationSession(tasks[0], nil) {
for len(tasks) > 0 {
if m.isSendTaskPaused(tasks[0].Index) {
break
}
if !m.StartBasicProofGenerationSession(tasks[0], nil) {
break
}
tasks = tasks[1:]
}
case <-m.ctx.Done():
@@ -559,6 +570,11 @@ func (m *Manager) APIs() []rpc.API {
Service: RollerAPI(m),
Public: true,
},
{
Namespace: "admin",
Service: AdminAPI(m),
Public: true,
},
{
Namespace: "debug",
Public: true,
@@ -567,6 +583,26 @@ func (m *Manager) APIs() []rpc.API {
}
}
// StartSendTask starts to send basic tasks loop.
func (m *Manager) StartSendTask() {
m.sendTaskPaused.Store(false)
}
// PauseSendTask pauses to send basic tasks loop.
func (m *Manager) PauseSendTask() {
m.sendTaskPaused.Store(true)
}
// PauseSendTaskUntil pauses to send basic tasks loop until batchIdx.
func (m *Manager) PauseSendTaskUntil(batchIdx uint64) {
m.PauseSendTask()
m.pauseUntilBatchIdx.Store(batchIdx)
}
func (m *Manager) isSendTaskPaused(batchIdx uint64) bool {
return m.sendTaskPaused.Load() && m.pauseUntilBatchIdx.Load() > batchIdx
}
// StartBasicProofGenerationSession starts a basic proof generation session
func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevSession *session) (success bool) {
var taskID string