Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-13 16:08:04 -05:00)

Compare commits: 25 commits, feat/integ ... manager_ap
| Author | SHA1 | Date |
|---|---|---|
|  | 50305f3039 |  |
|  | 8d667f9353 |  |
|  | dfc9a44743 |  |
|  | 08c49d9b2c |  |
|  | ecb3f5a043 |  |
|  | 57a058c516 |  |
|  | 55612a0dbb |  |
|  | 7d9e111e9c |  |
|  | 25e43462c6 |  |
|  | 74e0960dc5 |  |
|  | a8b2706752 |  |
|  | 76cfb97f99 |  |
|  | d9ae117548 |  |
|  | de2669da2b |  |
|  | 6880dd83da |  |
|  | 9d6e53a120 |  |
|  | 0940788143 |  |
|  | ad46a85a2d |  |
|  | 9d29a95675 |  |
|  | b1d7654970 |  |
|  | a6164046e1 |  |
|  | 119e62d4b1 |  |
|  | 16e0cbf542 |  |
|  | bfaf2fd0e2 |  |
|  | ea227b5c85 |  |
.github/workflows/bridge.yml (vendored, 8 changes)

@@ -103,7 +103,13 @@ jobs:
       - name: Test bridge packages
         working-directory: 'bridge'
         run: |
-          go test -v -race -gcflags="-l" -ldflags="-s=false" -covermode=atomic ./...
+          go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v3
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+        with:
+          flags: bridge
       # docker-build:
       #   if: github.event.pull_request.draft == false
       #   runs-on: ubuntu-latest
.github/workflows/bridge_history_api.yml (vendored, 6 changes)

@@ -53,6 +53,12 @@ jobs:
         run: |
           go get ./...
           make test
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v3
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+        with:
+          flags: bridge-history-api
   goimports-lint:
     if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
.github/workflows/common.yml (vendored, 8 changes)

@@ -94,4 +94,10 @@ jobs:
       - name: Test common packages
         working-directory: 'common'
         run: |
-          go test -v -race -gcflags="-l" -ldflags="-s=false" -covermode=atomic ./...
+          go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v3
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+        with:
+          flags: common
.github/workflows/coordinator.yml (vendored, 9 changes)

@@ -110,4 +110,11 @@ jobs:
      - name: Test coordinator packages
        working-directory: 'coordinator'
        run: |
-          go test -v -race -gcflags="-l" -ldflags="-s=false" -covermode=atomic -tags mock_verifier ./...
+          # go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=coverage.txt -covermode=atomic ./...
+          go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v3
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+        with:
+          flags: coordinator
.github/workflows/database.yml (vendored, 8 changes)

@@ -87,4 +87,10 @@ jobs:
       - name: Test database packages
         working-directory: 'database'
         run: |
-          go test -v -race -gcflags="-l" -ldflags="-s=false" -covermode=atomic ./...
+          go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v3
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+        with:
+          flags: database
.github/workflows/integration.yaml (vendored, 2 changes)

@@ -40,4 +40,4 @@ jobs:
           make -C common/bytecode all
       - name: Run integration tests
         run: |
-          go test -v -tags="mock_prover mock_verifier" -p 1 scroll-tech/integration-test/...
+          go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...
.github/workflows/roller.yml (vendored, 8 changes)

@@ -47,7 +47,13 @@ jobs:
      - name: Test
        run: |
          make roller
-         go test -tags="mock_prover" -v ./...
+         go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
+     - name: Upload coverage reports to Codecov
+       uses: codecov/codecov-action@v3
+       env:
+         CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+       with:
+         flags: roller
  check:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
Jenkinsfile (vendored, 100 lines deleted)

@@ -1,100 +0,0 @@
-imagePrefix = 'scrolltech'
-credentialDocker = 'dockerhub'
-
-pipeline {
-    agent any
-    options {
-        timeout (20)
-    }
-    tools {
-        nodejs "nodejs"
-        go 'go-1.19'
-    }
-    environment {
-        GOBIN = '/home/ubuntu/go/bin/'
-        GO111MODULE = 'on'
-        PATH="/home/ubuntu/.cargo/bin:$PATH"
-        LD_LIBRARY_PATH="$LD_LIBRARY_PATH:./coordinator/verifier/lib"
-        CHAIN_ID='534353'
-        // LOG_DOCKER = 'true'
-    }
-    stages {
-        stage('Build') {
-            parallel {
-                stage('Build Prerequisite') {
-                    steps {
-                        sh 'make dev_docker'
-                        sh 'make -C bridge mock_abi'
-                        sh 'make -C common/bytecode all'
-                    }
-                }
-                stage('Check Bridge Compilation') {
-                    steps {
-                        sh 'make -C bridge bridge_bins'
-                    }
-                }
-                stage('Check Coordinator Compilation') {
-                    steps {
-                        sh 'export PATH=/home/ubuntu/go/bin:$PATH'
-                        sh 'make -C coordinator coordinator'
-                    }
-                }
-                stage('Check Database Compilation') {
-                    steps {
-                        sh 'make -C database db_cli'
-                    }
-                }
-                stage('Check Database Docker Build') {
-                    steps {
-                        sh 'make -C database docker'
-                    }
-                }
-            }
-        }
-        stage('Parallel Test') {
-            parallel{
-                stage('Race test common package') {
-                    steps {
-                        sh 'go test -v -race -coverprofile=coverage.common.txt -covermode=atomic scroll-tech/common/...'
-                    }
-                }
-                stage('Race test bridge package') {
-                    steps {
-                        sh "cd ./bridge && ../build/run_tests.sh bridge"
-                    }
-                }
-                stage('Race test coordinator package') {
-                    steps {
-                        sh 'cd ./coordinator && go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=../coverage.coordinator.txt -covermode=atomic ./...'
-                    }
-                }
-                stage('Race test database package') {
-                    steps {
-                        sh 'go test -v -race -coverprofile=coverage.db.txt -covermode=atomic scroll-tech/database/...'
-                    }
-                }
-                stage('Integration test') {
-                    steps {
-                        sh 'go test -v -tags="mock_prover mock_verifier" -p 1 scroll-tech/integration-test/...'
-                    }
-                }
-            }
-        }
-        stage('Compare Coverage') {
-            steps {
-                sh './build/post-test-report-coverage.sh'
-                script {
-                    currentBuild.result = 'SUCCESS'
-                }
-                step([$class: 'CompareCoverageAction', publishResultAs: 'Comment', scmVars: [GIT_URL: env.GIT_URL]])
-            }
-        }
-    }
-    post {
-        always {
-            publishCoverage adapters: [coberturaReportAdapter(path: 'cobertura.xml', thresholds: [[thresholdTarget: 'Aggregated Report', unhealthyThreshold: 40.0]])], checksName: '', sourceFileResolver: sourceFiles('NEVER_STORE')
-            cleanWs()
-            slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
-        }
-    }
-}
@@ -1,5 +1,7 @@
 # Scroll Monorepo

+[](https://codecov.io/gh/scroll-tech/scroll)
+
 ## Prerequisites
 + Go 1.19
 + Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
@@ -325,3 +325,26 @@ type L2FailedRelayedMessageEvent struct {
 type L2RelayedMessageEvent struct {
 	MessageHash common.Hash
 }
+
+// IScrollChainBatch is an auto generated low-level Go binding around an user-defined struct.
+type IScrollChainBatch struct {
+	Blocks           []IScrollChainBlockContext
+	PrevStateRoot    common.Hash
+	NewStateRoot     common.Hash
+	WithdrawTrieRoot common.Hash
+	BatchIndex       uint64
+	ParentBatchHash  common.Hash
+	L2Transactions   []byte
+}
+
+// IScrollChainBlockContext is an auto generated low-level Go binding around an user-defined struct.
+type IScrollChainBlockContext struct {
+	BlockHash       common.Hash
+	ParentHash      common.Hash
+	BlockNumber     uint64
+	Timestamp       uint64
+	BaseFee         *big.Int
+	GasLimit        uint64
+	NumTransactions uint16
+	NumL1Messages   uint16
+}
@@ -5,6 +5,7 @@ import (
 	"os"

 	"github.com/ethereum/go-ethereum/log"
+	"github.com/iris-contrib/middleware/cors"
 	"github.com/kataras/iris/v12"
 	"github.com/kataras/iris/v12/mvc"
 	"github.com/urfave/cli/v2"
@@ -60,6 +61,11 @@ func init() {
 }

 func action(ctx *cli.Context) error {
+	corsOptions := cors.New(cors.Options{
+		AllowedOrigins:   []string{"*"},
+		AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE"},
+		AllowCredentials: true,
+	})
 	// Load config file.
 	cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
 	cfg, err := config.NewConfig(cfgFile)
@@ -72,6 +78,7 @@ func action(ctx *cli.Context) error {
 	}
 	defer database.Close()
 	bridgeApp := iris.New()
+	bridgeApp.UseRouter(corsOptions)
 	bridgeApp.Get("/ping", pong).Describe("healthcheck")

 	mvc.Configure(bridgeApp.Party("/api/txs"), setupQueryByAddressHandler)
@@ -2,28 +2,28 @@
   "l1": {
     "confirmation": 64,
     "endpoint": "https://rpc.ankr.com/eth_goerli",
-    "startHeight": 9890194,
+    "startHeight": 9090194,
     "blockTime": 10,
-    "MessengerAddr": "0x5260e38080BFe97e6C4925d9209eCc5f964373b6",
-    "ETHGatewayAddr": "0x429b73A21cF3BF1f3E696a21A95408161daF311f",
-    "WETHGatewayAddr": "0x8be69E499D8848DfFb4cF9bac909f3e2cF2FeFa0",
-    "StandardERC20Gateway": "0xeF37207c1A1efF6D6a9d7BfF3cF4270e406d319b",
-    "CustomERC20GatewayAddr": "0x920f906B814597cF5DC76F95100F09CBAF9c5748",
-    "ERC721GatewayAddr": "0x1C441Dfc5C2eD7A2AA8636748A664E59CB029157",
-    "ERC1155GatewayAddr": "0xd1bE599aaCBC21448fD6373bbc7c1b4c7806f135"
+    "MessengerAddr": "0x326517Eb8eB1Ce5eaB5b513C2e9A24839b402d90",
+    "ETHGatewayAddr": "0x8305cB7B8448677736095965B63d7431017328fe",
+    "WETHGatewayAddr": "0xe3bA3c60d99a2d9a5f817734bC85353470b23931",
+    "StandardERC20Gateway": "0x16c1079B27eD9c363B7D08aC5Ae937A398972A5C",
+    "CustomERC20GatewayAddr": "0x61f08caD3d6F77801167d3bA8669433701586643",
+    "ERC721GatewayAddr": "0x4A73D25A4C99CB912acaf6C5B5e554f2982201c5",
+    "ERC1155GatewayAddr": "0xa3F5DD3033698c2832C53f3C3Fe6E062F58cD808"
   },
   "l2": {
     "confirmation": 1,
-    "endpoint": "https://alpha-rpc.scroll.io/l2",
+    "endpoint": "http://staging-l2geth-rpc0.scroll.tech:8545",
     "blockTime": 3,
-    "startHeight": 1900068,
-    "CustomERC20GatewayAddr": "0xa07Cb742657294C339fB4d5d6CdF3fdBeE8C1c68",
-    "ERC721GatewayAddr": "0x8Fee20e0C0Ef16f2898a8073531a857D11b9C700",
-    "StandardERC20Gateway": "0xB878F37BB278bf0e4974856fFe86f5e6F66BD725",
-    "MessengerAddr": "0xb75d7e84517e1504C151B270255B087Fd746D34C",
-    "ETHGatewayAddr": "0x32139B5C8838E94fFcD83E60dff95Daa7F0bA14c",
-    "WETHGatewayAddr": "0xBb88bF582F2BBa46702621dae5CB9271057bC85b",
-    "ERC1155GatewayAddr": "0x2946cB860028276b3C4bccE1767841641C2E0828"
+    "startHeight": 0,
+    "CustomERC20GatewayAddr": "0x905db21f836749fEeD12de781afc4A5Ab4Dd0d51",
+    "ERC721GatewayAddr": "0xC53D835514780664BCd7eCfcE7c2E5d9554dc41B",
+    "StandardERC20Gateway": "0x90271634BCB020e06ea4840C3f7aa61b8F860651",
+    "MessengerAddr": "0xE8b0956Ac75c65Aa1669e83888DA13afF2E108f4",
+    "ETHGatewayAddr": "0xD5938590D5dD8ce95812D4D515a219C12C551D67",
+    "WETHGatewayAddr": "0xb0aaA582564fade4232a16fdB1383004A6A7247F",
+    "ERC1155GatewayAddr": "0x4f33B1655619c2C0B7C450128Df760B4365Cb549"
   },
   "db": {
     "dsn": "postgres://postgres:1234@localhost:5444/test?sslmode=disable",
@@ -16,7 +16,7 @@ type QueryHashController struct {
 }

 func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
-	message, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
+	message, total, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
 	if err != nil {
 		return &model.QueryByAddressResponse{Message: "500", Data: &model.Data{}}, err
 	}
@@ -24,7 +24,7 @@ func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.Qu
 	return &model.QueryByAddressResponse{Message: "ok",
 		Data: &model.Data{
 			Result: message,
-			Total:  len(message),
+			Total:  total,
 		}}, nil
 }
@@ -49,20 +49,20 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
 ON cross_message FOR EACH ROW EXECUTE PROCEDURE
 update_timestamp();

-CREATE OR REPLACE FUNCTION delete_at_trigger()
+CREATE OR REPLACE FUNCTION deleted_at_trigger()
 RETURNS TRIGGER AS $$
 BEGIN
    IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
-      UPDATE cross_message SET delete_at = NOW() WHERE id = NEW.id;
+      UPDATE cross_message SET deleted_at = NOW() WHERE id = NEW.id;
    END IF;
    RETURN NEW;
 END;
 $$ LANGUAGE plpgsql;

-CREATE TRIGGER delete_at_trigger
+CREATE TRIGGER deleted_at_trigger
 AFTER UPDATE ON cross_message
 FOR EACH ROW
-EXECUTE FUNCTION delete_at_trigger();
+EXECUTE FUNCTION deleted_at_trigger();


 -- +goose StatementEnd
@@ -31,20 +31,20 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
 ON relayed_msg FOR EACH ROW EXECUTE PROCEDURE
 update_timestamp();

-CREATE OR REPLACE FUNCTION delete_at_trigger()
+CREATE OR REPLACE FUNCTION deleted_at_trigger()
 RETURNS TRIGGER AS $$
 BEGIN
    IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
-      UPDATE relayed_msg SET delete_at = NOW() WHERE id = NEW.id;
+      UPDATE relayed_msg SET deleted_at = NOW() WHERE id = NEW.id;
    END IF;
    RETURN NEW;
 END;
 $$ LANGUAGE plpgsql;

-CREATE TRIGGER delete_at_trigger
+CREATE TRIGGER deleted_at_trigger
 AFTER UPDATE ON relayed_msg
 FOR EACH ROW
-EXECUTE FUNCTION delete_at_trigger();
+EXECUTE FUNCTION deleted_at_trigger();


 -- +goose StatementEnd
@@ -22,7 +22,7 @@ func NewL2CrossMsgOrm(db *sqlx.DB) L2CrossMsgOrm {

 func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error) {
 	result := &CrossMsg{}
-	row := l.db.QueryRowx(`SELECT * FROM l2_cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
+	row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
 	if err := row.StructScan(result); err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
 			return nil, nil
@@ -16,6 +16,7 @@ type OrmFactory interface {
 	orm.L1CrossMsgOrm
 	orm.L2CrossMsgOrm
 	orm.RelayedMsgOrm
+	GetTotalCrossMsgCountByAddress(sender string) (uint64, error)
 	GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error)
 	GetDB() *sqlx.DB
 	Beginx() (*sqlx.Tx, error)
@@ -59,6 +60,15 @@ func (o *ormFactory) Beginx() (*sqlx.Tx, error) {
 	return o.DB.Beginx()
 }

+func (o *ormFactory) GetTotalCrossMsgCountByAddress(sender string) (uint64, error) {
+	var count uint64
+	row := o.DB.QueryRowx(`SELECT COUNT(*) FROM cross_message WHERE sender = $1 AND NOT is_deleted;`, sender)
+	if err := row.Scan(&count); err != nil {
+		return 0, err
+	}
+	return count, nil
+}
+
 func (o *ormFactory) GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error) {
 	para := sender
 	var results []*orm.CrossMsg
@@ -4,6 +4,7 @@ go 1.19

 require (
 	github.com/ethereum/go-ethereum v1.12.0
+	github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458
 	github.com/jmoiron/sqlx v1.3.5
 	github.com/kataras/iris/v12 v12.2.0
 	github.com/lib/pq v1.10.7
@@ -242,6 +242,8 @@ github.com/iris-contrib/go.uuid v2.0.0+incompatible h1:XZubAYg61/JwnJNbZilGjf3b3
 github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
 github.com/iris-contrib/httpexpect/v2 v2.12.1 h1:3cTZSyBBen/kfjCtgNFoUKi1u0FVXNaAjyRJOo6AVS4=
 github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
+github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458 h1:V60rHQJc6DieKV1BqHIGclraPdO4kinuFAZIrPGHN7s=
+github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458/go.mod h1:7eVziAp1yUwFB/ZMg71n84VWQH+7wukvxcHuF2e7cbg=
 github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
 github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
 github.com/iris-contrib/schema v0.0.6 h1:CPSBLyx2e91H2yJzPuhGuifVRnZBBJ3pCOMbOvPZaTw=
@@ -4,7 +4,7 @@ import "bridge-history-api/service"

 type Data struct {
 	Result []*service.TxHistoryInfo `json:"result"`
-	Total  int                      `json:"total"`
+	Total  uint64                   `json:"total"`
 }

 type QueryByAddressResponse struct {
@@ -32,7 +32,7 @@ type TxHistoryInfo struct {

 // HistoryService example service.
 type HistoryService interface {
-	GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, error)
+	GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
 	GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error)
 }
@@ -69,15 +69,19 @@ func updateCrossTxHash(msgHash string, txInfo *TxHistoryInfo, db db.OrmFactory)

 }

-func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, error) {
-	txHistories := make([]*TxHistoryInfo, 0)
+func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
+	var txHistories []*TxHistoryInfo
+	total, err := h.db.GetTotalCrossMsgCountByAddress(address.String())
+	if err != nil || total == 0 {
+		return txHistories, 0, err
+	}
 	result, err := h.db.GetCrossMsgsByAddressWithOffset(address.String(), offset, limit)
 	if err != nil {
-		return nil, err
+		return nil, 0, err
 	}
 	for _, msg := range result {
 		txHistory := &TxHistoryInfo{
-			Hash:   msg.MsgHash,
+			Hash:   msg.Layer1Hash + msg.Layer2Hash,
 			Amount: msg.Amount,
 			To:     msg.Target,
 			IsL1:   msg.MsgType == int(orm.Layer1Msg),
@@ -91,7 +95,7 @@ func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, l
 		updateCrossTxHash(msg.MsgHash, txHistory, h.db)
 		txHistories = append(txHistories, txHistory)
 	}
-	return txHistories, nil
+	return txHistories, total, nil
 }

 func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error) {
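Note on the pagination change above: GetTxsByAddress now returns the full row count (from GetTotalCrossMsgCountByAddress) alongside a single page of results, so clients can page without over-fetching. A minimal sketch of a caller against the new signature; the client interface and loop below are illustrative, not repo code:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// TxHistoryInfo and HistoryClient are simplified stand-ins for the service
// types above, kept just wide enough to show the (page, total, err) contract.
type TxHistoryInfo struct{ Hash string }

type HistoryClient interface {
	GetTxsByAddress(address common.Address, offset, limit int64) ([]*TxHistoryInfo, uint64, error)
}

// pageThrough walks a sender's history one page at a time.
func pageThrough(svc HistoryClient, addr common.Address) error {
	const limit = int64(20)
	for offset := int64(0); ; offset += limit {
		page, total, err := svc.GetTxsByAddress(addr, offset, limit)
		if err != nil {
			return err
		}
		fmt.Printf("fetched %d of %d records\n", len(page), total)
		// total comes from the COUNT(*) query, so it bounds the loop even
		// when the last page is short or empty.
		if offset+limit >= int64(total) {
			return nil
		}
	}
}
```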
File diff suppressed because one or more lines are too long
@@ -1,7 +1,9 @@
 package utils

 import (
+	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"math/big"

@@ -59,3 +61,48 @@ func ComputeMessageHash(
 	data, _ := backendabi.L2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
 	return common.BytesToHash(crypto.Keccak256(data))
 }
+
+// GetBatchRangeFromCalldataV1 finds the block range from calldata, both inclusive.
+func GetBatchRangeFromCalldataV1(calldata []byte) ([]uint64, []uint64, []uint64, error) {
+	var batchIndices []uint64
+	var startBlocks []uint64
+	var finishBlocks []uint64
+	if bytes.Equal(calldata[0:4], common.Hex2Bytes("cb905499")) {
+		// commitBatches
+		method := backendabi.ScrollChainABI.Methods["commitBatches"]
+		values, err := method.Inputs.Unpack(calldata[4:])
+		if err != nil {
+			return batchIndices, startBlocks, finishBlocks, err
+		}
+		args := make([]backendabi.IScrollChainBatch, len(values))
+		err = method.Inputs.Copy(&args, values)
+		if err != nil {
+			return batchIndices, startBlocks, finishBlocks, err
+		}
+
+		for i := 0; i < len(args); i++ {
+			batchIndices = append(batchIndices, args[i].BatchIndex)
+			startBlocks = append(startBlocks, args[i].Blocks[0].BlockNumber)
+			finishBlocks = append(finishBlocks, args[i].Blocks[len(args[i].Blocks)-1].BlockNumber)
+		}
+	} else if bytes.Equal(calldata[0:4], common.Hex2Bytes("8c73235d")) {
+		// commitBatch
+		method := backendabi.ScrollChainABI.Methods["commitBatch"]
+		values, err := method.Inputs.Unpack(calldata[4:])
+		if err != nil {
+			return batchIndices, startBlocks, finishBlocks, err
+		}
+
+		args := backendabi.IScrollChainBatch{}
+		err = method.Inputs.Copy(&args, values)
+		if err != nil {
+			return batchIndices, startBlocks, finishBlocks, err
+		}
+		batchIndices = append(batchIndices, args.BatchIndex)
+		startBlocks = append(startBlocks, args.Blocks[0].BlockNumber)
+		finishBlocks = append(finishBlocks, args.Blocks[len(args.Blocks)-1].BlockNumber)
+	} else {
+		return batchIndices, startBlocks, finishBlocks, errors.New("invalid selector")
+	}
+	return batchIndices, startBlocks, finishBlocks, nil
+}
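GetBatchRangeFromCalldataV1 above dispatches on the 4-byte selector (cb905499 for commitBatches, 8c73235d for commitBatch) and reads the first and last block numbers of each decoded batch. A hedged usage sketch, reusing the hex-encoded calldata fixture that the test below reads; the package import path is an assumption:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/ethereum/go-ethereum/common"

	"bridge-history-api/utils" // assumed import path for the utils package above
)

func main() {
	// Same hex-encoded commitBatches calldata fixture used by
	// TestGetBatchRangeFromCalldataV1 below.
	hexBytes, err := os.ReadFile("testdata/commit-batches-0x3095e91db7ba4a6fbf4654d607db322e58ff5579c502219c8024acaea74cf311.txt")
	if err != nil {
		log.Fatal(err)
	}
	batchIndices, startBlocks, finishBlocks, err := utils.GetBatchRangeFromCalldataV1(common.Hex2Bytes(string(hexBytes)))
	if err != nil {
		log.Fatal(err)
	}
	for i := range batchIndices {
		// Both bounds are inclusive, per the function's doc comment.
		fmt.Printf("batch %d covers blocks [%d, %d]\n", batchIndices[i], startBlocks[i], finishBlocks[i])
	}
}
```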
@@ -1,6 +1,7 @@
 package utils_test

 import (
+	"os"
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
@@ -18,3 +19,30 @@ func TestKeccak2(t *testing.T) {
 	assert.NotEqual(t, b, c)
 	assert.Equal(t, "0xc0ffbd7f501bd3d49721b0724b2bff657cb2378f15d5a9b97cd7ea5bf630d512", c.Hex())
 }
+
+func TestGetBatchRangeFromCalldataV1(t *testing.T) {
+	calldata, err := os.ReadFile("../testdata/commit-batches-0x3095e91db7ba4a6fbf4654d607db322e58ff5579c502219c8024acaea74cf311.txt")
+	assert.NoError(t, err)
+
+	// multiple batches
+	batchIndices, startBlocks, finishBlocks, err := utils.GetBatchRangeFromCalldataV1(common.Hex2Bytes(string(calldata[:])))
+	assert.NoError(t, err)
+	assert.Equal(t, len(batchIndices), 5)
+	assert.Equal(t, len(startBlocks), 5)
+	assert.Equal(t, len(finishBlocks), 5)
+	assert.Equal(t, batchIndices[0], uint64(1))
+	assert.Equal(t, batchIndices[1], uint64(2))
+	assert.Equal(t, batchIndices[2], uint64(3))
+	assert.Equal(t, batchIndices[3], uint64(4))
+	assert.Equal(t, batchIndices[4], uint64(5))
+	assert.Equal(t, startBlocks[0], uint64(1))
+	assert.Equal(t, startBlocks[1], uint64(6))
+	assert.Equal(t, startBlocks[2], uint64(7))
+	assert.Equal(t, startBlocks[3], uint64(19))
+	assert.Equal(t, startBlocks[4], uint64(20))
+	assert.Equal(t, finishBlocks[0], uint64(5))
+	assert.Equal(t, finishBlocks[1], uint64(6))
+	assert.Equal(t, finishBlocks[2], uint64(18))
+	assert.Equal(t, finishBlocks[3], uint64(19))
+	assert.Equal(t, finishBlocks[4], uint64(20))
+}
@@ -1,14 +0,0 @@
-#!/bin/bash
-set -uex
-${GOBIN}/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
-${GOBIN}/gocover-cobertura < coverage.db.txt > coverage.db.xml
-${GOBIN}/gocover-cobertura < coverage.common.txt > coverage.common.xml
-${GOBIN}/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
-#${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml
-
-npx cobertura-merge -o cobertura.xml \
-  package1=coverage.bridge.xml \
-  package2=coverage.db.xml \
-  package3=coverage.common.xml \
-  package4=coverage.coordinator.xml
-#  package5=coverage.integration.xml
@@ -1,85 +0,0 @@
-imagePrefix = 'scrolltech'
-credentialDocker = 'dockerhub'
-TAGNAME = ''
-pipeline {
-    agent any
-    options {
-        timeout (20)
-    }
-    tools {
-        go 'go-1.19'
-        nodejs "nodejs"
-    }
-    environment {
-        GO111MODULE = 'on'
-        PATH="/home/ubuntu/.cargo/bin:$PATH"
-        // LOG_DOCKER = 'true'
-    }
-    stages {
-        stage('Tag') {
-            steps {
-                script {
-                    TAGNAME = sh(returnStdout: true, script: 'git tag -l --points-at HEAD')
-                    sh "echo ${TAGNAME}"
-                    // ...
-                }
-            }
-        }
-        stage('Build') {
-            environment {
-                // Extract the username and password of our credentials into "DOCKER_CREDENTIALS_USR" and "DOCKER_CREDENTIALS_PSW".
-                // (NOTE 1: DOCKER_CREDENTIALS will be set to "your_username:your_password".)
-                // The new variables will always be YOUR_VARIABLE_NAME + _USR and _PSW.
-                // (NOTE 2: You can't print credentials in the pipeline for security reasons.)
-                DOCKER_CREDENTIALS = credentials('dockerhub')
-            }
-            steps {
-                withCredentials([usernamePassword(credentialsId: "${credentialDocker}", passwordVariable: 'dockerPassword', usernameVariable: 'dockerUser')]) {
-                    // Use a scripted pipeline.
-                    script {
-                        stage('Push image') {
-                            if (TAGNAME == ""){
-                                return;
-                            }
-                            sh "docker login --username=$dockerUser --password=$dockerPassword"
-                            catchError(buildResult: 'SUCCESS', stageResult: 'SUCCESS') {
-                                script {
-                                    try {
-                                        sh "docker manifest inspect scrolltech/bridge:$TAGNAME > /dev/null"
-                                    } catch (e) {
-                                        // only build if the tag non existed
-                                        //sh "docker login --username=${dockerUser} --password=${dockerPassword}"
-                                        sh "make -C bridge docker"
-                                        sh "docker tag scrolltech/bridge:latest scrolltech/bridge:${TAGNAME}"
-                                        sh "docker push scrolltech/bridge:${TAGNAME}"
-                                        throw e
-                                    }
-                                }
-                            }
-                            catchError(buildResult: 'SUCCESS', stageResult: 'SUCCESS') {
-                                script {
-                                    try {
-                                        sh "docker manifest inspect scrolltech/coordinator:$TAGNAME > /dev/null"
-                                    } catch (e) {
-                                        // only build if the tag non existed
-                                        //sh "docker login --username=${dockerUser} --password=${dockerPassword}"
-                                        sh "make -C coordinator docker"
-                                        sh "docker tag scrolltech/coordinator:latest scrolltech/coordinator:${TAGNAME}"
-                                        sh "docker push scrolltech/coordinator:${TAGNAME}"
-                                        throw e
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        }
-    }
-    post {
-        always {
-            cleanWs()
-            slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${TAGNAME} Tag build ${currentBuild.result}")
-        }
-    }
-}
codecov.yml (new file, 38 lines)

@@ -0,0 +1,38 @@
+coverage:
+  status:
+    project: off
+    patch: off
+flag_management:
+  default_rules:
+    carryforward: true
+  individual_flags:
+    - name: bridge
+      statuses:
+        - type: project
+          target: auto
+          threshold: 1%
+    - name: bridge-history-api
+      statuses:
+        - type: project
+          target: auto
+          threshold: 1%
+    - name: common
+      statuses:
+        - type: project
+          target: auto
+          threshold: 1%
+    - name: coordinator
+      statuses:
+        - type: project
+          target: auto
+          threshold: 1%
+    - name: database
+      statuses:
+        - type: project
+          target: auto
+          threshold: 1%
+    - name: roller
+      statuses:
+        - type: project
+          target: auto
+          threshold: 1%
common/testdata/blockTrace_05.json (vendored, new file, 3710 lines): file diff suppressed because it is too large.

common/testdata/blockTrace_06.json (vendored, new file, 3678 lines): file diff suppressed because it is too large.

common/testdata/blockTrace_07.json (vendored, new file, 3662 lines): file diff suppressed because it is too large.
@@ -2,6 +2,7 @@ package types

 import (
 	"encoding/binary"
+	"fmt"
 	"math/big"

 	"github.com/scroll-tech/go-ethereum/common"
@@ -17,46 +18,99 @@ type BatchHeader struct {
 	totalL1MessagePopped uint64
 	dataHash             common.Hash
 	parentBatchHash      common.Hash
-	skippedL1MessageBitmap []*big.Int // LSB is the first L1 message
+	skippedL1MessageBitmap []byte
 }

 // NewBatchHeader creates a new BatchHeader
 func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64, parentBatchHash common.Hash, chunks []*Chunk) (*BatchHeader, error) {
-	// TODO calculate `l1MessagePopped`, `totalL1MessagePopped`, and `skippedL1MessageBitmap` based on `chunks`
 	// buffer for storing chunk hashes in order to compute the batch data hash
 	var dataBytes []byte

+	// skipped L1 message bitmap, an array of 256-bit bitmaps
+	var skippedBitmap []*big.Int
+
+	// the first queue index that belongs to this batch
+	baseIndex := totalL1MessagePoppedBefore
+
+	// the next queue index that we need to process
+	nextIndex := totalL1MessagePoppedBefore
+
 	for _, chunk := range chunks {
-		// Build dataHash
-		chunkBytes, err := chunk.Hash()
+		// build data hash
+		totalL1MessagePoppedBeforeChunk := nextIndex
+		chunkBytes, err := chunk.Hash(totalL1MessagePoppedBeforeChunk)
 		if err != nil {
 			return nil, err
 		}
 		dataBytes = append(dataBytes, chunkBytes...)
+
+		// build skip bitmap
+		for _, block := range chunk.Blocks {
+			for _, tx := range block.Transactions {
+				if tx.Type != 0x7E {
+					continue
+				}
+				currentIndex := tx.Nonce
+
+				if currentIndex < nextIndex {
+					return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d", nextIndex, currentIndex)
+				}
+
+				// mark skipped messages
+				for skippedIndex := nextIndex; skippedIndex < currentIndex; skippedIndex++ {
+					quo := int((skippedIndex - baseIndex) / 256)
+					rem := int((skippedIndex - baseIndex) % 256)
+					for len(skippedBitmap) <= quo {
+						bitmap := big.NewInt(0)
+						skippedBitmap = append(skippedBitmap, bitmap)
+					}
+					skippedBitmap[quo].SetBit(skippedBitmap[quo], rem, 1)
+				}
+
+				// process included message
+				quo := int((currentIndex - baseIndex) / 256)
+				for len(skippedBitmap) <= quo {
+					bitmap := big.NewInt(0)
+					skippedBitmap = append(skippedBitmap, bitmap)
+				}
+
+				nextIndex = currentIndex + 1
+			}
+		}
 	}
+
+	// compute data hash
 	dataHash := crypto.Keccak256Hash(dataBytes)

+	// compute skipped bitmap
+	bitmapBytes := make([]byte, len(skippedBitmap)*32)
+	for ii, num := range skippedBitmap {
+		bytes := num.Bytes()
+		padding := 32 - len(bytes)
+		copy(bitmapBytes[32*ii+padding:], bytes)
+	}
+
 	return &BatchHeader{
 		version:              version,
 		batchIndex:           batchIndex,
-		l1MessagePopped:      0,                          // TODO
-		totalL1MessagePopped: totalL1MessagePoppedBefore, // TODO
+		l1MessagePopped:      nextIndex - totalL1MessagePoppedBefore,
+		totalL1MessagePopped: nextIndex,
 		dataHash:             dataHash,
 		parentBatchHash:      parentBatchHash,
-		skippedL1MessageBitmap: nil, // TODO
+		skippedL1MessageBitmap: bitmapBytes,
 	}, nil
 }

 // Encode encodes the BatchHeader into RollupV2 BatchHeaderV0Codec Encoding.
 func (b *BatchHeader) Encode() []byte {
-	batchBytes := make([]byte, 89)
+	batchBytes := make([]byte, 89+len(b.skippedL1MessageBitmap))
 	batchBytes[0] = b.version
 	binary.BigEndian.PutUint64(batchBytes[1:], b.batchIndex)
 	binary.BigEndian.PutUint64(batchBytes[9:], b.l1MessagePopped)
 	binary.BigEndian.PutUint64(batchBytes[17:], b.totalL1MessagePopped)
 	copy(batchBytes[25:], b.dataHash[:])
 	copy(batchBytes[57:], b.parentBatchHash[:])
-	// TODO: encode skippedL1MessageBitmap
-
+	copy(batchBytes[89:], b.skippedL1MessageBitmap[:])
 	return batchBytes
 }
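For reference, the fixed layout Encode produces is: 1-byte version, 8-byte big-endian batchIndex, 8-byte l1MessagePopped, 8-byte totalL1MessagePopped, 32-byte dataHash, 32-byte parentBatchHash (89 bytes total), followed by the skipped-message bitmap in 32-byte words. A minimal decoding sketch as an illustrative inverse of Encode above, not code from the repository:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// decodedHeader mirrors the fields BatchHeader.Encode writes.
type decodedHeader struct {
	version                uint8
	batchIndex             uint64
	l1MessagePopped        uint64
	totalL1MessagePopped   uint64
	dataHash               [32]byte
	parentBatchHash        [32]byte
	skippedL1MessageBitmap []byte
}

func decodeBatchHeader(b []byte) (*decodedHeader, error) {
	if len(b) < 89 || (len(b)-89)%32 != 0 {
		return nil, errors.New("invalid batch header length")
	}
	h := &decodedHeader{version: b[0]}
	h.batchIndex = binary.BigEndian.Uint64(b[1:9])
	h.l1MessagePopped = binary.BigEndian.Uint64(b[9:17])
	h.totalL1MessagePopped = binary.BigEndian.Uint64(b[17:25])
	copy(h.dataHash[:], b[25:57])
	copy(h.parentBatchHash[:], b[57:89])
	h.skippedL1MessageBitmap = b[89:] // one 256-bit word per 256 queue indices
	return h, nil
}

func main() {
	// 89 zero bytes parse as an empty header with no bitmap words.
	h, err := decodeBatchHeader(make([]byte, 89))
	fmt.Println(h.batchIndex, len(h.skippedL1MessageBitmap), err)
}
```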
@@ -10,6 +10,7 @@ import (
 )

 func TestNewBatchHeader(t *testing.T) {
+	// Without L1 Msg
 	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
 	assert.NoError(t, err)

@@ -32,9 +33,100 @@ func TestNewBatchHeader(t *testing.T) {
 	batchHeader, err := NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
 	assert.NoError(t, err)
 	assert.NotNil(t, batchHeader)
+	assert.Equal(t, 0, len(batchHeader.skippedL1MessageBitmap))
+
+	// 1 L1 Msg in 1 bitmap
+	templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
+	assert.NoError(t, err)
+
+	wrappedBlock2 := &WrappedBlock{}
+	assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock2,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
+	expectedBitmap := "00000000000000000000000000000000000000000000000000000000000003ff" // skip first 10
+	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
+
+	// many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs
+	templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_05.json")
+	assert.NoError(t, err)
+
+	wrappedBlock3 := &WrappedBlock{}
+	assert.NoError(t, json.Unmarshal(templateBlockTrace3, wrappedBlock3))
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock3,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 37, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	assert.Equal(t, uint64(5), batchHeader.l1MessagePopped)
+	assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
+	expectedBitmap = "0000000000000000000000000000000000000000000000000000000000000000" // all messages are included, so none are skipped
+	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
+
+	// many consecutive L1 Msgs in 1 bitmap, with leading skipped msgs
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock3,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	assert.Equal(t, uint64(42), batchHeader.l1MessagePopped)
+	assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
+	expectedBitmap = "0000000000000000000000000000000000000000000000000000001fffffffff" // skipped the first 37 messages
+	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
+
+	// many sparse L1 Msgs in 1 bitmap
+	templateBlockTrace4, err := os.ReadFile("../testdata/blockTrace_06.json")
+	assert.NoError(t, err)
+
+	wrappedBlock4 := &WrappedBlock{}
+	assert.NoError(t, json.Unmarshal(templateBlockTrace4, wrappedBlock4))
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock4,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	assert.Equal(t, uint64(10), batchHeader.l1MessagePopped)
+	assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
+	expectedBitmap = "00000000000000000000000000000000000000000000000000000000000001dd" // 0111011101
+	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
+
+	// many L1 Msgs in each of 2 bitmaps
+	templateBlockTrace5, err := os.ReadFile("../testdata/blockTrace_07.json")
+	assert.NoError(t, err)
+
+	wrappedBlock5 := &WrappedBlock{}
+	assert.NoError(t, json.Unmarshal(templateBlockTrace5, wrappedBlock5))
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock5,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	assert.Equal(t, uint64(257), batchHeader.l1MessagePopped)
+	assert.Equal(t, 64, len(batchHeader.skippedL1MessageBitmap))
+	expectedBitmap = "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd0000000000000000000000000000000000000000000000000000000000000000"
+	assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
 }

 func TestBatchHeaderEncode(t *testing.T) {
+	// Without L1 Msg
 	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
 	assert.NoError(t, err)

@@ -60,9 +152,28 @@ func TestBatchHeaderEncode(t *testing.T) {
 	bytes := batchHeader.Encode()
 	assert.Equal(t, 89, len(bytes))
 	assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes))
+
+	// With L1 Msg
+	templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
+	assert.NoError(t, err)
+
+	wrappedBlock2 := &WrappedBlock{}
+	assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock2,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	bytes = batchHeader.Encode()
+	assert.Equal(t, 121, len(bytes))
+	assert.Equal(t, "010000000000000001000000000000000b000000000000000b457a9e90e8e51ba2de2f66c6b589540b88cf594dac7fa7d04b99cdcfecf24e384136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff", common.Bytes2Hex(bytes))
 }

 func TestBatchHeaderHash(t *testing.T) {
+	// Without L1 Msg
 	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
 	assert.NoError(t, err)

@@ -103,4 +214,21 @@ func TestBatchHeaderHash(t *testing.T) {
 	assert.NotNil(t, batchHeader2)
 	hash2 := batchHeader2.Hash()
 	assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes()))
+
+	// With L1 Msg
+	templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_04.json")
+	assert.NoError(t, err)
+
+	wrappedBlock3 := &WrappedBlock{}
+	assert.NoError(t, json.Unmarshal(templateBlockTrace3, wrappedBlock3))
+	chunk = &Chunk{
+		Blocks: []*WrappedBlock{
+			wrappedBlock3,
+		},
+	}
+	batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
+	assert.NoError(t, err)
+	assert.NotNil(t, batchHeader)
+	hash = batchHeader.Hash()
+	assert.Equal(t, "0ec9547c6645d5f0c1254e121f49e93f54525cfda5bfb2236440fb3470f48902", common.Bytes2Hex(hash.Bytes()))
 }
@@ -17,8 +17,25 @@ type WrappedBlock struct {
 	WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
 }

+// NumL1Messages returns the number of L1 messages in this block.
+// This number is the sum of included and skipped L1 messages.
+func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
+	var lastQueueIndex *uint64
+	for _, txData := range w.Transactions {
+		if txData.Type == 0x7E {
+			lastQueueIndex = &txData.Nonce
+		}
+	}
+	if lastQueueIndex == nil {
+		return 0
+	}
+	// note: last queue index included before this block is totalL1MessagePoppedBefore - 1
+	// TODO: cache results
+	return *lastQueueIndex - totalL1MessagePoppedBefore + 1
+}
+
 // Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
-func (w *WrappedBlock) Encode() ([]byte, error) {
+func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
 	bytes := make([]byte, 60)

 	if !w.Header.Number.IsUint64() {
@@ -27,14 +44,10 @@ func (w *WrappedBlock) Encode() ([]byte, error) {
 	if len(w.Transactions) > math.MaxUint16 {
 		return nil, errors.New("number of transactions exceeds max uint16")
 	}
-	var numL1Messages uint16
-	for _, txData := range w.Transactions {
-		if txData.Type == 0x7E {
-			if numL1Messages == math.MaxUint16 {
-				return nil, errors.New("number of L1 messages exceeds max uint16")
-			}
-			numL1Messages++
-		}
-	}
+
+	numL1Messages := w.NumL1Messages(totalL1MessagePoppedBefore)
+	if numL1Messages > math.MaxUint16 {
+		return nil, errors.New("number of L1 messages exceeds max uint16")
+	}

 	binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
@@ -42,7 +55,7 @@
 	// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
 	binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
 	binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions)))
-	binary.BigEndian.PutUint16(bytes[58:], numL1Messages)
+	binary.BigEndian.PutUint16(bytes[58:], uint16(numL1Messages))

 	return bytes, nil
 }
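Worth spelling out: NumL1Messages counts both included and skipped messages because L1 message nonces are queue indices. If the last L1 message in a block has nonce 9 and totalL1MessagePoppedBefore is 4, the block accounts for indices 4..9, that is 9 - 4 + 1 = 6, even when only some of them appear as transactions. A self-contained sketch of the same arithmetic, with types simplified from the repo:

```go
package main

import "fmt"

// numL1Messages mirrors WrappedBlock.NumL1Messages above for a plain slice of
// (txType, nonce) pairs; 0x7E marks an L1 message transaction.
func numL1Messages(txs [][2]uint64, totalL1MessagePoppedBefore uint64) uint64 {
	var lastQueueIndex *uint64
	for i := range txs {
		if txs[i][0] == 0x7E {
			lastQueueIndex = &txs[i][1]
		}
	}
	if lastQueueIndex == nil {
		return 0
	}
	// last queue index included before this block is totalL1MessagePoppedBefore - 1
	return *lastQueueIndex - totalL1MessagePoppedBefore + 1
}

func main() {
	// Block includes L1 messages with nonces 5 and 9; indices 4, 6, 7, 8 were skipped.
	txs := [][2]uint64{{0x7E, 5}, {0x7E, 9}, {2, 0}}
	fmt.Println(numL1Messages(txs, 4)) // prints 6 (messages 4 through 9)
}
```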
@@ -17,8 +17,21 @@ type Chunk struct {
 	Blocks []*WrappedBlock `json:"blocks"`
 }

+// NumL1Messages returns the number of L1 messages in this chunk.
+// This number is the sum of included and skipped L1 messages.
+func (c *Chunk) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
+	var numL1Messages uint64
+	for _, block := range c.Blocks {
+		numL1MessagesInBlock := block.NumL1Messages(totalL1MessagePoppedBefore)
+		numL1Messages += numL1MessagesInBlock
+		totalL1MessagePoppedBefore += numL1MessagesInBlock
+	}
+	// TODO: cache results
+	return numL1Messages
+}
+
 // Encode encodes the Chunk into RollupV2 Chunk Encoding.
-func (c *Chunk) Encode() ([]byte, error) {
+func (c *Chunk) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
 	numBlocks := len(c.Blocks)

 	if numBlocks > 255 {
@@ -34,10 +47,11 @@ func (c *Chunk) Encode() ([]byte, error) {
 	var l2TxDataBytes []byte

 	for _, block := range c.Blocks {
-		blockBytes, err := block.Encode()
+		blockBytes, err := block.Encode(totalL1MessagePoppedBefore)
 		if err != nil {
 			return nil, fmt.Errorf("failed to encode block: %v", err)
 		}
+		totalL1MessagePoppedBefore += block.NumL1Messages(totalL1MessagePoppedBefore)

 		if len(blockBytes) != 60 {
 			return nil, fmt.Errorf("block encoding is not 60 bytes long %x", len(blockBytes))
@@ -77,8 +91,8 @@
 }

 // Hash hashes the Chunk into RollupV2 Chunk Hash
-func (c *Chunk) Hash() ([]byte, error) {
-	chunkBytes, err := c.Encode()
+func (c *Chunk) Hash(totalL1MessagePoppedBefore uint64) ([]byte, error) {
+	chunkBytes, err := c.Encode(totalL1MessagePoppedBefore)
 	if err != nil {
 		return nil, err
 	}
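The key invariant in Chunk.Encode after this change: totalL1MessagePoppedBefore must advance block by block, since each block's L1-message count depends on how many queue indices the earlier blocks consumed. A small sketch of that accumulation on plain counts; the per-block last-nonce values are hypothetical, not repo data:

```go
package main

import "fmt"

func main() {
	// Hypothetical last L1-message nonce per block (-1 means the block has none).
	lastNonce := []int64{4, -1, 11}
	popped := uint64(2) // totalL1MessagePoppedBefore for the chunk

	for i, n := range lastNonce {
		var inBlock uint64
		if n >= 0 {
			inBlock = uint64(n) - popped + 1 // includes skipped indices
		}
		fmt.Printf("block %d consumes %d L1 messages (queue now at %d)\n", i, inBlock, popped+inBlock)
		popped += inBlock // thread the counter forward, as Chunk.Encode does
	}
	// block 0: nonces 2..4 -> 3; block 1: 0; block 2: nonces 5..11 -> 7
}
```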
@@ -14,7 +14,7 @@ func TestChunkEncode(t *testing.T) {
 	chunk := &Chunk{
 		Blocks: []*WrappedBlock{},
 	}
-	bytes, err := chunk.Encode()
+	bytes, err := chunk.Encode(0)
 	assert.Nil(t, bytes)
 	assert.Error(t, err)
 	assert.Contains(t, err.Error(), "number of blocks is 0")
@@ -26,7 +26,7 @@ func TestChunkEncode(t *testing.T) {
 	for i := 0; i < 256; i++ {
 		chunk.Blocks = append(chunk.Blocks, &WrappedBlock{})
 	}
-	bytes, err = chunk.Encode()
+	bytes, err = chunk.Encode(0)
 	assert.Nil(t, bytes)
 	assert.Error(t, err)
 	assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte")
@@ -37,12 +37,13 @@ func TestChunkEncode(t *testing.T) {

 	wrappedBlock := &WrappedBlock{}
 	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
+	assert.Equal(t, uint64(0), wrappedBlock.NumL1Messages(0))
 	chunk = &Chunk{
 		Blocks: []*WrappedBlock{
 			wrappedBlock,
 		},
 	}
-	bytes, err = chunk.Encode()
+	bytes, err = chunk.Encode(0)
 	hexString := hex.EncodeToString(bytes)
 	assert.NoError(t, err)
 	assert.Equal(t, 299, len(bytes))
@@ -54,16 +55,17 @@ func TestChunkEncode(t *testing.T) {

 	wrappedBlock2 := &WrappedBlock{}
 	assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
+	assert.Equal(t, uint64(11), wrappedBlock2.NumL1Messages(0)) // 0..=9 skipped, 10 included
 	chunk = &Chunk{
 		Blocks: []*WrappedBlock{
 			wrappedBlock2,
 		},
 	}
-	bytes, err = chunk.Encode()
+	bytes, err = chunk.Encode(0)
 	hexString = hex.EncodeToString(bytes)
 	assert.NoError(t, err)
 	assert.Equal(t, 97, len(bytes))
-	assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000100000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
+	assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000b00000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)

 	// Test case 5: when the chunk contains two blocks each with 1 L1MsgTx
 	chunk = &Chunk{
@@ -72,11 +74,11 @@ func TestChunkEncode(t *testing.T) {
 			wrappedBlock2,
 		},
 	}
-	bytes, err = chunk.Encode()
+	bytes, err = chunk.Encode(0)
 	hexString = hex.EncodeToString(bytes)
 	assert.NoError(t, err)
 	assert.Equal(t, 193, len(bytes))
-	assert.Equal(t, "02000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a120000020001000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000100000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e105808080808000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
+	assert.Equal(t, "02000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000b000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e105808080808000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
 }

 func TestChunkHash(t *testing.T) {
@@ -84,7 +86,7 @@ func TestChunkHash(t *testing.T) {
 	chunk := &Chunk{
 		Blocks: []*WrappedBlock{},
 	}
-	bytes, err := chunk.Hash()
+	bytes, err := chunk.Hash(0)
 	assert.Nil(t, bytes)
 	assert.Error(t, err)
 	assert.Contains(t, err.Error(), "number of blocks is 0")
@@ -99,7 +101,7 @@ func TestChunkHash(t *testing.T) {
 			wrappedBlock,
 		},
 	}
-	bytes, err = chunk.Hash()
+	bytes, err = chunk.Hash(0)
 	hexString := hex.EncodeToString(bytes)
 	assert.NoError(t, err)
 	assert.Equal(t, "78c839dfc494396c16b40946f32b3f4c3e8c2d4bfd04aefcf235edec474482f8", hexString)
@@ -115,7 +117,7 @@ func TestChunkHash(t *testing.T) {
 			wrappedBlock1,
 		},
 	}
-	bytes, err = chunk.Hash()
+	bytes, err = chunk.Hash(0)
 	hexString = hex.EncodeToString(bytes)
 	assert.NoError(t, err)
 	assert.Equal(t, "aa9e494f72bc6965857856f0fae6916f27b2a6591c714a573b2fab46df03b8ae", hexString)
@@ -131,7 +133,7 @@ func TestChunkHash(t *testing.T) {
 			wrappedBlock2,
 		},
 	}
-	bytes, err = chunk.Hash()
+	bytes, err = chunk.Hash(0)
 	hexString = hex.EncodeToString(bytes)
 	assert.NoError(t, err)
 	assert.Equal(t, "42967825696a129e7a83f082097aca982747480956dcaa448c9296e795c9a91a", hexString)
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )

-var tag = "v3.3.4"
+var tag = "v3.3.7"

 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
@@ -63,28 +63,6 @@ Initialize the storage of L1ScrollMessenger.
 | _rollup | address | The address of ScrollChain contract. |
 | _messageQueue | address | The address of L1MessageQueue contract. |

-### isL1MessageRelayed
-
-```solidity
-function isL1MessageRelayed(bytes32) external view returns (bool)
-```
-
-Mapping from relay id to relay status.
-
-
-
-#### Parameters
-
-| Name | Type | Description |
-|---|---|---|
-| _0 | bytes32 | undefined |
-
-#### Returns
-
-| Name | Type | Description |
-|---|---|---|
-| _0 | bool | undefined |
-
 ### isL1MessageSent

 ```solidity
@@ -2,7 +2,16 @@
 /* eslint-disable node/no-missing-import */
 import { expect } from "chai";
 import { BigNumber, constants } from "ethers";
-import { concat, getAddress, hexlify, keccak256, randomBytes, RLP } from "ethers/lib/utils";
+import {
+  concat,
+  getAddress,
+  hexlify,
+  keccak256,
+  randomBytes,
+  RLP,
+  stripZeros,
+  TransactionTypes,
+} from "ethers/lib/utils";
 import { ethers } from "hardhat";
 import { L1MessageQueue, L2GasPriceOracle } from "../typechain";
 import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
@@ -94,8 +103,8 @@ describe("L1MessageQueue", async () => {

   context("#computeTransactionHash", async () => {
     it("should succeed", async () => {
-      const sender = hexlify(randomBytes(20));
-      const target = hexlify(randomBytes(20));
+      const sender = "0xb2a70fab1a45b1b9be443b6567849a1702bc1232";
+      const target = "0xcb18150e4efefb6786130e289a5f61a82a5b86d7";
       const transactionType = "0x7E";

       for (const nonce of [
@@ -123,19 +132,30 @@ describe("L1MessageQueue", async () => {
         constants.MaxUint256,
       ]) {
         for (const dataLen of [0, 1, 2, 3, 4, 55, 56, 100]) {
-          const data = randomBytes(dataLen);
-          const transactionPayload = RLP.encode([
-            nonce.toHexString(),
-            gasLimit.toHexString(),
-            target,
-            value.toHexString(),
-            data,
-            sender,
-          ]);
-          const payload = concat([transactionType, transactionPayload]);
-          const expectedHash = keccak256(payload);
-          const computedHash = await queue.computeTransactionHash(sender, nonce, value, target, gasLimit, data);
-          expect(expectedHash).to.eq(computedHash);
+          const tests = [randomBytes(dataLen)];
+          if (dataLen === 1) {
+            for (const byte of [0, 1, 127, 128]) {
+              tests.push(Uint8Array.from([byte]));
+            }
+          }
+          for (const data of tests) {
+            const transactionPayload = RLP.encode([
+              stripZeros(nonce.toHexString()),
+              stripZeros(gasLimit.toHexString()),
+              target,
+              stripZeros(value.toHexString()),
+              data,
+              sender,
+            ]);
+            const payload = concat([transactionType, transactionPayload]);
+            const expectedHash = keccak256(payload);
+            const computedHash = await queue.computeTransactionHash(sender, nonce, value, target, gasLimit, data);
+            if (computedHash !== expectedHash) {
+              console.log(hexlify(transactionPayload));
+              console.log(nonce, gasLimit, target, value, data, sender);
+            }
+            expect(expectedHash).to.eq(computedHash);
+          }
         }
       }
     }
@@ -30,9 +30,6 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
      * Variables *
      *************/

-    /// @notice Mapping from relay id to relay status.
-    mapping(bytes32 => bool) public isL1MessageRelayed;
-
     /// @notice Mapping from L1 message hash to sent status.
     mapping(bytes32 => bool) public isL1MessageSent;

@@ -45,28 +42,6 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
     /// @notice The address of L1MessageQueue contract.
     address public messageQueue;

-    // @note move to ScrollMessengerBase in next big refactor
-    /// @dev The status of for non-reentrant check.
-    uint256 private _lock_status;
-
-    /**********************
-     * Function Modifiers *
-     **********************/
-
-    modifier nonReentrant() {
-        // On the first call to nonReentrant, _notEntered will be true
-        require(_lock_status != _ENTERED, "ReentrancyGuard: reentrant call");
-
-        // Any calls to nonReentrant after this point will fail
-        _lock_status = _ENTERED;
-
-        _;
-
-        // By storing the original value once again, a refund is triggered (see
-        // https://eips.ethereum.org/EIPS/eip-2200)
-        _lock_status = _NOT_ENTERED;
-    }
-
     /***************
      * Constructor *
      ***************/
@@ -162,9 +137,6 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
         } else {
             emit FailedRelayedMessage(_xDomainCalldataHash);
         }
-
-        bytes32 _relayId = keccak256(abi.encodePacked(_xDomainCalldataHash, msg.sender, block.number));
-        isL1MessageRelayed[_relayId] = true;
     }

     /// @inheritdoc IL1ScrollMessenger
@@ -139,18 +139,27 @@ contract L1MessageQueue is OwnableUpgradeable, IL1MessageQueue {
|
||||
}
|
||||
}
|
||||
|
||||
function store_uint(_ptr, v) -> ptr {
|
||||
// This is used for both store uint and single byte.
|
||||
// Integer zero is special handled by geth to encode as `0x80`
|
||||
function store_uint_or_byte(_ptr, v, is_uint) -> ptr {
|
||||
ptr := _ptr
|
||||
switch lt(v, 128)
|
||||
case 1 {
|
||||
// single byte in the [0x00, 0x7f]
|
||||
mstore(ptr, shl(248, v))
|
||||
switch and(iszero(v), is_uint)
|
||||
case 1 {
|
||||
// integer 0
|
||||
mstore8(ptr, 0x80)
|
||||
}
|
||||
default {
|
||||
// single byte in the [0x00, 0x7f]
|
||||
mstore8(ptr, v)
|
||||
}
|
||||
ptr := add(ptr, 1)
|
||||
}
|
||||
default {
|
||||
// 1-32 bytes long
|
||||
let len := get_uint_bytes(v)
|
||||
mstore(ptr, shl(248, add(len, 0x80)))
|
||||
mstore8(ptr, add(len, 0x80))
|
||||
ptr := add(ptr, 1)
|
||||
mstore(ptr, shl(mul(8, sub(32, len)), v))
|
||||
ptr := add(ptr, len)
|
||||
@@ -160,7 +169,7 @@ contract L1MessageQueue is OwnableUpgradeable, IL1MessageQueue {
            function store_address(_ptr, v) -> ptr {
                ptr := _ptr
                // 20 bytes long
                mstore(ptr, shl(248, 0x94)) // 0x80 + 0x14
                mstore8(ptr, 0x94) // 0x80 + 0x14
                ptr := add(ptr, 1)
                mstore(ptr, shl(96, v))
                ptr := add(ptr, 0x14)

@@ -170,21 +179,21 @@ contract L1MessageQueue is OwnableUpgradeable, IL1MessageQueue {
            // 4 byte for list payload length
            let start_ptr := add(mload(0x40), 5)
            let ptr := start_ptr
            ptr := store_uint(ptr, _queueIndex)
            ptr := store_uint(ptr, _gasLimit)
            ptr := store_uint_or_byte(ptr, _queueIndex, 1)
            ptr := store_uint_or_byte(ptr, _gasLimit, 1)
            ptr := store_address(ptr, _target)
            ptr := store_uint(ptr, _value)
            ptr := store_uint_or_byte(ptr, _value, 1)

            switch eq(_data.length, 1)
            case 1 {
                // single byte
                ptr := store_uint(ptr, shr(248, calldataload(_data.offset)))
                ptr := store_uint_or_byte(ptr, byte(0, calldataload(_data.offset)), 0)
            }
            default {
                switch lt(_data.length, 56)
                case 1 {
                    // a string is 0-55 bytes long
                    mstore(ptr, shl(248, add(0x80, _data.length)))
                    mstore8(ptr, add(0x80, _data.length))
                    ptr := add(ptr, 1)
                    calldatacopy(ptr, _data.offset, _data.length)
                    ptr := add(ptr, _data.length)

@@ -192,7 +201,7 @@ contract L1MessageQueue is OwnableUpgradeable, IL1MessageQueue {
                default {
                    // a string is more than 55 bytes long
                    let len_bytes := get_uint_bytes(_data.length)
                    mstore(ptr, shl(248, add(0xb7, len_bytes)))
                    mstore8(ptr, add(0xb7, len_bytes))
                    ptr := add(ptr, 1)
                    mstore(ptr, shl(mul(8, sub(32, len_bytes)), _data.length))
                    ptr := add(ptr, len_bytes)

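Two things change across these encoder hunks: full 32-byte `mstore` prefix writes become single-byte `mstore8` writes so only the intended prefix byte is touched, and `store_uint` grows an `is_uint` flag so that integer zero encodes as `0x80` while a lone zero data byte encodes as `0x00`. A hand-rolled Go sketch of the same RLP rules for reference; the helper names are mine, not from the repo:

    package main

    import "fmt"

    // beBytes returns v as minimal big-endian bytes (the role of get_uint_bytes).
    func beBytes(v uint64) []byte {
        var be []byte
        for x := v; x > 0; x >>= 8 {
            be = append([]byte{byte(x)}, be...)
        }
        return be
    }

    // rlpEncodeUint mirrors store_uint_or_byte with is_uint = 1: zero encodes as
    // 0x80, values below 0x80 as a single byte, larger values length-prefixed.
    func rlpEncodeUint(v uint64) []byte {
        if v == 0 {
            return []byte{0x80}
        }
        if v < 0x80 {
            return []byte{byte(v)}
        }
        be := beBytes(v)
        return append([]byte{0x80 + byte(len(be))}, be...)
    }

    // rlpEncodeBytes mirrors the data branches: one small byte is its own
    // encoding, 0-55 bytes get a 0x80+len prefix, longer strings 0xb7+len-of-len.
    func rlpEncodeBytes(b []byte) []byte {
        if len(b) == 1 && b[0] < 0x80 {
            return b
        }
        if len(b) < 56 {
            return append([]byte{0x80 + byte(len(b))}, b...)
        }
        lenBytes := beBytes(uint64(len(b)))
        out := append([]byte{0xb7 + byte(len(lenBytes))}, lenBytes...)
        return append(out, b...)
    }

    func main() {
        fmt.Printf("% x\n", rlpEncodeUint(0))             // 80
        fmt.Printf("% x\n", rlpEncodeUint(0x7f))          // 7f
        fmt.Printf("% x\n", rlpEncodeUint(1024))          // 82 04 00
        fmt.Printf("% x\n", rlpEncodeBytes([]byte{0x00})) // 00 (single data byte, not 0x80)
    }
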
118 contracts/src/L1/rollup/MultipleVersionRollupVerifier.sol Normal file
@@ -0,0 +1,118 @@

// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";

import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";

contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /**********
     * Events *
     **********/

    /// @notice Emitted when the address of the verifier is updated.
    /// @param startBatchIndex The start batch index from which the verifier will be used.
    /// @param verifier The address of the new verifier.
    event UpdateVerifier(uint256 startBatchIndex, address verifier);

    /***********
     * Structs *
     ***********/

    struct Verifier {
        // The start batch index for the verifier.
        uint64 startBatchIndex;
        // The address of the zkevm verifier.
        address verifier;
    }

    /*************
     * Variables *
     *************/

    /// @notice The list of legacy zkevm verifiers, sorted by startBatchIndex in increasing order.
    Verifier[] public legacyVerifiers;

    /// @notice The latest zkevm verifier in use.
    Verifier public latestVerifier;

    /***************
     * Constructor *
     ***************/

    constructor(address _verifier) {
        require(_verifier != address(0), "zero verifier address");

        latestVerifier.verifier = _verifier;
    }

    /*************************
     * Public View Functions *
     *************************/

    /// @notice Return the number of legacy verifiers.
    function legacyVerifiersLength() external view returns (uint256) {
        return legacyVerifiers.length;
    }

    /// @notice Compute the verifier that should be used for a specific batch.
    /// @param _batchIndex The batch index to query.
    function getVerifier(uint256 _batchIndex) public view returns (address) {
        // Normally, we will use the latest verifier.
        Verifier memory _verifier = latestVerifier;

        if (_verifier.startBatchIndex > _batchIndex) {
            uint256 _length = legacyVerifiers.length;
            // In most cases, only the last few verifiers will be used by `ScrollChain`.
            // So we use linear search instead of binary search.
            unchecked {
                for (uint256 i = _length; i > 0; --i) {
                    _verifier = legacyVerifiers[i - 1];
                    if (_verifier.startBatchIndex <= _batchIndex) break;
                }
            }
        }

        return _verifier.verifier;
    }

    /*****************************
     * Public Mutating Functions *
     *****************************/

    /// @inheritdoc IRollupVerifier
    function verifyAggregateProof(
        uint256 _batchIndex,
        bytes calldata _aggrProof,
        bytes32 _publicInputHash
    ) external view override {
        address _verifier = getVerifier(_batchIndex);

        IZkEvmVerifier(_verifier).verify(_aggrProof, _publicInputHash);
    }

    /************************
     * Restricted Functions *
     ************************/

    /// @notice Update the address of the zkevm verifier.
    /// @param _startBatchIndex The start batch index from which the verifier will be used.
    /// @param _verifier The address of the new verifier.
    function updateVerifier(uint64 _startBatchIndex, address _verifier) external onlyOwner {
        Verifier memory _latestVerifier = latestVerifier;
        require(_startBatchIndex >= _latestVerifier.startBatchIndex, "start batch index too small");
        require(_verifier != address(0), "zero verifier address");

        if (_latestVerifier.startBatchIndex < _startBatchIndex) {
            legacyVerifiers.push(_latestVerifier);
            _latestVerifier.startBatchIndex = _startBatchIndex;
        }
        _latestVerifier.verifier = _verifier;

        latestVerifier = _latestVerifier;

        emit UpdateVerifier(_startBatchIndex, _verifier);
    }
}

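The contract keeps one mutable latestVerifier plus an append-only history, so ScrollChain can finalize old batches with the verifier that was active when they were committed; the backwards linear scan hits the most recent entries first. A small Go sketch of the same selection rule, with illustrative stand-in types:

    package main

    import "fmt"

    type verifierEntry struct {
        startBatchIndex uint64
        verifier        string
    }

    // getVerifier mirrors the Solidity logic: use the latest verifier unless
    // its startBatchIndex lies beyond the queried batch, otherwise scan the
    // legacy list (sorted ascending by startBatchIndex) backwards.
    func getVerifier(legacy []verifierEntry, latest verifierEntry, batchIndex uint64) string {
        v := latest
        if v.startBatchIndex > batchIndex {
            for i := len(legacy); i > 0; i-- {
                v = legacy[i-1]
                if v.startBatchIndex <= batchIndex {
                    break
                }
            }
        }
        return v.verifier
    }

    func main() {
        legacy := []verifierEntry{{0, "v0"}, {100, "v1"}}
        latest := verifierEntry{300, "v2"}
        fmt.Println(getVerifier(legacy, latest, 99))  // v0
        fmt.Println(getVerifier(legacy, latest, 100)) // v1
        fmt.Println(getVerifier(legacy, latest, 300)) // v2
    }

These boundaries match the expectations in the MultipleVersionRollupVerifierTest added later in this diff.
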
@@ -130,12 +130,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
     *****************************/

    /// @notice Import layer 2 genesis block
    /// @dev Although `_withdrawRoot` is always zero, we add this parameter for the convenience of unit testing.
    function importGenesisBatch(
        bytes calldata _batchHeader,
        bytes32 _stateRoot,
        bytes32 _withdrawRoot
    ) external {
    function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
        // check genesis batch header length
        require(_stateRoot != bytes32(0), "zero state root");

@@ -157,10 +152,9 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {

        committedBatches[0] = _batchHash;
        finalizedStateRoots[0] = _stateRoot;
        withdrawRoots[0] = _withdrawRoot;

        emit CommitBatch(_batchHash);
        emit FinalizeBatch(_batchHash, _stateRoot, _withdrawRoot);
        emit FinalizeBatch(_batchHash, _stateRoot, bytes32(0));
    }

    /// @inheritdoc IScrollChain

@@ -314,7 +308,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
        );

        // verify batch
        IRollupVerifier(verifier).verifyAggregateProof(_aggrProof, _publicInputHash);
        IRollupVerifier(verifier).verifyAggregateProof(_batchIndex, _aggrProof, _publicInputHash);

        // check and update lastFinalizedBatchIndex
        unchecked {

@@ -62,28 +62,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, PausableUpgradeable, IL2Scrol
    /// @notice The maximum number of times each L1 message can fail on L2.
    uint256 public maxFailedExecutionTimes;

    // @note move to ScrollMessengerBase in next big refactor
    /// @dev The status of the non-reentrant check.
    uint256 private _lock_status;

    /**********************
     * Function Modifiers *
     **********************/

    modifier nonReentrant() {
        // On the first call to nonReentrant, _notEntered will be true
        require(_lock_status != _ENTERED, "ReentrancyGuard: reentrant call");

        // Any calls to nonReentrant after this point will fail
        _lock_status = _ENTERED;

        _;

        // By storing the original value once again, a refund is triggered (see
        // https://eips.ethereum.org/EIPS/eip-2200)
        _lock_status = _NOT_ENTERED;
    }

    /***************
     * Constructor *
     ***************/

@@ -126,14 +104,16 @@ contract L2ScrollMessenger is ScrollMessengerBase, PausableUpgradeable, IL2Scrol
        require(_expectedStateRoot != bytes32(0), "Block is not imported");

        bytes32 _storageKey;
        // `mapping(bytes32 => bool) public isL1MessageSent` is the 105-th slot of contract `L1ScrollMessenger`.
        // `mapping(bytes32 => bool) public isL1MessageSent` is the 155-th slot of contract `L1ScrollMessenger`.
        // + 1 from `Initializable`
        // + 50 from `OwnableUpgradeable`
        // + 50 from `ContextUpgradeable`
        // + 4 from `ScrollMessengerBase`
        // + 50 from `PausableUpgradeable`
        // + 2-nd in `L1ScrollMessenger`
        // + 1-st in `L1ScrollMessenger`
        assembly {
            mstore(0x00, _msgHash)
            mstore(0x20, 105)
            mstore(0x20, 155)
            _storageKey := keccak256(0x00, 0x40)
        }

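The corrected slot arithmetic adds up: 1 + 50 + 50 + 4 + 50 = 155 inherited slots, so isL1MessageSent, the first variable declared in L1ScrollMessenger itself, sits at slot 155 (and isL2MessageExecuted, just below, at 156). For a Solidity mapping in slot p, the value for key k lives at keccak256(k || p) with both operands as 32-byte words, which is exactly what the assembly computes. A Go sketch of the same derivation:

    package main

    import (
        "encoding/binary"
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    // mappingSlot returns keccak256(key || slot), the storage key of
    // mapping[key] for a mapping declared in the given slot.
    func mappingSlot(key [32]byte, slot uint64) [32]byte {
        var buf [64]byte
        copy(buf[:32], key[:])
        binary.BigEndian.PutUint64(buf[56:], slot) // slot as a 32-byte big-endian word

        h := sha3.NewLegacyKeccak256()
        h.Write(buf[:])
        var out [32]byte
        copy(out[:], h.Sum(nil))
        return out
    }

    func main() {
        var msgHash [32]byte
        msgHash[31] = 1 // illustrative message hash
        fmt.Printf("%x\n", mappingSlot(msgHash, 155)) // storage key for isL1MessageSent[msgHash]
    }
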
@@ -161,14 +141,16 @@ contract L2ScrollMessenger is ScrollMessengerBase, PausableUpgradeable, IL2Scrol
        require(_expectedStateRoot != bytes32(0), "Block not imported");

        bytes32 _storageKey;
        // `mapping(bytes32 => bool) public isL2MessageExecuted` is the 106-th slot of contract `L1ScrollMessenger`.
        // `mapping(bytes32 => bool) public isL2MessageExecuted` is the 156-th slot of contract `L1ScrollMessenger`.
        // + 1 from `Initializable`
        // + 50 from `OwnableUpgradeable`
        // + 50 from `ContextUpgradeable`
        // + 4 from `ScrollMessengerBase`
        // + 50 from `PausableUpgradeable`
        // + 3-rd in `L1ScrollMessenger`
        // + 2-nd in `L1ScrollMessenger`
        assembly {
            mstore(0x00, _msgHash)
            mstore(0x20, 106)
            mstore(0x20, 156)
            _storageKey := keccak256(0x00, 0x40)
        }

@@ -51,7 +51,8 @@ contract WETH9 {
            balanceOf[msg.sender] -= wad;
        }

        payable(msg.sender).transfer(wad);
        (bool success, ) = msg.sender.call{value: wad}("");
        require(success, "withdraw ETH failed");

        emit Withdrawal(msg.sender, wad);
    }

@@ -38,6 +38,28 @@ abstract contract ScrollMessengerBase is OwnableUpgradeable, IScrollMessenger {
    /// @notice The address of fee vault, collecting cross domain messaging fee.
    address public feeVault;

    // @note move to ScrollMessengerBase in next big refactor
    /// @dev The status of the non-reentrant check.
    uint256 private _lock_status;

    /**********************
     * Function Modifiers *
     **********************/

    modifier nonReentrant() {
        // On the first call to nonReentrant, _notEntered will be true
        require(_lock_status != _ENTERED, "ReentrancyGuard: reentrant call");

        // Any calls to nonReentrant after this point will fail
        _lock_status = _ENTERED;

        _;

        // By storing the original value once again, a refund is triggered (see
        // https://eips.ethereum.org/EIPS/eip-2200)
        _lock_status = _NOT_ENTERED;
    }

    /***************
     * Constructor *
     ***************/

@@ -4,7 +4,12 @@ pragma solidity ^0.8.0;

interface IRollupVerifier {
    /// @notice Verify aggregate zk proof.
    /// @param batchIndex The batch index to verify.
    /// @param aggrProof The aggregated proof.
    /// @param publicInputHash The public input hash.
    function verifyAggregateProof(bytes calldata aggrProof, bytes32 publicInputHash) external view;
    function verifyAggregateProof(
        uint256 batchIndex,
        bytes calldata aggrProof,
        bytes32 publicInputHash
    ) external view;
}

10 contracts/src/libraries/verifier/IZkEvmVerifier.sol Normal file
@@ -0,0 +1,10 @@

// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

interface IZkEvmVerifier {
    /// @notice Verify aggregate zk proof.
    /// @param aggrProof The aggregated proof.
    /// @param publicInputHash The public input hash.
    function verify(bytes calldata aggrProof, bytes32 publicInputHash) external view;
}

@@ -12,6 +12,10 @@ import {Whitelist} from "../L2/predeploys/Whitelist.sol";
import {L1ScrollMessenger} from "../L1/L1ScrollMessenger.sol";
import {L2ScrollMessenger} from "../L2/L2ScrollMessenger.sol";

import {MockRollupVerifier} from "./mocks/MockRollupVerifier.sol";

// solhint-disable no-inline-assembly

abstract contract L1GatewayTestBase is DSTestPlus {
    // from L1MessageQueue
    event QueueTransaction(
@@ -44,6 +48,8 @@ abstract contract L1GatewayTestBase is DSTestPlus {
    EnforcedTxGateway internal enforcedTxGateway;
    ScrollChain internal rollup;

    MockRollupVerifier internal verifier;

    address internal feeVault;
    Whitelist private whitelist;

@@ -59,6 +65,7 @@ abstract contract L1GatewayTestBase is DSTestPlus {
        rollup = new ScrollChain(1233);
        enforcedTxGateway = new EnforcedTxGateway();
        whitelist = new Whitelist(address(this));
        verifier = new MockRollupVerifier();

        // Deploy L2 contracts
        l2Messenger = new L2ScrollMessenger(address(0), address(0), address(0));
@@ -74,7 +81,7 @@ abstract contract L1GatewayTestBase is DSTestPlus {
        );
        gasOracle.initialize(0, 0, 0, 0);
        gasOracle.updateWhitelist(address(whitelist));
        rollup.initialize(address(messageQueue), address(0), 44);
        rollup.initialize(address(messageQueue), address(verifier), 44);

        address[] memory _accounts = new address[](1);
        _accounts[0] = address(this);

@@ -82,11 +89,40 @@ abstract contract L1GatewayTestBase is DSTestPlus {
    }

    function prepareL2MessageRoot(bytes32 messageHash) internal {
        bytes memory _batchHeader = new bytes(89);
        rollup.updateSequencer(address(this), true);
        rollup.updateProver(address(this), true);

        // import genesis batch
        bytes memory batchHeader0 = new bytes(89);
        assembly {
            mstore(add(_batchHeader, add(0x20, 25)), 1)
            mstore(add(batchHeader0, add(0x20, 25)), 1)
        }
        rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1)));
        bytes32 batchHash0 = rollup.committedBatches(0);

        // commit one batch
        bytes[] memory chunks = new bytes[](1);
        bytes memory chunk0 = new bytes(1 + 60);
        chunk0[0] = bytes1(uint8(1)); // one block in this chunk
        chunks[0] = chunk0;
        rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));

        bytes memory batchHeader1 = new bytes(89);
        assembly {
            mstore(add(batchHeader1, 0x20), 0) // version
            mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex
            mstore(add(batchHeader1, add(0x20, 9)), 0) // l1MessagePopped
            mstore(add(batchHeader1, add(0x20, 17)), 0) // totalL1MessagePopped
            mstore(add(batchHeader1, add(0x20, 25)), 0x246394445f4fe64ed5598554d55d1682d6fb3fe04bf58eb54ef81d1189fafb51) // dataHash
            mstore(add(batchHeader1, add(0x20, 57)), batchHash0) // parentBatchHash
        }

        rollup.importGenesisBatch(_batchHeader, bytes32(uint256(1)), messageHash);
        rollup.finalizeBatchWithProof(
            batchHeader1,
            bytes32(uint256(1)),
            bytes32(uint256(2)),
            messageHash,
            new bytes(0)
        );
    }
}

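The assembly blocks in this diff all build the same 89-byte batch header: version (1 byte) at offset 0, batchIndex / l1MessagePopped / totalL1MessagePopped (8 bytes each, big-endian) at offsets 1/9/17, dataHash at 25, parentBatchHash at 57, and optional skipped-L1-message bitmap words from 89. A Go sketch of the same encoding, for illustration only:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // encodeBatchHeader lays out the fields exactly as the test assembly does.
    func encodeBatchHeader(version byte, batchIndex, l1Popped, totalL1Popped uint64,
        dataHash, parentHash [32]byte, bitmap []byte) []byte {
        header := make([]byte, 89+len(bitmap))
        header[0] = version
        binary.BigEndian.PutUint64(header[1:9], batchIndex)
        binary.BigEndian.PutUint64(header[9:17], l1Popped)
        binary.BigEndian.PutUint64(header[17:25], totalL1Popped)
        copy(header[25:57], dataHash[:])
        copy(header[57:89], parentHash[:])
        copy(header[89:], bitmap)
        return header
    }

    func main() {
        var dataHash, parentHash [32]byte
        // mstore(add(batchHeader0, add(0x20, 25)), 1) writes a word whose last
        // byte is 1 at offset 25, i.e. dataHash = bytes32(uint256(1)).
        dataHash[31] = 1
        fmt.Printf("%x\n", encodeBatchHeader(0, 1, 0, 0, dataHash, parentHash, nil))
    }
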
@@ -12,81 +12,47 @@ import {Whitelist} from "../L2/predeploys/Whitelist.sol";
import {IL1ScrollMessenger, L1ScrollMessenger} from "../L1/L1ScrollMessenger.sol";
import {L2ScrollMessenger} from "../L2/L2ScrollMessenger.sol";

contract L1ScrollMessengerTest is DSTestPlus {
    L2ScrollMessenger internal l2Messenger;

    address internal feeVault;

    L1ScrollMessenger internal l1Messenger;
    ScrollChain internal scrollChain;
    L1MessageQueue internal l1MessageQueue;
    L2GasPriceOracle internal gasOracle;
    EnforcedTxGateway internal enforcedTxGateway;
    Whitelist internal whitelist;
import {L1GatewayTestBase} from "./L1GatewayTestBase.t.sol";

contract L1ScrollMessengerTest is L1GatewayTestBase {
    function setUp() public {
        // Deploy L2 contracts
        l2Messenger = new L2ScrollMessenger(address(0), address(0), address(0));

        // Deploy L1 contracts
        scrollChain = new ScrollChain(0);
        l1MessageQueue = new L1MessageQueue();
        l1Messenger = new L1ScrollMessenger();
        gasOracle = new L2GasPriceOracle();
        enforcedTxGateway = new EnforcedTxGateway();
        whitelist = new Whitelist(address(this));

        // Initialize L1 contracts
        l1Messenger.initialize(address(l2Messenger), feeVault, address(scrollChain), address(l1MessageQueue));
        l1MessageQueue.initialize(
            address(l1Messenger),
            address(scrollChain),
            address(enforcedTxGateway),
            address(gasOracle),
            10000000
        );
        gasOracle.initialize(0, 0, 0, 0);
        scrollChain.initialize(address(l1MessageQueue), address(0), 44);

        gasOracle.updateWhitelist(address(whitelist));
        address[] memory _accounts = new address[](1);
        _accounts[0] = address(this);
        whitelist.updateWhitelistStatus(_accounts, true);
        L1GatewayTestBase.setUpBase();
    }

    function testForbidCallMessageQueueFromL2() external {
        // import genesis batch
        bytes memory _batchHeader = new bytes(89);
        assembly {
            mstore(add(_batchHeader, add(0x20, 25)), 1)
        }
        scrollChain.importGenesisBatch(
            _batchHeader,
            bytes32(uint256(1)),
            bytes32(0x3152134c22e545ab5d345248502b4f04ef5b45f735f939c7fe6ddc0ffefc9c52)
        bytes32 _xDomainCalldataHash = keccak256(
            abi.encodeWithSignature(
                "relayMessage(address,address,uint256,uint256,bytes)",
                address(this),
                address(messageQueue),
                0,
                0,
                new bytes(0)
            )
        );
        prepareL2MessageRoot(_xDomainCalldataHash);

        IL1ScrollMessenger.L2MessageProof memory proof;
        proof.batchIndex = scrollChain.lastFinalizedBatchIndex();
        proof.batchIndex = rollup.lastFinalizedBatchIndex();

        hevm.expectRevert("Forbid to call message queue");
        l1Messenger.relayMessageWithProof(address(this), address(l1MessageQueue), 0, 0, new bytes(0), proof);
        l1Messenger.relayMessageWithProof(address(this), address(messageQueue), 0, 0, new bytes(0), proof);
    }

    function testForbidCallSelfFromL2() external {
        // import genesis batch
        bytes memory _batchHeader = new bytes(89);
        assembly {
            mstore(add(_batchHeader, 57), 1)
        }
        scrollChain.importGenesisBatch(
            _batchHeader,
            bytes32(uint256(1)),
            bytes32(0xf7c03e2b13c88e3fca1410b228b001dd94e3f5ab4b4a4a6981d09a4eb3e5b631)
        bytes32 _xDomainCalldataHash = keccak256(
            abi.encodeWithSignature(
                "relayMessage(address,address,uint256,uint256,bytes)",
                address(this),
                address(l1Messenger),
                0,
                0,
                new bytes(0)
            )
        );

        prepareL2MessageRoot(_xDomainCalldataHash);
        IL1ScrollMessenger.L2MessageProof memory proof;
        proof.batchIndex = scrollChain.lastFinalizedBatchIndex();
        proof.batchIndex = rollup.lastFinalizedBatchIndex();

        hevm.expectRevert("Forbid to call self");
        l1Messenger.relayMessageWithProof(address(this), address(l1Messenger), 0, 0, new bytes(0), proof);

@@ -110,7 +76,9 @@ contract L1ScrollMessengerTest is DSTestPlus {

    function testReplayMessage(uint256 exceedValue, address refundAddress) external {
        hevm.assume(refundAddress.code.length == 0);
        hevm.assume(uint256(uint160(refundAddress)) > 100); // ignore some precompile contracts
        hevm.assume(uint256(uint160(refundAddress)) > uint256(100)); // ignore some precompile contracts
        hevm.assume(refundAddress != feeVault);
        hevm.assume(refundAddress != address(0x000000000000000000636F6e736F6c652e6c6f67)); // ignore console/console2

        exceedValue = bound(exceedValue, 1, address(this).balance / 2);

@@ -170,7 +138,7 @@ contract L1ScrollMessengerTest is DSTestPlus {
        l1Messenger.sendMessage{value: _fee + value}(address(0), value, hex"0011220033", gasLimit);

        // update max gas limit
        l1MessageQueue.updateMaxGasLimit(gasLimit);
        messageQueue.updateMaxGasLimit(gasLimit);
        l1Messenger.sendMessage{value: _fee + value}(address(0), value, hex"0011220033", gasLimit);
    }
}

105 contracts/src/test/MultipleVersionRollupVerifier.t.sol Normal file
@@ -0,0 +1,105 @@

// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";

import {L1MessageQueue} from "../L1/rollup/L1MessageQueue.sol";
import {MultipleVersionRollupVerifier} from "../L1/rollup/MultipleVersionRollupVerifier.sol";

import {MockZkEvmVerifier} from "./mocks/MockZkEvmVerifier.sol";

contract MultipleVersionRollupVerifierTest is DSTestPlus {
    // from MultipleVersionRollupVerifier
    event UpdateVerifier(uint256 startBatchIndex, address verifier);

    MultipleVersionRollupVerifier private verifier;
    MockZkEvmVerifier private v0;
    MockZkEvmVerifier private v1;
    MockZkEvmVerifier private v2;

    function setUp() external {
        v0 = new MockZkEvmVerifier();
        v1 = new MockZkEvmVerifier();
        v2 = new MockZkEvmVerifier();

        verifier = new MultipleVersionRollupVerifier(address(v0));
    }

    function testUpdateVerifier(address _newVerifier) external {
        hevm.assume(_newVerifier != address(0));

        // set by non-owner, should revert
        hevm.startPrank(address(1));
        hevm.expectRevert("Ownable: caller is not the owner");
        verifier.updateVerifier(0, address(0));
        hevm.stopPrank();

        // zero verifier address, revert
        hevm.expectRevert("zero verifier address");
        verifier.updateVerifier(0, address(0));

        // change to a random verifier
        assertEq(verifier.legacyVerifiersLength(), 0);
        verifier.updateVerifier(uint64(100), _newVerifier);
        assertEq(verifier.legacyVerifiersLength(), 1);
        (uint64 _startBatchIndex, address _verifier) = verifier.latestVerifier();
        assertEq(_startBatchIndex, uint64(100));
        assertEq(_verifier, _newVerifier);
        (_startBatchIndex, _verifier) = verifier.legacyVerifiers(0);
        assertEq(_startBatchIndex, uint64(0));
        assertEq(_verifier, address(v0));

        // change to same batch index
        verifier.updateVerifier(uint64(100), address(v1));
        (_startBatchIndex, _verifier) = verifier.latestVerifier();
        assertEq(_startBatchIndex, uint64(100));
        assertEq(_verifier, address(v1));
        (_startBatchIndex, _verifier) = verifier.legacyVerifiers(0);
        assertEq(_startBatchIndex, uint64(0));
        assertEq(_verifier, address(v0));

        // start batch index too small, revert
        hevm.expectRevert("start batch index too small");
        verifier.updateVerifier(99, _newVerifier);
    }

    function testGetVerifier() external {
        verifier.updateVerifier(100, address(v1));
        verifier.updateVerifier(300, address(v2));

        assertEq(verifier.getVerifier(0), address(v0));
        assertEq(verifier.getVerifier(1), address(v0));
        assertEq(verifier.getVerifier(99), address(v0));
        assertEq(verifier.getVerifier(100), address(v1));
        assertEq(verifier.getVerifier(101), address(v1));
        assertEq(verifier.getVerifier(299), address(v1));
        assertEq(verifier.getVerifier(300), address(v2));
        assertEq(verifier.getVerifier(301), address(v2));
        assertEq(verifier.getVerifier(10000), address(v2));
    }

    function testVerifyAggregateProof() external {
        verifier.updateVerifier(100, address(v1));
        verifier.updateVerifier(300, address(v2));

        hevm.expectRevert(abi.encode(address(v0)));
        verifier.verifyAggregateProof(0, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v0)));
        verifier.verifyAggregateProof(1, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v0)));
        verifier.verifyAggregateProof(99, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v1)));
        verifier.verifyAggregateProof(100, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v1)));
        verifier.verifyAggregateProof(101, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v1)));
        verifier.verifyAggregateProof(299, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v2)));
        verifier.verifyAggregateProof(300, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v2)));
        verifier.verifyAggregateProof(301, new bytes(0), bytes32(0));
        hevm.expectRevert(abi.encode(address(v2)));
        verifier.verifyAggregateProof(10000, new bytes(0), bytes32(0));
    }
}

@@ -53,7 +53,7 @@ contract ScrollChainTest is DSTestPlus {
        assembly {
            mstore(add(batchHeader0, add(0x20, 25)), 1)
        }
        rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1)), bytes32(uint256(0)));
        rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1)));

        // caller not sequencer, revert
        hevm.expectRevert("caller not sequencer");
@@ -136,7 +136,7 @@ contract ScrollChainTest is DSTestPlus {
        assembly {
            mstore(add(batchHeader0, add(0x20, 25)), 1)
        }
        rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1)), bytes32(uint256(0)));
        rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1)));
        bytes32 batchHash0 = rollup.committedBatches(0);

        bytes[] memory chunks = new bytes[](1);
@@ -228,7 +228,7 @@ contract ScrollChainTest is DSTestPlus {
        assembly {
            mstore(add(batchHeader0, add(0x20, 25)), 1)
        }
        rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1)), bytes32(uint256(0)));
        rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1)));
        bytes32 batchHash0 = rollup.committedBatches(0);

        bytes memory bitmap;

@@ -243,28 +243,28 @@ contract ScrollChainTest is DSTestPlus {
        // 0000000000000000000000000000000000000000000000000000000000000000
        // 0000000000000000
        // 0001
        // 50c3caa727394b95dc4885b7d25033ed22ac772b985fb274f2a7c0699a11346d
        // a2277fd30bbbe74323309023b56035b376d7768ad237ae4fc46ead7dc9591ae1
        // => data hash for chunk0
        // bb88f47194a07d59ed17bc9b2015f83d0afea8f7892d9c5f0b6565563bf06b26
        // 9ef1e5694bdb014a1eea42be756a8f63bfd8781d6332e9ef3b5126d90c62f110
        // => data hash for all chunks
        // 038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340
        // d9cb6bf9264006fcea490d5c261f7453ab95b1b26033a3805996791b8e3a62f3
        // => payload for batch header
        // 00
        // 0000000000000002
        // 0000000000000001
        // 0000000000000001
        // 038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340
        // 0000000000000001
        // d9cb6bf9264006fcea490d5c261f7453ab95b1b26033a3805996791b8e3a62f3
        // 119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba61
        // 0000000000000000000000000000000000000000000000000000000000000000
        // => hash for batch header
        // cef70bf80683c4d9b8b2813e90c314e8c56648e231300b8cfed9d666b0caf14e
        // 00847173b29b238cf319cde79512b7c213e5a8b4138daa7051914c4592b6dfc7
        bytes memory batchHeader1 = new bytes(89 + 32);
        assembly {
            mstore(add(batchHeader1, 0x20), 0) // version
            mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex = 1
            mstore(add(batchHeader1, add(0x20, 9)), shl(192, 1)) // l1MessagePopped = 1
            mstore(add(batchHeader1, add(0x20, 17)), shl(192, 1)) // totalL1MessagePopped = 1
            mstore(add(batchHeader1, add(0x20, 25)), 0x038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340) // dataHash
            mstore(add(batchHeader1, add(0x20, 25)), 0xd9cb6bf9264006fcea490d5c261f7453ab95b1b26033a3805996791b8e3a62f3) // dataHash
            mstore(add(batchHeader1, add(0x20, 57)), batchHash0) // parentBatchHash
            mstore(add(batchHeader1, add(0x20, 89)), 0) // bitmap0
        }

@@ -280,7 +280,7 @@ contract ScrollChainTest is DSTestPlus {
        rollup.commitBatch(0, batchHeader0, chunks, bitmap);
        assertBoolEq(rollup.isBatchFinalized(1), false);
        bytes32 batchHash1 = rollup.committedBatches(1);
        assertEq(batchHash1, bytes32(0xcef70bf80683c4d9b8b2813e90c314e8c56648e231300b8cfed9d666b0caf14e));
        assertEq(batchHash1, bytes32(0x00847173b29b238cf319cde79512b7c213e5a8b4138daa7051914c4592b6dfc7));

        // finalize batch1
        rollup.finalizeBatchWithProof(

@@ -330,26 +330,26 @@ contract ScrollChainTest is DSTestPlus {
        // 012c
        // ... (some tx hashes)
        // => data hash for chunk2
        // 5c91563ee8be18cb94accfc83728f883ff5e3aa600fd0799e0a4e39afc7970b9
        // 0520f1fbe159af97fdf1d6cfcfe7605f99f7bfe3ed876e87b64250b1810df00b
        // => data hash for all chunks
        // bf38f308e0a87ed7bf92fa2da038fa1d59a7b9801eb0f6d487f8eef528632145
        // f52343299f6379fd15b20b23d51fc61b9b357b124be112686626b6278bcffa83
        // => payload for batch header
        // 00
        // 0000000000000002
        // 0000000000000108
        // 0000000000000109
        // bf38f308e0a87ed7bf92fa2da038fa1d59a7b9801eb0f6d487f8eef528632145
        // f52343299f6379fd15b20b23d51fc61b9b357b124be112686626b6278bcffa83
        // cef70bf80683c4d9b8b2813e90c314e8c56648e231300b8cfed9d666b0caf14e
        // aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa800000000000000000000000000000000000000000000000000000000000000aa
        // => hash for batch header
        // 17fe6c12739f3a6261ae6db6486f41758dbd5d0508f19a5ca9ac37df67bbfec2
        // 2231cf185a5c07931584f970738b4cd2ae4fb39e2d90853b26746c7616ea71b9
        bytes memory batchHeader2 = new bytes(89 + 32 + 32);
        assembly {
            mstore(add(batchHeader2, 0x20), 0) // version
            mstore(add(batchHeader2, add(0x20, 1)), shl(192, 2)) // batchIndex = 2
            mstore(add(batchHeader2, add(0x20, 9)), shl(192, 264)) // l1MessagePopped = 264
            mstore(add(batchHeader2, add(0x20, 17)), shl(192, 265)) // totalL1MessagePopped = 265
            mstore(add(batchHeader2, add(0x20, 25)), 0xbf38f308e0a87ed7bf92fa2da038fa1d59a7b9801eb0f6d487f8eef528632145) // dataHash
            mstore(add(batchHeader2, add(0x20, 25)), 0xf52343299f6379fd15b20b23d51fc61b9b357b124be112686626b6278bcffa83) // dataHash
            mstore(add(batchHeader2, add(0x20, 57)), batchHash1) // parentBatchHash
            mstore(
                add(batchHeader2, add(0x20, 89)),

@@ -398,7 +398,7 @@ contract ScrollChainTest is DSTestPlus {
        rollup.commitBatch(0, batchHeader1, chunks, bitmap);
        assertBoolEq(rollup.isBatchFinalized(2), false);
        bytes32 batchHash2 = rollup.committedBatches(2);
        assertEq(batchHash2, bytes32(0x17fe6c12739f3a6261ae6db6486f41758dbd5d0508f19a5ca9ac37df67bbfec2));
        assertEq(batchHash2, bytes32(0x2231cf185a5c07931584f970738b4cd2ae4fb39e2d90853b26746c7616ea71b9));

        // verify committed batch correctly
        rollup.finalizeBatchWithProof(

@@ -450,7 +450,7 @@ contract ScrollChainTest is DSTestPlus {
        assembly {
            mstore(add(batchHeader0, add(0x20, 25)), 1)
        }
        rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1)), bytes32(uint256(0)));
        rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1)));
        bytes32 batchHash0 = rollup.committedBatches(0);

        bytes[] memory chunks = new bytes[](1);

@@ -577,52 +577,52 @@ contract ScrollChainTest is DSTestPlus {
        // zero state root, revert
        batchHeader = new bytes(89);
        hevm.expectRevert("zero state root");
        rollup.importGenesisBatch(batchHeader, bytes32(0), bytes32(0));
        rollup.importGenesisBatch(batchHeader, bytes32(0));

        // batch header length too small, revert
        batchHeader = new bytes(88);
        hevm.expectRevert("batch header length too small");
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)), bytes32(0));
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)));

        // wrong bitmap length, revert
        batchHeader = new bytes(90);
        hevm.expectRevert("wrong bitmap length");
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)), bytes32(0));
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)));

        // not all fields are zero, revert
        batchHeader = new bytes(89);
        batchHeader[0] = bytes1(uint8(1)); // version not zero
        hevm.expectRevert("not all fields are zero");
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)), bytes32(0));
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)));

        batchHeader = new bytes(89);
        batchHeader[1] = bytes1(uint8(1)); // batchIndex not zero
        hevm.expectRevert("not all fields are zero");
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)), bytes32(0));
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)));

        batchHeader = new bytes(89 + 32);
        assembly {
            mstore(add(batchHeader, add(0x20, 9)), shl(192, 1)) // l1MessagePopped not zero
        }
        hevm.expectRevert("not all fields are zero");
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)), bytes32(0));
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)));

        batchHeader = new bytes(89);
        batchHeader[17] = bytes1(uint8(1)); // totalL1MessagePopped not zero
        hevm.expectRevert("not all fields are zero");
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)), bytes32(0));
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)));

        // zero data hash, revert
        batchHeader = new bytes(89);
        hevm.expectRevert("zero data hash");
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)), bytes32(0));
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)));

        // nonzero parent batch hash, revert
        batchHeader = new bytes(89);
        batchHeader[25] = bytes1(uint8(1)); // dataHash not zero
        batchHeader[57] = bytes1(uint8(1)); // parentBatchHash not zero
        hevm.expectRevert("nonzero parent batch hash");
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)), bytes32(0));
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)));

        // import correctly
        batchHeader = new bytes(89);
@@ -630,13 +630,13 @@ contract ScrollChainTest is DSTestPlus {
        assertEq(rollup.finalizedStateRoots(0), bytes32(0));
        assertEq(rollup.withdrawRoots(0), bytes32(0));
        assertEq(rollup.committedBatches(0), bytes32(0));
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)), bytes32(uint256(2)));
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)));
        assertEq(rollup.finalizedStateRoots(0), bytes32(uint256(1)));
        assertEq(rollup.withdrawRoots(0), bytes32(uint256(2)));
        assertEq(rollup.withdrawRoots(0), bytes32(0));
        assertGt(uint256(rollup.committedBatches(0)), 0);

        // Genesis batch imported, revert
        hevm.expectRevert("Genesis batch imported");
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)), bytes32(uint256(2)));
        rollup.importGenesisBatch(batchHeader, bytes32(uint256(1)));
    }
}

@@ -6,5 +6,9 @@ import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";

contract MockRollupVerifier is IRollupVerifier {
    /// @inheritdoc IRollupVerifier
    function verifyAggregateProof(bytes calldata, bytes32) external view {}
    function verifyAggregateProof(
        uint256,
        bytes calldata,
        bytes32
    ) external view {}
}

14 contracts/src/test/mocks/MockZkEvmVerifier.sol Normal file
@@ -0,0 +1,14 @@

// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";

contract MockZkEvmVerifier is IZkEvmVerifier {
    event Called(address);

    /// @inheritdoc IZkEvmVerifier
    function verify(bytes calldata, bytes32) external view {
        revert(string(abi.encode(address(this))));
    }
}

@@ -25,6 +25,13 @@ type RollerAPI interface {
    SubmitProof(proof *message.ProofMsg) error
}

// AdminAPI lets the coordinator's operator manage the task-sending process.
type AdminAPI interface {
    StartSend()
    PauseSend()
    PauseSendUntil(batchIdx uint64)
}

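Since the Manager registers these methods under the "admin" RPC namespace (see the APIs() change further down), they should surface as geth-style RPC calls. A hedged sketch of driving them from Go; the endpoint URL and the exact method names (admin_startSend etc., following go-ethereum's namespace_method convention) are assumptions, not taken from this diff:

    package main

    import (
        "log"

        "github.com/scroll-tech/go-ethereum/rpc"
    )

    func main() {
        // Placeholder address for the coordinator's RPC endpoint.
        client, err := rpc.Dial("http://localhost:8390")
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        // Pause task sending until batch 1200, then resume it.
        if err := client.Call(nil, "admin_pauseSendUntil", uint64(1200)); err != nil {
            log.Fatal(err)
        }
        if err := client.Call(nil, "admin_startSend"); err != nil {
            log.Fatal(err)
        }
    }
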
// RequestToken generates and sends back a register token for the roller
func (m *Manager) RequestToken(authMsg *message.AuthMsg) (string, error) {
    if ok, err := authMsg.Verify(); !ok {
@@ -127,3 +134,18 @@ func (m *Manager) SubmitProof(proof *message.ProofMsg) error {

    return nil
}

// StartSend starts sending basic tasks.
func (m *Manager) StartSend() {
    m.StartSendTask()
}

// PauseSend pauses sending basic tasks.
func (m *Manager) PauseSend() {
    m.PauseSendTask()
}

// PauseSendUntil pauses sending basic tasks until batchIdx.
func (m *Manager) PauseSendUntil(batchIdx uint64) {
    m.PauseSendTaskUntil(batchIdx)
}

@@ -17,7 +17,9 @@ const (

// RollerManagerConfig loads sequencer configuration items.
type RollerManagerConfig struct {
    CompressionLevel int `json:"compression_level,omitempty"`
    PauseSendTask      bool   `json:"pause_send_task"`
    PauseSendTaskUntil uint64 `json:"pause_send_task_until"`
    CompressionLevel   int    `json:"compression_level,omitempty"`
    // asc or desc (default: asc)
    OrderSession string `json:"order_session,omitempty"`
    // The number of rollers to pick per proof generation session.

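Based only on the JSON tags above, a deployment could start the coordinator with task sending already paused until a given batch. A self-contained sketch; the local struct mirrors the fields shown and stands in for the real config type, and the values are illustrative:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Local mirror of the fields added to RollerManagerConfig above.
    type rollerManagerConfig struct {
        PauseSendTask      bool   `json:"pause_send_task"`
        PauseSendTaskUntil uint64 `json:"pause_send_task_until"`
        CompressionLevel   int    `json:"compression_level,omitempty"`
    }

    func main() {
        // Start with task sending paused until batch 1200.
        raw := []byte(`{"pause_send_task": true, "pause_send_task_until": 1200}`)
        var cfg rollerManagerConfig
        if err := json.Unmarshal(raw, &cfg); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", cfg)
    }
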
@@ -9,6 +9,7 @@ require (
    github.com/scroll-tech/go-ethereum v1.10.14-0.20230508165858-27a3830afa61
    github.com/stretchr/testify v1.8.2
    github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
    go.uber.org/atomic v1.11.0
    golang.org/x/exp v0.0.0-20230206171751-46f607a40771
    golang.org/x/sync v0.1.0
)

@@ -133,6 +133,8 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRT
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=

@@ -15,6 +15,7 @@ import (
    "github.com/scroll-tech/go-ethereum/log"
    geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
    "github.com/scroll-tech/go-ethereum/rpc"
    uatomic "go.uber.org/atomic"
    "golang.org/x/exp/rand"

    "scroll-tech/common/metrics"

@@ -75,7 +76,9 @@ type Manager struct {
    cfg *config.RollerManagerConfig

    // The indicator whether the backend is running or not.
    running int32
    running            int32
    sendTaskPaused     *uatomic.Bool
    pauseUntilBatchIdx *uatomic.Uint64

    // A mutex guarding the boolean below.
    mu sync.RWMutex

@@ -117,6 +120,8 @@ func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmF
    return &Manager{
        ctx:                ctx,
        cfg:                cfg,
        sendTaskPaused:     uatomic.NewBool(cfg.PauseSendTask),
        pauseUntilBatchIdx: uatomic.NewUint64(cfg.PauseSendTaskUntil),
        rollerPool:         cmap.New(),
        sessions:           make(map[string]*session),
        failedSessionInfos: make(map[string]*SessionInfo),

@@ -201,7 +206,13 @@ func (m *Manager) Loop() {
                }
            }
            // Select basic type roller and send message
            for len(tasks) > 0 && m.StartBasicProofGenerationSession(tasks[0], nil) {
            for len(tasks) > 0 {
                if m.isSendTaskPaused(tasks[0].Index) {
                    break
                }
                if !m.StartBasicProofGenerationSession(tasks[0], nil) {
                    break
                }
                tasks = tasks[1:]
            }
        case <-m.ctx.Done():

@@ -559,6 +570,11 @@ func (m *Manager) APIs() []rpc.API {
            Service:   RollerAPI(m),
            Public:    true,
        },
        {
            Namespace: "admin",
            Service:   AdminAPI(m),
            Public:    true,
        },
        {
            Namespace: "debug",
            Public:    true,

@@ -567,6 +583,26 @@ func (m *Manager) APIs() []rpc.API {
        },
    }
}

// StartSendTask starts the basic task-sending loop.
func (m *Manager) StartSendTask() {
    m.sendTaskPaused.Store(false)
}

// PauseSendTask pauses the basic task-sending loop.
func (m *Manager) PauseSendTask() {
    m.sendTaskPaused.Store(true)
}

// PauseSendTaskUntil pauses the basic task-sending loop until batchIdx.
func (m *Manager) PauseSendTaskUntil(batchIdx uint64) {
    m.PauseSendTask()
    m.pauseUntilBatchIdx.Store(batchIdx)
}

func (m *Manager) isSendTaskPaused(batchIdx uint64) bool {
    return m.sendTaskPaused.Load() && m.pauseUntilBatchIdx.Load() > batchIdx
}

// StartBasicProofGenerationSession starts a basic proof generation session
func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevSession *session) (success bool) {
    var taskID string

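Note the predicate's shape: sending is paused only while the flag is set and the task's batch index is still below pauseUntilBatchIdx, so PauseSendUntil(n) automatically lets batches from n onward through. A runnable sketch of just that predicate, using the same go.uber.org/atomic types; the struct here is a simplified stand-in for the real Manager:

    package main

    import (
        "fmt"

        uatomic "go.uber.org/atomic"
    )

    type manager struct {
        sendTaskPaused     *uatomic.Bool
        pauseUntilBatchIdx *uatomic.Uint64
    }

    // isSendTaskPaused mirrors the predicate in the diff: paused only while
    // the flag is set AND the batch index is below the resume point.
    func (m *manager) isSendTaskPaused(batchIdx uint64) bool {
        return m.sendTaskPaused.Load() && m.pauseUntilBatchIdx.Load() > batchIdx
    }

    func main() {
        m := &manager{
            sendTaskPaused:     uatomic.NewBool(true),
            pauseUntilBatchIdx: uatomic.NewUint64(100),
        }
        fmt.Println(m.isSendTaskPaused(50))  // true: 50 < 100, still paused
        fmt.Println(m.isSendTaskPaused(100)) // false: reached the resume point
        m.sendTaskPaused.Store(false)        // what StartSendTask() does
        fmt.Println(m.isSendTaskPaused(50))  // false: flag cleared
    }
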