Compare commits

..

31 Commits

Author SHA1 Message Date
Péter Garamvölgyi
e78cff529c fix: reduce finalize batch tx frequency (#332) 2023-03-01 09:01:15 +01:00
ChuhanJin
4c0ff9306b fix(build): jenkinsfile tag job optimized and fix (#331)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-03-01 14:39:43 +08:00
Lawliet-Chan
669f3f45b4 Fix(zkevm): fix zkevm bug for goerli. (#334) 2023-03-01 14:28:18 +08:00
HAOYUatHZ
24c7a632f2 fix(db): fix SetMaxOpenConns (#328) 2023-02-28 15:15:15 +08:00
Haichen Shen
cc64c29f56 feat(batch proposer): add time limit to commit batches (#323) 2023-02-25 16:25:09 +08:00
HAOYUatHZ
780d6b326f fix(bridge): fix typos (#321) 2023-02-23 19:41:12 +08:00
Xi Lin
92e70432e4 feat(bridge): only update gas price oracle for exceeding diff threshold (#319) 2023-02-23 19:14:29 +08:00
HAOYUatHZ
6f3eddf773 fix(config): fix typos (#315) 2023-02-23 12:50:09 +08:00
Péter Garamvölgyi
6fcd6b1b6c fix: Flush buffered writer (#314) 2023-02-22 18:01:19 +01:00
Xi Lin
9e2f2c3e9c fix(bridge): fix batch proposer (#312)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-02-22 16:28:29 +01:00
Péter Garamvölgyi
6816a7e911 fix incorrect block order during batch recovery (#311) 2023-02-22 15:45:31 +01:00
Haichen Shen
fb7002bd6d feat(bridge): update the watcher and relayer based on the new contract (#305)
Co-authored-by: colinlyguo <651734127@qq.com>
Co-authored-by: zimpha <zimpha@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-02-22 18:15:44 +08:00
HAOYUatHZ
a90f2e6c4e chore: upgrade l2geth dependency for trace type (#304) 2023-02-21 10:40:46 +08:00
Xi Lin
7e99c5148d feat(contracts): new bridge contracts (#288)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
Co-authored-by: Thegaram <th307q@gmail.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-02-20 13:44:49 -08:00
Péter Garamvölgyi
65e0b671ff feat: import genesis batch during startup (#299)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-02-18 23:29:34 +01:00
HAOYUatHZ
3849d1bcc9 build: update version to alpha-v1.0 (#301) 2023-02-18 18:59:23 +08:00
Lawliet-Chan
f33bfffd85 feat(roller&coordinator): upgrade lizkp to zkevm-0215 version (#281)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
Co-authored-by: Ubuntu <ubuntu@ip-172-31-9-248.us-west-2.compute.internal>
2023-02-18 18:53:57 +08:00
maskpp
4ad17468d0 feat(coordinator): Enable set ws compression level. (#292) 2023-02-13 19:56:30 +08:00
maskpp
fbcabcc5e2 feat(confirmations): Upgrade confirm (#291) 2023-02-12 19:06:09 +08:00
Péter Garamvölgyi
eb3f187926 feat(contracts): Add fee vault (#223) 2023-02-11 21:24:45 +01:00
Péter Garamvölgyi
d5f0218f5f feat: allow to override L2 deployment when address is provided (#293) 2023-02-10 14:28:38 +01:00
Péter Garamvölgyi
5fdd2c609c feat(bridge): confirm block based on "safe" and "finalized" tags (#265) 2023-02-09 21:20:52 +08:00
Péter Garamvölgyi
d9bc0842cc perf(bridge): execute relayer loops independently (#258)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-02-08 09:08:09 +01:00
Xi Lin
0e88b9aa94 feat(contract): enable whitelist relayer (#272)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-02-07 14:20:34 +08:00
colin
33a912e7c1 fix(bridge): compatible with DynamicFeeTxType not supported chain (#280)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-02-06 13:47:34 +08:00
HAOYUatHZ
e48e76acdf build: add nightly-2022-12-10 rust-related builder image (#282) 2023-02-06 10:33:01 +08:00
Péter Garamvölgyi
f5d02175f8 Revert "fix: add gas multiplier (#275)" (#279) 2023-02-03 11:15:45 +01:00
Lawliet-Chan
bb76a00613 feat(libzkp): use dylib instead of staticlib (#266)
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-02-02 15:25:50 +08:00
Péter Garamvölgyi
41d71fc274 fix: add gas multiplier (#275) 2023-02-01 19:47:53 +01:00
ChuhanJin
02ea14d721 refactor(bridge): remove layer1 client in in layer1 relayer constructor (#274)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-02-01 15:44:55 +08:00
colin
ea9c1c6776 feat: add monitor metrics (#262)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-02-01 13:46:52 +08:00
244 changed files with 61791 additions and 16449 deletions


@@ -5,6 +5,7 @@ on:
branches:
- main
- staging
- alpha
paths:
- 'bridge/**'
- '.github/workflows/bridge.yml'
@@ -12,6 +13,7 @@ on:
branches:
- main
- staging
- alpha
paths:
- 'bridge/**'
- '.github/workflows/bridge.yml'


@@ -5,6 +5,7 @@ on:
branches:
- main
- staging
- alpha
paths:
- 'common/**'
- '.github/workflows/common.yml'
@@ -12,6 +13,7 @@ on:
branches:
- main
- staging
- alpha
paths:
- 'common/**'
- '.github/workflows/common.yml'
@@ -26,7 +28,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-08-23
toolchain: nightly-2022-12-10
override: true
components: rustfmt, clippy
- name: Install Go


@@ -8,6 +8,7 @@ on:
- prod
- release/*
- staging
- alpha
paths:
- 'contracts/**'
- '.github/workflows/contracts.yaml'
@@ -18,6 +19,7 @@ on:
- prod
- release/*
- staging
- alpha
paths:
- 'contracts/**'
- '.github/workflows/contracts.yaml'


@@ -5,6 +5,7 @@ on:
branches:
- main
- staging
- alpha
paths:
- 'coordinator/**'
- '.github/workflows/coordinator.yml'
@@ -12,6 +13,7 @@ on:
branches:
- main
- staging
- alpha
paths:
- 'coordinator/**'
- '.github/workflows/coordinator.yml'
@@ -26,7 +28,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-08-23
toolchain: nightly-2022-12-10
override: true
components: rustfmt, clippy
- name: Install Go


@@ -5,6 +5,7 @@ on:
branches:
- main
- staging
- alpha
paths:
- 'database/**'
- '.github/workflows/database.yml'
@@ -12,6 +13,7 @@ on:
branches:
- main
- staging
- alpha
paths:
- 'database/**'
- '.github/workflows/database.yml'


@@ -5,6 +5,7 @@ on:
branches:
- main
- staging
- alpha
paths:
- 'roller/**'
- '.github/workflows/roller.yml'
@@ -12,6 +13,7 @@ on:
branches:
- main
- staging
- alpha
paths:
- 'roller/**'
- '.github/workflows/roller.yml'
@@ -26,7 +28,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-08-23
toolchain: nightly-2022-12-10
override: true
components: rustfmt, clippy
- name: Install Go
@@ -42,6 +44,8 @@ jobs:
- name: Test
run: |
make roller
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./prover/lib
export CHAIN_ID=534353
go test -v ./...
check:
runs-on: ubuntu-latest

.gitignore vendored

@@ -3,6 +3,7 @@ assets/params*
assets/seed
coverage.txt
build/bin
*.integration.txt
# misc
sftp-config.json

Jenkinsfile vendored

@@ -13,6 +13,8 @@ pipeline {
environment {
GO111MODULE = 'on'
PATH="/home/ubuntu/.cargo/bin:$PATH"
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:./coordinator/verifier/lib"
CHAIN_ID='534353'
// LOG_DOCKER = 'true'
}
stages {


@@ -1,5 +1,7 @@
.PHONY: check update dev_docker clean
ZKP_VERSION=release-1220
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -29,5 +31,15 @@ dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/
test_zkp: ## Test zkp prove and verify, roller/prover generates the proof and coordinator/verifier verifies it
mkdir -p test_params
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params19 -O ./test_params/params19
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params26 -O ./test_params/params26
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_seed -O test_seed
rm -rf ./roller/assets/test_params && mv test_params ./roller/assets/ && mv test_seed ./roller/assets/
cd ./roller && make test-gpu-prover
rm -rf ./coordinator/assets/test_params && mv ./roller/assets/test_params ./coordinator/assets/ && mv ./roller/assets/agg_proof ./coordinator/assets/
cd ./coordinator && make test-gpu-verifier
clean: ## Empty out the bin folder
@rm -rf build/bin

File diff suppressed because one or more lines are too long


@@ -10,69 +10,74 @@ import (
bridge_abi "scroll-tech/bridge/abi"
)
func TestPackRelayMessageWithProof(t *testing.T) {
func TestEventSignature(t *testing.T) {
assert := assert.New(t)
l1MessengerABI, err := bridge_abi.L1MessengerMetaData.GetAbi()
assert.Equal(bridge_abi.L1SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(bridge_abi.L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(bridge_abi.L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(bridge_abi.L1CommitBatchEventSignature, common.HexToHash("2cdc615c74452778c0fb6184735e014c13aad2b62774fe0b09bd1dcc2cc14a62"))
assert.Equal(bridge_abi.L1FinalizeBatchEventSignature, common.HexToHash("6be443154c959a7a1645b4392b6fa97d8e8ab6e8fd853d7085e8867083737d79"))
assert.Equal(bridge_abi.L1QueueTransactionEventSignature, common.HexToHash("bdcc7517f8fe3db6506dfd910942d0bbecaf3d6a506dadea65b0d988e75b9439"))
assert.Equal(bridge_abi.L2SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(bridge_abi.L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(bridge_abi.L2FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(bridge_abi.L2ImportBlockEventSignature, common.HexToHash("a7823f45e1ee21f9530b77959b57507ad515a14fa9fa24d262ee80e79b2b5745"))
assert.Equal(bridge_abi.L2AppendMessageEventSignature, common.HexToHash("faa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693"))
}
func TestPackRelayL2MessageWithProof(t *testing.T) {
assert := assert.New(t)
l1MessengerABI, err := bridge_abi.L1ScrollMessengerMetaData.GetAbi()
assert.NoError(err)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockHeight: big.NewInt(0),
BatchIndex: big.NewInt(0),
BatchHash: common.Hash{},
MerkleProof: make([]byte, 0),
}
_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
assert.NoError(err)
}
func TestPackCommitBatch(t *testing.T) {
assert := assert.New(t)
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
scrollChainABI, err := bridge_abi.ScrollChainMetaData.GetAbi()
assert.NoError(err)
txns := make([]bridge_abi.IZKRollupLayer2Transaction, 5)
for i := 0; i < 5; i++ {
txns[i] = bridge_abi.IZKRollupLayer2Transaction{
Caller: common.Address{},
Target: common.Address{},
Nonce: 0,
Gas: 0,
GasPrice: big.NewInt(0),
Value: big.NewInt(0),
Data: make([]byte, 0),
R: big.NewInt(0),
S: big.NewInt(0),
V: 0,
}
header := bridge_abi.IScrollChainBlockContext{
BlockHash: common.Hash{},
ParentHash: common.Hash{},
BlockNumber: 0,
Timestamp: 0,
BaseFee: big.NewInt(0),
GasLimit: 0,
NumTransactions: 0,
NumL1Messages: 0,
}
header := bridge_abi.IZKRollupLayer2BlockHeader{
BlockHash: common.Hash{},
ParentHash: common.Hash{},
BaseFee: big.NewInt(0),
StateRoot: common.Hash{},
BlockHeight: 0,
GasUsed: 0,
Timestamp: 0,
ExtraData: make([]byte, 0),
Txs: txns,
batch := bridge_abi.IScrollChainBatch{
Blocks: []bridge_abi.IScrollChainBlockContext{header},
PrevStateRoot: common.Hash{},
NewStateRoot: common.Hash{},
WithdrawTrieRoot: common.Hash{},
BatchIndex: 0,
L2Transactions: make([]byte, 0),
}
batch := bridge_abi.IZKRollupLayer2Batch{
BatchIndex: 0,
ParentHash: common.Hash{},
Blocks: []bridge_abi.IZKRollupLayer2BlockHeader{header},
}
_, err = l1RollupABI.Pack("commitBatch", batch)
_, err = scrollChainABI.Pack("commitBatch", batch)
assert.NoError(err)
}
func TestPackFinalizeBatchWithProof(t *testing.T) {
assert := assert.New(t)
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
l1RollupABI, err := bridge_abi.ScrollChainMetaData.GetAbi()
assert.NoError(err)
proof := make([]*big.Int, 10)
@@ -86,12 +91,43 @@ func TestPackFinalizeBatchWithProof(t *testing.T) {
assert.NoError(err)
}
func TestPackRelayMessage(t *testing.T) {
func TestPackRelayL1Message(t *testing.T) {
assert := assert.New(t)
l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
l2MessengerABI, err := bridge_abi.L2ScrollMessengerMetaData.GetAbi()
assert.NoError(err)
_, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), make([]byte, 0))
_, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0))
assert.NoError(err)
}
func TestPackSetL1BaseFee(t *testing.T) {
assert := assert.New(t)
l1GasOracleABI, err := bridge_abi.L1GasPriceOracleMetaData.GetAbi()
assert.NoError(err)
baseFee := big.NewInt(2333)
_, err = l1GasOracleABI.Pack("setL1BaseFee", baseFee)
assert.NoError(err)
}
func TestPackSetL2BaseFee(t *testing.T) {
assert := assert.New(t)
l2GasOracleABI, err := bridge_abi.L2GasPriceOracleMetaData.GetAbi()
assert.NoError(err)
baseFee := big.NewInt(2333)
_, err = l2GasOracleABI.Pack("setL2BaseFee", baseFee)
assert.NoError(err)
}
func TestPackImportBlock(t *testing.T) {
assert := assert.New(t)
l1BlockContainerABI := bridge_abi.L1BlockContainerABI
_, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, make([]byte, 0), false)
assert.NoError(err)
}
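
The hard-coded hashes asserted in TestEventSignature above are keccak-256 digests of the canonical event signature strings. As a minimal sketch of where such a constant comes from, the snippet below hashes the well-known ERC-20 Transfer signature with the fork's crypto package; the Transfer event is only a neutral stand-in here, not one of the bridge events.

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// Topic-0 of an event log is the keccak-256 hash of its canonical
	// signature string; the bridge constants above follow the same rule.
	sig := crypto.Keccak256Hash([]byte("Transfer(address,address,uint256)"))
	fmt.Println(sig.Hex())
	// prints 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef
}
```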


@@ -1,6 +1,7 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
@@ -10,6 +11,7 @@ import (
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"
@@ -49,7 +51,10 @@ func action(ctx *cli.Context) error {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// init db connection
// Start metrics server.
metrics.Serve(context.Background(), ctx)
// Init db connection.
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)


@@ -1,49 +1,69 @@
{
"l1_config": {
"confirmations": 6,
"confirmations": "0x6",
"endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
"l1_messenger_address": "0x0000000000000000000000000000000000000000",
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"l1_message_queue_address": "0x0000000000000000000000000000000000000000",
"scroll_chain_address": "0x0000000000000000000000000000000000000000",
"start_height": 0,
"relayer_config": {
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"check_pending_time": 3,
"check_pending_time": 2,
"escalate_blocks": 100,
"confirmations": 1,
"confirmations": "0x1",
"escalate_multiple_num": 11,
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "AccessListTx",
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000
},
"gas_oracle_config": {
"min_gas_price": 0,
"gas_price_diff": 50000
},
"finalize_batch_interval_sec": 0,
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
],
"gas_oracle_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
]
}
},
"l2_config": {
"confirmations": 1,
"confirmations": "0x1",
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"l2_messenger_address": "0x0000000000000000000000000000000000000000",
"l2_message_queue_address": "0x0000000000000000000000000000000000000000",
"relayer_config": {
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
"check_pending_time": 10,
"escalate_blocks": 100,
"confirmations": 6,
"confirmations": "0x6",
"escalate_multiple_num": 11,
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "DynamicFeeTx",
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000
},
"gas_oracle_config": {
"min_gas_price": 0,
"gas_price_diff": 50000
},
"finalize_batch_interval_sec": 0,
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
],
"gas_oracle_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
],
"rollup_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
]
@@ -51,17 +71,21 @@
"batch_proposer_config": {
"proof_generation_freq": 1,
"batch_gas_threshold": 3000000,
"batch_tx_num_threshold": 135,
"batch_tx_num_threshold": 44,
"batch_time_sec": 300,
"batch_commit_time_sec": 1200,
"batch_blocks_limit": 100,
"skipped_opcodes": [
"CREATE2",
"DELEGATECALL"
]
"commit_tx_calldata_size_limit": 200000,
"public_input_config": {
"max_tx_num": 44,
"padding_tx_hash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
}
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
}
}
}


@@ -16,11 +16,9 @@ func TestConfig(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.True(t, assert.NoError(t, err), "failed to load config")
assert.True(t, len(cfg.L2Config.BatchProposerConfig.SkippedOpcodes) > 0)
assert.True(t, len(cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys) > 0)
assert.True(t, len(cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys) > 0)
assert.True(t, len(cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys) > 0)
assert.Equal(t, 1, len(cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys))
assert.Equal(t, 1, len(cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys))
assert.Equal(t, 1, len(cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys))
data, err := json.Marshal(cfg)
assert.NoError(t, err)


@@ -1,19 +1,24 @@
package config
import "github.com/scroll-tech/go-ethereum/common"
import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/rpc"
)
// L1Config loads l1eth configuration items.
type L1Config struct {
// Confirmations block height confirmations number.
Confirmations uint64 `json:"confirmations"`
Confirmations rpc.BlockNumber `json:"confirmations"`
// l1 eth node url.
Endpoint string `json:"endpoint"`
// The start height to sync event from layer 1
StartHeight uint64 `json:"start_height"`
// The messenger contract address deployed on layer 1 chain.
// The L1ScrollMessenger contract address deployed on layer 1 chain.
L1MessengerAddress common.Address `json:"l1_messenger_address"`
// The rollup contract address deployed on layer 1 chain.
RollupContractAddress common.Address `json:"rollup_contract_address"`
// The L1MessageQueue contract address deployed on layer 1 chain.
L1MessageQueueAddress common.Address `json:"l1_message_queue_address"`
// The ScrollChain contract address deployed on layer 1 chain.
ScrollChainContractAddress common.Address `json:"scroll_chain_address"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
}
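
The Confirmations field above, and the matching "0x6" / "0x1" strings in the example config, are now decoded as rpc.BlockNumber rather than uint64. The sketch below shows the intended effect, assuming the scroll-tech go-ethereum fork's rpc.BlockNumber accepts hex quantities as well as the "safe" / "finalized" tags that #265 builds on; confirmCfg is a hypothetical stand-in, not the real config type.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/rpc"
)

// confirmCfg is a hypothetical stand-in for the Confirmations field above.
type confirmCfg struct {
	Confirmations rpc.BlockNumber `json:"confirmations"`
}

func main() {
	for _, raw := range []string{
		`{"confirmations": "0x6"}`,       // trail the head by a fixed block count
		`{"confirmations": "finalized"}`, // or follow a tag, as #265 does
	} {
		var cfg confirmCfg
		if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
			fmt.Println("unmarshal error:", err)
			continue
		}
		// Tags decode to the rpc package's negative sentinel constants, so a
		// plain count (non-negative) and a tag can share one field.
		fmt.Println(int64(cfg.Confirmations))
	}
}
```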


@@ -1,19 +1,23 @@
package config
import (
"encoding/json"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/scroll-tech/go-ethereum/common"
"scroll-tech/common/types"
)
// L2Config loads l2geth configuration items.
type L2Config struct {
// Confirmations block height confirmations number.
Confirmations uint64 `json:"confirmations"`
Confirmations rpc.BlockNumber `json:"confirmations"`
// l2geth node url.
Endpoint string `json:"endpoint"`
// The messenger contract address deployed on layer 2 chain.
L2MessengerAddress common.Address `json:"l2_messenger_address,omitempty"`
L2MessengerAddress common.Address `json:"l2_messenger_address"`
// The L2MessageQueue contract address deployed on layer 2 chain.
L2MessageQueueAddress common.Address `json:"l2_message_queue_address"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
// The batch_proposer config
@@ -30,44 +34,12 @@ type BatchProposerConfig struct {
BatchGasThreshold uint64 `json:"batch_gas_threshold"`
// Time waited to generate a batch even if gas_threshold not met
BatchTimeSec uint64 `json:"batch_time_sec"`
// Time waited to commit batches before the calldata met CommitTxCalldataSizeLimit
BatchCommitTimeSec uint64 `json:"batch_commit_time_sec"`
// Max number of blocks in a batch
BatchBlocksLimit uint64 `json:"batch_blocks_limit"`
// Skip generating proof when that opcodes appeared
SkippedOpcodes map[string]struct{} `json:"-"`
}
// batchProposerConfigAlias RelayerConfig alias name
type batchProposerConfigAlias BatchProposerConfig
// UnmarshalJSON unmarshal BatchProposerConfig config struct.
func (b *BatchProposerConfig) UnmarshalJSON(input []byte) error {
var jsonConfig struct {
batchProposerConfigAlias
SkippedOpcodes []string `json:"skipped_opcodes,omitempty"`
}
if err := json.Unmarshal(input, &jsonConfig); err != nil {
return err
}
*b = BatchProposerConfig(jsonConfig.batchProposerConfigAlias)
b.SkippedOpcodes = make(map[string]struct{}, len(jsonConfig.SkippedOpcodes))
for _, opcode := range jsonConfig.SkippedOpcodes {
b.SkippedOpcodes[opcode] = struct{}{}
}
return nil
}
// MarshalJSON marshal BatchProposerConfig in order to transfer skipOpcodes.
func (b *BatchProposerConfig) MarshalJSON() ([]byte, error) {
jsonConfig := struct {
batchProposerConfigAlias
SkippedOpcodes []string `json:"skipped_opcodes,omitempty"`
}{batchProposerConfigAlias(*b), nil}
// Load skipOpcodes.
for op := range b.SkippedOpcodes {
jsonConfig.SkippedOpcodes = append(jsonConfig.SkippedOpcodes, op)
}
return json.Marshal(&jsonConfig)
// Commit tx calldata size limit in bytes, target to cap the gas use of commit tx at 2M gas
CommitTxCalldataSizeLimit uint64 `json:"commit_tx_calldata_size_limit"`
// The public input hash config
PublicInputConfig *types.PublicInputHashConfig `json:"public_input_config"`
}
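
With SkippedOpcodes gone, the proposer bounds a batch by the calldata its commit transaction would carry, alongside the commit time limit added in #323. The sketch below is a toy greedy accumulation under that limit; pendingBlock and nextBatch are invented names and this is not the actual proposer code.

```go
package main

import "fmt"

// pendingBlock and nextBatch are illustrative only; the real proposer works
// on traced blocks inside the bridge package.
type pendingBlock struct {
	CalldataSize uint64 // size in bytes of the block's L2 transactions
}

// nextBatch greedily takes blocks until one more would push the batch past
// the configured commit_tx_calldata_size_limit.
func nextBatch(pending []pendingBlock, calldataLimit uint64) []pendingBlock {
	var (
		batch []pendingBlock
		total uint64
	)
	for _, blk := range pending {
		if total+blk.CalldataSize > calldataLimit {
			break
		}
		total += blk.CalldataSize
		batch = append(batch, blk)
	}
	return batch
}

func main() {
	blocks := []pendingBlock{{90_000}, {80_000}, {70_000}}
	// With the 200000-byte limit from the example config, only the first two fit.
	fmt.Println(len(nextBatch(blocks, 200_000)))
}
```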


@@ -8,6 +8,7 @@ import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rpc"
)
// SenderConfig The config for transaction sender
@@ -19,7 +20,7 @@ type SenderConfig struct {
// The number of blocks to wait to escalate increase gas price of the transaction.
EscalateBlocks uint64 `json:"escalate_blocks"`
// The gap number between a block be confirmed and the latest block.
Confirmations uint64 `json:"confirmations"`
Confirmations rpc.BlockNumber `json:"confirmations"`
// The numerator of gas price escalate multiple.
EscalateMultipleNum uint64 `json:"escalate_multiple_num"`
// The denominator of gas price escalate multiple.
@@ -40,11 +41,26 @@ type RelayerConfig struct {
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
// MessengerContractAddress store the scroll messenger contract address.
MessengerContractAddress common.Address `json:"messenger_contract_address"`
// GasPriceOracleContractAddress store the scroll messenger contract address.
GasPriceOracleContractAddress common.Address `json:"gas_price_oracle_contract_address"`
// sender config
SenderConfig *SenderConfig `json:"sender_config"`
// gas oracle config
GasOracleConfig *GasOracleConfig `json:"gas_oracle_config"`
// The interval in which we send finalize batch transactions.
FinalizeBatchIntervalSec uint64 `json:"finalize_batch_interval_sec"`
// The private key of the relayer
MessageSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
RollupSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
MessageSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
GasOracleSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
RollupSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
}
// GasOracleConfig The config for updating gas price oracle.
type GasOracleConfig struct {
// MinGasPrice store the minimum gas price to set.
MinGasPrice uint64 `json:"min_gas_price"`
// GasPriceDiff store the percentage of gas price difference.
GasPriceDiff uint64 `json:"gas_price_diff"`
}
// relayerConfigAlias RelayerConfig alias name
@@ -55,15 +71,17 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
var jsonConfig struct {
relayerConfigAlias
// The private key of the relayer
MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
RollupSenderPrivateKeys []string `json:"rollup_sender_private_keys,omitempty"`
MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
GasOracleSenderPrivateKeys []string `json:"gas_oracle_sender_private_keys"`
RollupSenderPrivateKeys []string `json:"rollup_sender_private_keys,omitempty"`
}
if err := json.Unmarshal(input, &jsonConfig); err != nil {
return err
}
// Get messenger private key list.
*r = RelayerConfig(jsonConfig.relayerConfigAlias)
// Get messenger private key list.
for _, privStr := range jsonConfig.MessageSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
@@ -72,6 +90,15 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
r.MessageSenderPrivateKeys = append(r.MessageSenderPrivateKeys, priv)
}
// Get gas oracle private key list.
for _, privStr := range jsonConfig.GasOracleSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
return fmt.Errorf("incorrect private_key_list format, err: %v", err)
}
r.GasOracleSenderPrivateKeys = append(r.GasOracleSenderPrivateKeys, priv)
}
// Get rollup private key
for _, privStr := range jsonConfig.RollupSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
@@ -89,15 +116,21 @@ func (r *RelayerConfig) MarshalJSON() ([]byte, error) {
jsonConfig := struct {
relayerConfigAlias
// The private key of the relayer
MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
RollupSenderPrivateKeys []string `json:"rollup_sender_private_keys,omitempty"`
}{relayerConfigAlias(*r), nil, nil}
MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
GasOracleSenderPrivateKeys []string `json:"gas_oracle_sender_private_keys,omitempty"`
RollupSenderPrivateKeys []string `json:"rollup_sender_private_keys,omitempty"`
}{relayerConfigAlias(*r), nil, nil, nil}
// Transfer message sender private keys to hex type.
for _, priv := range r.MessageSenderPrivateKeys {
jsonConfig.MessageSenderPrivateKeys = append(jsonConfig.MessageSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
}
// Transfer rollup sender private keys to hex type.
for _, priv := range r.GasOracleSenderPrivateKeys {
jsonConfig.GasOracleSenderPrivateKeys = append(jsonConfig.GasOracleSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
}
// Transfer rollup sender private keys to hex type.
for _, priv := range r.RollupSenderPrivateKeys {
jsonConfig.RollupSenderPrivateKeys = append(jsonConfig.RollupSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
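
The custom (un)marshalling above relies on a standard Go trick: relayerConfigAlias drops the custom JSON methods, so embedding the alias lets encoding/json handle the plain fields while the wrapper decodes the hex private-key lists itself. Below is a self-contained sketch of just that pattern, using illustrative field names rather than the real ones.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type relayerCfg struct {
	Endpoint   string   `json:"endpoint"`
	SenderKeys []string `json:"-"` // the real code stores *ecdsa.PrivateKey here
}

// relayerCfgAlias has no UnmarshalJSON method, so embedding it below avoids
// infinite recursion back into relayerCfg.UnmarshalJSON.
type relayerCfgAlias relayerCfg

func (r *relayerCfg) UnmarshalJSON(input []byte) error {
	var aux struct {
		relayerCfgAlias
		SenderKeys []string `json:"sender_private_keys"`
	}
	if err := json.Unmarshal(input, &aux); err != nil {
		return err
	}
	*r = relayerCfg(aux.relayerCfgAlias)
	r.SenderKeys = aux.SenderKeys // real code: crypto.ToECDSA(common.FromHex(...)) per entry
	return nil
}

func main() {
	var cfg relayerCfg
	raw := `{"endpoint": "http://localhost:8545", "sender_private_keys": ["1212"]}`
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Endpoint, len(cfg.SenderKeys))
}
```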


@@ -3,43 +3,39 @@ module scroll-tech/bridge
go 1.18
require (
github.com/iden3/go-iden3-crypto v0.0.13
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
golang.org/x/sync v0.1.0
modernc.org/mathutil v1.4.1
)
require (
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/ethereum/go-ethereum v1.10.26 // indirect
github.com/ethereum/go-ethereum v1.11.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/iden3/go-iden3-crypto v0.0.13 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.3.1 // indirect
github.com/scroll-tech/zktrie v0.5.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/sys v0.5.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)


@@ -71,10 +71,10 @@ github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOC
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -111,8 +111,8 @@ github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaB
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/ethereum/go-ethereum v1.11.1 h1:EMymmWFzpS7G9l9NvVN8G73cgdUIqDPNRf2YTSGBXlk=
github.com/ethereum/go-ethereum v1.11.1/go.mod h1:DuefStAgaxoaYGLR0FueVcVbehmn5n9QUcVrMCuOvuc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
@@ -139,8 +139,9 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -250,8 +251,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -268,16 +268,15 @@ github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIG
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -316,7 +315,6 @@ github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHu
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -342,19 +340,16 @@ github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6 h1:2kXWJR+mOj09HBh5sUTb4L/OURPSXoQd1NC/10v7otM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6/go.mod h1:eW+eyNdMoO0MyuczCc9xWSnW8dPJ0kOy5xsxgOKYEaA=
github.com/scroll-tech/zktrie v0.5.0 h1:dABDR6lMZq6Hs+fWQSiHbX8s3AOX6hY+5nkhSYm5rmU=
github.com/scroll-tech/zktrie v0.5.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -369,8 +364,8 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -392,11 +387,11 @@ github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYa
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.10.2 h1:x3p8awjp/2arX+Nl/G2040AZpOCHS/eMJJ1/a+mye4Y=
github.com/urfave/cli/v2 v2.10.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
@@ -425,8 +420,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -535,14 +530,13 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -554,12 +548,12 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=


@@ -26,12 +26,12 @@ func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*B
return nil, err
}
relayer, err := NewLayer1Relayer(ctx, client, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
relayer, err := NewLayer1Relayer(ctx, orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RollupContractAddress, orm)
watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.L1MessageQueueAddress, cfg.ScrollChainContractAddress, orm)
return &Backend{
cfg: cfg,


@@ -11,16 +11,23 @@ import (
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database/orm"
"scroll-tech/common/types"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
"scroll-tech/bridge/sender"
)
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
)
// Layer1Relayer is responsible for
// 1. fetch pending L1Message from db
// 2. relay pending message to layer 2 node
@@ -28,51 +35,78 @@ import (
// Actions are triggered by new head from layer 1 geth node.
// @todo It's better to be triggered by watcher.
type Layer1Relayer struct {
ctx context.Context
client *ethclient.Client
sender *sender.Sender
ctx context.Context
db orm.L1MessageOrm
db database.OrmFactory
cfg *config.RelayerConfig
// channel used to communicate with transaction sender
confirmationCh <-chan *sender.Confirmation
messageSender *sender.Sender
messageCh <-chan *sender.Confirmation
l2MessengerABI *abi.ABI
gasOracleSender *sender.Sender
gasOracleCh <-chan *sender.Confirmation
l1GasOracleABI *abi.ABI
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
stopCh chan struct{}
}
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1ConfirmNum int64, db orm.L1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
log.Warn("new L2MessengerABI failed", "err", err)
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
log.Error("new MessageSender failed", "main address", addr.String(), "err", err)
return nil, err
}
sender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
// @todo make sure only one sender is available
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
log.Error("new sender failed", "main address", addr.String(), "err", err)
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKeys[0].PublicKey)
log.Error("new GasOracleSender failed", "main address", addr.String(), "err", err)
return nil, err
}
var minGasPrice uint64
var gasPriceDiff uint64
if cfg.GasOracleConfig != nil {
minGasPrice = cfg.GasOracleConfig.MinGasPrice
gasPriceDiff = cfg.GasOracleConfig.GasPriceDiff
} else {
minGasPrice = 0
gasPriceDiff = defaultGasPriceDiff
}
return &Layer1Relayer{
ctx: ctx,
client: ethClient,
sender: sender,
db: db,
l2MessengerABI: l2MessengerABI,
cfg: cfg,
stopCh: make(chan struct{}),
confirmationCh: sender.ConfirmChan(),
ctx: ctx,
db: db,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
l2MessengerABI: bridge_abi.L2ScrollMessengerABI,
gasOracleSender: gasOracleSender,
gasOracleCh: gasOracleSender.ConfirmChan(),
l1GasOracleABI: bridge_abi.L1GasPriceOracleABI,
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
stopCh: make(chan struct{}),
}, nil
}
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
func (r *Layer1Relayer) ProcessSavedEvents() {
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending, 100)
msgs, err := r.db.GetL1MessagesByStatus(types.MsgPending, 100)
if err != nil {
log.Error("Failed to fetch unprocessed L1 messages", "err", err)
return
@@ -92,76 +126,138 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
}
}
func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
// @todo add support to relay multiple messages
from := common.HexToAddress(msg.Sender)
target := common.HexToAddress(msg.Target)
value, ok := big.NewInt(0).SetString(msg.Value, 10)
if !ok {
// @todo maybe panic?
log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
// TODO: need to skip this message by changing its status to MsgError
}
fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
deadline := big.NewInt(int64(msg.Deadline))
msgNonce := big.NewInt(int64(msg.Nonce))
func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error {
calldata := common.Hex2Bytes(msg.Calldata)
data, err := r.l2MessengerABI.Pack("relayMessage", from, target, value, fee, deadline, msgNonce, calldata)
if err != nil {
log.Error("Failed to pack relayMessage", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return err
}
hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, 0)
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), calldata)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgExpired)
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
}
if err != nil {
return err
}
log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
err = r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
err = r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
if err != nil {
log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.msgHash", msg.MsgHash, "msg.height", msg.Height, "err", err)
}
return err
}
// ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() {
latestBlockHeight, err := r.db.GetLatestL1BlockHeight()
if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
return
}
blocks, err := r.db.GetL1BlockInfos(map[string]interface{}{
"number": latestBlockHeight,
})
if err != nil {
log.Error("Failed to GetL1BlockInfos from db", "height", latestBlockHeight, "err", err)
return
}
if len(blocks) != 1 {
log.Error("Block not exist", "height", latestBlockHeight)
return
}
block := blocks[0]
if block.GasOracleStatus == types.GasOraclePending {
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
// last is undefine or (block.BaseFee >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (block.BaseFee >= r.minGasPrice && (block.BaseFee >= r.lastGasPrice+expectedDelta || block.BaseFee <= r.lastGasPrice-expectedDelta)) {
baseFee := big.NewInt(int64(block.BaseFee))
data, err := r.l1GasOracleABI.Pack("setL1BaseFee", baseFee)
if err != nil {
log.Error("Failed to pack setL1BaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", block.BaseFee, "err", err)
return
}
hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
}
return
}
err = r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return
}
r.lastGasPrice = block.BaseFee
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee)
}
}
}
// Start the relayer process
func (r *Layer1Relayer) Start() {
log.Info("Starting l1/relayer")
go func() {
// trigger by timer
ticker := time.NewTicker(3 * time.Second)
loop := func(ctx context.Context, f func()) {
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// number, err := r.client.BlockNumber(r.ctx)
// log.Info("receive header", "height", number)
r.ProcessSavedEvents()
case cfm := <-r.confirmationCh:
if !cfm.IsSuccessful {
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, orm.MsgConfirmed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
case <-r.stopCh:
case <-ctx.Done():
return
case <-ticker.C:
f()
}
}
}
go func() {
ctx, cancel := context.WithCancel(r.ctx)
go loop(ctx, r.ProcessSavedEvents)
go loop(ctx, r.ProcessGasPriceOracle)
go func(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case cfm := <-r.messageCh:
if !cfm.IsSuccessful {
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
case cfm := <-r.gasOracleCh:
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
}
}
}(ctx)
<-r.stopCh
cancel()
}()
}


@@ -4,7 +4,6 @@ import (
"context"
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database/migrate"
@@ -20,10 +19,7 @@ func testCreateNewL1Relayer(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
client, err := ethclient.Dial(l1gethImg.Endpoint())
assert.NoError(t, err)
relayer, err := NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()


@@ -8,17 +8,25 @@ import (
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/types"
"scroll-tech/database"
"scroll-tech/database/orm"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
)
var (
bridgeL1MsgSyncHeightGauge = metrics.NewRegisteredGauge("bridge/l1/msg/sync/height", nil)
)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
@@ -26,9 +34,9 @@ type relayedMessage struct {
}
type rollupEvent struct {
batchID common.Hash
txHash common.Hash
status orm.RollupStatus
batchHash common.Hash
txHash common.Hash
status types.RollupStatus
}
// Watcher will listen for smart contract events from Eth L1.
@@ -38,22 +46,28 @@ type Watcher struct {
db database.OrmFactory
// The number of new blocks to wait for a block to be confirmed
confirmations uint64
confirmations rpc.BlockNumber
messengerAddress common.Address
messengerABI *abi.ABI
rollupAddress common.Address
rollupABI *abi.ABI
messageQueueAddress common.Address
messageQueueABI *abi.ABI
scrollChainAddress common.Address
scrollChainABI *abi.ABI
// The height of the block up to which the watcher has retrieved event logs
processedMsgHeight uint64
// The height of the block up to which the watcher has retrieved the header RLP
processedBlockHeight uint64
stop chan bool
stopCh chan bool
}
// NewWatcher returns a new instance of Watcher. The instance is not fully prepared yet,
// and still needs to be finalized and run by calling `watcher.Start`.
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations uint64, messengerAddress common.Address, rollupAddress common.Address, db database.OrmFactory) *Watcher {
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *Watcher {
savedHeight, err := db.GetLayer1LatestWatchedHeight()
if err != nil {
log.Warn("Failed to fetch height from db", "err", err)
@@ -63,56 +77,147 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
savedHeight = int64(startHeight)
}
stop := make(chan bool)
savedL1BlockHeight, err := db.GetLatestL1BlockHeight()
if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
savedL1BlockHeight = 0
}
if savedL1BlockHeight < startHeight {
savedL1BlockHeight = startHeight
}
stopCh := make(chan bool)
return &Watcher{
ctx: ctx,
client: client,
db: db,
confirmations: confirmations,
messengerAddress: messengerAddress,
messengerABI: bridge_abi.L1MessengerMetaABI,
rollupAddress: rollupAddress,
rollupABI: bridge_abi.RollupMetaABI,
processedMsgHeight: uint64(savedHeight),
stop: stop,
ctx: ctx,
client: client,
db: db,
confirmations: confirmations,
messengerAddress: messengerAddress,
messengerABI: bridge_abi.L1ScrollMessengerABI,
messageQueueAddress: messageQueueAddress,
messageQueueABI: bridge_abi.L1MessageQueueABI,
scrollChainAddress: scrollChainAddress,
scrollChainABI: bridge_abi.ScrollChainABI,
processedMsgHeight: uint64(savedHeight),
processedBlockHeight: savedL1BlockHeight,
stopCh: stopCh,
}
}
// Start the Watcher module.
func (w *Watcher) Start() {
log.Info("Starting l1/watcher")
go func() {
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
ctx, cancel := context.WithCancel(w.ctx)
for ; true; <-ticker.C {
select {
case <-w.stop:
return
go func(ctx context.Context) {
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
default:
blockNumber, err := w.client.BlockNumber(w.ctx)
if err != nil {
log.Error("Failed to get block number", "err", err)
continue
}
if err := w.FetchContractEvent(blockNumber); err != nil {
log.Error("Failed to fetch bridge contract", "err", err)
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
number, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
continue
}
if err := w.FetchBlockHeader(number); err != nil {
log.Error("Failed to fetch L1 block header", "lastest", number, "err", err)
}
}
}
}
}(ctx)
go func(ctx context.Context) {
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
number, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
continue
}
if err := w.FetchContractEvent(number); err != nil {
log.Error("Failed to fetch bridge contract", "err", err)
}
}
}
}(ctx)
<-w.stopCh
cancel()
}()
}
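Both loops above delegate the confirmation handling to utils.GetLatestConfirmedBlockNumber, whose implementation is not shown in this diff. A rough sketch of what such a helper could look like, assuming confirmations is either one of the negative rpc.BlockNumber tags (safe/finalized) or a plain confirmation depth, and that the client maps negative block numbers to the corresponding tags (context, math/big, ethclient and rpc are already imported in this file):

// getLatestConfirmedBlockNumber resolves a confirmation setting to a concrete block number.
func getLatestConfirmedBlockNumber(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber) (uint64, error) {
	switch {
	case confirmations == rpc.SafeBlockNumber || confirmations == rpc.FinalizedBlockNumber:
		// tag-based confirmation: ask the node for the current "safe"/"finalized" header
		header, err := client.HeaderByNumber(ctx, big.NewInt(int64(confirmations)))
		if err != nil {
			return 0, err
		}
		return header.Number.Uint64(), nil
	case confirmations >= 0:
		// depth-based confirmation: stay `confirmations` blocks behind the chain head
		number, err := client.BlockNumber(ctx)
		if err != nil {
			return 0, err
		}
		if number <= uint64(confirmations) {
			return 0, nil
		}
		return number - uint64(confirmations), nil
	default:
		// treat any other tag (e.g. latest/pending) as zero confirmations
		return client.BlockNumber(ctx)
	}
}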
// Stop the Watcher module, for a graceful shutdown.
func (w *Watcher) Stop() {
w.stop <- true
w.stopCh <- true
}
const contractEventsBlocksFetchLimit = int64(10)
// FetchBlockHeader pulls the latest L1 block headers and saves them in the DB
func (w *Watcher) FetchBlockHeader(blockHeight uint64) error {
fromBlock := int64(w.processedBlockHeight) + 1
toBlock := int64(blockHeight)
if toBlock < fromBlock {
return nil
}
if toBlock > fromBlock+contractEventsBlocksFetchLimit {
toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
}
var blocks []*types.L1BlockInfo
var err error
height := fromBlock
for ; height <= toBlock; height++ {
var block *geth_types.Header
block, err = w.client.HeaderByNumber(w.ctx, big.NewInt(height))
if err != nil {
log.Warn("Failed to get block", "height", height, "err", err)
break
}
blocks = append(blocks, &types.L1BlockInfo{
Number: uint64(height),
Hash: block.Hash().String(),
BaseFee: block.BaseFee.Uint64(),
})
}
// failed at first block, return with the error
if height == fromBlock {
return err
}
toBlock = height - 1
// insert succeed blocks
err = w.db.InsertL1Blocks(w.ctx, blocks)
if err != nil {
log.Warn("Failed to insert L1 block to db", "fromBlock", fromBlock, "toBlock", toBlock, "err", err)
return err
}
// update processed height
w.processedBlockHeight = uint64(toBlock)
return nil
}
// FetchContractEvent pulls the latest event logs from the given contract addresses and saves them in the DB
func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
defer func() {
@@ -120,7 +225,7 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
}()
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight) - int64(w.confirmations)
toBlock := int64(blockHeight)
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
to := from + contractEventsBlocksFetchLimit - 1
@@ -135,16 +240,17 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.rollupAddress,
w.scrollChainAddress,
w.messageQueueAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
query.Topics[0][3] = common.HexToHash(bridge_abi.CommitBatchEventSignature)
query.Topics[0][4] = common.HexToHash(bridge_abi.FinalizedBatchEventSignature)
query.Topics[0][0] = bridge_abi.L1QueueTransactionEventSignature
query.Topics[0][1] = bridge_abi.L1RelayedMessageEventSignature
query.Topics[0][2] = bridge_abi.L1FailedRelayedMessageEventSignature
query.Topics[0][3] = bridge_abi.L1CommitBatchEventSignature
query.Topics[0][4] = bridge_abi.L1FinalizeBatchEventSignature
logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil {
@@ -153,6 +259,7 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
bridgeL1MsgSyncHeightGauge.Update(to)
continue
}
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
@@ -165,29 +272,29 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
log.Info("L1 events types", "SentMessageCount", len(sentMessageEvents), "RelayedMessageCount", len(relayedMessageEvents), "RollupEventCount", len(rollupEvents))
// use rollup events to update the rollup result status in the DB
var batchIDs []string
var batchHashes []string
for _, event := range rollupEvents {
batchIDs = append(batchIDs, event.batchID.String())
batchHashes = append(batchHashes, event.batchHash.String())
}
statuses, err := w.db.GetRollupStatusByIDList(batchIDs)
statuses, err := w.db.GetRollupStatusByHashList(batchHashes)
if err != nil {
log.Error("Failed to GetRollupStatusByIDList", "err", err)
log.Error("Failed to GetRollupStatusByHashList", "err", err)
return err
}
if len(statuses) != len(batchIDs) {
log.Error("RollupStatus.Length mismatch with BatchIDs.Length", "RollupStatus.Length", len(statuses), "BatchIDs.Length", len(batchIDs))
if len(statuses) != len(batchHashes) {
log.Error("RollupStatus.Length mismatch with batchHashes.Length", "RollupStatus.Length", len(statuses), "batchHashes.Length", len(batchHashes))
return nil
}
for index, event := range rollupEvents {
batchID := event.batchID.String()
batchHash := event.batchHash.String()
status := statuses[index]
// only update when db status is before event status
if event.status > status {
if event.status == orm.RollupFinalized {
err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
} else if event.status == orm.RollupCommitted {
err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
if event.status == types.RollupFinalized {
err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
} else if event.status == types.RollupCommitted {
err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
}
if err != nil {
log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
@@ -199,14 +306,13 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
var msgStatus types.MsgStatus
if msg.isSuccessful {
// succeed
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
msgStatus = types.MsgConfirmed
} else {
// failed
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
msgStatus = types.MsgFailed
}
if err != nil {
if err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err
}
@@ -217,113 +323,93 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
}
w.processedMsgHeight = uint64(to)
bridgeL1MsgSyncHeightGauge.Update(to)
}
return nil
}
func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
func (w *Watcher) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) {
// We need to use the contract ABI to parse the event logs.
// This can only be tested after we have our contracts set up.
var l1Messages []*orm.L1Message
var l1Messages []*types.L1Message
var relayedMessages []relayedMessage
var rollupEvents []rollupEvent
for _, vLog := range logs {
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SentMessageEventSignature):
event := struct {
Target common.Address
Sender common.Address
Value *big.Int // uint256
Fee *big.Int // uint256
Deadline *big.Int // uint256
Message []byte
MessageNonce *big.Int // uint256
GasLimit *big.Int // uint256
}{}
err := w.messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
case bridge_abi.L1QueueTransactionEventSignature:
event := bridge_abi.L1QueueTransactionEvent{}
err := utils.UnpackLog(w.messageQueueABI, &event, "QueueTransaction", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 SentMessage event", "err", err)
log.Warn("Failed to unpack layer1 QueueTransaction event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
// target is in topics[1]
event.Target = common.HexToAddress(vLog.Topics[1].String())
l1Messages = append(l1Messages, &orm.L1Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
msgHash := common.BytesToHash(crypto.Keccak256(event.Data))
l1Messages = append(l1Messages, &types.L1Message{
QueueIndex: event.QueueIndex.Uint64(),
MsgHash: msgHash.String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Fee: event.Fee.String(),
GasLimit: event.GasLimit.Uint64(),
Deadline: event.Deadline.Uint64(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Calldata: common.Bytes2Hex(event.Data),
GasLimit: event.GasLimit.Uint64(),
Layer1Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
// MsgHash is in topics[1]
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
case bridge_abi.L1RelayedMessageEventSignature:
event := bridge_abi.L1RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 RelayedMessage event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MsgHash,
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
// MsgHash is in topics[1]
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
case bridge_abi.L1FailedRelayedMessageEventSignature:
event := bridge_abi.L1FailedRelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 FailedRelayedMessage event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MsgHash,
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: false,
})
case common.HexToHash(bridge_abi.CommitBatchEventSignature):
event := struct {
BatchID common.Hash
BatchHash common.Hash
BatchIndex *big.Int
ParentHash common.Hash
}{}
// BatchID is in topics[1]
event.BatchID = common.HexToHash(vLog.Topics[1].String())
err := w.rollupABI.UnpackIntoInterface(&event, "CommitBatch", vLog.Data)
case bridge_abi.L1CommitBatchEventSignature:
event := bridge_abi.L1CommitBatchEvent{}
err := utils.UnpackLog(w.scrollChainABI, &event, "CommitBatch", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 CommitBatch event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
rollupEvents = append(rollupEvents, rollupEvent{
batchID: event.BatchID,
txHash: vLog.TxHash,
status: orm.RollupCommitted,
batchHash: event.BatchHash,
txHash: vLog.TxHash,
status: types.RollupCommitted,
})
case common.HexToHash(bridge_abi.FinalizedBatchEventSignature):
event := struct {
BatchID common.Hash
BatchHash common.Hash
BatchIndex *big.Int
ParentHash common.Hash
}{}
// BatchID is in topics[1]
event.BatchID = common.HexToHash(vLog.Topics[1].String())
err := w.rollupABI.UnpackIntoInterface(&event, "FinalizeBatch", vLog.Data)
case bridge_abi.L1FinalizeBatchEventSignature:
event := bridge_abi.L1FinalizeBatchEvent{}
err := utils.UnpackLog(w.scrollChainABI, &event, "FinalizeBatch", vLog)
if err != nil {
log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
rollupEvents = append(rollupEvents, rollupEvent{
batchID: event.BatchID,
txHash: vLog.TxHash,
status: orm.RollupFinalized,
batchHash: event.BatchHash,
txHash: vLog.TxHash,
status: types.RollupFinalized,
})
default:
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)


@@ -23,7 +23,7 @@ func testStartWatcher(t *testing.T) {
l1Cfg := cfg.L1Config
watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
watcher.Start()
defer watcher.Stop()
}


@@ -14,10 +14,11 @@ import (
// Backend manage the resources and services of L2 backend.
// The backend should monitor events in layer 2 and relay transactions to layer 1
type Backend struct {
cfg *config.L2Config
l2Watcher *WatcherClient
relayer *Layer2Relayer
orm database.OrmFactory
cfg *config.L2Config
watcher *WatcherClient
relayer *Layer2Relayer
batchProposer *BatchProposer
orm database.OrmFactory
}
// New returns a new instance of Backend.
@@ -27,32 +28,39 @@ func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*B
return nil, err
}
relayer, err := NewLayer2Relayer(ctx, orm, cfg.RelayerConfig)
// Note: initialize watcher before relayer to keep DB consistent.
// Otherwise, there will be a race condition between watcher.initializeGenesis and relayer.ProcessPendingBatches.
watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.L2MessengerAddress, cfg.L2MessageQueueAddress, orm)
relayer, err := NewLayer2Relayer(ctx, client, orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.BatchProposerConfig, cfg.L2MessengerAddress, orm)
batchProposer := NewBatchProposer(ctx, cfg.BatchProposerConfig, relayer, orm)
return &Backend{
cfg: cfg,
l2Watcher: l2Watcher,
relayer: relayer,
orm: orm,
cfg: cfg,
watcher: watcher,
relayer: relayer,
batchProposer: batchProposer,
orm: orm,
}, nil
}
// Start Backend module.
func (l2 *Backend) Start() error {
l2.l2Watcher.Start()
l2.watcher.Start()
l2.relayer.Start()
l2.batchProposer.Start()
return nil
}
// Stop Backend module.
func (l2 *Backend) Stop() {
l2.l2Watcher.Stop()
l2.batchProposer.Stop()
l2.relayer.Stop()
l2.watcher.Stop()
}
// APIs collect API modules.
@@ -61,7 +69,7 @@ func (l2 *Backend) APIs() []rpc.API {
{
Namespace: "l2",
Version: "1.0",
Service: WatcherAPI(l2.l2Watcher),
Service: WatcherAPI(l2.watcher),
Public: true,
},
}


@@ -1,105 +1,28 @@
package l2
import (
"context"
"fmt"
"math"
"reflect"
"sync"
"time"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database"
"scroll-tech/database/orm"
"scroll-tech/common/types"
"scroll-tech/database"
bridgeabi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
)
type batchProposer struct {
mutex sync.Mutex
orm database.OrmFactory
batchTimeSec uint64
batchGasThreshold uint64
batchTxNumThreshold uint64
batchBlocksLimit uint64
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
}
func newBatchProposer(cfg *config.BatchProposerConfig, orm database.OrmFactory) *batchProposer {
return &batchProposer{
mutex: sync.Mutex{},
orm: orm,
batchTimeSec: cfg.BatchTimeSec,
batchGasThreshold: cfg.BatchGasThreshold,
batchTxNumThreshold: cfg.BatchTxNumThreshold,
batchBlocksLimit: cfg.BatchBlocksLimit,
proofGenerationFreq: cfg.ProofGenerationFreq,
skippedOpcodes: cfg.SkippedOpcodes,
}
}
func (w *batchProposer) tryProposeBatch() {
w.mutex.Lock()
defer w.mutex.Unlock()
blocks, err := w.orm.GetUnbatchedBlocks(
map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", w.batchBlocksLimit),
)
if err != nil {
log.Error("failed to get unbatched blocks", "err", err)
return
}
if len(blocks) == 0 {
return
}
if blocks[0].GasUsed > w.batchGasThreshold {
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
if blocks[0].TxNum > w.batchTxNumThreshold {
log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
var (
length = len(blocks)
gasUsed, txNum uint64
)
// add blocks into the batch until we reach batchGasThreshold or batchTxNumThreshold
for i, block := range blocks {
if (gasUsed+block.GasUsed > w.batchGasThreshold) || (txNum+block.TxNum > w.batchTxNumThreshold) {
blocks = blocks[:i]
break
}
gasUsed += block.GasUsed
txNum += block.TxNum
}
// if too little gas has been gathered but we don't want to halt, we check the first block in the batch:
// if it's not old enough, we skip proposing the batch;
// otherwise, we still propose one
if length == len(blocks) && blocks[0].BlockTimestamp+w.batchTimeSec > uint64(time.Now().Unix()) {
return
}
if err = w.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
}
}
func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {
dbTx, err := w.orm.Beginx()
// AddBatchInfoToDB inserts the batch information into the BlockBatch table and updates the batch_hash
// in all blocks included in the batch.
func AddBatchInfoToDB(db database.OrmFactory, batchData *types.BatchData) error {
dbTx, err := db.Beginx()
if err != nil {
return err
}
@@ -113,28 +36,328 @@ func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {
}
}()
var (
batchID string
startBlock = blocks[0]
endBlock = blocks[len(blocks)-1]
txNum, gasUsed uint64
blockIDs = make([]uint64, len(blocks))
)
for i, block := range blocks {
txNum += block.TxNum
gasUsed += block.GasUsed
blockIDs[i] = block.Number
}
batchID, dbTxErr = w.orm.NewBatchInDBTx(dbTx, startBlock, endBlock, startBlock.ParentHash, txNum, gasUsed)
if dbTxErr != nil {
if dbTxErr = db.NewBatchInDBTx(dbTx, batchData); dbTxErr != nil {
return dbTxErr
}
if dbTxErr = w.orm.SetBatchIDForBlocksInDBTx(dbTx, blockIDs, batchID); dbTxErr != nil {
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
if dbTxErr = db.SetBatchHashForL2BlocksInDBTx(dbTx, blockIDs, batchData.Hash().Hex()); dbTxErr != nil {
return dbTxErr
}
dbTxErr = dbTx.Commit()
return dbTxErr
}
// BatchProposer proposes batches and sends their commit transactions to L1 through the relayer.
type BatchProposer struct {
mutex sync.Mutex
ctx context.Context
orm database.OrmFactory
batchTimeSec uint64
batchGasThreshold uint64
batchTxNumThreshold uint64
batchBlocksLimit uint64
batchCommitTimeSec uint64
commitCalldataSizeLimit uint64
batchDataBufferSizeLimit uint64
proofGenerationFreq uint64
batchDataBuffer []*types.BatchData
relayer *Layer2Relayer
piCfg *types.PublicInputHashConfig
stopCh chan struct{}
}
// NewBatchProposer will return a new instance of BatchProposer.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *Layer2Relayer, orm database.OrmFactory) *BatchProposer {
p := &BatchProposer{
mutex: sync.Mutex{},
ctx: ctx,
orm: orm,
batchTimeSec: cfg.BatchTimeSec,
batchGasThreshold: cfg.BatchGasThreshold,
batchTxNumThreshold: cfg.BatchTxNumThreshold,
batchBlocksLimit: cfg.BatchBlocksLimit,
batchCommitTimeSec: cfg.BatchCommitTimeSec,
commitCalldataSizeLimit: cfg.CommitTxCalldataSizeLimit,
batchDataBufferSizeLimit: 100*cfg.CommitTxCalldataSizeLimit + 1*1024*1024, // @todo: determine the value.
proofGenerationFreq: cfg.ProofGenerationFreq,
piCfg: cfg.PublicInputConfig,
relayer: relayer,
stopCh: make(chan struct{}),
}
// for graceful restart.
p.recoverBatchDataBuffer()
// try to commit the leftover pending batches
p.tryCommitBatches()
return p
}
// Start the batch proposing process
func (p *BatchProposer) Start() {
go func() {
if reflect.ValueOf(p.orm).IsNil() {
panic("must run BatchProposer with DB")
}
ctx, cancel := context.WithCancel(p.ctx)
// batch proposer loop
go func(ctx context.Context) {
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
p.tryProposeBatch()
p.tryCommitBatches()
}
}
}(ctx)
<-p.stopCh
cancel()
}()
}
// Stop the BatchProposer module, for a graceful shutdown.
func (p *BatchProposer) Stop() {
p.stopCh <- struct{}{}
}
func (p *BatchProposer) recoverBatchDataBuffer() {
// batches are sorted by batch index in increasing order
batchHashes, err := p.orm.GetPendingBatches(math.MaxInt32)
if err != nil {
log.Crit("Failed to fetch pending L2 batches", "err", err)
}
if len(batchHashes) == 0 {
return
}
log.Info("Load pending batches into batchDataBuffer")
// helper function to cache and get BlockBatch from DB
blockBatchCache := make(map[string]*types.BlockBatch)
getBlockBatch := func(batchHash string) (*types.BlockBatch, error) {
if blockBatch, ok := blockBatchCache[batchHash]; ok {
return blockBatch, nil
}
blockBatches, err := p.orm.GetBlockBatches(map[string]interface{}{"hash": batchHash})
if err != nil || len(blockBatches) == 0 {
return nil, err
}
blockBatchCache[batchHash] = blockBatches[0]
return blockBatches[0], nil
}
// recover the in-memory batchData from DB
for _, batchHash := range batchHashes {
log.Info("recover batch data from pending batch", "batch_hash", batchHash)
blockBatch, err := getBlockBatch(batchHash)
if err != nil {
log.Error("could not get BlockBatch", "batch_hash", batchHash, "error", err)
continue
}
parentBatch, err := getBlockBatch(blockBatch.ParentHash)
if err != nil {
log.Error("could not get parent BlockBatch", "batch_hash", batchHash, "error", err)
continue
}
blockInfos, err := p.orm.GetL2BlockInfos(
map[string]interface{}{"batch_hash": batchHash},
"order by number ASC",
)
if err != nil {
log.Error("could not GetL2BlockInfos", "batch_hash", batchHash, "error", err)
continue
}
if len(blockInfos) != int(blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1) {
log.Error("the number of block info retrieved from DB mistmatches the batch info in the DB",
"len(blockInfos)", len(blockInfos),
"expected", blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1)
continue
}
batchData, err := p.generateBatchData(parentBatch, blockInfos)
if err != nil {
continue
}
if batchData.Hash().Hex() != batchHash {
log.Error("the hash from recovered batch data mismatches the DB entry",
"recovered_batch_hash", batchData.Hash().Hex(),
"expected", batchHash)
continue
}
p.batchDataBuffer = append(p.batchDataBuffer, batchData)
}
}
func (p *BatchProposer) tryProposeBatch() {
p.mutex.Lock()
defer p.mutex.Unlock()
if p.getBatchDataBufferSize() < p.batchDataBufferSizeLimit {
blocks, err := p.orm.GetUnbatchedL2Blocks(
map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", p.batchBlocksLimit),
)
if err != nil {
log.Error("failed to get unbatched blocks", "err", err)
return
}
p.proposeBatch(blocks)
}
}
func (p *BatchProposer) tryCommitBatches() {
p.mutex.Lock()
defer p.mutex.Unlock()
if len(p.batchDataBuffer) == 0 {
return
}
// estimate the calldata length to determine whether to commit the pending batches
index := 0
commit := false
calldataByteLen := uint64(0)
for ; index < len(p.batchDataBuffer); index++ {
calldataByteLen += bridgeabi.GetBatchCalldataLength(&p.batchDataBuffer[index].Batch)
if calldataByteLen > p.commitCalldataSizeLimit {
commit = true
if index == 0 {
log.Warn(
"The calldata size of one batch is larger than the threshold",
"batch_hash", p.batchDataBuffer[0].Hash().Hex(),
"calldata_size", calldataByteLen,
)
index = 1
}
break
}
}
if !commit && p.batchDataBuffer[0].Timestamp()+p.batchCommitTimeSec > uint64(time.Now().Unix()) {
return
}
// Send commit tx for batchDataBuffer[0:index]
log.Info("Commit batches", "start_index", p.batchDataBuffer[0].Batch.BatchIndex,
"end_index", p.batchDataBuffer[index-1].Batch.BatchIndex)
err := p.relayer.SendCommitTx(p.batchDataBuffer[:index])
if err != nil {
// leave the retry to the next ticker
log.Error("SendCommitTx failed", "error", err)
} else {
// pop the processed batches from the buffer
p.batchDataBuffer = p.batchDataBuffer[index:]
}
}
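The loop above decides how many buffered batches go into a single commitBatches call: it stops before the batch that pushes the running calldata size over the limit, commits a single oversized batch on its own, and otherwise falls back to a time-based trigger. A compact restatement of that decision with an illustrative helper (calldataLens[i] stands for the calldata length of buffered batch i):

// commitDecision returns how many of the buffered batches to commit and whether a commit
// should be sent now.
func commitDecision(calldataLens []uint64, sizeLimit, oldestBatchTimestamp, commitTimeSec, now uint64) (count int, commit bool) {
	var total uint64
	for i, l := range calldataLens {
		total += l
		if total > sizeLimit {
			if i == 0 {
				return 1, true // a single batch already exceeds the limit; commit it alone
			}
			return i, true // commit the batches accumulated before the limit was crossed
		}
	}
	// size limit never reached: commit everything only once the oldest batch is old enough
	return len(calldataLens), oldestBatchTimestamp+commitTimeSec <= now
}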
func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
if len(blocks) == 0 {
return
}
if blocks[0].GasUsed > p.batchGasThreshold {
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
if blocks[0].TxNum > p.batchTxNumThreshold {
log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
var gasUsed, txNum uint64
reachThreshold := false
// add blocks into the batch until we reach batchGasThreshold or batchTxNumThreshold
for i, block := range blocks {
if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) {
blocks = blocks[:i]
reachThreshold = true
break
}
gasUsed += block.GasUsed
txNum += block.TxNum
}
// if too little gas has been gathered but we don't want to halt, we check the first block in the batch:
// if it's not old enough, we skip proposing the batch;
// otherwise, we still propose one
if !reachThreshold && blocks[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) {
return
}
if err := p.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
}
}
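proposeBatch above is a greedy selection with two escape hatches: an oversized first block is still batched on its own, and a batch that never reaches the thresholds is only proposed once its oldest block has waited at least batchTimeSec. The same rule as a standalone sketch (types.BlockInfo comes from scroll-tech/common/types, already imported here; the helper name is illustrative):

// selectBlocksForBatch returns the blocks to put into the next batch, or nil if no batch
// should be proposed yet.
func selectBlocksForBatch(blocks []*types.BlockInfo, gasThreshold, txThreshold, batchTimeSec, now uint64) []*types.BlockInfo {
	if len(blocks) == 0 {
		return nil
	}
	// a single block that already exceeds a threshold is still batched on its own
	if blocks[0].GasUsed > gasThreshold || blocks[0].TxNum > txThreshold {
		return blocks[:1]
	}
	var gasUsed, txNum uint64
	reachedThreshold := false
	for i, block := range blocks {
		if gasUsed+block.GasUsed > gasThreshold || txNum+block.TxNum > txThreshold {
			blocks = blocks[:i]
			reachedThreshold = true
			break
		}
		gasUsed += block.GasUsed
		txNum += block.TxNum
	}
	// without threshold pressure, only propose once the oldest block is old enough
	if !reachedThreshold && blocks[0].BlockTimestamp+batchTimeSec > now {
		return nil
	}
	return blocks
}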
func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error {
lastBatch, err := p.orm.GetLatestBatch()
if err != nil {
// We should not receive sql.ErrNoRows error. The DB should have the batch entry that contains the genesis block.
return err
}
batchData, err := p.generateBatchData(lastBatch, blocks)
if err != nil {
log.Error("createBatchData failed", "error", err)
return err
}
if err := AddBatchInfoToDB(p.orm, batchData); err != nil {
log.Error("addBatchInfoToDB failed", "BatchHash", batchData.Hash(), "error", err)
return err
}
p.batchDataBuffer = append(p.batchDataBuffer, batchData)
return nil
}
func (p *BatchProposer) generateBatchData(parentBatch *types.BlockBatch, blocks []*types.BlockInfo) (*types.BatchData, error) {
var traces []*geth_types.BlockTrace
for _, block := range blocks {
trs, err := p.orm.GetL2BlockTraces(map[string]interface{}{"hash": block.Hash})
if err != nil || len(trs) != 1 {
log.Error("Failed to GetBlockTraces", "hash", block.Hash, "err", err)
return nil, err
}
traces = append(traces, trs[0])
}
return types.NewBatchData(parentBatch, traces, p.piCfg), nil
}
func (p *BatchProposer) getBatchDataBufferSize() (size uint64) {
for _, batchData := range p.batchDataBuffer {
size += bridgeabi.GetBatchCalldataLength(&batchData.Batch)
}
return
}


@@ -1,13 +1,12 @@
package l2
import (
"encoding/json"
"context"
"fmt"
"math/big"
"os"
"math"
"testing"
"github.com/scroll-tech/go-ethereum/core/types"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
@@ -15,48 +14,90 @@ import (
"scroll-tech/bridge/config"
"scroll-tech/common/utils"
"scroll-tech/common/types"
)
func testBatchProposer(t *testing.T) {
func testBatchProposerProposeBatch(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
trace2 := &types.BlockTrace{}
trace3 := &types.BlockTrace{}
data, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
err = json.Unmarshal(data, trace2)
assert.NoError(t, err)
data, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
err = json.Unmarshal(data, trace3)
assert.NoError(t, err)
// Insert traces into db.
assert.NoError(t, db.InsertBlockTraces([]*types.BlockTrace{trace2, trace3}))
assert.NoError(t, db.InsertL2BlockTraces([]*geth_types.BlockTrace{blockTrace1}))
id := utils.ComputeBatchID(trace3.Header.Hash(), trace2.Header.ParentHash, big.NewInt(1))
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, db)
wc.Start()
defer wc.Stop()
proposer := newBatchProposer(&config.BatchProposerConfig{
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, db)
}, relayer, db)
proposer.tryProposeBatch()
infos, err := db.GetUnbatchedBlocks(map[string]interface{}{},
infos, err := db.GetUnbatchedL2Blocks(map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", 100))
assert.NoError(t, err)
assert.Equal(t, true, len(infos) == 0)
assert.Equal(t, 0, len(infos))
exist, err := db.BatchRecordExist(id)
exist, err := db.BatchRecordExist(batchData1.Hash().Hex())
assert.NoError(t, err)
assert.Equal(t, true, exist)
}
func testBatchProposerGracefulRestart(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
// Insert traces into db.
assert.NoError(t, db.InsertL2BlockTraces([]*geth_types.BlockTrace{blockTrace2}))
// Insert block batch into db.
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData2))
assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData1.Batch.Blocks[0].BlockNumber}, batchData1.Hash().Hex()))
assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData2.Batch.Blocks[0].BlockNumber}, batchData2.Hash().Hex()))
assert.NoError(t, dbTx.Commit())
assert.NoError(t, db.UpdateRollupStatus(context.Background(), batchData1.Hash().Hex(), types.RollupFinalized))
batchHashes, err := db.GetPendingBatches(math.MaxInt32)
assert.NoError(t, err)
assert.Equal(t, 1, len(batchHashes))
assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])
// test p.recoverBatchDataBuffer().
_ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, relayer, db)
batchHashes, err = db.GetPendingBatches(math.MaxInt32)
assert.NoError(t, err)
assert.Equal(t, 0, len(batchHashes))
exist, err := db.BatchRecordExist(batchData2.Hash().Hex())
assert.NoError(t, err)
assert.Equal(t, true, exist)
}


@@ -1,12 +1,17 @@
package l2
import (
"encoding/json"
"fmt"
"os"
"testing"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/config"
)
@@ -22,6 +27,14 @@ var (
// l2geth client
l2Cli *ethclient.Client
// block trace
blockTrace1 *geth_types.BlockTrace
blockTrace2 *geth_types.BlockTrace
// batch data
batchData1 *types.BatchData
batchData2 *types.BatchData
)
func setupEnv(t *testing.T) (err error) {
@@ -47,6 +60,40 @@ func setupEnv(t *testing.T) (err error) {
l2Cli, err = ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
templateBlockTrace1, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
if err != nil {
return err
}
// unmarshal blockTrace
blockTrace1 = &geth_types.BlockTrace{}
if err = json.Unmarshal(templateBlockTrace1, blockTrace1); err != nil {
return err
}
parentBatch1 := &types.BlockBatch{
Index: 1,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData1 = types.NewBatchData(parentBatch1, []*geth_types.BlockTrace{blockTrace1}, nil)
templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json")
if err != nil {
return err
}
// unmarshal blockTrace
blockTrace2 = &geth_types.BlockTrace{}
if err = json.Unmarshal(templateBlockTrace2, blockTrace2); err != nil {
return err
}
parentBatch2 := &types.BlockBatch{
Index: batchData1.Batch.BatchIndex,
Hash: batchData1.Hash().Hex(),
}
batchData2 = types.NewBatchData(parentBatch2, []*geth_types.BlockTrace{blockTrace2}, nil)
fmt.Printf("batchhash1 = %x\n", batchData1.Hash())
fmt.Printf("batchhash2 = %x\n", batchData2.Hash())
return err
}
@@ -75,11 +122,12 @@ func TestFunction(t *testing.T) {
// Run l2 relayer test cases.
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
t.Run("testL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("testL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("testL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("testBatchProposer", testBatchProposer)
// Run batch proposer test cases.
t.Run("TestBatchProposerProposeBatch", testBatchProposerProposeBatch)
t.Run("TestBatchProposerGracefulRestart", testBatchProposerGracefulRestart)
t.Cleanup(func() {
free(t)


@@ -13,12 +13,15 @@ import (
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/errgroup"
"modernc.org/mathutil"
"scroll-tech/common/types"
"scroll-tech/database"
"scroll-tech/database/orm"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
@@ -26,6 +29,12 @@ import (
"scroll-tech/bridge/utils"
)
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
)
// Layer2Relayer is responsible for
// 1. Committing and finalizing L2 blocks on L1
// 2. Relaying messages from L2 to L1
@@ -35,6 +44,8 @@ import (
type Layer2Relayer struct {
ctx context.Context
l2Client *ethclient.Client
db database.OrmFactory
cfg *config.RelayerConfig
@@ -46,23 +57,31 @@ type Layer2Relayer struct {
rollupCh <-chan *sender.Confirmation
l1RollupABI *abi.ABI
gasOracleSender *sender.Sender
gasOracleCh <-chan *sender.Confirmation
l2GasOracleABI *abi.ABI
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
// A list of processing messages.
// key(string): confirmation ID, value(string): layer2 hash.
processingMessage sync.Map
// A list of processing batch commitment.
// key(string): confirmation ID, value(string): batch id.
processingCommitment sync.Map
// A list of processing batch commitments.
// key(string): confirmation ID, value([]string): batch hashes.
processingBatchesCommitment sync.Map
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch id.
// key(string): confirmation ID, value(string): batch hash.
processingFinalization sync.Map
stopCh chan struct{}
}
// NewLayer2Relayer will return a new instance of Layer2Relayer
func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
// @todo use different sender for relayer, block commit and proof finalize
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
@@ -76,28 +95,55 @@ func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
return nil, err
}
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
if err != nil {
log.Error("Failed to create gas oracle sender", "err", err)
return nil, err
}
var minGasPrice uint64
var gasPriceDiff uint64
if cfg.GasOracleConfig != nil {
minGasPrice = cfg.GasOracleConfig.MinGasPrice
gasPriceDiff = cfg.GasOracleConfig.GasPriceDiff
} else {
minGasPrice = 0
gasPriceDiff = defaultGasPriceDiff
}
return &Layer2Relayer{
ctx: ctx,
db: db,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
l1MessengerABI: bridge_abi.L1MessengerMetaABI,
rollupSender: rollupSender,
rollupCh: rollupSender.ConfirmChan(),
l1RollupABI: bridge_abi.RollupMetaABI,
cfg: cfg,
processingMessage: sync.Map{},
processingCommitment: sync.Map{},
processingFinalization: sync.Map{},
stopCh: make(chan struct{}),
ctx: ctx,
db: db,
l2Client: l2Client,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
l1MessengerABI: bridge_abi.L1ScrollMessengerABI,
rollupSender: rollupSender,
rollupCh: rollupSender.ConfirmChan(),
l1RollupABI: bridge_abi.ScrollChainABI,
gasOracleSender: gasOracleSender,
gasOracleCh: gasOracleSender.ConfirmChan(),
l2GasOracleABI: bridge_abi.L2GasPriceOracleABI,
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
processingMessage: sync.Map{},
processingBatchesCommitment: sync.Map{},
processingFinalization: sync.Map{},
stopCh: make(chan struct{}),
}, nil
}
const processMsgLimit = 100
// ProcessSavedEvents relays saved unprocessed cross-domain messages to the destination chain
func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
defer wg.Done()
func (r *Layer2Relayer) ProcessSavedEvents() {
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
@@ -106,7 +152,7 @@ func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2Messages(
map[string]interface{}{"status": orm.MsgPending},
map[string]interface{}{"status": types.MsgPending},
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
)
@@ -126,7 +172,7 @@ func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
for _, msg := range msgs[:size] {
msg := msg
g.Go(func() error {
return r.processSavedEvent(msg, batch.Index)
return r.processSavedEvent(msg)
})
}
if err := g.Wait(); err != nil {
@@ -138,13 +184,24 @@ func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
}
}
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
// @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
// Get the block info that contains the message
blockInfos, err := r.db.GetL2BlockInfos(map[string]interface{}{"number": msg.Height})
if err != nil || len(blockInfos) == 0 {
log.Error("Failed to GetL2BlockInfos from DB", "number", msg.Height, "err", err)
return err
}
blockInfo := blockInfos[0]
if !blockInfo.BatchHash.Valid {
log.Error("Block has not been batched yet", "number", blockInfo.Number, "msg.nonce", msg.Nonce)
return nil
}
// TODO: rebuild the withdraw trie to generate the merkle proof
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockHeight: big.NewInt(int64(msg.Height)),
BatchIndex: big.NewInt(0).SetUint64(index),
BatchHash: common.HexToHash(blockInfo.BatchHash.String),
MerkleProof: make([]byte, 0),
}
from := common.HexToAddress(msg.Sender)
@@ -155,23 +212,21 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) erro
log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
// TODO: need to skip this message by changing its status to MsgError
}
fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
deadline := big.NewInt(int64(msg.Deadline))
msgNonce := big.NewInt(int64(msg.Nonce))
calldata := common.Hex2Bytes(msg.Calldata)
data, err := r.l1MessengerABI.Pack("relayMessageWithProof", from, target, value, fee, deadline, msgNonce, calldata, proof)
data, err := r.l1MessengerABI.Pack("relayMessageWithProof", from, target, value, msgNonce, calldata, proof)
if err != nil {
log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return err
}
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, 0)
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgExpired)
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
}
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
@@ -183,7 +238,7 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) erro
// save status in db
// @todo handle db error
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
if err != nil {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err
@@ -192,123 +247,104 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) erro
return nil
}
// ProcessPendingBatches submits batch data to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBatches(wg *sync.WaitGroup) {
defer wg.Done()
// batches are sorted by batch index in increasing order
batchesInDB, err := r.db.GetPendingBatches(1)
// ProcessGasPriceOracle imports the suggested L2 gas price to layer 1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
batch, err := r.db.GetLatestBatch()
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
}
if len(batchesInDB) == 0 {
return
}
id := batchesInDB[0]
// @todo add support to relay multiple batches
batches, err := r.db.GetBlockBatches(map[string]interface{}{"id": id})
if err != nil || len(batches) == 0 {
log.Error("Failed to GetBlockBatches", "batch_id", id, "err", err)
return
}
batch := batches[0]
traces, err := r.db.GetBlockTraces(map[string]interface{}{"batch_id": id}, "ORDER BY number ASC")
if err != nil || len(traces) == 0 {
log.Error("Failed to GetBlockTraces", "batch_id", id, "err", err)
log.Error("Failed to GetLatestBatch", "err", err)
return
}
layer2Batch := &bridge_abi.IZKRollupLayer2Batch{
BatchIndex: batch.Index,
ParentHash: common.HexToHash(batch.ParentHash),
Blocks: make([]bridge_abi.IZKRollupLayer2BlockHeader, len(traces)),
}
parentHash := common.HexToHash(batch.ParentHash)
for i, trace := range traces {
layer2Batch.Blocks[i] = bridge_abi.IZKRollupLayer2BlockHeader{
BlockHash: trace.Header.Hash(),
ParentHash: parentHash,
BaseFee: trace.Header.BaseFee,
StateRoot: trace.StorageTrace.RootAfter,
BlockHeight: trace.Header.Number.Uint64(),
GasUsed: 0,
Timestamp: trace.Header.Time,
ExtraData: make([]byte, 0),
Txs: make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.Transactions)),
}
for j, tx := range trace.Transactions {
layer2Batch.Blocks[i].Txs[j] = bridge_abi.IZKRollupLayer2Transaction{
Caller: tx.From,
Nonce: tx.Nonce,
Gas: tx.Gas,
GasPrice: tx.GasPrice.ToInt(),
Value: tx.Value.ToInt(),
Data: common.Hex2Bytes(tx.Data),
R: tx.R.ToInt(),
S: tx.S.ToInt(),
V: tx.V.ToInt().Uint64(),
}
if tx.To != nil {
layer2Batch.Blocks[i].Txs[j].Target = *tx.To
}
layer2Batch.Blocks[i].GasUsed += trace.ExecutionResults[j].Gas
}
// for next iteration
parentHash = layer2Batch.Blocks[i].BlockHash
}
data, err := r.l1RollupABI.Pack("commitBatch", layer2Batch)
if err != nil {
log.Error("Failed to pack commitBatch", "id", id, "index", batch.Index, "err", err)
return
}
txID := id + "-commit"
// add suffix `-commit` to avoid duplication with finalize tx in unit tests
hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
if err != nil && err.Error() == "execution reverted: Parent batch hasn't been committed" {
// check parent is committing
batches, err = r.db.GetBlockBatches(map[string]interface{}{"end_block_hash": batch.ParentHash})
if err != nil || len(batches) == 0 {
log.Error("Failed to get parent batch from db", "batch_id", id, "parent_hash", batch.ParentHash, "err", err)
if batch.OracleStatus == types.GasOraclePending {
suggestGasPrice, err := r.l2Client.SuggestGasPrice(r.ctx)
if err != nil {
log.Error("Failed to fetch SuggestGasPrice from l2geth", "err", err)
return
}
parentBatch := batches[0]
suggestGasPriceUint64 := uint64(suggestGasPrice.Int64())
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
if parentBatch.RollupStatus >= orm.RollupCommitting {
// retry with manual gas estimation
gasLimit := estimateCommitBatchGas(len(data), len(layer2Batch.Blocks))
hash, err = r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, gasLimit)
log.Info("commitBatch tx resent with manual gas estimation ", "id", id, "index", batch.Index, "gasLimit", gasLimit, "hash", hash.String(), "err", err)
// last gas price is undefined, or (suggestGasPriceUint64 >= minGasPrice && the diff threshold is exceeded)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64 <= r.lastGasPrice-expectedDelta)) {
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
if err != nil {
log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
return
}
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
}
return
}
err = r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
return
}
r.lastGasPrice = suggestGasPriceUint64
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
}
}
}
// SendCommitTx sends commitBatches tx to L1.
func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error {
if len(batchData) == 0 {
log.Error("SendCommitTx receives empty batch")
return nil
}
// pack calldata
commitBatches := make([]bridge_abi.IScrollChainBatch, len(batchData))
for i, batch := range batchData {
commitBatches[i] = batch.Batch
}
calldata, err := r.l1RollupABI.Pack("commitBatches", commitBatches)
if err != nil {
log.Error("Failed to pack commitBatches",
"error", err,
"start_batch_index", commitBatches[0].BatchIndex,
"end_batch_index", commitBatches[len(commitBatches)-1].BatchIndex)
return err
}
// generate a unique txID and send transaction
var bytes []byte
for _, batch := range batchData {
bytes = append(bytes, batch.Hash().Bytes()...)
}
txID := crypto.Keccak256Hash(bytes).String()
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
log.Error("Failed to send commitBatches tx to layer1 ", "err", err)
}
return
return err
}
log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
log.Info("Sent the commitBatches tx to layer1",
"tx_hash", txHash.Hex(),
"start_batch_index", commitBatches[0].BatchIndex,
"end_batch_index", commitBatches[len(commitBatches)-1].BatchIndex)
// record and sync with db, @todo handle db error
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
batchHashes := make([]string, len(batchData))
for i, batch := range batchData {
batchHashes[i] = batch.Hash().Hex()
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHashes[i], txHash.String(), types.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batchHashes[i], "index", batch.Batch.BatchIndex, "err", err)
}
}
r.processingCommitment.Store(txID, id)
r.processingBatchesCommitment.Store(txID, batchHashes)
return nil
}
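The txID above is derived deterministically from the batch hashes, so one commitBatches transaction covering a given group of batches always gets the same confirmation ID, and the confirmation handler can look the group up again in processingBatchesCommitment. In isolation (common and crypto are go-ethereum packages already imported in this file):

// commitTxID concatenates the batch hashes and hashes the result, yielding a stable
// confirmation ID for the commitBatches transaction that covers exactly these batches.
func commitTxID(batchHashes []common.Hash) string {
	var buf []byte
	for _, h := range batchHashes {
		buf = append(buf, h.Bytes()...)
	}
	return crypto.Keccak256Hash(buf).String()
}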
// ProcessCommittedBatches submits proofs to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches(wg *sync.WaitGroup) {
defer wg.Done()
func (r *Layer2Relayer) ProcessCommittedBatches() {
// set skipped batches in a single db operation
if count, err := r.db.UpdateSkippedBatches(); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
@@ -318,99 +354,134 @@ func (r *Layer2Relayer) ProcessCommittedBatches(wg *sync.WaitGroup) {
}
// batches are sorted by batch index in increasing order
batches, err := r.db.GetCommittedBatches(1)
batchHashes, err := r.db.GetCommittedBatches(1)
if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err)
return
}
if len(batches) == 0 {
if len(batchHashes) == 0 {
return
}
id := batches[0]
hash := batchHashes[0]
// @todo add support to relay multiple batches
status, err := r.db.GetProvingStatusByID(id)
batches, err := r.db.GetBlockBatches(map[string]interface{}{"hash": hash}, "LIMIT 1")
if err != nil {
log.Error("GetProvingStatusByID failed", "id", id, "err", err)
log.Error("Failed to fetch committed L2 batch", "hash", hash, "err", err)
return
}
if len(batches) == 0 {
log.Error("Unexpected result for GetBlockBatches", "hash", hash, "len", 0)
return
}
batch := batches[0]
status := batch.ProvingStatus
switch status {
case orm.ProvingTaskUnassigned, orm.ProvingTaskAssigned:
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
// The proof for this block is not ready yet.
return
case orm.ProvingTaskProved:
case types.ProvingTaskProved:
// It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
case types.ProvingTaskFailed, types.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for the sake of completeness
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
case orm.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "id", id)
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
success := false
previousBatch, err := r.db.GetLatestFinalizingOrFinalizedBatch()
// skip submitting proof
if err == nil && uint64(batch.CreatedAt.Sub(*previousBatch.CreatedAt).Seconds()) < r.cfg.FinalizeBatchIntervalSec {
log.Info(
"Not enough time passed, skipping",
"hash", hash,
"createdAt", batch.CreatedAt,
"lastFinalizingHash", previousBatch.Hash,
"lastFinalizingStatus", previousBatch.RollupStatus,
"lastFinalizingCreatedAt", previousBatch.CreatedAt,
)
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
} else {
success = true
}
return
}
// handle unexpected db error
if err != nil && err.Error() != "sql: no rows in result set" {
log.Error("Failed to get latest finalized batch", "err", err)
return
}
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "id", id)
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash)
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
}
}()
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByID(id)
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByHash(hash)
if err != nil {
log.Warn("fetch get proof by id failed", "id", id, "err", err)
log.Warn("fetch get proof by hash failed", "hash", hash, "err", err)
return
}
if proofBuffer == nil || instanceBuffer == nil {
log.Warn("proof or instance not ready", "id", id)
log.Warn("proof or instance not ready", "hash", hash)
return
}
if len(proofBuffer)%32 != 0 {
log.Error("proof buffer has wrong length", "id", id, "length", len(proofBuffer))
log.Error("proof buffer has wrong length", "hash", hash, "length", len(proofBuffer))
return
}
if len(instanceBuffer)%32 != 0 {
log.Warn("instance buffer has wrong length", "id", id, "length", len(instanceBuffer))
log.Warn("instance buffer has wrong length", "hash", hash, "length", len(instanceBuffer))
return
}
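// BufferToUint256Le is not shown in this diff; it presumably splits each
// 32-byte-aligned buffer into little-endian uint256 words, which is why the
// %32 length checks above are required before packing finalizeBatchWithProof.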
proof := utils.BufferToUint256Le(proofBuffer)
instance := utils.BufferToUint256Le(instanceBuffer)
data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(id), proof, instance)
data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, instance)
if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err)
return
}
txID := id + "-finalize"
txID := hash + "-finalize"
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
hash := &txHash
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
finalizeTxHash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("finalizeBatchWithProof in layer1 failed", "id", id, "err", err)
log.Error("finalizeBatchWithProof in layer1 failed", "hash", hash, "err", err)
}
return
}
log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
log.Info("finalizeBatchWithProof in layer1", "batch_hash", hash, "tx_hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", hash, "err", err)
}
success = true
r.processingFinalization.Store(txID, id)
r.processingFinalization.Store(txID, hash)
default:
log.Error("encounter unreachable case in ProcessCommittedBatches",
@@ -421,30 +492,58 @@ func (r *Layer2Relayer) ProcessCommittedBatches(wg *sync.WaitGroup) {
// Start the relayer process
func (r *Layer2Relayer) Start() {
log.Info("Starting l2/relayer")
go func() {
// trigger by timer
loop := func(ctx context.Context, f func()) {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
var wg = sync.WaitGroup{}
wg.Add(3)
go r.ProcessSavedEvents(&wg)
go r.ProcessPendingBatches(&wg)
go r.ProcessCommittedBatches(&wg)
wg.Wait()
case confirmation := <-r.messageCh:
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupCh:
r.handleConfirmation(confirmation)
case <-r.stopCh:
case <-ctx.Done():
return
case <-ticker.C:
f()
}
}
}
go func() {
ctx, cancel := context.WithCancel(r.ctx)
go loop(ctx, r.ProcessSavedEvents)
go loop(ctx, r.ProcessCommittedBatches)
go loop(ctx, r.ProcessGasPriceOracle)
go func(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case confirmation := <-r.messageCh:
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupCh:
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleCh:
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer1", "confirmation", cfm)
}
}
}
}(ctx)
<-r.stopCh
cancel()
}()
}
@@ -464,42 +563,35 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
transactionType = "MessageRelay"
// @todo handle db error
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), orm.MsgConfirmed, confirmation.TxHash.String())
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), types.MsgConfirmed, confirmation.TxHash.String())
if err != nil {
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
}
r.processingMessage.Delete(confirmation.ID)
}
// check whether it is block commitment transaction
if batchID, ok := r.processingCommitment.Load(confirmation.ID); ok {
transactionType = "BatchCommitment"
// @todo handle db error
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupCommitted)
if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
// check whether it is a CommitBatches transaction
if batchBatches, ok := r.processingBatchesCommitment.Load(confirmation.ID); ok {
transactionType = "BatchesCommitment"
for _, batchHash := range batchBatches.([]string) {
// @todo handle db error
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), types.RollupCommitted)
if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_hash", batchHash, "err", err)
}
}
r.processingCommitment.Delete(confirmation.ID)
r.processingBatchesCommitment.Delete(confirmation.ID)
}
// check whether it is a proof finalization transaction
if batchID, ok := r.processingFinalization.Load(confirmation.ID); ok {
if batchHash, ok := r.processingFinalization.Load(confirmation.ID); ok {
transactionType = "ProofFinalization"
// @todo handle db error
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupFinalized)
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), types.RollupFinalized)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", batchHash.(string), "err", err)
}
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
func estimateCommitBatchGas(callDataLength int, numBlocks int) uint64 {
gasLimit := uint64(0)
gasLimit += 16 * uint64(callDataLength) // calldata cost
gasLimit += 4*2100 + 3*22100 // fixed cost per batch
gasLimit += 4 * 22100 * uint64(numBlocks) // cost per block in batch
gasLimit = gasLimit * 12 / 10 // apply multiplier
return gasLimit
}
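// Worked example (illustrative numbers, not taken from the diff): for a batch
// with 2 blocks and 10,000 bytes of calldata, the estimate above yields
//   calldata:   16 * 10000            = 160,000 gas
//   per batch:  4*2100 + 3*22100      =  74,700 gas
//   per block:  4 * 22100 * 2         = 176,800 gas
//   subtotal                          = 411,500 gas
//   with the 1.2x multiplier          = 493,800 gas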

View File

@@ -5,28 +5,26 @@ import (
"encoding/json"
"math/big"
"os"
"sync"
"strconv"
"testing"
"time"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
)
var (
templateL2Message = []*orm.L2Message{
templateL2Message = []*types.L2Message{
{
Nonce: 1,
Height: 1,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "100",
Fee: "100",
GasLimit: 11529940,
Deadline: uint64(time.Now().Unix()),
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer2Hash: "hash0",
@@ -41,7 +39,7 @@ func testCreateNewRelayer(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer2Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
@@ -56,113 +54,43 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
err = db.SaveL2Messages(context.Background(), templateL2Message)
assert.NoError(t, err)
traces := []*types.BlockTrace{
traces := []*geth_types.BlockTrace{
{
Header: &types.Header{
Header: &geth_types.Header{
Number: big.NewInt(int64(templateL2Message[0].Height)),
},
},
{
Header: &types.Header{
Header: &geth_types.Header{
Number: big.NewInt(int64(templateL2Message[0].Height + 1)),
},
},
}
err = db.InsertBlockTraces(traces)
err = db.InsertL2BlockTraces(traces)
assert.NoError(t, err)
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{Number: templateL2Message[0].Height},
&orm.BlockInfo{Number: templateL2Message[0].Height + 1},
"0f", 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
templateL2Message[0].Height,
templateL2Message[0].Height + 1}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
batchHash := batchData1.Hash().Hex()
assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{1}, batchHash))
assert.NoError(t, dbTx.Commit())
err = db.UpdateRollupStatus(context.Background(), batchHash, types.RollupFinalized)
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupFinalized)
assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessSavedEvents(&wg)
wg.Wait()
relayer.ProcessSavedEvents()
msg, err := db.GetL2MessageByNonce(templateL2Message[0].Nonce)
assert.NoError(t, err)
assert.Equal(t, orm.MsgSubmitted, msg.Status)
}
func testL2RelayerProcessPendingBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
// this blockresult has number of 0x4, need to change it to match the testcase
// In this testcase scenario, db will store two blocks with height 0x4 and 0x3
var traces []*types.BlockTrace
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
blockTrace := &types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
assert.NoError(t, err)
traces = append(traces, blockTrace)
templateBlockTrace, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
blockTrace = &types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
assert.NoError(t, err)
traces = append(traces, blockTrace)
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{Number: traces[0].Header.Number.Uint64()},
&orm.BlockInfo{Number: traces[1].Header.Number.Uint64()},
"ff", 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[1].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
// err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupPending)
// assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessPendingBatches(&wg)
wg.Wait()
// Check if Rollup Result is changed successfully
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
assert.Equal(t, types.MsgSubmitted, msg.Status)
}
func testL2RelayerProcessCommittedBatches(t *testing.T) {
@@ -173,35 +101,32 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
batchHash := batchData1.Hash().Hex()
err = dbTx.Commit()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupCommitted)
err = db.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessCommittedBatches(&wg)
wg.Wait()
relayer.ProcessCommittedBatches()
status, err := db.GetRollupStatus(batchID)
status, err := db.GetRollupStatus(batchHash)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
assert.Equal(t, types.RollupFinalizing, status)
}
func testL2RelayerSkipBatches(t *testing.T) {
@@ -212,62 +137,75 @@ func testL2RelayerSkipBatches(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
createBatch := func(rollupStatus orm.RollupStatus, provingStatus orm.ProvingStatus) string {
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string {
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
batchData := genBatchData(t, index)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
batchHash := batchData.Hash().Hex()
err = dbTx.Commit()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchID, rollupStatus)
err = db.UpdateRollupStatus(context.Background(), batchHash, rollupStatus)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, provingStatus)
err = db.UpdateProvingStatus(batchHash, provingStatus)
assert.NoError(t, err)
return batchID
return batchHash
}
skipped := []string{
createBatch(orm.RollupCommitted, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitted, orm.ProvingTaskFailed),
createBatch(types.RollupCommitted, types.ProvingTaskSkipped, 1),
createBatch(types.RollupCommitted, types.ProvingTaskFailed, 2),
}
notSkipped := []string{
createBatch(orm.RollupPending, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitting, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalizing, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalized, orm.ProvingTaskSkipped),
createBatch(orm.RollupPending, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitting, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalizing, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalized, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitted, orm.ProvingTaskVerified),
createBatch(types.RollupPending, types.ProvingTaskSkipped, 3),
createBatch(types.RollupCommitting, types.ProvingTaskSkipped, 4),
createBatch(types.RollupFinalizing, types.ProvingTaskSkipped, 5),
createBatch(types.RollupFinalized, types.ProvingTaskSkipped, 6),
createBatch(types.RollupPending, types.ProvingTaskFailed, 7),
createBatch(types.RollupCommitting, types.ProvingTaskFailed, 8),
createBatch(types.RollupFinalizing, types.ProvingTaskFailed, 9),
createBatch(types.RollupFinalized, types.ProvingTaskFailed, 10),
createBatch(types.RollupCommitted, types.ProvingTaskVerified, 11),
}
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessCommittedBatches(&wg)
wg.Wait()
relayer.ProcessCommittedBatches()
for _, id := range skipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizationSkipped, status)
assert.Equal(t, types.RollupFinalizationSkipped, status)
}
for _, id := range notSkipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.NotEqual(t, orm.RollupFinalizationSkipped, status)
assert.NotEqual(t, types.RollupFinalizationSkipped, status)
}
}
func genBatchData(t *testing.T, index uint64) *types.BatchData {
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
// unmarshal blockTrace
blockTrace := &geth_types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
assert.NoError(t, err)
blockTrace.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16))
parentBatch := &types.BlockBatch{
Index: index,
Hash: "0x0000000000000000000000000000000000000000",
}
return types.NewBatchData(parentBatch, []*geth_types.BlockTrace{blockTrace}, nil)
}

View File

@@ -2,6 +2,7 @@ package l2
import (
"context"
"errors"
"fmt"
"math/big"
"reflect"
@@ -10,18 +11,24 @@ import (
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
"scroll-tech/database"
"scroll-tech/database/orm"
"scroll-tech/common/types"
"scroll-tech/bridge/config"
"scroll-tech/database"
)
// Metrics
var (
bridgeL2MsgSyncHeightGauge = metrics.NewRegisteredGauge("bridge/l2/msg/sync/height", nil)
)
type relayedMessage struct {
@@ -39,45 +46,102 @@ type WatcherClient struct {
orm database.OrmFactory
confirmations uint64
confirmations rpc.BlockNumber
messengerAddress common.Address
messengerABI *abi.ABI
messageQueueAddress common.Address
messageQueueABI *abi.ABI
// The height of the block up to which the watcher has retrieved event logs
processedMsgHeight uint64
stopped uint64
stopCh chan struct{}
batchProposer *batchProposer
}
// NewL2WatcherClient takes an l2geth client instance and generates a WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, orm database.OrmFactory) *WatcherClient {
savedHeight, err := orm.GetLayer2LatestWatchedHeight()
if err != nil {
log.Warn("fetch height from db failed", "err", err)
savedHeight = 0
}
return &WatcherClient{
w := WatcherClient{
ctx: ctx,
Client: client,
orm: orm,
processedMsgHeight: uint64(savedHeight),
confirmations: confirmations,
messengerAddress: messengerAddress,
messengerABI: bridge_abi.L2MessengerMetaABI,
stopCh: make(chan struct{}),
stopped: 0,
batchProposer: newBatchProposer(bpCfg, orm),
messengerAddress: messengerAddress,
messengerABI: bridge_abi.L2ScrollMessengerABI,
messageQueueAddress: messageQueueAddress,
messageQueueABI: bridge_abi.L2MessageQueueABI,
stopCh: make(chan struct{}),
stopped: 0,
}
// Initialize genesis before we do anything else
if err := w.initializeGenesis(); err != nil {
panic(fmt.Sprintf("failed to initialize L2 genesis batch, err: %v", err))
}
return &w
}
func (w *WatcherClient) initializeGenesis() error {
if count, err := w.orm.GetBatchCount(); err != nil {
return fmt.Errorf("failed to get batch count: %v", err)
} else if count > 0 {
log.Info("genesis already imported")
return nil
}
genesis, err := w.HeaderByNumber(w.ctx, big.NewInt(0))
if err != nil {
return fmt.Errorf("failed to retrieve L2 genesis header: %v", err)
}
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
blockTrace := &geth_types.BlockTrace{
Coinbase: nil,
Header: genesis,
Transactions: []*geth_types.TransactionData{},
StorageTrace: nil,
ExecutionResults: []*geth_types.ExecutionResult{},
MPTWitness: nil,
}
batchData := types.NewGenesisBatchData(blockTrace)
if err = AddBatchInfoToDB(w.orm, batchData); err != nil {
log.Error("failed to add batch info to DB", "BatchHash", batchData.Hash(), "error", err)
return err
}
batchHash := batchData.Hash().Hex()
if err = w.orm.UpdateProvingStatus(batchHash, types.ProvingTaskProved); err != nil {
return fmt.Errorf("failed to update genesis batch proving status: %v", err)
}
if err = w.orm.UpdateRollupStatus(w.ctx, batchHash, types.RollupFinalized); err != nil {
return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
}
log.Info("successfully imported genesis batch")
return nil
}
// Start the Listening process
func (w *WatcherClient) Start() {
log.Info("Starting l2/watcher")
go func() {
if reflect.ValueOf(w.orm).IsNil() {
panic("must run L2 watcher with DB")
@@ -87,7 +151,7 @@ func (w *WatcherClient) Start() {
// trace fetcher loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
for {
@@ -96,19 +160,12 @@ func (w *WatcherClient) Start() {
return
case <-ticker.C:
// get current height
number, err := w.BlockNumber(ctx)
number, err := utils.GetLatestConfirmedBlockNumber(ctx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get_BlockNumber", "err", err)
log.Error("failed to get block number", "err", err)
continue
}
if number >= w.confirmations {
number = number - w.confirmations
} else {
number = 0
}
w.tryFetchRunningMissingBlocks(ctx, number)
}
}
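// utils.GetLatestConfirmedBlockNumber is not shown in this diff. A minimal
// sketch of what such a helper might do, assuming non-negative confirmations
// mean "latest minus N blocks" and that the special rpc tags ("latest",
// "safe", "finalized") are resolved by querying the corresponding header:
func getLatestConfirmedBlockNumber(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber) (uint64, error) {
	if confirmations >= 0 {
		latest, err := client.BlockNumber(ctx)
		if err != nil {
			return 0, err
		}
		if latest < uint64(confirmations) {
			return 0, nil
		}
		return latest - uint64(confirmations), nil
	}
	// negative values are rpc tags; recent go-ethereum HeaderByNumber accepts
	// them when passed as a negative big.Int
	header, err := client.HeaderByNumber(ctx, big.NewInt(int64(confirmations)))
	if err != nil {
		return 0, err
	}
	return header.Number.Uint64(), nil
}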
@@ -116,7 +173,7 @@ func (w *WatcherClient) Start() {
// event fetcher loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
for {
@@ -125,40 +182,17 @@ func (w *WatcherClient) Start() {
return
case <-ticker.C:
// get current height
number, err := w.BlockNumber(ctx)
number, err := utils.GetLatestConfirmedBlockNumber(ctx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get_BlockNumber", "err", err)
log.Error("failed to get block number", "err", err)
continue
}
if number >= w.confirmations {
number = number - w.confirmations
} else {
number = 0
}
w.FetchContractEvent(number)
}
}
}(ctx)
// batch proposer loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
w.batchProposer.tryProposeBatch()
}
}
}(ctx)
<-w.stopCh
cancel()
}()
@@ -176,9 +210,9 @@ func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockH
// Get the newest block in the DB; blocks must already exist in the DB at this point.
// Don't use the "block_trace" table's "trace" column (BlockTrace.Number),
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
heightInDB, err := w.orm.GetBlockTracesLatestHeight()
heightInDB, err := w.orm.GetL2BlockTracesLatestHeight()
if err != nil {
log.Error("failed to GetBlockTracesLatestHeight", "err", err)
log.Error("failed to GetL2BlockTracesLatestHeight", "err", err)
return
}
@@ -204,7 +238,7 @@ func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockH
}
func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var traces []*types.BlockTrace
var traces []*geth_types.BlockTrace
for number := from; number <= to; number++ {
log.Debug("retrieving block trace", "height", number)
@@ -218,7 +252,7 @@ func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uin
}
if len(traces) > 0 {
if err := w.orm.InsertBlockTraces(traces); err != nil {
if err := w.orm.InsertL2BlockTraces(traces); err != nil {
return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
}
}
@@ -250,13 +284,15 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.messageQueueAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 3)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
query.Topics[0] = make([]common.Hash, 4)
query.Topics[0][0] = bridge_abi.L2SentMessageEventSignature
query.Topics[0][1] = bridge_abi.L2RelayedMessageEventSignature
query.Topics[0][2] = bridge_abi.L2FailedRelayedMessageEventSignature
query.Topics[0][3] = bridge_abi.L2AppendMessageEventSignature
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
@@ -265,6 +301,7 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
bridgeL2MsgSyncHeightGauge.Update(to)
continue
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
@@ -278,14 +315,13 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
var msgStatus types.MsgStatus
if msg.isSuccessful {
// succeed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
msgStatus = types.MsgConfirmed
} else {
// failed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
msgStatus = types.MsgFailed
}
if err != nil {
if err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return
}
@@ -297,71 +333,95 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
}
w.processedMsgHeight = uint64(to)
bridgeL2MsgSyncHeightGauge.Update(to)
}
}
func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, error) {
func (w *WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L2Message, []relayedMessage, error) {
// Need to use the contract ABI to parse the event logs.
// Can only be tested after our contracts are set up.
var l2Messages []*orm.L2Message
var l2Messages []*types.L2Message
var relayedMessages []relayedMessage
var lastAppendMsgHash common.Hash
var lastAppendMsgNonce uint64
for _, vLog := range logs {
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SentMessageEventSignature):
event := struct {
Target common.Address
Sender common.Address
Value *big.Int // uint256
Fee *big.Int // uint256
Deadline *big.Int // uint256
Message []byte
MessageNonce *big.Int // uint256
GasLimit *big.Int // uint256
}{}
err := w.messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
case bridge_abi.L2SentMessageEventSignature:
event := bridge_abi.L2SentMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "SentMessage", vLog)
if err != nil {
log.Error("failed to unpack layer2 SentMessage event", "err", err)
return l2Messages, relayedMessages, err
}
// target is in topics[1]
event.Target = common.HexToAddress(vLog.Topics[1].String())
l2Messages = append(l2Messages, &orm.L2Message{
computedMsgHash := utils.ComputeMessageHash(
event.Sender,
event.Target,
event.Value,
event.MessageNonce,
event.Message,
)
// The `AppendMessage` event is always emitted before the `SentMessage` event,
// so the two should always match; we double-check here anyway.
if event.MessageNonce.Uint64() != lastAppendMsgNonce {
errMsg := fmt.Sprintf("l2 message nonces mismatch: AppendMessage.nonce=%v, SentMessage.nonce=%v, tx_hash=%v",
lastAppendMsgNonce, event.MessageNonce.Uint64(), vLog.TxHash.Hex())
return l2Messages, relayedMessages, errors.New(errMsg)
}
if computedMsgHash != lastAppendMsgHash {
errMsg := fmt.Sprintf("l2 message hashes mismatch: AppendMessage.msg_hash=%v, SentMessage.msg_hash=%v, tx_hash=%v",
lastAppendMsgHash.Hex(), computedMsgHash.Hex(), vLog.TxHash.Hex())
return l2Messages, relayedMessages, errors.New(errMsg)
}
l2Messages = append(l2Messages, &types.L2Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
MsgHash: computedMsgHash.String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Fee: event.Fee.String(),
GasLimit: event.GasLimit.Uint64(),
Deadline: event.Deadline.Uint64(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
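// ComputeMessageHash is presumably keccak256 of the relayMessage calldata,
// mirroring _encodeXDomainCalldata in the mock contracts further down in this
// diff, i.e. roughly keccak256(abi.encodeWithSignature(
// "relayMessage(address,address,uint256,uint256,bytes)", sender, target,
// value, nonce, message)). That is why the hash emitted by the queue's
// AppendMessage event and the hash recomputed from SentMessage above can be
// compared directly.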
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
// MsgHash is in topics[1]
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
case bridge_abi.L2RelayedMessageEventSignature:
event := bridge_abi.L2RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err)
return l2Messages, relayedMessages, err
}
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MsgHash,
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
// MsgHash is in topics[1]
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
case bridge_abi.L2FailedRelayedMessageEventSignature:
event := bridge_abi.L2FailedRelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err)
return l2Messages, relayedMessages, err
}
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MsgHash,
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: false,
})
case bridge_abi.L2AppendMessageEventSignature:
event := bridge_abi.L2AppendMessageEvent{}
err := utils.UnpackLog(w.messageQueueABI, &event, "AppendMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 AppendMessage event", "err", err)
return l2Messages, relayedMessages, err
}
lastAppendMsgHash = event.MessageHash
lastAppendMsgNonce = event.Index.Uint64()
default:
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
}

View File

@@ -10,17 +10,18 @@ import (
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/config"
"scroll-tech/common/types"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
)
func testCreateNewWatcherAndStop(t *testing.T) {
@@ -31,12 +32,12 @@ func testCreateNewWatcherAndStop(t *testing.T) {
defer l2db.Close()
l2cfg := cfg.L2Config
rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2db)
rc.Start()
defer rc.Stop()
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = 0
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKeys)
assert.NoError(t, err)
@@ -44,7 +45,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
numTransactions := 3
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
for i := 0; i < numTransactions; i++ {
_, err = newSender.SendTransaction(strconv.Itoa(1000+i), &toAddress, big.NewInt(1000000000), nil, 0)
_, err = newSender.SendTransaction(strconv.Itoa(1000+i), &toAddress, big.NewInt(1000000000), nil)
assert.NoError(t, err)
<-newSender.ConfirmChan()
}
@@ -61,6 +62,11 @@ func testMonitorBridgeContract(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2cfg := cfg.L2Config
wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, db)
wc.Start()
defer wc.Stop()
previousHeight, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
@@ -72,7 +78,7 @@ func testMonitorBridgeContract(t *testing.T) {
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)
rc := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
rc := prepareWatcherClient(l2Cli, db, address)
rc.Start()
defer rc.Stop()
@@ -85,7 +91,7 @@ func testMonitorBridgeContract(t *testing.T) {
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
@@ -95,7 +101,7 @@ func testMonitorBridgeContract(t *testing.T) {
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
@@ -112,7 +118,7 @@ func testMonitorBridgeContract(t *testing.T) {
assert.NoError(t, err)
t.Log("Height in DB is", height)
assert.Greater(t, height, int64(previousHeight))
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
msgs, err := db.GetL2Messages(map[string]interface{}{"status": types.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
}
@@ -134,13 +140,13 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
assert.NoError(t, err)
rc := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
rc := prepareWatcherClient(l2Cli, db, address)
rc.Start()
defer rc.Stop()
// Call mock_bridge instance sendMessage to trigger emit events multiple times
numTransactions := 4
var tx *types.Transaction
var tx *geth_types.Transaction
for i := 0; i < numTransactions; i++ {
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
@@ -156,7 +162,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
}
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
@@ -172,7 +178,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
@@ -184,13 +190,14 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
msgs, err := db.GetL2Messages(map[string]interface{}{"status": types.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
}
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
return NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, db)
func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, db)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {

View File

@@ -2,98 +2,120 @@
pragma solidity ^0.8.0;
contract MockBridgeL1 {
/******************************
* Events from L1MessageQueue *
******************************/
/// @notice Emitted when a new L1 => L2 transaction is appended to the queue.
/// @param sender The address of the account that initiates the transaction.
/// @param target The address of the account that will receive the transaction.
/// @param value The value passed with the transaction.
/// @param queueIndex The index of this transaction in the queue.
/// @param gasLimit Gas limit required to complete the message relay on L2.
/// @param data The calldata of the transaction.
event QueueTransaction(
address indexed sender,
address indexed target,
uint256 value,
uint256 queueIndex,
uint256 gasLimit,
bytes data
);
/*********************************
* Events from L1ScrollMessenger *
*********************************/
/// @notice Emitted when a cross domain message is sent.
/// @param sender The address of the sender who initiates the message.
/// @param target The address of target contract to call.
/// @param value The amount of value passed to the target contract.
/// @param messageNonce The nonce of the message.
/// @param gasLimit The optional gas limit passed to L1 or L2.
/// @param message The calldata passed to the target contract.
event SentMessage(
address indexed sender,
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
uint256 gasLimit,
bytes message
);
event MessageDropped(bytes32 indexed msgHash);
/// @notice Emitted when a cross domain message is relayed successfully.
/// @param messageHash The hash of the message.
event RelayedMessage(bytes32 indexed messageHash);
event RelayedMessage(bytes32 indexed msgHash);
/// @dev The maximum number of transactions in one batch.
uint256 public immutable maxNumTxInBatch;
event FailedRelayedMessage(bytes32 indexed msgHash);
/// @dev The hash used for padding public inputs.
bytes32 public immutable paddingTxHash;
/************************
* Events from ZKRollup *
************************/
/***************************
* Events from ScrollChain *
***************************/
/// @notice Emitted when a new batch is committed.
/// @param _batchHash The hash of the batch
/// @param _batchIndex The index of the batch
/// @param _parentHash The hash of parent batch
event CommitBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
/// @param batchHash The hash of the batch
event CommitBatch(bytes32 indexed batchHash);
/// @notice Emitted when a batch is reverted.
/// @param _batchId The identification of the batch.
event RevertBatch(bytes32 indexed _batchId);
/// @param batchHash The identification of the batch.
event RevertBatch(bytes32 indexed batchHash);
/// @notice Emitted when a batch is finalized.
/// @param _batchHash The hash of the batch
/// @param _batchIndex The index of the batch
/// @param _parentHash The hash of parent batch
event FinalizeBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
/// @param batchHash The hash of the batch
event FinalizeBatch(bytes32 indexed batchHash);
/***********
* Structs *
***********/
struct L2MessageProof {
uint256 batchIndex;
uint256 blockHeight;
bytes merkleProof;
}
/// @dev The transaction struct
struct Layer2Transaction {
address caller;
uint64 nonce;
address target;
uint64 gas;
uint256 gasPrice;
uint256 value;
bytes data;
// signature
uint256 r;
uint256 s;
uint64 v;
}
/// @dev The block header struct
struct Layer2BlockHeader {
struct BlockContext {
// The hash of this block.
bytes32 blockHash;
// The parent hash of this block.
bytes32 parentHash;
uint256 baseFee;
bytes32 stateRoot;
uint64 blockHeight;
uint64 gasUsed;
// The height of this block.
uint64 blockNumber;
// The timestamp of this block.
uint64 timestamp;
bytes extraData;
Layer2Transaction[] txs;
// The base fee of this block.
// Currently, it is not used, because we disable EIP-1559.
// We keep it for future-proofing.
uint256 baseFee;
// The gas limit of this block.
uint64 gasLimit;
// The number of transactions in this block, both L1 & L2 txs.
uint16 numTransactions;
// The number of l1 messages in this block.
uint16 numL1Messages;
}
/// @dev The batch struct, the batch hash is always the last block hash of `blocks`.
struct Layer2Batch {
struct Batch {
// The list of blocks in this batch
BlockContext[] blocks; // MAX_NUM_BLOCKS = 100, about 5 min
// The state root of previous batch.
// The first batch will use 0x0 for prevStateRoot
bytes32 prevStateRoot;
// The state root of the last block in this batch.
bytes32 newStateRoot;
// The withdraw trie root of the last block in this batch.
bytes32 withdrawTrieRoot;
// The index of the batch.
uint64 batchIndex;
// The hash of the last block in the parent batch
bytes32 parentHash;
Layer2BlockHeader[] blocks;
// The parent batch hash.
bytes32 parentBatchHash;
// Concatenated raw data of RLP encoded L2 txs
bytes l2Transactions;
}
struct Layer2BatchStored {
struct L2MessageProof {
// The hash of the batch where the message belongs to.
bytes32 batchHash;
bytes32 parentHash;
uint64 batchIndex;
bool verified;
// Concatenation of merkle proof for withdraw merkle trie.
bytes merkleProof;
}
/*************
@@ -103,27 +125,39 @@ contract MockBridgeL1 {
/// @notice Message nonce, used to avoid relay attack.
uint256 public messageNonce;
/// @notice Mapping from batch id to batch struct.
mapping(bytes32 => Layer2BatchStored) public batches;
/***************
* Constructor *
***************/
constructor() {
maxNumTxInBatch = 44;
paddingTxHash = 0x0000000000000000000000000000000000000000000000000000000000000000;
}
/***********************************
* Functions from L2GasPriceOracle *
***********************************/
function setL2BaseFee(uint256) external {
}
/************************************
* Functions from L1ScrollMessenger *
************************************/
function sendMessage(
address _to,
uint256 _fee,
bytes memory _message,
uint256 _gasLimit
address target,
uint256 value,
bytes calldata message,
uint256 gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
uint256 _value;
unchecked {
_value = msg.value - _fee;
bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, target, value, messageNonce, message);
{
address _sender = applyL1ToL2Alias(address(this));
emit QueueTransaction(_sender, target, 0, messageNonce, gasLimit, _xDomainCalldata);
}
uint256 _nonce = messageNonce;
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
emit SentMessage(msg.sender, target, value, messageNonce, gasLimit, message);
messageNonce += 1;
}
@@ -131,57 +165,216 @@ contract MockBridgeL1 {
address _from,
address _to,
uint256 _value,
uint256 _fee,
uint256 _deadline,
uint256 _nonce,
bytes memory _message,
L2MessageProof memory
) external {
bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
emit RelayedMessage(_msghash);
bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _nonce, _message);
bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);
emit RelayedMessage(_xDomainCalldataHash);
}
/***************************
* Functions from ZKRollup *
***************************/
/******************************
* Functions from ScrollChain *
******************************/
function commitBatch(Layer2Batch memory _batch) external {
bytes32 _batchHash = _batch.blocks[_batch.blocks.length - 1].blockHash;
bytes32 _batchId = _computeBatchId(_batchHash, _batch.parentHash, _batch.batchIndex);
Layer2BatchStored storage _batchStored = batches[_batchId];
_batchStored.batchHash = _batchHash;
_batchStored.parentHash = _batch.parentHash;
_batchStored.batchIndex = _batch.batchIndex;
emit CommitBatch(_batchId, _batchHash, _batch.batchIndex, _batch.parentHash);
function commitBatch(Batch memory _batch) external {
_commitBatch(_batch);
}
function revertBatch(bytes32 _batchId) external {
emit RevertBatch(_batchId);
function commitBatches(Batch[] memory _batches) external {
for (uint256 i = 0; i < _batches.length; i++) {
_commitBatch(_batches[i]);
}
}
function revertBatch(bytes32 _batchHash) external {
emit RevertBatch(_batchHash);
}
function finalizeBatchWithProof(
bytes32 _batchId,
bytes32 _batchHash,
uint256[] memory,
uint256[] memory
) external {
Layer2BatchStored storage _batch = batches[_batchId];
uint256 _batchIndex = _batch.batchIndex;
emit FinalizeBatch(_batchId, _batch.batchHash, _batchIndex, _batch.parentHash);
emit FinalizeBatch(_batchHash);
}
/// @dev Internal function to compute a unique batch id for mapping.
/// @param _batchHash The hash of the batch.
/// @param _parentHash The hash of the parent batch.
/// @param _batchIndex The index of the batch.
/// @return Return the computed batch id.
function _computeBatchId(
bytes32 _batchHash,
bytes32 _parentHash,
uint256 _batchIndex
) internal pure returns (bytes32) {
return keccak256(abi.encode(_batchHash, _parentHash, _batchIndex));
/**********************
* Internal Functions *
**********************/
function _commitBatch(Batch memory _batch) internal {
bytes32 _batchHash = _computePublicInputHash(_batch);
emit CommitBatch(_batchHash);
}
}
/// @dev Internal function to generate the correct cross domain calldata for a message.
/// @param _sender Message sender address.
/// @param _target Target contract address.
/// @param _value The amount of ETH passed to the target.
/// @param _messageNonce Nonce for the provided message.
/// @param _message Message to send to the target.
/// @return ABI encoded cross domain calldata.
function _encodeXDomainCalldata(
address _sender,
address _target,
uint256 _value,
uint256 _messageNonce,
bytes memory _message
) internal pure returns (bytes memory) {
return
abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
_sender,
_target,
_value,
_messageNonce,
_message
);
}
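/// @dev applyL1ToL2Alias adds the standard aliasing offset to an L1 address.
/// Illustrative example (not taken from this diff): L1 address
/// 0x0000000000000000000000000000000000000001 maps to L2 address
/// 0x1111000000000000000000000000000000001112; the unchecked addition wraps
/// modulo 2**160.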
function applyL1ToL2Alias(address l1Address) internal pure returns (address l2Address) {
uint160 offset = uint160(0x1111000000000000000000000000000000001111);
unchecked {
l2Address = address(uint160(l1Address) + offset);
}
}
/// @dev Internal function to compute the public input hash.
/// @param batch The batch to compute.
function _computePublicInputHash(Batch memory batch)
internal
view
returns (
bytes32
)
{
uint256 publicInputsPtr;
// 1. append prevStateRoot, newStateRoot and withdrawTrieRoot to public inputs
{
bytes32 prevStateRoot = batch.prevStateRoot;
bytes32 newStateRoot = batch.newStateRoot;
bytes32 withdrawTrieRoot = batch.withdrawTrieRoot;
// number of bytes in public inputs: 32 * 3 + 124 * blocks + 32 * MAX_NUM_TXS
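// (the per-block size of 124 bytes = 32 blockHash + 32 parentHash
//  + 8 blockNumber + 8 timestamp + 32 baseFee + 8 gasLimit
//  + 2 numTransactions + 2 numL1Messages, matching the assembly appends below)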
uint256 publicInputsSize = 32 * 3 + batch.blocks.length * 124 + 32 * maxNumTxInBatch;
assembly {
publicInputsPtr := mload(0x40)
mstore(0x40, add(publicInputsPtr, publicInputsSize))
mstore(publicInputsPtr, prevStateRoot)
publicInputsPtr := add(publicInputsPtr, 0x20)
mstore(publicInputsPtr, newStateRoot)
publicInputsPtr := add(publicInputsPtr, 0x20)
mstore(publicInputsPtr, withdrawTrieRoot)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
uint64 numTransactionsInBatch;
BlockContext memory _block;
// 2. append block information to public inputs.
for (uint256 i = 0; i < batch.blocks.length; i++) {
// validate blocks; we don't check the first block against the previous batch.
{
BlockContext memory _currentBlock = batch.blocks[i];
if (i > 0) {
require(_block.blockHash == _currentBlock.parentHash, "Parent hash mismatch");
require(_block.blockNumber + 1 == _currentBlock.blockNumber, "Block number mismatch");
}
_block = _currentBlock;
}
// append blockHash and parentHash to public inputs
{
bytes32 blockHash = _block.blockHash;
bytes32 parentHash = _block.parentHash;
assembly {
mstore(publicInputsPtr, blockHash)
publicInputsPtr := add(publicInputsPtr, 0x20)
mstore(publicInputsPtr, parentHash)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
// append blockNumber and blockTimestamp to public inputs
{
uint256 blockNumber = _block.blockNumber;
uint256 blockTimestamp = _block.timestamp;
assembly {
mstore(publicInputsPtr, shl(192, blockNumber))
publicInputsPtr := add(publicInputsPtr, 0x8)
mstore(publicInputsPtr, shl(192, blockTimestamp))
publicInputsPtr := add(publicInputsPtr, 0x8)
}
}
// append baseFee to public inputs
{
uint256 baseFee = _block.baseFee;
assembly {
mstore(publicInputsPtr, baseFee)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
uint64 numTransactionsInBlock = _block.numTransactions;
// append gasLimit, numTransactions and numL1Messages to public inputs
{
uint256 gasLimit = _block.gasLimit;
uint256 numL1MessagesInBlock = _block.numL1Messages;
assembly {
mstore(publicInputsPtr, shl(192, gasLimit))
publicInputsPtr := add(publicInputsPtr, 0x8)
mstore(publicInputsPtr, shl(240, numTransactionsInBlock))
publicInputsPtr := add(publicInputsPtr, 0x2)
mstore(publicInputsPtr, shl(240, numL1MessagesInBlock))
publicInputsPtr := add(publicInputsPtr, 0x2)
}
}
numTransactionsInBatch += numTransactionsInBlock;
}
require(numTransactionsInBatch <= maxNumTxInBatch, "Too many transactions in batch");
// 3. append transaction hash to public inputs.
uint256 _l2TxnPtr;
{
bytes memory l2Transactions = batch.l2Transactions;
assembly {
_l2TxnPtr := add(l2Transactions, 0x20)
}
}
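// batch.l2Transactions is a flat byte string of [4-byte payload length | RLP-encoded tx payload]
// entries; the assembly below reads each length prefix via shr(224, ...), advances the pointer
// past the payload, and appends keccak256 of that payload to the public inputs.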
for (uint256 i = 0; i < batch.blocks.length; i++) {
uint256 numL1MessagesInBlock = batch.blocks[i].numL1Messages;
require(numL1MessagesInBlock == 0);
uint256 numTransactionsInBlock = batch.blocks[i].numTransactions;
for (uint256 j = numL1MessagesInBlock; j < numTransactionsInBlock; ++j) {
bytes32 hash;
assembly {
let txPayloadLength := shr(224, mload(_l2TxnPtr))
_l2TxnPtr := add(_l2TxnPtr, 4)
_l2TxnPtr := add(_l2TxnPtr, txPayloadLength)
hash := keccak256(sub(_l2TxnPtr, txPayloadLength), txPayloadLength)
mstore(publicInputsPtr, hash)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
}
// 4. append padding transaction to public inputs.
bytes32 txHashPadding = paddingTxHash;
for (uint256 i = numTransactionsInBatch; i < maxNumTxInBatch; i++) {
assembly {
mstore(publicInputsPtr, txHashPadding)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
// 5. compute public input hash
bytes32 publicInputHash;
{
uint256 publicInputsSize = 32 * 3 + batch.blocks.length * 124 + 32 * maxNumTxInBatch;
assembly {
publicInputHash := keccak256(sub(publicInputsPtr, publicInputsSize), publicInputsSize)
}
}
return publicInputHash;
}
}

View File

@@ -2,26 +2,56 @@
pragma solidity ^0.8.0;
contract MockBridgeL2 {
/******************************
* Events from L2MessageQueue *
******************************/
/// @notice Emitted when a new message is added to the merkle tree.
/// @param index The index of the corresponding message.
/// @param messageHash The hash of the corresponding message.
event AppendMessage(uint256 index, bytes32 messageHash);
/********************************
* Events from L1BlockContainer *
********************************/
/// @notice Emitted when a block is imported.
/// @param blockHash The hash of the imported block.
/// @param blockHeight The height of the imported block.
/// @param blockTimestamp The timestamp of the imported block.
/// @param baseFee The base fee of the imported block.
/// @param stateRoot The state root of the imported block.
event ImportBlock(
bytes32 indexed blockHash,
uint256 blockHeight,
uint256 blockTimestamp,
uint256 baseFee,
bytes32 stateRoot
);
/*********************************
* Events from L2ScrollMessenger *
*********************************/
/// @notice Emitted when a cross domain message is sent.
/// @param sender The address of the sender who initiates the message.
/// @param target The address of target contract to call.
/// @param value The amount of value passed to the target contract.
/// @param messageNonce The nonce of the message.
/// @param gasLimit The optional gas limit passed to L1 or L2.
/// @param message The calldata passed to the target contract.
event SentMessage(
address indexed sender,
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
uint256 gasLimit,
bytes message
);
event MessageDropped(bytes32 indexed msgHash);
event RelayedMessage(bytes32 indexed msgHash);
event FailedRelayedMessage(bytes32 indexed msgHash);
/// @notice Emitted when a cross domain message is relayed successfully.
/// @param messageHash The hash of the message.
event RelayedMessage(bytes32 indexed messageHash);
/*************
* Variables *
@@ -30,38 +60,70 @@ contract MockBridgeL2 {
/// @notice Message nonce, used to avoid relay attack.
uint256 public messageNonce;
/***********************************
* Functions from L1GasPriceOracle *
***********************************/
function setL1BaseFee(uint256) external {
}
/************************************
* Functions from L2ScrollMessenger *
************************************/
function sendMessage(
address _to,
uint256 _fee,
uint256 _value,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
uint256 _nonce = messageNonce;
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
bytes32 _msghash = keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message));
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
messageNonce = _nonce + 1;
bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, _to, _value, messageNonce, _message);
bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);
emit AppendMessage(messageNonce, _xDomainCalldataHash);
emit SentMessage(msg.sender, _to, _value, messageNonce, _gasLimit, _message);
messageNonce += 1;
}
function relayMessageWithProof(
function relayMessage(
address _from,
address _to,
uint256 _value,
uint256 _fee,
uint256 _deadline,
uint256 _nonce,
bytes memory _message
bytes calldata _message
) external {
bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
emit RelayedMessage(_msghash);
bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _nonce, _message);
bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);
emit RelayedMessage(_xDomainCalldataHash);
}
/**********************
* Internal Functions *
**********************/
/// @dev Internal function to generate the correct cross domain calldata for a message.
/// @param _sender Message sender address.
/// @param _target Target contract address.
/// @param _value The amount of ETH passed to the target.
/// @param _messageNonce Nonce for the provided message.
/// @param _message Message to send to the target.
/// @return ABI encoded cross domain calldata.
function _encodeXDomainCalldata(
address _sender,
address _target,
uint256 _value,
uint256 _messageNonce,
bytes memory _message
) internal pure returns (bytes memory) {
return
abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
_sender,
_target,
_value,
_messageNonce,
_message
);
}
}
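For reference, the cross-domain calldata hash that _encodeXDomainCalldata builds on-chain can be reproduced off-chain with the same packing rules. The sketch below is illustrative only: it uses upstream go-ethereum packages and the test vector from TestComputeMessageHash further down in this diff, while the bridge itself packs through bridgeabi.L2ScrollMessengerABI in utils.ComputeMessageHash. With these inputs it should print the hash asserted in that test.
package main

import (
    "fmt"
    "math/big"
    "strings"

    "github.com/ethereum/go-ethereum/accounts/abi"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto"
)

// Minimal ABI fragment matching the relayMessage signature used by _encodeXDomainCalldata.
const relayMessageABI = `[{"type":"function","name":"relayMessage","inputs":[
    {"name":"sender","type":"address"},
    {"name":"target","type":"address"},
    {"name":"value","type":"uint256"},
    {"name":"nonce","type":"uint256"},
    {"name":"message","type":"bytes"}]}]`

func main() {
    parsed, err := abi.JSON(strings.NewReader(relayMessageABI))
    if err != nil {
        panic(err)
    }
    // Pack the call exactly as the contract does with abi.encodeWithSignature.
    data, err := parsed.Pack("relayMessage",
        common.HexToAddress("0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63"), // sender
        common.HexToAddress("0x4592D8f8D7B001e72Cb26A73e4Fa1806a51aC79d"), // target
        big.NewInt(0),                // value
        big.NewInt(1),                // message nonce
        []byte("testbridgecontract"), // message
    )
    if err != nil {
        panic(err)
    }
    // The keccak256 of this calldata is the hash the mock emits via AppendMessage / RelayedMessage.
    fmt.Println(crypto.Keccak256Hash(data).Hex())
}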

View File

@@ -1,5 +1,8 @@
#!/bin/bash
# generates go bindings from contracts, to paste into abi/bridge_abi.go
# compile artifacts in the /contracts folder with `forge build` first
# Only run if executed from the repository root.
if [[ ! -d "cmd" ]]
then
@@ -14,16 +17,20 @@ else
mkdir -p contracts
fi
abi_name=("IL1GatewayRouter" "IL2GatewayRouter" "IL1ScrollMessenger" "IL2ScrollMessenger" "ZKRollup")
pkg_name=("l1_gateway" "l2_gateway" "l1_messenger" "l2_messenger" "rollup")
gen_name=("L1GatewayRouter" "L2GatewayRouter" "L1ScrollMessenger" "L2ScrollMessenger" "ZKRollup")
abi_name=("IL1GatewayRouter" "IL2GatewayRouter" "IL1ScrollMessenger" "IL2ScrollMessenger" "IScrollChain" "L1MessageQueue")
pkg_name=("l1_gateway" "l2_gateway" "l1_messenger" "l2_messenger" "scrollchain" "l1_message_queue")
gen_name=("L1GatewayRouter" "L2GatewayRouter" "L1ScrollMessenger" "L2ScrollMessenger" "IScrollChain" "L1MessageQueue")
for i in "${!abi_name[@]}"; do
abi="bridge/abi/${abi_name[$i]}.json"
pkg="${pkg_name[$i]}"
mkdir -p tmp
abi="tmp/${abi_name[$i]}.json"
cat ../contracts/artifacts/src/${abi_name[$i]}.sol/${abi_name[$i]}.json | jq '.abi' > $abi
pkg="${pkg_name[$i]}_abi"
out="contracts/${pkg}/${gen_name[$i]}.go"
echo "generating ${out} from ${abi}"
mkdir -p contracts/$pkg
abigen --abi=$abi --pkg=$pkg --out=$out
awk '{sub("github.com/ethereum","github.com/scroll-tech")}1' $out > temp && mv temp $out
done
done
rm -rf tmp

View File

@@ -20,6 +20,8 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/config"
)
@@ -39,16 +41,6 @@ var (
ErrNoAvailableAccount = errors.New("sender has no available account to send transaction")
)
// DefaultSenderConfig The default config
var DefaultSenderConfig = config.SenderConfig{
Endpoint: "",
EscalateBlocks: 3,
EscalateMultipleNum: 11,
EscalateMultipleDen: 10,
MaxGasPrice: 1000_000_000_000, // this is 1000 gwei
TxType: AccessListTxType,
}
// Confirmation struct used to indicate transaction confirmation details
type Confirmation struct {
ID string
@@ -95,9 +87,6 @@ type Sender struct {
// NewSender returns a new instance of transaction sender
// txConfirmationCh is used to notify confirmed transactions
func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.PrivateKey) (*Sender, error) {
if config == nil {
config = &DefaultSenderConfig
}
client, err := ethclient.Dial(config.Endpoint)
if err != nil {
return nil, err
@@ -120,6 +109,15 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
return nil, err
}
var baseFeePerGas uint64
if config.TxType == DynamicFeeTxType {
if header.BaseFee != nil {
baseFeePerGas = header.BaseFee.Uint64()
} else {
return nil, errors.New("DynamicFeeTxType not supported, header.BaseFee nil")
}
}
sender := &Sender{
ctx: ctx,
config: config,
@@ -128,7 +126,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
auths: auths,
confirmCh: make(chan *Confirmation, 128),
blockNumber: header.Number.Uint64(),
baseFeePerGas: header.BaseFee.Uint64(),
baseFeePerGas: baseFeePerGas,
pendingTxs: sync.Map{},
stopCh: make(chan struct{}),
}
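The motivation for the guard above: the previous code read header.BaseFee.Uint64() unconditionally, which dereferences a nil pointer on chains without an EIP-1559 base fee, while only dynamic-fee transactions actually need the value. A minimal stand-alone restatement, with illustrative names rather than the repo's API:
package sendersketch

import (
    "errors"

    "github.com/ethereum/go-ethereum/core/types"
)

// Stand-in for the sender package's TxType constant; the value is illustrative.
const dynamicFeeTxType = "DynamicFeeTx"

// resolveBaseFee mirrors the guard above: only dynamic-fee (EIP-1559) senders need
// header.BaseFee; legacy and access-list senders can safely leave it at zero.
func resolveBaseFee(header *types.Header, txType string) (uint64, error) {
    if txType != dynamicFeeTxType {
        return 0, nil
    }
    if header.BaseFee == nil {
        return 0, errors.New("dynamic fee tx requested but header has no base fee (pre-EIP-1559 chain)")
    }
    return header.BaseFee.Uint64(), nil
}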
@@ -154,21 +152,18 @@ func (s *Sender) NumberOfAccounts() int {
return len(s.auths.accounts)
}
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte, gasLimit uint64) (*FeeData, error) {
if gasLimit == 0 {
// estimate gas limit
var err error
gasLimit, err = s.client.EstimateGas(s.ctx, geth.CallMsg{From: auth.From, To: target, Value: value, Data: data})
if err != nil {
return nil, err
}
gasLimit = gasLimit * 15 / 10 // 50% extra gas to avoid out-of-gas errors
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte) (*FeeData, error) {
// estimate gas limit
gasLimit, err := s.client.EstimateGas(s.ctx, geth.CallMsg{From: auth.From, To: target, Value: value, Data: data})
if err != nil {
return nil, err
}
gasLimit = gasLimit * 15 / 10 // 50% extra gas to avoid out-of-gas errors
// @todo change it when Scroll enable EIP1559
if s.config.TxType != DynamicFeeTxType {
// estimate gas price
gasPrice, err := s.client.SuggestGasPrice(s.ctx)
var gasPrice *big.Int
gasPrice, err = s.client.SuggestGasPrice(s.ctx)
if err != nil {
return nil, err
}
@@ -192,7 +187,7 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
}
// SendTransaction sends a signed L2-to-L1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, gasLimit uint64) (hash common.Hash, err error) {
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte) (hash common.Hash, err error) {
// We occupy the ID in case other threads call with the same ID at the same time
if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
return common.Hash{}, fmt.Errorf("duplicate tx ID: %s", ID)
@@ -216,10 +211,9 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
tx *types.Transaction
)
// estimate gas fee
if feeData, err = s.getFeeData(auth, target, value, data, gasLimit); err != nil {
if feeData, err = s.getFeeData(auth, target, value, data); err != nil {
return
}
if tx, err = s.createAndSendTx(auth, feeData, target, value, data, nil); err == nil {
// add pending transaction to queue
pending := &PendingTransaction{
@@ -356,11 +350,20 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
}
// CheckPendingTransaction Check pending transaction given number of blocks to wait before confirmation.
func (s *Sender) CheckPendingTransaction(header *types.Header) {
// checkPendingTransaction checks the confirmation status of pending transactions against the latest confirmed block number.
// If a transaction hasn't been confirmed after a certain number of blocks, it will be resubmitted with an increased gas price.
func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64) {
number := header.Number.Uint64()
atomic.StoreUint64(&s.blockNumber, number)
atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
if s.config.TxType == DynamicFeeTxType {
if header.BaseFee != nil {
atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
} else {
log.Error("DynamicFeeTxType not supported, header.BaseFee nil")
}
}
s.pendingTxs.Range(func(key, value interface{}) bool {
// ignore empty id, since we use empty id to occupy pending task
if value == nil || reflect.ValueOf(value).IsNil() {
@@ -370,7 +373,7 @@ func (s *Sender) CheckPendingTransaction(header *types.Header) {
pending := value.(*PendingTransaction)
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
if number >= receipt.BlockNumber.Uint64()+s.config.Confirmations {
if receipt.BlockNumber.Uint64() <= confirmed {
s.pendingTxs.Delete(key)
// send confirm message
s.confirmCh <- &Confirmation{
@@ -444,7 +447,14 @@ func (s *Sender) loop(ctx context.Context) {
log.Error("failed to get latest head", "err", err)
continue
}
s.CheckPendingTransaction(header)
confirmed, err := utils.GetLatestConfirmedBlockNumber(s.ctx, s.client, s.config.Confirmations)
if err != nil {
log.Error("failed to get latest confirmed block number", "err", err)
continue
}
s.checkPendingTransaction(header, confirmed)
case <-checkBalanceTicker.C:
// Check and set balance.
_ = s.auths.checkAndSetBalances(ctx)

View File

@@ -14,6 +14,7 @@ import (
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
@@ -68,7 +69,7 @@ func testBatchSender(t *testing.T, batchSize int) {
}
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
senderCfg.Confirmations = 0
senderCfg.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
if err != nil {
t.Fatal(err)
@@ -87,7 +88,7 @@ func testBatchSender(t *testing.T, batchSize int) {
for i := 0; i < TXBatch; i++ {
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := strconv.Itoa(i + index*1000)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
if errors.Is(err, sender.ErrNoAvailableAccount) {
<-time.After(time.Second)
continue

View File

@@ -4,18 +4,20 @@ import (
"context"
"crypto/ecdsa"
"math/big"
"scroll-tech/common/docker"
"testing"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock_bridge"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/common/docker"
)
var (
@@ -43,8 +45,8 @@ var (
l1MessengerAddress common.Address
// l1 rollup contract
l1RollupInstance *mock_bridge.MockBridgeL1
l1RollupAddress common.Address
scrollChainInstance *mock_bridge.MockBridgeL1
scrollChainAddress common.Address
// l2 messenger contract
l2MessengerInstance *mock_bridge.MockBridgeL2
@@ -59,16 +61,20 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err)
rollupPrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121214"))
assert.NoError(t, err)
gasOraclePrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121215"))
assert.NoError(t, err)
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L1Config.Confirmations = 0
cfg.L1Config.Confirmations = rpc.LatestBlockNumber
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
cfg.L2Config.Confirmations = 0
cfg.L1Config.RelayerConfig.GasOracleSenderPrivateKeys = []*ecdsa.PrivateKey{gasOraclePrivateKey}
cfg.L2Config.Confirmations = rpc.LatestBlockNumber
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
cfg.L2Config.RelayerConfig.GasOracleSenderPrivateKeys = []*ecdsa.PrivateKey{gasOraclePrivateKey}
// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
@@ -97,8 +103,10 @@ func setupEnv(t *testing.T) {
// send some balance to message and rollup sender
transferEther(t, l1Auth, l1Client, messagePrivateKey)
transferEther(t, l1Auth, l1Client, rollupPrivateKey)
transferEther(t, l1Auth, l1Client, gasOraclePrivateKey)
transferEther(t, l2Auth, l2Client, messagePrivateKey)
transferEther(t, l2Auth, l2Client, rollupPrivateKey)
transferEther(t, l2Auth, l2Client, gasOraclePrivateKey)
}
func transferEther(t *testing.T, auth *bind.TransactOpts, client *ethclient.Client, privateKey *ecdsa.PrivateKey) {
@@ -158,10 +166,10 @@ func prepareContracts(t *testing.T) {
l1MessengerAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)
// L1 rollup contract
_, tx, l1RollupInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
// L1 ScrollChain contract
_, tx, scrollChainInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
assert.NoError(t, err)
l1RollupAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
scrollChainAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)
// L2 messenger contract
@@ -171,12 +179,16 @@ func prepareContracts(t *testing.T) {
assert.NoError(t, err)
cfg.L1Config.L1MessengerAddress = l1MessengerAddress
cfg.L1Config.RollupContractAddress = l1RollupAddress
cfg.L1Config.L1MessageQueueAddress = l1MessengerAddress
cfg.L1Config.ScrollChainContractAddress = scrollChainAddress
cfg.L1Config.RelayerConfig.MessengerContractAddress = l2MessengerAddress
cfg.L1Config.RelayerConfig.GasPriceOracleContractAddress = l1MessengerAddress
cfg.L2Config.L2MessengerAddress = l2MessengerAddress
cfg.L2Config.L2MessageQueueAddress = l2MessengerAddress
cfg.L2Config.RelayerConfig.MessengerContractAddress = l1MessengerAddress
cfg.L2Config.RelayerConfig.RollupContractAddress = l1RollupAddress
cfg.L2Config.RelayerConfig.RollupContractAddress = scrollChainAddress
cfg.L2Config.RelayerConfig.GasPriceOracleContractAddress = l2MessengerAddress
}
func prepareAuth(t *testing.T, client *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
@@ -195,8 +207,15 @@ func TestFunction(t *testing.T) {
// l1 rollup and watch rollup events
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
// l1 message
t.Run("TestRelayL1MessageSucceed", testRelayL1MessageSucceed)
// l2 message
t.Run("testRelayL2MessageSucceed", testRelayL2MessageSucceed)
t.Run("TestRelayL2MessageSucceed", testRelayL2MessageSucceed)
// l1/l2 gas oracle
t.Run("TestImportL1GasPrice", testImportL1GasPrice)
t.Run("TestImportL2GasPrice", testImportL2GasPrice)
t.Cleanup(func() {
free(t)

View File

@@ -0,0 +1,129 @@
package tests
import (
"context"
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func testImportL1GasPrice(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t)
l1Cfg := cfg.L1Config
// Create L1Relayer
l1Relayer, err := l1.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
defer l1Relayer.Stop()
// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := l1.NewWatcher(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())
assert.Greater(t, number, startHeight-1)
assert.NoError(t, err)
err = l1Watcher.FetchBlockHeader(number)
assert.NoError(t, err)
// check db status
latestBlockHeight, err := db.GetLatestL1BlockHeight()
assert.NoError(t, err)
assert.Equal(t, number, latestBlockHeight)
blocks, err := db.GetL1BlockInfos(map[string]interface{}{
"number": latestBlockHeight,
})
assert.NoError(t, err)
assert.Equal(t, len(blocks), 1)
assert.Equal(t, blocks[0].GasOracleStatus, types.GasOraclePending)
assert.Equal(t, blocks[0].OracleTxHash.Valid, false)
// relay gas price
l1Relayer.ProcessGasPriceOracle()
blocks, err = db.GetL1BlockInfos(map[string]interface{}{
"number": latestBlockHeight,
})
assert.NoError(t, err)
assert.Equal(t, len(blocks), 1)
assert.Equal(t, blocks[0].GasOracleStatus, types.GasOracleImporting)
assert.Equal(t, blocks[0].OracleTxHash.Valid, true)
}
func testImportL2GasPrice(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t)
l2Cfg := cfg.L2Config
// Create L2Relayer
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// add fake blocks
traces := []*geth_types.BlockTrace{
{
Header: &geth_types.Header{
Number: big.NewInt(1),
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
StorageTrace: &geth_types.StorageTrace{},
},
}
err = db.InsertL2BlockTraces(traces)
assert.NoError(t, err)
parentBatch := &types.BlockBatch{
Index: 0,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData := types.NewBatchData(parentBatch, []*geth_types.BlockTrace{
traces[0],
}, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
// add fake batch
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
assert.NoError(t, dbTx.Commit())
// check db status
batch, err := db.GetLatestBatch()
assert.NoError(t, err)
assert.Equal(t, batch.OracleStatus, types.GasOraclePending)
assert.Equal(t, batch.OracleTxHash.Valid, false)
// relay gas price
l2Relayer.ProcessGasPriceOracle()
batch, err = db.GetLatestBatch()
assert.NoError(t, err)
assert.Equal(t, batch.OracleStatus, types.GasOracleImporting)
assert.Equal(t, batch.OracleTxHash.Valid, true)
}

View File

@@ -0,0 +1,86 @@
package tests
import (
"context"
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func testRelayL1MessageSucceed(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t)
l1Cfg := cfg.L1Config
l2Cfg := cfg.L2Config
// Create L1Relayer
l1Relayer, err := l1.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
defer l1Relayer.Stop()
// Create L1Watcher
confirmations := rpc.LatestBlockNumber
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// Create L2Watcher
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, db)
// send message through l1 messenger contract
nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})
assert.NoError(t, err)
sendTx, err := l1MessengerInstance.SendMessage(l1Auth, l2Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
assert.NoError(t, err)
sendReceipt, err := bind.WaitMined(context.Background(), l1Client, sendTx)
assert.NoError(t, err)
if sendReceipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// l1 watch process events
l1Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
// check db status
msg, err := db.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgPending)
assert.Equal(t, msg.Target, l2Auth.From.String())
// process l1 messages
l1Relayer.ProcessSavedEvents()
msg, err = db.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgSubmitted)
relayTxHash, err := db.GetRelayL1MessageTxHash(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, true, relayTxHash.Valid)
relayTx, _, err := l2Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l2Client, relayTx)
assert.NoError(t, err)
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
l2Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
msg, err = db.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, types.MsgConfirmed)
}

View File

@@ -3,19 +3,21 @@ package tests
import (
"context"
"math/big"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"sync"
"testing"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func testRelayL2MessageSucceed(t *testing.T) {
@@ -25,23 +27,21 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
var wg sync.WaitGroup
wg.Add(3)
prepareContracts(t)
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L2Watcher
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, 0, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)
confirmations := rpc.LatestBlockNumber
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, db)
// Create L2Relayer
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// send message through l2 messenger contract
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
@@ -50,7 +50,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.NoError(t, err)
sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
assert.NoError(t, err)
if sendReceipt.Status != types.ReceiptStatusSuccessful || err != nil {
if sendReceipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
@@ -60,62 +60,60 @@ func testRelayL2MessageSucceed(t *testing.T) {
// check db status
msg, err := db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgPending)
assert.Equal(t, msg.Status, types.MsgPending)
assert.Equal(t, msg.Sender, l2Auth.From.String())
assert.Equal(t, msg.Target, l1Auth.From.String())
// add fake blocks
traces := []*types.BlockTrace{
traces := []*geth_types.BlockTrace{
{
Header: &types.Header{
Header: &geth_types.Header{
Number: sendReceipt.BlockNumber,
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
StorageTrace: &types.StorageTrace{},
StorageTrace: &geth_types.StorageTrace{},
},
}
err = db.InsertBlockTraces(traces)
err = db.InsertL2BlockTraces(traces)
assert.NoError(t, err)
parentBatch := &types.BlockBatch{
Index: 0,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData := types.NewBatchData(parentBatch, []*geth_types.BlockTrace{
traces[0],
}, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
batchHash := batchData.Hash().String()
// add fake batch
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
traces[0].Header.ParentHash.String(), 1, 194676)
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[0].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
err = db.SetBatchHashForL2BlocksInDBTx(dbTx, blockIDs, batchHash)
assert.NoError(t, err)
assert.NoError(t, dbTx.Commit())
// add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err)
// process pending batch and check status
l2Relayer.ProcessPendingBatches(&wg)
status, err := db.GetRollupStatus(batchID)
l2Relayer.SendCommitTx([]*types.BatchData{batchData})
status, err := db.GetRollupStatus(batchHash)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.Equal(t, types.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchHash)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
@@ -127,16 +125,16 @@ func testRelayL2MessageSucceed(t *testing.T) {
// fetch CommitBatch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, status)
assert.Equal(t, types.RollupCommitted, status)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches(&wg)
status, err = db.GetRollupStatus(batchID)
l2Relayer.ProcessCommittedBatches()
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.Equal(t, types.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchHash)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
@@ -148,15 +146,15 @@ func testRelayL2MessageSucceed(t *testing.T) {
// fetch FinalizeBatch events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, status)
assert.Equal(t, types.RollupFinalized, status)
// process l2 messages
l2Relayer.ProcessSavedEvents(&wg)
l2Relayer.ProcessSavedEvents()
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgSubmitted)
assert.Equal(t, msg.Status, types.MsgSubmitted)
relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, true, relayTxHash.Valid)
@@ -171,5 +169,5 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgConfirmed)
assert.Equal(t, msg.Status, types.MsgConfirmed)
}

View File

@@ -3,19 +3,20 @@ package tests
import (
"context"
"math/big"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"sync"
"testing"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func testCommitBatchAndFinalizeBatch(t *testing.T) {
@@ -29,66 +30,63 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// add some blocks to db
var traces []*types.BlockTrace
var traces []*geth_types.BlockTrace
var parentHash common.Hash
for i := 1; i <= 10; i++ {
header := types.Header{
header := geth_types.Header{
Number: big.NewInt(int64(i)),
ParentHash: parentHash,
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
}
traces = append(traces, &types.BlockTrace{
traces = append(traces, &geth_types.BlockTrace{
Header: &header,
StorageTrace: &types.StorageTrace{},
StorageTrace: &geth_types.StorageTrace{},
})
parentHash = header.Hash()
}
err = db.InsertBlockTraces(traces)
err = db.InsertL2BlockTraces(traces)
assert.NoError(t, err)
parentBatch := &types.BlockBatch{
Index: 0,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData := types.NewBatchData(parentBatch, []*geth_types.BlockTrace{
traces[0],
traces[1],
}, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
batchHash := batchData.Hash().String()
// add one batch to db
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
Number: traces[1].Header.Number.Uint64(),
Hash: traces[1].Header.Hash().String(),
ParentHash: traces[1].Header.ParentHash.String(),
},
traces[0].Header.ParentHash.String(), 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[1].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
for i, block := range batchData.Batch.Blocks {
blockIDs[i] = block.BlockNumber
}
err = db.SetBatchHashForL2BlocksInDBTx(dbTx, blockIDs, batchHash)
assert.NoError(t, err)
assert.NoError(t, dbTx.Commit())
var wg = sync.WaitGroup{}
wg.Add(1)
// process pending batch and check status
l2Relayer.ProcessPendingBatches(&wg)
wg.Wait()
l2Relayer.SendCommitTx([]*types.BatchData{batchData})
status, err := db.GetRollupStatus(batchID)
status, err := db.GetRollupStatus(batchHash)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.Equal(t, types.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchHash)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
@@ -100,27 +98,25 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// fetch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, status)
assert.Equal(t, types.RollupCommitted, status)
// add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err)
wg.Add(1)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches(&wg)
wg.Wait()
l2Relayer.ProcessCommittedBatches()
status, err = db.GetRollupStatus(batchID)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.Equal(t, types.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchHash)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
@@ -132,7 +128,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// fetch rollup events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
status, err = db.GetRollupStatus(batchHash)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, status)
assert.Equal(t, types.RollupFinalized, status)
}

View File

@@ -0,0 +1,56 @@
package utils
import (
"context"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
)
type ethClient interface {
BlockNumber(ctx context.Context) (uint64, error)
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
}
// GetLatestConfirmedBlockNumber returns the latest confirmed block number for the given rpc.BlockNumber confirmation setting.
func GetLatestConfirmedBlockNumber(ctx context.Context, client ethClient, confirm rpc.BlockNumber) (uint64, error) {
switch {
case confirm == rpc.SafeBlockNumber || confirm == rpc.FinalizedBlockNumber:
var tag *big.Int
if confirm == rpc.FinalizedBlockNumber {
tag = big.NewInt(int64(rpc.FinalizedBlockNumber))
} else {
tag = big.NewInt(int64(rpc.SafeBlockNumber))
}
header, err := client.HeaderByNumber(ctx, tag)
if err != nil {
return 0, err
}
if !header.Number.IsInt64() {
return 0, fmt.Errorf("received invalid block confirm: %v", header.Number)
}
return header.Number.Uint64(), nil
case confirm == rpc.LatestBlockNumber:
number, err := client.BlockNumber(ctx)
if err != nil {
return 0, err
}
return number, nil
case confirm.Int64() >= 0: // A positive integer is treated as a fixed number of confirmations.
number, err := client.BlockNumber(ctx)
if err != nil {
return 0, err
}
cfmNum := uint64(confirm.Int64())
if number >= cfmNum {
return number - cfmNum, nil
}
return 0, nil
default:
return 0, fmt.Errorf("unknown confirmation type: %v", confirm)
}
}
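A usage sketch (illustrative, not part of this change): an *ethclient.Client already satisfies the small ethClient interface above, so callers such as the sender loop can resolve a confirmed height and compare receipt block numbers against it. For example, with a numeric confirmation depth of 6 and the head at block 107, the helper returns 101, so a receipt mined at block 101 or earlier counts as confirmed; with the safe or finalized tags it follows the corresponding header instead.
package utilsketch

import (
    "context"

    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/rpc"

    "scroll-tech/bridge/utils"
)

// deepEnough reports whether a receipt mined at receiptBlock has reached a fixed
// confirmation depth of 6 blocks (the depth is hard-coded here purely for illustration).
func deepEnough(ctx context.Context, client *ethclient.Client, receiptBlock uint64) (bool, error) {
    confirmed, err := utils.GetLatestConfirmedBlockNumber(ctx, client, rpc.BlockNumber(6))
    if err != nil {
        return false, err
    }
    return receiptBlock <= confirmed, nil
}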

View File

@@ -0,0 +1,107 @@
package utils_test
import (
"context"
"encoding/json"
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/go-ethereum/common/math"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/bridge/utils"
)
var (
tests = []struct {
input string
mustFail bool
expected rpc.BlockNumber
}{
{`"0x"`, true, rpc.BlockNumber(0)},
{`"0x0"`, false, rpc.BlockNumber(0)},
{`"0X1"`, false, rpc.BlockNumber(1)},
{`"0x00"`, true, rpc.BlockNumber(0)},
{`"0x01"`, true, rpc.BlockNumber(0)},
{`"0x1"`, false, rpc.BlockNumber(1)},
{`"0x12"`, false, rpc.BlockNumber(18)},
{`"0x7fffffffffffffff"`, false, rpc.BlockNumber(math.MaxInt64)},
{`"0x8000000000000000"`, true, rpc.BlockNumber(0)},
{"0", true, rpc.BlockNumber(0)},
{`"ff"`, true, rpc.BlockNumber(0)},
{`"safe"`, false, rpc.SafeBlockNumber},
{`"finalized"`, false, rpc.FinalizedBlockNumber},
{`"pending"`, false, rpc.PendingBlockNumber},
{`"latest"`, false, rpc.LatestBlockNumber},
{`"earliest"`, false, rpc.EarliestBlockNumber},
{`someString`, true, rpc.BlockNumber(0)},
{`""`, true, rpc.BlockNumber(0)},
{``, true, rpc.BlockNumber(0)},
}
)
func TestUnmarshalJSON(t *testing.T) {
for i, test := range tests {
var num rpc.BlockNumber
err := json.Unmarshal([]byte(test.input), &num)
if test.mustFail && err == nil {
t.Errorf("Test %d should fail", i)
continue
}
if !test.mustFail && err != nil {
t.Errorf("Test %d should pass but got err: %v", i, err)
continue
}
if num != test.expected {
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
}
}
}
func TestMarshalJSON(t *testing.T) {
for i, test := range tests {
var num rpc.BlockNumber
want, err := json.Marshal(test.expected)
assert.Nil(t, err)
if !test.mustFail {
err = json.Unmarshal([]byte(test.input), &num)
assert.Nil(t, err)
got, err := json.Marshal(&num)
assert.Nil(t, err)
if string(want) != string(got) {
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
}
}
}
}
type MockEthClient struct {
val uint64
}
func (e MockEthClient) BlockNumber(ctx context.Context) (uint64, error) {
return e.val, nil
}
func (e MockEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
return &types.Header{Number: new(big.Int).SetUint64(e.val)}, nil
}
func TestGetLatestConfirmedBlockNumber(t *testing.T) {
ctx := context.Background()
client := MockEthClient{}
client.val = 5
confirmed, err := utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
assert.Nil(t, err)
assert.Equal(t, uint64(0), confirmed)
client.val = 7
confirmed, err = utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
assert.Nil(t, err)
assert.Equal(t, uint64(1), confirmed)
}

View File

@@ -1,21 +1,20 @@
package utils
import (
"bytes"
"fmt"
"math/big"
"github.com/iden3/go-iden3-crypto/keccak256"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/math"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
bridgeabi "scroll-tech/bridge/abi"
)
// Keccak2 computes the keccak256 hash of the concatenation of two bytes32 values
func Keccak2(a common.Hash, b common.Hash) common.Hash {
return common.BytesToHash(keccak256.Hash(append(a.Bytes()[:], b.Bytes()[:]...)))
}
func encodePacked(input ...[]byte) []byte {
return bytes.Join(input, nil)
return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
}
// ComputeMessageHash computes the message hash
@@ -23,21 +22,11 @@ func ComputeMessageHash(
sender common.Address,
target common.Address,
value *big.Int,
fee *big.Int,
deadline *big.Int,
message []byte,
messageNonce *big.Int,
message []byte,
) common.Hash {
packed := encodePacked(
sender.Bytes(),
target.Bytes(),
math.U256Bytes(value),
math.U256Bytes(fee),
math.U256Bytes(deadline),
math.U256Bytes(messageNonce),
message,
)
return common.BytesToHash(keccak256.Hash(packed))
data, _ := bridgeabi.L2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
return common.BytesToHash(crypto.Keccak256(data))
}
// BufferToUint256Be converts a byte array to a uint256 array, assuming big-endian
@@ -67,3 +56,43 @@ func BufferToUint256Le(buffer []byte) []*big.Int {
}
return buffer256
}
// UnpackLog unpacks a retrieved log into the provided output structure.
// @todo: add unit test.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
// @todo: add unit test.
func UnpackLogIntoMap(c *abi.ABI, out map[string]interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoMap(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:])
}
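Both helpers are still marked @todo for unit tests; the sketch below is a hedged usage example rather than that test. It decodes the new SentMessage event into a struct via UnpackLog, using an inline ABI fragment and struct defined here for illustration only (the bridge itself keeps the full messenger ABI in bridge/abi). Field names follow the event's argument names, which is what UnpackIntoInterface and ParseTopics match against.
package logsketch

import (
    "math/big"
    "strings"

    "github.com/scroll-tech/go-ethereum/accounts/abi"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"

    "scroll-tech/bridge/utils"
)

// Minimal ABI holding only the new SentMessage event, for illustration.
const sentMessageABI = `[{"type":"event","name":"SentMessage","inputs":[
    {"name":"sender","type":"address","indexed":true},
    {"name":"target","type":"address","indexed":true},
    {"name":"value","type":"uint256","indexed":false},
    {"name":"messageNonce","type":"uint256","indexed":false},
    {"name":"gasLimit","type":"uint256","indexed":false},
    {"name":"message","type":"bytes","indexed":false}]}]`

type sentMessageEvent struct {
    Sender       common.Address
    Target       common.Address
    Value        *big.Int
    MessageNonce *big.Int
    GasLimit     *big.Int
    Message      []byte
}

// decodeSentMessage unpacks a raw SentMessage log into the struct above: non-indexed
// fields come from log.Data, the two indexed addresses from log.Topics[1:].
func decodeSentMessage(log types.Log) (*sentMessageEvent, error) {
    parsed, err := abi.JSON(strings.NewReader(sentMessageABI))
    if err != nil {
        return nil, err
    }
    var ev sentMessageEvent
    if err := utils.UnpackLog(&parsed, &ev, "SentMessage", log); err != nil {
        return nil, err
    }
    return &ev, nil
}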

View File

@@ -29,13 +29,11 @@ func TestKeccak2(t *testing.T) {
func TestComputeMessageHash(t *testing.T) {
hash := utils.ComputeMessageHash(
common.HexToAddress("0xd7227113b92e537aeda220d5a2f201b836e5879d"),
common.HexToAddress("0x47c02b023b6787ef4e503df42bbb1a94f451a1c0"),
big.NewInt(5000000000000000),
common.HexToAddress("0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63"),
common.HexToAddress("0x4592D8f8D7B001e72Cb26A73e4Fa1806a51aC79d"),
big.NewInt(0),
big.NewInt(1674204924),
common.Hex2Bytes("8eaac8a30000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000000000000000000000000000000011c37937e0800000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000"),
big.NewInt(30706),
big.NewInt(1),
[]byte("testbridgecontract"),
)
assert.Equal(t, hash.String(), "0x920e59f62ca89a0f481d44961c55d299dd20c575693692d61fdf3ca579d8edf3")
assert.Equal(t, "0xda253c04595a49017bb54b1b46088c69752b5ad2f0c47971ac76b8b25abec202", hash.String())
}

View File

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-08-23 as chef
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-12-10 as chef
WORKDIR app
FROM chef as planner
@@ -13,10 +13,11 @@ RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .
RUN cargo build --release
RUN find ./ | grep libzktrie.so | xargs -i cp {} /app/target/release/
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-08-23 as base
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-12-10 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
@@ -33,11 +34,16 @@ FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.a ./coordinator/verifier/lib/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/verifier/lib/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv verifier/lib /bin/
# Pull coordinator into a second stage deploy container
FROM ubuntu:20.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/verifier/lib
# ENV CHAIN_ID=534353
RUN mkdir -p /src/coordinator/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/verifier/lib
COPY --from=builder /bin/coordinator /bin/
ENTRYPOINT ["/bin/coordinator"]

View File

@@ -1,6 +1,6 @@
GO_VERSION := 1.18
PYTHON_VERSION := 3.10
RUST_VERSION := nightly-2022-08-23
RUST_VERSION := nightly-2022-12-10
.PHONY: all go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder py-runner

View File

@@ -4,4 +4,4 @@ FROM golang:1.18-alpine
# RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates openssl-dev

View File

@@ -1,8 +1,8 @@
FROM golang:1.18-alpine
ARG CARGO_CHEF_TAG=0.1.41
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates openssl-dev
# RUN apk add --no-cache libc6-compat
# RUN apk add --no-cache gcompat

View File

@@ -14,7 +14,7 @@ ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo
# Add Toolchain
RUN rustup toolchain install nightly-2022-08-23
RUN rustup toolchain install nightly-2022-12-10
# TODO: make this ARG
ENV CARGO_CHEF_TAG=0.1.41

View File

@@ -1,10 +1,11 @@
ARG ALPINE_VERSION=3.15
FROM alpine:${ALPINE_VERSION}
ARG CARGO_CHEF_TAG=0.1.41
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10
RUN apk add --no-cache \
ca-certificates \
openssl-dev \
gcc \
git \
musl-dev

View File

@@ -13,4 +13,4 @@ RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
# Add Toolchain
RUN rustup toolchain install nightly-2022-08-23
RUN rustup toolchain install nightly-2022-12-10

View File

@@ -1,6 +1,6 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
TAGNAME = ''
pipeline {
agent any
options {
@@ -41,16 +41,38 @@ pipeline {
if (TAGNAME == ""){
return;
}
sh "docker login --username=${dockerUser} --password=${dockerPassword}"
sh "make -C bridge docker"
sh "make -C coordinator docker"
sh "docker tag scrolltech/bridge:latest scrolltech/bridge:${TAGNAME}"
sh "docker tag scrolltech/coordinator:latest scrolltech/coordinator:${TAGNAME}"
sh "docker push scrolltech/bridge:${TAGNAME}"
sh "docker push scrolltech/coordinator:${TAGNAME}"
sh "docker login --username=$dockerUser --password=$dockerPassword"
catchError(buildResult: 'SUCCESS', stageResult: 'SUCCESS') {
script {
try {
sh "docker manifest inspect scrolltech/bridge:$TAGNAME > /dev/null"
} catch (e) {
// only build if the tag does not exist yet
//sh "docker login --username=${dockerUser} --password=${dockerPassword}"
sh "make -C bridge docker"
sh "docker tag scrolltech/bridge:latest scrolltech/bridge:${TAGNAME}"
sh "docker push scrolltech/bridge:${TAGNAME}"
throw e
}
}
}
catchError(buildResult: 'SUCCESS', stageResult: 'SUCCESS') {
script {
try {
sh "docker manifest inspect scrolltech/coordinator:$TAGNAME > /dev/null"
} catch (e) {
// only build if the tag does not exist yet
//sh "docker login --username=${dockerUser} --password=${dockerPassword}"
sh "make -C coordinator docker"
sh "docker tag scrolltech/coordinator:latest scrolltech/coordinator:${TAGNAME}"
sh "docker push scrolltech/coordinator:${TAGNAME}"
throw e
}
}
}
}
}
}
}
}
}
}

View File

@@ -1,4 +1,4 @@
FROM scrolltech/l2geth:prealpha-v4.2
FROM scrolltech/l2geth:prealpha-v5.1
RUN mkdir -p /l2geth/keystore

View File

@@ -6,19 +6,20 @@ require (
github.com/docker/docker v20.10.21+incompatible
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.6
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.14
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.16
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
gotest.tools v2.2.0+incompatible
)
require (
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
@@ -27,14 +28,15 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/go-ethereum v1.10.26 // indirect
github.com/ethereum/go-ethereum v1.11.1 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
@@ -48,7 +50,6 @@ require (
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -64,26 +65,25 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.3.1 // indirect
github.com/scroll-tech/zktrie v0.5.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/net v0.5.0 // indirect
golang.org/x/net v0.6.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/text v0.6.0 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.3.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect

View File

@@ -78,8 +78,8 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -126,15 +126,15 @@ github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaB
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/ethereum/go-ethereum v1.11.1 h1:EMymmWFzpS7G9l9NvVN8G73cgdUIqDPNRf2YTSGBXlk=
github.com/ethereum/go-ethereum v1.11.1/go.mod h1:DuefStAgaxoaYGLR0FueVcVbehmn5n9QUcVrMCuOvuc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
@@ -148,8 +148,9 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
@@ -159,8 +160,9 @@ github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5Nq
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -199,8 +201,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -269,8 +271,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
@@ -283,12 +285,10 @@ github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM52
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -308,16 +308,17 @@ github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIG
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -372,7 +373,6 @@ github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQm
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -396,19 +396,16 @@ github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6 h1:2kXWJR+mOj09HBh5sUTb4L/OURPSXoQd1NC/10v7otM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6/go.mod h1:eW+eyNdMoO0MyuczCc9xWSnW8dPJ0kOy5xsxgOKYEaA=
github.com/scroll-tech/zktrie v0.5.0 h1:dABDR6lMZq6Hs+fWQSiHbX8s3AOX6hY+5nkhSYm5rmU=
github.com/scroll-tech/zktrie v0.5.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -425,8 +422,9 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -448,11 +446,12 @@ github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYa
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.10.2 h1:x3p8awjp/2arX+Nl/G2040AZpOCHS/eMJJ1/a+mye4Y=
github.com/urfave/cli/v2 v2.10.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
@@ -482,8 +481,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -541,8 +540,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -599,15 +598,15 @@ golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -619,13 +618,14 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -731,6 +731,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=

File diff suppressed because it is too large


@@ -7,9 +7,16 @@ edition = "2021"
[lib]
crate-type = ["staticlib"]
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "scroll-dev-0220" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "scroll-dev-0220" }
[patch."https://github.com/scroll-tech/zktrie.git"]
zktrie = { git = "https://github.com/lispc/zktrie", branch = "scroll-dev-0215" }
[dependencies]
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
types = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="goerli-0215" }
types = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="goerli-0215" }
log = "0.4"
env_logger = "0.9.0"


@@ -1 +1 @@
nightly-2022-08-23
nightly-2022-12-10


@@ -44,7 +44,7 @@ pub unsafe extern "C" fn create_agg_proof_multi(trace_char: *const c_char) -> *c
let proof = PROVER
.get_mut()
.unwrap()
.create_agg_circuit_proof_multi(traces.as_slice())
.create_agg_circuit_proof_batch(traces.as_slice())
.unwrap();
let proof_bytes = serde_json::to_vec(&proof).unwrap();
vec_to_c_char(proof_bytes)


@@ -214,8 +214,9 @@ func (z *ProofDetail) Hash() ([]byte, error) {
// AggProof includes the proof and public input that are required for verification and rollup.
type AggProof struct {
Proof []byte `json:"proof"`
Instance []byte `json:"instance"`
FinalPair []byte `json:"final_pair"`
Vk []byte `json:"vk"`
Proof []byte `json:"proof"`
Instance []byte `json:"instance"`
FinalPair []byte `json:"final_pair"`
Vk []byte `json:"vk"`
BlockCount uint `json:"block_count"`
}
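For quick reference, a minimal standalone sketch (not code from this change) of how the extended `AggProof`, with the new `block_count` field, round-trips through JSON; the field names are taken from the struct above:
```go
// Standalone sketch: the struct mirrors the updated AggProof above.
package main

import (
	"encoding/json"
	"fmt"
)

type AggProof struct {
	Proof      []byte `json:"proof"`
	Instance   []byte `json:"instance"`
	FinalPair  []byte `json:"final_pair"`
	Vk         []byte `json:"vk"`
	BlockCount uint   `json:"block_count"`
}

func main() {
	p := AggProof{Proof: []byte{0x01}, BlockCount: 4}
	raw, _ := json.Marshal(p)
	fmt.Println(string(raw)) // block_count now travels alongside proof/instance/final_pair/vk

	var q AggProof
	_ = json.Unmarshal(raw, &q)
	fmt.Println(q.BlockCount) // 4
}
```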

common/metrics/metrics.go (new file, 53 lines)

@@ -0,0 +1,53 @@
package metrics
import (
"context"
"net"
"net/http"
"strconv"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/metrics/prometheus"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
)
// Serve starts the metrics server on the given address; the server is shut down when the given
// context is canceled.
func Serve(ctx context.Context, c *cli.Context) {
if !c.Bool(utils.MetricsEnabled.Name) {
return
}
address := net.JoinHostPort(
c.String(utils.MetricsAddr.Name),
strconv.Itoa(c.Int(utils.MetricsPort.Name)),
)
server := &http.Server{
Addr: address,
Handler: prometheus.Handler(metrics.DefaultRegistry),
ReadTimeout: rpc.DefaultHTTPTimeouts.ReadTimeout,
WriteTimeout: rpc.DefaultHTTPTimeouts.WriteTimeout,
IdleTimeout: rpc.DefaultHTTPTimeouts.IdleTimeout,
}
go func() {
<-ctx.Done()
if err := server.Close(); err != nil {
log.Error("Failed to close metrics server", "error", err)
}
}()
log.Info("Starting metrics server", "address", address)
go func() {
if err := server.ListenAndServe(); err != nil {
log.Error("start metrics server error", "error", err)
}
}()
}
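A hypothetical wiring sketch (not part of this diff, and assuming it is compiled inside the `scroll-tech/common` module so the local import paths resolve) of how a service binary could register the new metrics flags and call `Serve` from its cli action:
```go
// Hypothetical wiring: register the metrics flags from common/utils and start
// the metrics server from the action. Serve is a no-op unless --metrics is set.
package main

import (
	"log"
	"os"

	"github.com/urfave/cli/v2"

	"scroll-tech/common/metrics"
	"scroll-tech/common/utils"
)

func main() {
	app := &cli.App{
		Name:  "example-service",
		Flags: []cli.Flag{&utils.MetricsEnabled, &utils.MetricsAddr, &utils.MetricsPort},
		Action: func(c *cli.Context) error {
			// The metrics server shuts down together with c.Context.
			metrics.Serve(c.Context, c)
			// ... start the actual service here ...
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```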


@@ -1,4 +1,5 @@
{
"withdrawTrieRoot": "0x0000000000000000000000000000000000000000",
"coinbase": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,


@@ -1,4 +1,5 @@
{
"withdrawTrieRoot": "0x0000000000000000000000000000000000000000",
"coinbase": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 3,

common/types/batch.go (new file, 237 lines)

@@ -0,0 +1,237 @@
package types
import (
"bufio"
"bytes"
"encoding/binary"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
abi "scroll-tech/bridge/abi"
)
// PublicInputHashConfig is the configuration of how to compute the public input hash.
type PublicInputHashConfig struct {
MaxTxNum int `json:"max_tx_num"`
PaddingTxHash common.Hash `json:"padding_tx_hash"`
}
const defaultMaxTxNum = 44
var defaultPaddingTxHash = [32]byte{}
// BatchData contains info of batch to be committed.
type BatchData struct {
Batch abi.IScrollChainBatch
TxHashes []common.Hash
TotalTxNum uint64
TotalL1TxNum uint64
TotalL2Gas uint64
// cache for the BatchHash
hash *common.Hash
// The config to compute the public input hash, or the block hash.
// If it is nil, the hash calculation will use `defaultMaxTxNum` and `defaultPaddingTxHash`.
piCfg *PublicInputHashConfig
}
// Timestamp returns the timestamp of the first block in the BatchData.
func (b *BatchData) Timestamp() uint64 {
if len(b.Batch.Blocks) == 0 {
return 0
}
return b.Batch.Blocks[0].Timestamp
}
// Hash calculates the hash of this batch.
func (b *BatchData) Hash() *common.Hash {
if b.hash != nil {
return b.hash
}
buf := make([]byte, 8)
hasher := crypto.NewKeccakState()
// 1. hash PrevStateRoot, NewStateRoot, WithdrawTrieRoot
// @todo: panic on error here.
_, _ = hasher.Write(b.Batch.PrevStateRoot[:])
_, _ = hasher.Write(b.Batch.NewStateRoot[:])
_, _ = hasher.Write(b.Batch.WithdrawTrieRoot[:])
// 2. hash all block contexts
for _, block := range b.Batch.Blocks {
// write BlockHash & ParentHash
_, _ = hasher.Write(block.BlockHash[:])
_, _ = hasher.Write(block.ParentHash[:])
// write BlockNumber
binary.BigEndian.PutUint64(buf, block.BlockNumber)
_, _ = hasher.Write(buf)
// write Timestamp
binary.BigEndian.PutUint64(buf, block.Timestamp)
_, _ = hasher.Write(buf)
// write BaseFee
var baseFee [32]byte
if block.BaseFee != nil {
baseFee = newByte32FromBytes(block.BaseFee.Bytes())
}
_, _ = hasher.Write(baseFee[:])
// write GasLimit
binary.BigEndian.PutUint64(buf, block.GasLimit)
_, _ = hasher.Write(buf)
// write NumTransactions
binary.BigEndian.PutUint16(buf[:2], block.NumTransactions)
_, _ = hasher.Write(buf[:2])
// write NumL1Messages
binary.BigEndian.PutUint16(buf[:2], block.NumL1Messages)
_, _ = hasher.Write(buf[:2])
}
// 3. add all tx hashes
for _, txHash := range b.TxHashes {
_, _ = hasher.Write(txHash[:])
}
// 4. append empty tx hash up to MaxTxNum
maxTxNum := defaultMaxTxNum
paddingTxHash := common.Hash(defaultPaddingTxHash)
if b.piCfg != nil {
maxTxNum = b.piCfg.MaxTxNum
paddingTxHash = b.piCfg.PaddingTxHash
}
for i := len(b.TxHashes); i < maxTxNum; i++ {
_, _ = hasher.Write(paddingTxHash[:])
}
b.hash = new(common.Hash)
_, _ = hasher.Read(b.hash[:])
return b.hash
}
// NewBatchData creates a BatchData given the parent batch information and the traces of the blocks
// included in this batch
func NewBatchData(parentBatch *BlockBatch, blockTraces []*types.BlockTrace, piCfg *PublicInputHashConfig) *BatchData {
batchData := new(BatchData)
batch := &batchData.Batch
// set BatchIndex, ParentBatchHash
batch.BatchIndex = parentBatch.Index + 1
batch.ParentBatchHash = common.HexToHash(parentBatch.Hash)
batch.Blocks = make([]abi.IScrollChainBlockContext, len(blockTraces))
var batchTxDataBuf bytes.Buffer
batchTxDataWriter := bufio.NewWriter(&batchTxDataBuf)
for i, trace := range blockTraces {
batchData.TotalTxNum += uint64(len(trace.Transactions))
batchData.TotalL2Gas += trace.Header.GasUsed
// set baseFee to 0 when it's nil in the block header
baseFee := trace.Header.BaseFee
if baseFee == nil {
baseFee = big.NewInt(0)
}
batch.Blocks[i] = abi.IScrollChainBlockContext{
BlockHash: trace.Header.Hash(),
ParentHash: trace.Header.ParentHash,
BlockNumber: trace.Header.Number.Uint64(),
Timestamp: trace.Header.Time,
BaseFee: baseFee,
GasLimit: trace.Header.GasLimit,
NumTransactions: uint16(len(trace.Transactions)),
NumL1Messages: 0, // TODO: currently use 0, will re-enable after we use l2geth to include L1 messages
}
// fill in RLP-encoded transactions
for _, txData := range trace.Transactions {
data, _ := hexutil.Decode(txData.Data)
// right now we only support legacy tx
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
var txLen [4]byte
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
_, _ = batchTxDataWriter.Write(txLen[:])
_, _ = batchTxDataWriter.Write(rlpTxData)
batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
}
// set PrevStateRoot from the first block
if i == 0 {
batch.PrevStateRoot = trace.StorageTrace.RootBefore
}
// set NewStateRoot & WithdrawTrieRoot from the last block
if i == len(blockTraces)-1 {
batch.NewStateRoot = trace.Header.Root
batch.WithdrawTrieRoot = trace.WithdrawTrieRoot
}
}
if err := batchTxDataWriter.Flush(); err != nil {
panic("Buffered I/O flush failed")
}
batch.L2Transactions = batchTxDataBuf.Bytes()
batchData.piCfg = piCfg
return batchData
}
// NewGenesisBatchData generates the batch that contains the genesis block.
func NewGenesisBatchData(genesisBlockTrace *types.BlockTrace) *BatchData {
header := genesisBlockTrace.Header
if header.Number.Uint64() != 0 {
panic("invalid genesis block trace: block number is not 0")
}
batchData := new(BatchData)
batch := &batchData.Batch
// fill in batch information
batch.BatchIndex = 0
batch.Blocks = make([]abi.IScrollChainBlockContext, 1)
batch.NewStateRoot = header.Root
// PrevStateRoot, WithdrawTrieRoot, ParentBatchHash should all be 0
// L2Transactions should be empty
// fill in block context
batch.Blocks[0] = abi.IScrollChainBlockContext{
BlockHash: header.Hash(),
ParentHash: header.ParentHash,
BlockNumber: header.Number.Uint64(),
Timestamp: header.Time,
BaseFee: header.BaseFee,
GasLimit: header.GasLimit,
NumTransactions: 0,
NumL1Messages: 0,
}
return batchData
}
// newByte32FromBytes right-aligns the given big-endian bytes into a 32-byte array, keeping only the lowest 32 bytes if the input is longer
func newByte32FromBytes(b []byte) [32]byte {
var byte32 [32]byte
if len(b) > 32 {
b = b[len(b)-32:]
}
copy(byte32[32-len(b):], b)
return byte32
}
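Because the right-alignment in `newByte32FromBytes` is easy to misread, here is a standalone copy of the function with a tiny driver (assumption: nothing beyond the code above) showing how a short base-fee encoding gets zero-padded on the left:
```go
// Standalone copy of newByte32FromBytes plus a small driver.
package main

import "fmt"

func newByte32FromBytes(b []byte) [32]byte {
	var byte32 [32]byte
	if len(b) > 32 {
		b = b[len(b)-32:] // keep only the least-significant 32 bytes
	}
	copy(byte32[32-len(b):], b) // right-align, zero-padding on the left
	return byte32
}

func main() {
	out := newByte32FromBytes([]byte{0x12, 0x34})
	fmt.Printf("%x\n", out) // 60 zeros followed by "1234": short inputs are left-padded
}
```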


@@ -0,0 +1,99 @@
package types
import (
"math/big"
"testing"
"gotest.tools/assert"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
abi "scroll-tech/bridge/abi"
)
func TestBatchHash(t *testing.T) {
txBytes := common.Hex2Bytes("02f8710582fd14808506e38dccc9825208944d496ccc28058b1d74b7a19541663e21154f9c848801561db11e24a43380c080a0d890606d7a35b2ab0f9b866d62c092d5b163f3e6a55537ae1485aac08c3f8ff7a023997be2d32f53e146b160fff0ba81e81dbb4491c865ab174d15c5b3d28c41ae")
tx := new(geth_types.Transaction)
if err := tx.UnmarshalBinary(txBytes); err != nil {
t.Fatalf("invalid tx hex string: %s", err)
}
batchData := new(BatchData)
batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
batchData.piCfg = &PublicInputHashConfig{
MaxTxNum: 4,
PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
}
batch := &batchData.Batch
batch.PrevStateRoot = common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000cafe")
block := abi.IScrollChainBlockContext{
BlockNumber: 51966,
Timestamp: 123456789,
BaseFee: new(big.Int).SetUint64(0),
GasLimit: 10000000000000000,
NumTransactions: 1,
NumL1Messages: 0,
}
batch.Blocks = append(batch.Blocks, block)
hash := batchData.Hash()
assert.Equal(t, *hash, common.HexToHash("0xa9f2ca3175794f91226a410ba1e60fff07a405c957562675c4149b77e659d805"))
// use a different tx hash
txBytes = common.Hex2Bytes("f8628001830f424094000000000000000000000000000000000000bbbb8080820a97a064e07cd8f939e2117724bdcbadc80dda421381cbc2a1f4e0d093d9cc5c5cf68ea03e264227f80852d88743cd9e43998f2746b619180366a87e4531debf9c3fa5dc")
tx = new(geth_types.Transaction)
if err := tx.UnmarshalBinary(txBytes); err != nil {
t.Fatalf("invalid tx hex string: %s", err)
}
batchData.TxHashes[0] = tx.Hash()
batchData.hash = nil // clear the cache
assert.Equal(t, *batchData.Hash(), common.HexToHash("0x398cb22bbfa1665c1b342b813267538a4c933d7f92d8bd9184aba0dd1122987b"))
}
func TestNewGenesisBatch(t *testing.T) {
genesisBlock := &geth_types.Header{
UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
Root: common.HexToHash("0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5"),
TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
Difficulty: big.NewInt(1),
Number: big.NewInt(0),
GasLimit: 940000000,
GasUsed: 0,
Time: 1639724192,
Extra: common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000214f8d488aa9ebf83e30bad45fb8f9c8ee2509f5511caff794753d07e9dfb218cfc233bb62d2c57022783094e1a7edb6f069f8424bb68496a0926b130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
BaseFee: big.NewInt(1000000000),
}
assert.Equal(
t,
genesisBlock.Hash().Hex(),
"0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
"wrong genesis block header",
)
blockTrace := &geth_types.BlockTrace{
Coinbase: nil,
Header: genesisBlock,
Transactions: []*geth_types.TransactionData{},
StorageTrace: nil,
ExecutionResults: []*geth_types.ExecutionResult{},
MPTWitness: nil,
}
batchData := NewGenesisBatchData(blockTrace)
t.Log(batchData.Batch.Blocks[0])
batchData.piCfg = &PublicInputHashConfig{
MaxTxNum: 25,
PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
}
assert.Equal(
t,
batchData.Hash().Hex(),
"0x65cf210e30f75cf8fd198df124255f73bc08d6324759e828a784fa938e7ac43d",
"wrong genesis batch hash",
)
}

common/types/db.go (new file, 250 lines)

@@ -0,0 +1,250 @@
// Package types defines the table schema data structure used in the database tables
package types
import (
"database/sql"
"fmt"
"time"
)
// L1BlockStatus represents current l1 block processing status
type L1BlockStatus int
// GasOracleStatus represents current gas oracle processing status
type GasOracleStatus int
const (
// L1BlockUndefined : undefined l1 block status
L1BlockUndefined L1BlockStatus = iota
// L1BlockPending represents the l1 block status is pending
L1BlockPending
// L1BlockImporting represents the l1 block status is importing
L1BlockImporting
// L1BlockImported represents the l1 block status is imported
L1BlockImported
// L1BlockFailed represents the l1 block status is failed
L1BlockFailed
)
const (
// GasOracleUndefined : undefined gas oracle status
GasOracleUndefined GasOracleStatus = iota
// GasOraclePending represents the gas oracle status is pending
GasOraclePending
// GasOracleImporting represents the gas oracle status is importing
GasOracleImporting
// GasOracleImported represents the gas oracle status is imported
GasOracleImported
// GasOracleFailed represents the gas oracle status is failed
GasOracleFailed
)
// L1BlockInfo is the structure of a stored L1 block
type L1BlockInfo struct {
Number uint64 `json:"number" db:"number"`
Hash string `json:"hash" db:"hash"`
HeaderRLP string `json:"header_rlp" db:"header_rlp"`
BaseFee uint64 `json:"base_fee" db:"base_fee"`
BlockStatus L1BlockStatus `json:"block_status" db:"block_status"`
GasOracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"`
ImportTxHash sql.NullString `json:"import_tx_hash" db:"import_tx_hash"`
OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"`
}
// MsgStatus represents current layer1 transaction processing status
type MsgStatus int
const (
// MsgUndefined : undefined msg status
MsgUndefined MsgStatus = iota
// MsgPending represents the from_layer message status is pending
MsgPending
// MsgSubmitted represents the from_layer message status is submitted
MsgSubmitted
// MsgConfirmed represents the from_layer message status is confirmed
MsgConfirmed
// MsgFailed represents the from_layer message status is failed
MsgFailed
// MsgExpired represents the from_layer message status is expired
MsgExpired
)
// L1Message is the structure of a stored layer1 bridge message
type L1Message struct {
QueueIndex uint64 `json:"queue_index" db:"queue_index"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Value string `json:"value" db:"value"`
Target string `json:"target" db:"target"`
Calldata string `json:"calldata" db:"calldata"`
GasLimit uint64 `json:"gas_limit" db:"gas_limit"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Status MsgStatus `json:"status" db:"status"`
}
// L2Message is the structure of a stored layer2 bridge message
type L2Message struct {
Nonce uint64 `json:"nonce" db:"nonce"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Value string `json:"value" db:"value"`
Target string `json:"target" db:"target"`
Calldata string `json:"calldata" db:"calldata"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Status MsgStatus `json:"status" db:"status"`
}
// BlockInfo is the structure of a stored `block_trace` entry without the `trace` field
type BlockInfo struct {
Number uint64 `json:"number" db:"number"`
Hash string `json:"hash" db:"hash"`
ParentHash string `json:"parent_hash" db:"parent_hash"`
BatchHash sql.NullString `json:"batch_hash" db:"batch_hash"`
TxNum uint64 `json:"tx_num" db:"tx_num"`
GasUsed uint64 `json:"gas_used" db:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" db:"block_timestamp"`
}
// RollerProveStatus is the roller prove status of a block batch (session)
type RollerProveStatus int32
const (
// RollerAssigned indicates roller assigned but has not submitted proof
RollerAssigned RollerProveStatus = iota
// RollerProofValid indicates roller has submitted valid proof
RollerProofValid
// RollerProofInvalid indicates roller has submitted invalid proof
RollerProofInvalid
)
func (s RollerProveStatus) String() string {
switch s {
case RollerAssigned:
return "RollerAssigned"
case RollerProofValid:
return "RollerProofValid"
case RollerProofInvalid:
return "RollerProofInvalid"
default:
return fmt.Sprintf("Bad Value: %d", int32(s))
}
}
// RollerStatus is the roller name and roller prove status
type RollerStatus struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
Status RollerProveStatus `json:"status"`
}
// SessionInfo is the info of the rollers assigned to a block batch (session)
type SessionInfo struct {
ID string `json:"id"`
Rollers map[string]*RollerStatus `json:"rollers"`
StartTimestamp int64 `json:"start_timestamp"`
}
// ProvingStatus is the block_batch proving_status (undefined, unassigned, skipped, assigned, proved, verified, failed)
type ProvingStatus int
const (
// ProvingStatusUndefined : undefined proving_task status
ProvingStatusUndefined ProvingStatus = iota
// ProvingTaskUnassigned : proving_task is not assigned to be proved
ProvingTaskUnassigned
// ProvingTaskSkipped : proving_task is skipped for proof generation
ProvingTaskSkipped
// ProvingTaskAssigned : proving_task is assigned to be proved
ProvingTaskAssigned
// ProvingTaskProved : proof has been returned by prover
ProvingTaskProved
// ProvingTaskVerified : proof is valid
ProvingTaskVerified
// ProvingTaskFailed : fail to generate proof
ProvingTaskFailed
)
func (ps ProvingStatus) String() string {
switch ps {
case ProvingTaskUnassigned:
return "unassigned"
case ProvingTaskSkipped:
return "skipped"
case ProvingTaskAssigned:
return "assigned"
case ProvingTaskProved:
return "proved"
case ProvingTaskVerified:
return "verified"
case ProvingTaskFailed:
return "failed"
default:
return "undefined"
}
}
// RollupStatus is the block_batch rollup_status (undefined, pending, committing, committed, finalizing, finalized, finalization_skipped)
type RollupStatus int
const (
// RollupUndefined : undefined rollup status
RollupUndefined RollupStatus = iota
// RollupPending : batch is pending to rollup to layer1
RollupPending
// RollupCommitting : rollup transaction is submitted to layer1
RollupCommitting
// RollupCommitted : rollup transaction is confirmed to layer1
RollupCommitted
// RollupFinalizing : finalize transaction is submitted to layer1
RollupFinalizing
// RollupFinalized : finalize transaction is confirmed to layer1
RollupFinalized
// RollupFinalizationSkipped : batch finalization is skipped
RollupFinalizationSkipped
)
// BlockBatch is the structure of a stored block_batch
type BlockBatch struct {
Hash string `json:"hash" db:"hash"`
Index uint64 `json:"index" db:"index"`
ParentHash string `json:"parent_hash" db:"parent_hash"`
StartBlockNumber uint64 `json:"start_block_number" db:"start_block_number"`
StartBlockHash string `json:"start_block_hash" db:"start_block_hash"`
EndBlockNumber uint64 `json:"end_block_number" db:"end_block_number"`
EndBlockHash string `json:"end_block_hash" db:"end_block_hash"`
StateRoot string `json:"state_root" db:"state_root"`
TotalTxNum uint64 `json:"total_tx_num" db:"total_tx_num"`
TotalL1TxNum uint64 `json:"total_l1_tx_num" db:"total_l1_tx_num"`
TotalL2Gas uint64 `json:"total_l2_gas" db:"total_l2_gas"`
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
Proof []byte `json:"proof" db:"proof"`
InstanceCommitments []byte `json:"instance_commitments" db:"instance_commitments"`
ProofTimeSec uint64 `json:"proof_time_sec" db:"proof_time_sec"`
RollupStatus RollupStatus `json:"rollup_status" db:"rollup_status"`
OracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"`
CommitTxHash sql.NullString `json:"commit_tx_hash" db:"commit_tx_hash"`
FinalizeTxHash sql.NullString `json:"finalize_tx_hash" db:"finalize_tx_hash"`
OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" db:"prover_assigned_at"`
ProvedAt *time.Time `json:"proved_at" db:"proved_at"`
CommittedAt *time.Time `json:"committed_at" db:"committed_at"`
FinalizedAt *time.Time `json:"finalized_at" db:"finalized_at"`
}
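The `db:"..."` tags suggest the rows are loaded through a struct-scanning layer. A hypothetical read sketch against the `block_batch` table (both `jmoiron/sqlx` and the Postgres DSN are assumptions, since the actual database layer is not part of this diff):
```go
// Hypothetical sketch: scan a few block_batch columns into a struct whose
// db tags mirror the BlockBatch declaration above.
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

type blockBatchRow struct {
	Hash          string `db:"hash"`
	Index         uint64 `db:"index"`
	ProvingStatus int    `db:"proving_status"`
	RollupStatus  int    `db:"rollup_status"`
}

func main() {
	// Assumed DSN; adjust to the actual deployment.
	db, err := sqlx.Connect("postgres", "postgres://user:pass@localhost/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	var rows []blockBatchRow
	if err := db.Select(&rows, `SELECT hash, "index", proving_status, rollup_status FROM block_batch`); err != nil {
		log.Fatal(err)
	}
	for _, r := range rows {
		log.Printf("batch %s (index %d): proving=%d rollup=%d", r.Hash, r.Index, r.ProvingStatus, r.RollupStatus)
	}
}
```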


@@ -1,19 +0,0 @@
package utils
import (
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
)
// ComputeBatchID compute a unique hash for a batch using "endBlockHash" & "endBlockHash in last batch"
// & "batch height", following the logic in `_computeBatchId` in contracts/src/L1/rollup/ZKRollup.sol
func ComputeBatchID(endBlockHash common.Hash, lastEndBlockHash common.Hash, index *big.Int) string {
indexBytes := make([]byte, 32)
return crypto.Keccak256Hash(
endBlockHash.Bytes(),
lastEndBlockHash.Bytes(),
index.FillBytes(indexBytes),
).String()
}


@@ -1,40 +0,0 @@
package utils_test
import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/math"
"github.com/stretchr/testify/assert"
"scroll-tech/common/utils"
)
func TestComputeBatchID(t *testing.T) {
// expected generated using contract:
// ```
// // SPDX-License-Identifier: MIT
// pragma solidity ^0.6.6;
// contract AAA {
// uint256 private constant MAX = ~uint256(0);
// function _computeBatchId() public pure returns (bytes32) {
// return keccak256(abi.encode(bytes32(0), bytes32(0), MAX));
// }
// }
// ```
expected := "0xafe1e714d2cd3ed5b0fa0a04ee95cd564b955ab8661c5665588758b48b66e263"
actual := utils.ComputeBatchID(common.Hash{}, common.Hash{}, math.MaxBig256)
assert.Equal(t, expected, actual)
expected = "0xe05698242b035c0e4d1d58e8ab89507ac7a1403b17fd6a7ea87621a32674ec88"
actual = utils.ComputeBatchID(
common.HexToHash("0xfaef7761204f43c4ab2528a65fcc7ec2108709e5ebb646bdce9ce3c8862d3f25"),
common.HexToHash("0xe3abef08cce4b8a0dcc6b7e4dd11f32863007a86f46c1d136682b5d77bdf0f7a"),
big.NewInt(77233900))
assert.Equal(t, expected, actual)
}


@@ -12,6 +12,9 @@ var (
&LogFileFlag,
&LogJSONFormat,
&LogDebugFlag,
&MetricsEnabled,
&MetricsAddr,
&MetricsPort,
}
// ConfigFileFlag loads the JSON config file.
ConfigFileFlag = cli.StringFlag{
@@ -42,4 +45,25 @@ var (
Name: "log.debug",
Usage: "Prepends log messages with call-site location (file and line number)",
}
// MetricsEnabled enables metrics collection and reporting
MetricsEnabled = cli.BoolFlag{
Name: "metrics",
Usage: "Enable metrics collection and reporting",
Category: "METRICS",
Value: false,
}
// MetricsAddr is the listening address of the metrics reporting server
MetricsAddr = cli.StringFlag{
Name: "metrics.addr",
Usage: "Metrics reporting server listening address",
Category: "METRICS",
Value: "0.0.0.0",
}
// MetricsPort is the listening port of the metrics reporting server
MetricsPort = cli.IntFlag{
Name: "metrics.port",
Usage: "Metrics reporting server listening port",
Category: "METRICS",
Value: 6060,
}
)


@@ -38,10 +38,14 @@ func StartHTTPEndpoint(endpoint string, apis []rpc.API) (*http.Server, net.Addr,
}
// StartWSEndpoint starts the WS RPC endpoint.
func StartWSEndpoint(endpoint string, apis []rpc.API) (*http.Server, net.Addr, error) {
func StartWSEndpoint(endpoint string, apis []rpc.API, compressionLevel int) (*http.Server, net.Addr, error) {
handler, addr, err := StartHTTPEndpoint(endpoint, apis)
if err == nil {
srv := (handler.Handler).(*rpc.Server)
err = srv.SetCompressionLevel(compressionLevel)
if err != nil {
log.Error("failed to set ws compression level", "compression level", compressionLevel, "err", err)
}
handler.Handler = srv.WebsocketHandler(nil)
}
return handler, addr, err
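The new `compressionLevel` argument takes the standard `compress/flate` levels (the test below passes `flate.NoCompression`); for reference:
```go
// The standard compress/flate levels accepted by the compressionLevel parameter.
package main

import (
	"compress/flate"
	"fmt"
)

func main() {
	fmt.Println(flate.NoCompression)      // 0: compression disabled
	fmt.Println(flate.BestSpeed)          // 1
	fmt.Println(flate.BestCompression)    // 9
	fmt.Println(flate.DefaultCompression) // -1: library default
}
```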


@@ -1,6 +1,7 @@
package utils
import (
"compress/flate"
"context"
"testing"
@@ -59,7 +60,7 @@ func TestStartWSEndpoint(t *testing.T) {
Namespace: "test",
Service: new(testService),
},
})
}, flate.NoCompression)
assert.NoError(t, err)
defer handler.Shutdown(context.Background())


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "prealpha-v11.14"
var tag = "alpha-v1.13"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {


@@ -13,7 +13,7 @@ The `L1ERC1155Gateway` is used to deposit ERC1155 compatible NFT in layer 1 and
### batchDepositERC1155
```solidity
function batchDepositERC1155(address _token, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external nonpayable
function batchDepositERC1155(address _token, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external payable
```
Deposit a list of some ERC1155 NFT to caller's account on layer 2.
@@ -32,7 +32,7 @@ Deposit a list of some ERC1155 NFT to caller's account on layer 2.
### batchDepositERC1155
```solidity
function batchDepositERC1155(address _token, address _to, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external nonpayable
function batchDepositERC1155(address _token, address _to, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external payable
```
Deposit a list of some ERC1155 NFT to a recipient's account on layer 2.
@@ -69,7 +69,7 @@ The address of corresponding L1/L2 Gateway contract.
### depositERC1155
```solidity
function depositERC1155(address _token, address _to, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external nonpayable
function depositERC1155(address _token, address _to, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external payable
```
Deposit some ERC1155 NFT to a recipient's account on layer 2.
@@ -89,7 +89,7 @@ Deposit some ERC1155 NFT to a recipient's account on layer 2.
### depositERC1155
```solidity
function depositERC1155(address _token, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external nonpayable
function depositERC1155(address _token, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external payable
```
Deposit some ERC1155 NFT to caller's account on layer 2.
@@ -126,17 +126,6 @@ Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipie
| _tokenIds | uint256[] | The list of token ids to withdraw. |
| _amounts | uint256[] | The list of corresponding number of token to withdraw. |
### finalizeDropMessage
```solidity
function finalizeDropMessage() external payable
```
### finalizeWithdrawERC1155
```solidity
@@ -164,7 +153,7 @@ Complete ERC1155 withdraw from layer 2 to layer 1 and send fund to recipient&#39
function initialize(address _counterpart, address _messenger) external nonpayable
```
Initialize the storage of L1ERC1155Gateway.
@@ -172,8 +161,8 @@ function initialize(address _counterpart, address _messenger) external nonpayabl
| Name | Type | Description |
|---|---|---|
| _counterpart | address | undefined |
| _messenger | address | undefined |
| _counterpart | address | The address of L2ERC1155Gateway in L2. |
| _messenger | address | The address of L1ScrollMessenger. |
### messenger
@@ -181,7 +170,7 @@ function initialize(address _counterpart, address _messenger) external nonpayabl
function messenger() external view returns (address)
```
The address of L1ScrollMessenger/L2ScrollMessenger contract.
The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.


@@ -13,7 +13,7 @@ The `L1ERC721Gateway` is used to deposit ERC721 compatible NFT in layer 1 and fi
### batchDepositERC721
```solidity
function batchDepositERC721(address _token, address _to, uint256[] _tokenIds, uint256 _gasLimit) external nonpayable
function batchDepositERC721(address _token, address _to, uint256[] _tokenIds, uint256 _gasLimit) external payable
```
Deposit a list of some ERC721 NFT to a recipient's account on layer 2.
@@ -32,7 +32,7 @@ Deposit a list of some ERC721 NFT to a recipient's account on layer 2.
### batchDepositERC721
```solidity
function batchDepositERC721(address _token, uint256[] _tokenIds, uint256 _gasLimit) external nonpayable
function batchDepositERC721(address _token, uint256[] _tokenIds, uint256 _gasLimit) external payable
```
Deposit a list of some ERC721 NFT to caller's account on layer 2.
@@ -67,7 +67,7 @@ The address of corresponding L1/L2 Gateway contract.
### depositERC721
```solidity
function depositERC721(address _token, address _to, uint256 _tokenId, uint256 _gasLimit) external nonpayable
function depositERC721(address _token, address _to, uint256 _tokenId, uint256 _gasLimit) external payable
```
Deposit some ERC721 NFT to a recipient's account on layer 2.
@@ -86,7 +86,7 @@ Deposit some ERC721 NFT to a recipient's account on layer 2.
### depositERC721
```solidity
function depositERC721(address _token, uint256 _tokenId, uint256 _gasLimit) external nonpayable
function depositERC721(address _token, uint256 _tokenId, uint256 _gasLimit) external payable
```
Deposit some ERC721 NFT to caller's account on layer 2.
@@ -121,17 +121,6 @@ Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient
| _to | address | The address of recipient in layer 1 to receive the token. |
| _tokenIds | uint256[] | The list of token ids to withdraw. |
### finalizeDropMessage
```solidity
function finalizeDropMessage() external payable
```
### finalizeWithdrawERC721
```solidity
@@ -158,7 +147,7 @@ Complete ERC721 withdraw from layer 2 to layer 1 and send NFT to recipient's
function initialize(address _counterpart, address _messenger) external nonpayable
```
Initialize the storage of L1ERC721Gateway.
@@ -166,8 +155,8 @@ function initialize(address _counterpart, address _messenger) external nonpayabl
| Name | Type | Description |
|---|---|---|
| _counterpart | address | undefined |
| _messenger | address | undefined |
| _counterpart | address | The address of L2ERC721Gateway in L2. |
| _messenger | address | The address of L1ScrollMessenger. |
### messenger
@@ -175,7 +164,7 @@ function initialize(address _counterpart, address _messenger) external nonpayabl
function messenger() external view returns (address)
```
The address of L1ScrollMessenger/L2ScrollMessenger contract.
The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.


@@ -26,23 +26,6 @@ Mapping from ERC20 token address to corresponding L1ERC20Gateway.
|---|---|---|
| _0 | address | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### counterpart
```solidity
function counterpart() external view returns (address)
```
The address of corresponding L1/L2 Gateway contract.
#### Returns
| Name | Type | Description |
@@ -126,10 +109,10 @@ Deposit some token to a recipient's account on L2 and call.
### depositETH
```solidity
function depositETH(address _to, uint256 _gasLimit) external payable
function depositETH(uint256 _amount, uint256 _gasLimit) external payable
```
Deposit ETH to recipient's account in L2.
Deposit ETH to caller's account in L2.
@@ -137,16 +120,16 @@ Deposit ETH to recipient's account in L2.
| Name | Type | Description |
|---|---|---|
| _to | address | The address of recipient's account on L2. |
| _gasLimit | uint256 | Gas limit required to complete the deposit on L2. |
| _amount | uint256 | undefined |
| _gasLimit | uint256 | undefined |
### depositETH
```solidity
function depositETH(uint256 _gasLimit) external payable
function depositETH(address _to, uint256 _amount, uint256 _gasLimit) external payable
```
Deposit ETH to caller's account in L2.
Deposit ETH to some recipient's account in L2.
@@ -154,18 +137,45 @@ Deposit ETH to caller's account in L2.
| Name | Type | Description |
|---|---|---|
| _gasLimit | uint256 | Gas limit required to complete the deposit on L2. |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _gasLimit | uint256 | undefined |
### finalizeDropMessage
### depositETHAndCall
```solidity
function finalizeDropMessage() external payable
function depositETHAndCall(address _to, uint256 _amount, bytes _data, uint256 _gasLimit) external payable
```
Deposit ETH to some recipient's account in L2 and call the target contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| _gasLimit | uint256 | undefined |
### ethGateway
```solidity
function ethGateway() external view returns (address)
```
The address of L1ETHGateway.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### finalizeWithdrawERC20
@@ -191,21 +201,21 @@ Complete ERC20 withdraw from L2 to L1 and send fund to recipient's account i
### finalizeWithdrawETH
```solidity
function finalizeWithdrawETH(address _from, address _to, uint256 _amount, bytes _data) external payable
function finalizeWithdrawETH(address, address, uint256, bytes) external payable
```
Complete ETH withdraw from L2 to L1 and send fund to recipient's account in L1.
*This function should only be called by L1ScrollMessenger. This function should also only be called by L2GatewayRouter in L2.*
*This function should only be called by L1ScrollMessenger. This function should also only be called by L1ETHGateway in L2.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _from | address | The address of account who withdraw ETH in L2. |
| _to | address | The address of recipient in L1 to receive ETH. |
| _amount | uint256 | The amount of ETH to withdraw. |
| _data | bytes | Optional data to forward to recipient's account. |
| _0 | address | undefined |
| _1 | address | undefined |
| _2 | uint256 | undefined |
| _3 | bytes | undefined |
### getERC20Gateway
@@ -254,10 +264,10 @@ Return the corresponding l2 token address given l1 token address.
### initialize
```solidity
function initialize(address _defaultERC20Gateway, address _counterpart, address _messenger) external nonpayable
function initialize(address _ethGateway, address _defaultERC20Gateway) external nonpayable
```
Initialize the storage of L1GatewayRouter.
@@ -265,26 +275,8 @@ function initialize(address _defaultERC20Gateway, address _counterpart, address
| Name | Type | Description |
|---|---|---|
| _defaultERC20Gateway | address | undefined |
| _counterpart | address | undefined |
| _messenger | address | undefined |
### messenger
```solidity
function messenger() external view returns (address)
```
The address of L1ScrollMessenger/L2ScrollMessenger contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
| _ethGateway | address | The address of L1ETHGateway contract. |
| _defaultERC20Gateway | address | The address of default ERC20 Gateway contract. |
### owner
@@ -314,23 +306,6 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
### router
```solidity
function router() external view returns (address)
```
The address of L1GatewayRouter/L2GatewayRouter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### setDefaultERC20Gateway
```solidity
@@ -364,6 +339,22 @@ Update the mapping from token address to gateway address.
| _tokens | address[] | The list of addresses of tokens to update. |
| _gateways | address[] | The list of addresses of gateways to update. |
### setETHGateway
```solidity
function setETHGateway(address _ethGateway) external nonpayable
```
Update the address of ETH gateway contract.
*This function should only be called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _ethGateway | address | The address to update. |
### transferOwnership
```solidity
@@ -387,10 +378,10 @@ function transferOwnership(address newOwner) external nonpayable
### DepositERC20
```solidity
event DepositERC20(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _amount, bytes _data)
event DepositERC20(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 amount, bytes data)
```
Emitted when someone deposits an ERC20 token from L1 to L2.
@@ -398,20 +389,20 @@ event DepositERC20(address indexed _l1Token, address indexed _l2Token, address i
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |
### DepositETH
```solidity
event DepositETH(address indexed _from, address indexed _to, uint256 _amount, bytes _data)
event DepositETH(address indexed from, address indexed to, uint256 amount, bytes data)
```
Emitted when someone deposits ETH from L1 to L2.
@@ -419,18 +410,18 @@ event DepositETH(address indexed _from, address indexed _to, uint256 _amount, by
| Name | Type | Description |
|---|---|---|
| _from `indexed` | address | undefined |
| _to `indexed` | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| from `indexed` | address | undefined |
| to `indexed` | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |
### FinalizeWithdrawERC20
```solidity
event FinalizeWithdrawERC20(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _amount, bytes _data)
event FinalizeWithdrawERC20(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 amount, bytes data)
```
Emitted when an ERC20 token is withdrawn from L2 to L1 and transferred to the recipient.
@@ -438,20 +429,20 @@ event FinalizeWithdrawERC20(address indexed _l1Token, address indexed _l2Token,
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |
### FinalizeWithdrawETH
```solidity
event FinalizeWithdrawETH(address indexed _from, address indexed _to, uint256 _amount, bytes _data)
event FinalizeWithdrawETH(address indexed from, address indexed to, uint256 amount, bytes data)
```
Emitted when ETH is withdrawn from L2 to L1 and transferred to the recipient.
@@ -459,10 +450,10 @@ event FinalizeWithdrawETH(address indexed _from, address indexed _to, uint256 _a
| Name | Type | Description |
|---|---|---|
| _from `indexed` | address | undefined |
| _to `indexed` | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| from `indexed` | address | undefined |
| to `indexed` | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |
### OwnershipTransferred
@@ -484,10 +475,10 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
### SetDefaultERC20Gateway
```solidity
event SetDefaultERC20Gateway(address indexed _defaultERC20Gateway)
event SetDefaultERC20Gateway(address indexed defaultERC20Gateway)
```
Emitted when the address of default ERC20 Gateway is updated.
@@ -495,15 +486,15 @@ event SetDefaultERC20Gateway(address indexed _defaultERC20Gateway)
| Name | Type | Description |
|---|---|---|
| _defaultERC20Gateway `indexed` | address | undefined |
| defaultERC20Gateway `indexed` | address | undefined |
### SetERC20Gateway
```solidity
event SetERC20Gateway(address indexed _token, address indexed _gateway)
event SetERC20Gateway(address indexed token, address indexed gateway)
```
Emitted when the `gateway` for `token` is updated.
@@ -511,8 +502,24 @@ event SetERC20Gateway(address indexed _token, address indexed _gateway)
| Name | Type | Description |
|---|---|---|
| _token `indexed` | address | undefined |
| _gateway `indexed` | address | undefined |
| token `indexed` | address | undefined |
| gateway `indexed` | address | undefined |
### SetETHGateway
```solidity
event SetETHGateway(address indexed ethGateway)
```
Emitted when the address of ETH Gateway is updated.
#### Parameters
| Name | Type | Description |
|---|---|---|
| ethGateway `indexed` | address | undefined |

View File

@@ -10,13 +10,13 @@ The `L1ScrollMessenger` contract can: 1. send messages from layer 1 to layer 2;
## Methods
### dropDelayDuration
### counterpart
```solidity
function dropDelayDuration() external view returns (uint256)
function counterpart() external view returns (address)
```
The amount of seconds needed to wait if we want to drop message.
The address of counterpart ScrollMessenger contract in L1/L2.
@@ -25,38 +25,15 @@ The amount of seconds needed to wait if we want to drop message.
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
| _0 | address | undefined |
### dropMessage
### feeVault
```solidity
function dropMessage(address _from, address _to, uint256 _value, uint256 _fee, uint256 _deadline, uint256 _nonce, bytes _message, uint256 _gasLimit) external nonpayable
function feeVault() external view returns (address)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _from | address | undefined |
| _to | address | undefined |
| _value | uint256 | undefined |
| _fee | uint256 | undefined |
| _deadline | uint256 | undefined |
| _nonce | uint256 | undefined |
| _message | bytes | undefined |
| _gasLimit | uint256 | undefined |
### gasOracle
```solidity
function gasOracle() external view returns (address)
```
The gas oracle used to estimate transaction fee on layer 2.
The address of fee vault, collecting cross domain messaging fee.
@@ -70,10 +47,10 @@ The gas oracle used to estimate transaction fee on layer 2.
### initialize
```solidity
function initialize(address _rollup) external nonpayable
function initialize(address _counterpart, address _feeVault, address _rollup, address _messageQueue) external nonpayable
```
Initialize the storage of L1ScrollMessenger.
@@ -81,56 +58,15 @@ function initialize(address _rollup) external nonpayable
| Name | Type | Description |
|---|---|---|
| _rollup | address | undefined |
| _counterpart | address | The address of L2ScrollMessenger contract in L2. |
| _feeVault | address | The address of fee vault, which will be used to collect relayer fee. |
| _rollup | address | The address of ScrollChain contract. |
| _messageQueue | address | The address of L1MessageQueue contract. |
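As a hedged sketch, the four documented initializer arguments could be encoded into proxy init calldata as follows; the helper name, variable names, and the assumption of a proxy-based deployment are illustrative only:
```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Hypothetical deployment helper: encodes the documented initialize(...) call
// so it can be passed as init data to a proxy. Variable names and the proxy
// pattern are assumptions for illustration only.
contract L1MessengerInitDataSketch {
    function encodeInit(
        address _l2Messenger,     // _counterpart: L2ScrollMessenger on L2
        address _feeVault,        // _feeVault: collects relayer fees
        address _scrollChain,     // _rollup: ScrollChain contract
        address _l1MessageQueue   // _messageQueue: L1MessageQueue contract
    ) external pure returns (bytes memory) {
        return abi.encodeWithSignature(
            "initialize(address,address,address,address)",
            _l2Messenger,
            _feeVault,
            _scrollChain,
            _l1MessageQueue
        );
    }
}
```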
### isMessageDropped
### isL1MessageRelayed
```solidity
function isMessageDropped(bytes32) external view returns (bool)
```
Mapping from message hash to drop status.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### isMessageExecuted
```solidity
function isMessageExecuted(bytes32) external view returns (bool)
```
Mapping from message hash to execution status.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### isMessageRelayed
```solidity
function isMessageRelayed(bytes32) external view returns (bool)
function isL1MessageRelayed(bytes32) external view returns (bool)
```
Mapping from relay id to relay status.
@@ -149,6 +85,67 @@ Mapping from relay id to relay status.
|---|---|---|
| _0 | bool | undefined |
### isL1MessageSent
```solidity
function isL1MessageSent(bytes32) external view returns (bool)
```
Mapping from L1 message hash to sent status.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### isL2MessageExecuted
```solidity
function isL2MessageExecuted(bytes32) external view returns (bool)
```
Mapping from L2 message hash to a boolean value indicating if the message has been successfully executed.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
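A minimal sketch of reading these status getters from another contract; the `IL1MessengerStatus` interface and the guard contract are hypothetical names layered over the documented view functions:
```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Minimal interface for the status getters documented above.
interface IL1MessengerStatus {
    function isL1MessageSent(bytes32) external view returns (bool);
    function isL2MessageExecuted(bytes32) external view returns (bool);
}

// Hypothetical guard contract: checks a message's recorded status before a
// caller attempts any further action. Nothing here is prescribed by the docs.
contract MessageStatusGuard {
    IL1MessengerStatus public immutable messenger;

    constructor(address _messenger) {
        messenger = IL1MessengerStatus(_messenger);
    }

    function wasSent(bytes32 _l1MsgHash) external view returns (bool) {
        return messenger.isL1MessageSent(_l1MsgHash);
    }

    function alreadyExecuted(bytes32 _l2MsgHash) external view returns (bool) {
        return messenger.isL2MessageExecuted(_l2MsgHash);
    }
}
```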
### messageQueue
```solidity
function messageQueue() external view returns (address)
```
The address of L1MessageQueue contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### owner
```solidity
@@ -197,7 +194,7 @@ function paused() external view returns (bool)
### relayMessageWithProof
```solidity
function relayMessageWithProof(address _from, address _to, uint256 _value, uint256 _fee, uint256 _deadline, uint256 _nonce, bytes _message, IL1ScrollMessenger.L2MessageProof _proof) external nonpayable
function relayMessageWithProof(address _from, address _to, uint256 _value, uint256 _nonce, bytes _message, IL1ScrollMessenger.L2MessageProof _proof) external nonpayable
```
@@ -211,8 +208,6 @@ function relayMessageWithProof(address _from, address _to, uint256 _value, uint2
| _from | address | undefined |
| _to | address | undefined |
| _value | uint256 | undefined |
| _fee | uint256 | undefined |
| _deadline | uint256 | undefined |
| _nonce | uint256 | undefined |
| _message | bytes | undefined |
| _proof | IL1ScrollMessenger.L2MessageProof | undefined |
@@ -231,7 +226,7 @@ function renounceOwnership() external nonpayable
### replayMessage
```solidity
function replayMessage(address _from, address _to, uint256 _value, uint256 _fee, uint256 _deadline, bytes _message, uint256 _queueIndex, uint32 _oldGasLimit, uint32 _newGasLimit) external nonpayable
function replayMessage(address _from, address _to, uint256 _value, uint256 _queueIndex, bytes _message, uint32 _oldGasLimit, uint32 _newGasLimit) external nonpayable
```
Replay an existing message.
@@ -242,15 +237,13 @@ Replay an exsisting message.
| Name | Type | Description |
|---|---|---|
| _from | address | The address of the sender of the message. |
| _to | address | The address of the recipient of the message. |
| _value | uint256 | The msg.value passed to the message call. |
| _fee | uint256 | The amount of fee in ETH to charge. |
| _deadline | uint256 | The deadline of the message. |
| _message | bytes | The content of the message. |
| _queueIndex | uint256 | CTC Queue index for the message to replay. |
| _oldGasLimit | uint32 | Original gas limit used to send the message. |
| _newGasLimit | uint32 | New gas limit to be used for this message. |
| _from | address | undefined |
| _to | address | undefined |
| _value | uint256 | undefined |
| _queueIndex | uint256 | undefined |
| _message | bytes | undefined |
| _oldGasLimit | uint32 | undefined |
| _newGasLimit | uint32 | undefined |
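A hedged sketch of re-submitting a stuck message with a larger gas limit via the documented `replayMessage` signature; the wrapper contract, its names, and the gas-doubling policy are assumptions for illustration:
```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Minimal interface mirroring the replayMessage signature documented above.
interface IL1MessengerReplay {
    function replayMessage(
        address _from,
        address _to,
        uint256 _value,
        uint256 _queueIndex,
        bytes calldata _message,
        uint32 _oldGasLimit,
        uint32 _newGasLimit
    ) external;
}

// Hypothetical wrapper that re-submits a stuck message with a larger gas limit.
contract MessageReplaySketch {
    IL1MessengerReplay public immutable messenger;

    constructor(address _messenger) {
        messenger = IL1MessengerReplay(_messenger);
    }

    function bumpGasLimit(
        address _from,
        address _to,
        uint256 _value,
        uint256 _queueIndex,
        bytes calldata _message,
        uint32 _oldGasLimit
    ) external {
        // Doubling the gas limit is an arbitrary illustrative policy.
        messenger.replayMessage(_from, _to, _value, _queueIndex, _message, _oldGasLimit, _oldGasLimit * 2);
    }
}
```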
### rollup
@@ -272,21 +265,21 @@ The address of Rollup contract.
### sendMessage
```solidity
function sendMessage(address _to, uint256 _fee, bytes _message, uint256 _gasLimit) external payable
function sendMessage(address _to, uint256 _value, bytes _message, uint256 _gasLimit) external payable
```
Send cross chain message (L1 => L2 or L2 => L1)
Send cross chain message from L1 to L2 or L2 to L1.
*Currently, only privileged accounts can call this function for safety. Adding an extra `_fee` variable also makes it easier to upgrade to a decentralized version.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _to | address | The address of the account that receives the message. |
| _fee | uint256 | The amount of fee in Ether the caller would like to pay to the relayer. |
| _message | bytes | The content of the message. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _to | address | undefined |
| _value | uint256 | undefined |
| _message | bytes | undefined |
| _gasLimit | uint256 | undefined |
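A minimal usage sketch for the documented `sendMessage(address,uint256,bytes,uint256)` entry point; the caller contract, the target function name, and the fee handling are illustrative assumptions (in practice `msg.value` must cover the value plus any cross-domain fee):
```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Minimal interface mirroring the sendMessage signature documented above.
interface IScrollMessengerSend {
    function sendMessage(address _to, uint256 _value, bytes calldata _message, uint256 _gasLimit) external payable;
}

// Hypothetical caller: asks a target on the other layer to run `handlePing()`.
// The target address, function name, and fee handling are illustrative only.
contract CrossLayerPingerSketch {
    IScrollMessengerSend public immutable messenger;

    constructor(address _messenger) {
        messenger = IScrollMessengerSend(_messenger);
    }

    function ping(address _target, uint256 _value, uint256 _gasLimit) external payable {
        // msg.value is assumed to cover `_value` plus any cross-domain fee.
        messenger.sendMessage{value: msg.value}(
            _target,
            _value,
            abi.encodeWithSignature("handlePing()"),
            _gasLimit
        );
    }
}
```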
### transferOwnership
@@ -304,13 +297,13 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateDropDelayDuration
### updateFeeVault
```solidity
function updateDropDelayDuration(uint256 _newDuration) external nonpayable
function updateFeeVault(address _newFeeVault) external nonpayable
```
Update the drop delay duration.
Update fee vault contract.
*This function can only be called by the contract owner.*
@@ -318,23 +311,7 @@ Update the drop delay duration.
| Name | Type | Description |
|---|---|---|
| _newDuration | uint256 | The new delay duration to update. |
### updateGasOracle
```solidity
function updateGasOracle(address _newGasOracle) external nonpayable
```
Update the address of gas oracle.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newGasOracle | address | The address to update. |
| _newFeeVault | address | The address of new fee vault contract. |
### updateWhitelist
@@ -393,10 +370,10 @@ See {IScrollMessenger-xDomainMessageSender}
### FailedRelayedMessage
```solidity
event FailedRelayedMessage(bytes32 indexed msgHash)
event FailedRelayedMessage(bytes32 indexed messageHash)
```
Emitted when a cross domain message fails to be relayed.
@@ -404,23 +381,7 @@ event FailedRelayedMessage(bytes32 indexed msgHash)
| Name | Type | Description |
|---|---|---|
| msgHash `indexed` | bytes32 | undefined |
### MessageDropped
```solidity
event MessageDropped(bytes32 indexed msgHash)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| msgHash `indexed` | bytes32 | undefined |
| messageHash `indexed` | bytes32 | undefined |
### OwnershipTransferred
@@ -458,10 +419,10 @@ event Paused(address account)
### RelayedMessage
```solidity
event RelayedMessage(bytes32 indexed msgHash)
event RelayedMessage(bytes32 indexed messageHash)
```
Emitted when a cross domain message is relayed successfully.
@@ -469,15 +430,15 @@ event RelayedMessage(bytes32 indexed msgHash)
| Name | Type | Description |
|---|---|---|
| msgHash `indexed` | bytes32 | undefined |
| messageHash `indexed` | bytes32 | undefined |
### SentMessage
```solidity
event SentMessage(address indexed target, address sender, uint256 value, uint256 fee, uint256 deadline, bytes message, uint256 messageNonce, uint256 gasLimit)
event SentMessage(address indexed sender, address indexed target, uint256 value, uint256 messageNonce, uint256 gasLimit, bytes message)
```
Emitted when a cross domain message is sent.
@@ -485,14 +446,12 @@ event SentMessage(address indexed target, address sender, uint256 value, uint256
| Name | Type | Description |
|---|---|---|
| sender `indexed` | address | undefined |
| target `indexed` | address | undefined |
| sender | address | undefined |
| value | uint256 | undefined |
| fee | uint256 | undefined |
| deadline | uint256 | undefined |
| message | bytes | undefined |
| messageNonce | uint256 | undefined |
| gasLimit | uint256 | undefined |
| message | bytes | undefined |
### Unpaused
@@ -510,13 +469,13 @@ event Unpaused(address account)
|---|---|---|
| account | address | undefined |
### UpdateDropDelayDuration
### UpdateFeeVault
```solidity
event UpdateDropDelayDuration(uint256 _oldDuration, uint256 _newDuration)
event UpdateFeeVault(address _oldFeeVault, address _newFeeVault)
```
Emitted when owner updates drop delay duration
Emitted when owner updates fee vault contract.
@@ -524,25 +483,8 @@ Emitted when owner updates drop delay duration
| Name | Type | Description |
|---|---|---|
| _oldDuration | uint256 | undefined |
| _newDuration | uint256 | undefined |
### UpdateGasOracle
```solidity
event UpdateGasOracle(address _oldGasOracle, address _newGasOracle)
```
Emitted when owner updates gas oracle contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldGasOracle | address | undefined |
| _newGasOracle | address | undefined |
| _oldFeeVault | address | undefined |
| _newFeeVault | address | undefined |
### UpdateWhitelist

View File

@@ -84,17 +84,6 @@ Deposit some token to a recipient&#39;s account on L2 and call.
| _data | bytes | Optional data to forward to recipient's account. |
| _gasLimit | uint256 | Gas limit required to complete the deposit on L2. |
### finalizeDropMessage
```solidity
function finalizeDropMessage() external payable
```
### finalizeWithdrawERC20
```solidity
@@ -144,7 +133,7 @@ Return the corresponding l2 token address given l1 token address.
function initialize(address _counterpart, address _router, address _messenger, address _l2TokenImplementation, address _l2TokenFactory) external nonpayable
```
Initialize the storage of L1StandardERC20Gateway.
@@ -152,11 +141,11 @@ function initialize(address _counterpart, address _router, address _messenger, a
| Name | Type | Description |
|---|---|---|
| _counterpart | address | undefined |
| _router | address | undefined |
| _messenger | address | undefined |
| _l2TokenImplementation | address | undefined |
| _l2TokenFactory | address | undefined |
| _counterpart | address | The address of L2StandardERC20Gateway in L2. |
| _router | address | The address of L1GatewayRouter. |
| _messenger | address | The address of L1ScrollMessenger. |
| _l2TokenImplementation | address | The address of ScrollStandardERC20 implementation in L2. |
| _l2TokenFactory | address | The address of ScrollStandardERC20Factory contract in L2. |
### l2TokenFactory
@@ -198,7 +187,7 @@ The address of ScrollStandardERC20 implementation in L2.
function messenger() external view returns (address)
```
The address of L1ScrollMessenger/L2ScrollMessenger contract.
The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
@@ -233,10 +222,10 @@ The address of L1GatewayRouter/L2GatewayRouter contract.
### DepositERC20
```solidity
event DepositERC20(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _amount, bytes _data)
event DepositERC20(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 amount, bytes data)
```
Emitted when someone deposits ERC20 tokens from L1 to L2.
@@ -244,20 +233,20 @@ event DepositERC20(address indexed _l1Token, address indexed _l2Token, address i
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |
### FinalizeWithdrawERC20
```solidity
event FinalizeWithdrawERC20(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _amount, bytes _data)
event FinalizeWithdrawERC20(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 amount, bytes data)
```
Emitted when an ERC20 token is withdrawn from L2 to L1 and transferred to the recipient.
@@ -265,12 +254,12 @@ event FinalizeWithdrawERC20(address indexed _l1Token, address indexed _l2Token,
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |

View File

@@ -101,17 +101,6 @@ Deposit some token to a recipient&#39;s account on L2 and call.
| _data | bytes | Optional data to forward to recipient's account. |
| _gasLimit | uint256 | Gas limit required to complete the deposit on L2. |
### finalizeDropMessage
```solidity
function finalizeDropMessage() external payable
```
### finalizeWithdrawERC20
```solidity
@@ -158,10 +147,10 @@ Return the corresponding l2 token address given l1 token address.
### initialize
```solidity
function initialize(address _counterpart, address _router, address _messenger, address _WETH, address _l2WETH) external nonpayable
function initialize(address _counterpart, address _router, address _messenger) external nonpayable
```
Initialize the storage of L1WETHGateway.
@@ -169,11 +158,9 @@ function initialize(address _counterpart, address _router, address _messenger, a
| Name | Type | Description |
|---|---|---|
| _counterpart | address | undefined |
| _router | address | undefined |
| _messenger | address | undefined |
| _WETH | address | undefined |
| _l2WETH | address | undefined |
| _counterpart | address | The address of L2ETHGateway in L2. |
| _router | address | The address of L1GatewayRouter. |
| _messenger | address | The address of L1ScrollMessenger. |
### l2WETH
@@ -198,7 +185,7 @@ The address of L2 WETH address.
function messenger() external view returns (address)
```
The address of L1ScrollMessenger/L2ScrollMessenger contract.
The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
@@ -233,10 +220,10 @@ The address of L1GatewayRouter/L2GatewayRouter contract.
### DepositERC20
```solidity
event DepositERC20(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _amount, bytes _data)
event DepositERC20(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 amount, bytes data)
```
Emitted when someone deposits ERC20 tokens from L1 to L2.
@@ -244,20 +231,20 @@ event DepositERC20(address indexed _l1Token, address indexed _l2Token, address i
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |
### FinalizeWithdrawERC20
```solidity
event FinalizeWithdrawERC20(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _amount, bytes _data)
event FinalizeWithdrawERC20(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 amount, bytes data)
```
Emitted when an ERC20 token is withdrawn from L2 to L1 and transferred to the recipient.
@@ -265,12 +252,12 @@ event FinalizeWithdrawERC20(address indexed _l1Token, address indexed _l2Token,
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |

View File

@@ -16,7 +16,7 @@ The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs in layer 2 an
function batchWithdrawERC1155(address _token, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external nonpayable
```
Batch withdraw a list of ERC1155 NFTs to the caller's account on layer 1.
@@ -35,7 +35,7 @@ function batchWithdrawERC1155(address _token, uint256[] _tokenIds, uint256[] _am
function batchWithdrawERC1155(address _token, address _to, uint256[] _tokenIds, uint256[] _amounts, uint256 _gasLimit) external nonpayable
```
Batch withdraw a list of ERC1155 NFTs to the caller's account on layer 1.
@@ -72,9 +72,9 @@ The address of corresponding L1/L2 Gateway contract.
function finalizeBatchDepositERC1155(address _l1Token, address _l2Token, address _from, address _to, uint256[] _tokenIds, uint256[] _amounts) external nonpayable
```
Complete an ERC1155 deposit from layer 1 to layer 2 and send the NFT to the recipient's account in layer 2.
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC1155Gateway in layer 1.*
#### Parameters
@@ -93,9 +93,9 @@ function finalizeBatchDepositERC1155(address _l1Token, address _l2Token, address
function finalizeDepositERC1155(address _l1Token, address _l2Token, address _from, address _to, uint256 _tokenId, uint256 _amount) external nonpayable
```
Complete an ERC1155 deposit from layer 1 to layer 2 and send the NFT to the recipient's account in layer 2.
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC1155Gateway in layer 1.*
#### Parameters
@@ -108,17 +108,6 @@ function finalizeDepositERC1155(address _l1Token, address _l2Token, address _fro
| _tokenId | uint256 | undefined |
| _amount | uint256 | undefined |
### finalizeDropMessage
```solidity
function finalizeDropMessage() external payable
```
### initialize
```solidity
@@ -142,7 +131,7 @@ function initialize(address _counterpart, address _messenger) external nonpayabl
function messenger() external view returns (address)
```
The address of L1ScrollMessenger/L2ScrollMessenger contract.
The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
@@ -333,7 +322,7 @@ Update layer 2 to layer 1 token mapping.
function withdrawERC1155(address _token, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external nonpayable
```
Withdraw some ERC1155 NFT to the caller's account on layer 1.
@@ -352,7 +341,7 @@ function withdrawERC1155(address _token, uint256 _tokenId, uint256 _amount, uint
function withdrawERC1155(address _token, address _to, uint256 _tokenId, uint256 _amount, uint256 _gasLimit) external nonpayable
```
Withdraw some ERC1155 NFT to the caller's account on layer 1.
@@ -373,10 +362,10 @@ function withdrawERC1155(address _token, address _to, uint256 _tokenId, uint256
### BatchWithdrawERC1155
```solidity
event BatchWithdrawERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds, uint256[] _amounts)
event BatchWithdrawERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds, uint256[] amounts)
```
Emitted when ERC1155 NFTs are batch transferred to the gateway in layer 2.
@@ -384,20 +373,20 @@ event BatchWithdrawERC1155(address indexed _l1Token, address indexed _l2Token, a
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _tokenIds | uint256[] | undefined |
| _amounts | uint256[] | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| tokenIds | uint256[] | undefined |
| amounts | uint256[] | undefined |
### FinalizeBatchDepositERC1155
```solidity
event FinalizeBatchDepositERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds, uint256[] _amounts)
event FinalizeBatchDepositERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds, uint256[] amounts)
```
Emitted when ERC1155 NFTs are batch transferred to the recipient in layer 2.
@@ -405,20 +394,20 @@ event FinalizeBatchDepositERC1155(address indexed _l1Token, address indexed _l2T
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _tokenIds | uint256[] | undefined |
| _amounts | uint256[] | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| tokenIds | uint256[] | undefined |
| amounts | uint256[] | undefined |
### FinalizeDepositERC1155
```solidity
event FinalizeDepositERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId, uint256 _amount)
event FinalizeDepositERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId, uint256 amount)
```
Emitted when the ERC1155 NFT is transferred to the recipient in layer 2.
@@ -426,12 +415,12 @@ event FinalizeDepositERC1155(address indexed _l1Token, address indexed _l2Token,
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _tokenId | uint256 | undefined |
| _amount | uint256 | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| tokenId | uint256 | undefined |
| amount | uint256 | undefined |
### OwnershipTransferred
@@ -470,10 +459,10 @@ Emitted when token mapping for ERC1155 token is updated.
### WithdrawERC1155
```solidity
event WithdrawERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId, uint256 _amount)
event WithdrawERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId, uint256 amount)
```
Emitted when the ERC1155 NFT is transferred to the gateway in layer 2.
@@ -481,12 +470,12 @@ event WithdrawERC1155(address indexed _l1Token, address indexed _l2Token, addres
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _tokenId | uint256 | undefined |
| _amount | uint256 | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| tokenId | uint256 | undefined |
| amount | uint256 | undefined |

View File

@@ -24,9 +24,9 @@ Batch withdraw a list of ERC721 NFT to caller&#39;s account on layer 1.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of ERC721 NFT in layer 2. |
| _tokenIds | uint256[] | The list of token ids to withdraw. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _tokenIds | uint256[] | undefined |
| _gasLimit | uint256 | undefined |
### batchWithdrawERC721
@@ -42,10 +42,10 @@ Batch withdraw a list of ERC721 NFT to caller&#39;s account on layer 1.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of ERC721 NFT in layer 2. |
| _to | address | The address of recipient in layer 1. |
| _tokenIds | uint256[] | The list of token ids to withdraw. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _to | address | undefined |
| _tokenIds | uint256[] | undefined |
| _gasLimit | uint256 | undefined |
### counterpart
@@ -78,11 +78,11 @@ Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient&#39;s
| Name | Type | Description |
|---|---|---|
| _l1Token | address | The address of corresponding layer 1 token. |
| _l2Token | address | The address of corresponding layer 2 token. |
| _from | address | The address of account who withdraw the token in layer 1. |
| _to | address | The address of recipient in layer 2 to receive the token. |
| _tokenIds | uint256[] | The list of token ids to withdraw. |
| _l1Token | address | undefined |
| _l2Token | address | undefined |
| _from | address | undefined |
| _to | address | undefined |
| _tokenIds | uint256[] | undefined |
### finalizeDepositERC721
@@ -98,22 +98,11 @@ Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient&#39;s
| Name | Type | Description |
|---|---|---|
| _l1Token | address | The address of corresponding layer 1 token. |
| _l2Token | address | The address of corresponding layer 2 token. |
| _from | address | The address of account who withdraw the token in layer 1. |
| _to | address | The address of recipient in layer 2 to receive the token. |
| _tokenId | uint256 | The token id to withdraw. |
### finalizeDropMessage
```solidity
function finalizeDropMessage() external payable
```
| _l1Token | address | undefined |
| _l2Token | address | undefined |
| _from | address | undefined |
| _to | address | undefined |
| _tokenId | uint256 | undefined |
### initialize
@@ -138,7 +127,7 @@ function initialize(address _counterpart, address _messenger) external nonpayabl
function messenger() external view returns (address)
```
The address of L1ScrollMessenger/L2ScrollMessenger contract.
The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
@@ -288,9 +277,9 @@ Withdraw some ERC721 NFT to caller&#39;s account on layer 1.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of ERC721 NFT in layer 2. |
| _tokenId | uint256 | The token id to withdraw. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _tokenId | uint256 | undefined |
| _gasLimit | uint256 | undefined |
### withdrawERC721
@@ -306,10 +295,10 @@ Withdraw some ERC721 NFT to caller&#39;s account on layer 1.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of ERC721 NFT in layer 2. |
| _to | address | The address of recipient in layer 1. |
| _tokenId | uint256 | The token id to withdraw. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _to | address | undefined |
| _tokenId | uint256 | undefined |
| _gasLimit | uint256 | undefined |
@@ -318,7 +307,7 @@ Withdraw some ERC721 NFT to caller&#39;s account on layer 1.
### BatchWithdrawERC721
```solidity
event BatchWithdrawERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds)
event BatchWithdrawERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds)
```
Emitted when ERC721 NFTs are batch transferred to the gateway in layer 2.
@@ -329,16 +318,16 @@ Emitted when the ERC721 NFT is batch transfered to gateway in layer 2.
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _tokenIds | uint256[] | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| tokenIds | uint256[] | undefined |
### FinalizeBatchDepositERC721
```solidity
event FinalizeBatchDepositERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds)
event FinalizeBatchDepositERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds)
```
Emitted when ERC721 NFTs are batch transferred to the recipient in layer 2.
@@ -349,16 +338,16 @@ Emitted when the ERC721 NFT is batch transfered to recipient in layer 2.
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _tokenIds | uint256[] | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| tokenIds | uint256[] | undefined |
### FinalizeDepositERC721
```solidity
event FinalizeDepositERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId)
event FinalizeDepositERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId)
```
Emitted when the ERC721 NFT is transferred to the recipient in layer 2.
@@ -369,11 +358,11 @@ Emitted when the ERC721 NFT is transfered to recipient in layer 2.
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _tokenId | uint256 | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| tokenId | uint256 | undefined |
### OwnershipTransferred
@@ -412,7 +401,7 @@ Emitted when token mapping for ERC721 token is updated.
### WithdrawERC721
```solidity
event WithdrawERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId)
event WithdrawERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId)
```
Emitted when the ERC721 NFT is transferred to the gateway in layer 2.
@@ -423,11 +412,11 @@ Emitted when the ERC721 NFT is transfered to gateway in layer 2.
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _tokenId | uint256 | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| tokenId | uint256 | undefined |

View File

@@ -32,13 +32,13 @@ Mapping from L2 ERC20 token address to corresponding L2ERC20Gateway.
|---|---|---|
| _0 | address | undefined |
### counterpart
### defaultERC20Gateway
```solidity
function counterpart() external view returns (address)
function defaultERC20Gateway() external view returns (address)
```
The address of corresponding L1/L2 Gateway contract.
The address of the default L2 ERC20 gateway, normally the L2StandardERC20Gateway contract.
@@ -49,13 +49,13 @@ The address of corresponding L1/L2 Gateway contract.
|---|---|---|
| _0 | address | undefined |
### defaultERC20Gateway
### ethGateway
```solidity
function defaultERC20Gateway() external view returns (address)
function ethGateway() external view returns (address)
```
The address of the default L2 ERC20 gateway, normally the L2StandardERC20Gateway contract.
The address of L2ETHGateway.
@@ -90,7 +90,7 @@ Complete a deposit from L1 to L2 and send fund to recipient&#39;s account in L2.
### finalizeDepositETH
```solidity
function finalizeDepositETH(address _from, address _to, uint256 _amount, bytes _data) external payable
function finalizeDepositETH(address, address, uint256, bytes) external payable
```
Complete an ETH deposit from L1 to L2 and send the funds to the recipient's account in L2.
@@ -101,21 +101,10 @@ Complete ETH deposit from L1 to L2 and send fund to recipient&#39;s account in L
| Name | Type | Description |
|---|---|---|
| _from | address | The address of the account that deposited ETH in L1. |
| _to | address | The address of the recipient in L2 to receive the ETH. |
| _amount | uint256 | The amount of ETH to deposit. |
| _data | bytes | Optional data to forward to the recipient's account. |
### finalizeDropMessage
```solidity
function finalizeDropMessage() external payable
```
| _0 | address | undefined |
| _1 | address | undefined |
| _2 | uint256 | undefined |
| _3 | bytes | undefined |
### getERC20Gateway
@@ -186,7 +175,7 @@ Return the corresponding l2 token address given l1 token address.
### initialize
```solidity
function initialize(address _defaultERC20Gateway, address _counterpart, address _messenger) external nonpayable
function initialize(address _ethGateway, address _defaultERC20Gateway) external nonpayable
```
@@ -197,26 +186,8 @@ function initialize(address _defaultERC20Gateway, address _counterpart, address
| Name | Type | Description |
|---|---|---|
| _ethGateway | address | undefined |
| _defaultERC20Gateway | address | undefined |
| _counterpart | address | undefined |
| _messenger | address | undefined |
### messenger
```solidity
function messenger() external view returns (address)
```
The address of L1ScrollMessenger/L2ScrollMessenger contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### owner
@@ -246,23 +217,6 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
### router
```solidity
function router() external view returns (address)
```
The address of L1GatewayRouter/L2GatewayRouter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### setDefaultERC20Gateway
```solidity
@@ -296,6 +250,22 @@ Update the mapping from token address to gateway address.
| _tokens | address[] | The list of addresses of tokens to update. |
| _gateways | address[] | The list of addresses of gateways to update. |
### setETHGateway
```solidity
function setETHGateway(address _ethGateway) external nonpayable
```
Update the address of ETH gateway contract.
*This function should only be called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _ethGateway | address | The address to update. |
### transferOwnership
```solidity
@@ -326,9 +296,9 @@ Withdraw of some token to a caller&#39;s account on L1.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of token in L2. |
| _amount | uint256 | The amount of token to transfer. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _amount | uint256 | undefined |
| _gasLimit | uint256 | undefined |
### withdrawERC20
@@ -344,10 +314,10 @@ Withdraw of some token to a recipient&#39;s account on L1.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of token in L2. |
| _to | address | The address of recipient's account on L1. |
| _amount | uint256 | The amount of token to transfer. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _gasLimit | uint256 | undefined |
### withdrawERC20AndCall
@@ -363,16 +333,16 @@ Withdraw of some token to a recipient&#39;s account on L1 and call.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of token in L2. |
| _to | address | The address of recipient's account on L1. |
| _amount | uint256 | The amount of token to transfer. |
| _data | bytes | Optional data to forward to recipient's account. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| _gasLimit | uint256 | undefined |
### withdrawETH
```solidity
function withdrawETH(address _to, uint256 _gasLimit) external payable
function withdrawETH(address _to, uint256 _amount, uint256 _gasLimit) external payable
```
Withdraw ETH to caller's account in L1.
@@ -383,13 +353,14 @@ Withdraw ETH to caller&#39;s account in L1.
| Name | Type | Description |
|---|---|---|
| _to | address | The address of recipient's account on L1. |
| _gasLimit | uint256 | Gas limit required to complete the withdraw on L1. |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _gasLimit | uint256 | undefined |
### withdrawETH
```solidity
function withdrawETH(uint256 _gasLimit) external payable
function withdrawETH(uint256 _amount, uint256 _gasLimit) external payable
```
Withdraw ETH to caller's account in L1.
@@ -400,7 +371,27 @@ Withdraw ETH to caller&#39;s account in L1.
| Name | Type | Description |
|---|---|---|
| _gasLimit | uint256 | Gas limit required to complete the withdraw on L1. |
| _amount | uint256 | undefined |
| _gasLimit | uint256 | undefined |
### withdrawETHAndCall
```solidity
function withdrawETHAndCall(address _to, uint256 _amount, bytes _data, uint256 _gasLimit) external payable
```
Withdraw ETH to caller's account in L1.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| _gasLimit | uint256 | undefined |
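A hedged sketch of the two documented ETH withdrawal entry points called from a thin wrapper; the wrapper contract and its names are assumptions, and the amount/fee split is simplified for illustration:
```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Minimal interface for the ETH withdrawal entry points documented above.
interface IL2GatewayRouterETH {
    function withdrawETH(address _to, uint256 _amount, uint256 _gasLimit) external payable;
    function withdrawETHAndCall(address _to, uint256 _amount, bytes calldata _data, uint256 _gasLimit) external payable;
}

// Hypothetical wrapper; the amount/fee split is simplified for illustration.
contract EthExitSketch {
    IL2GatewayRouterETH public immutable router;

    constructor(address _router) {
        router = IL2GatewayRouterETH(_router);
    }

    // Withdraw the ETH sent with this call to `_to` on L1.
    function exit(address _to, uint256 _gasLimit) external payable {
        router.withdrawETH{value: msg.value}(_to, msg.value, _gasLimit);
    }

    // Withdraw and forward extra calldata to the L1 recipient.
    function exitAndCall(address _to, bytes calldata _data, uint256 _gasLimit) external payable {
        router.withdrawETHAndCall{value: msg.value}(_to, msg.value, _data, _gasLimit);
    }
}
```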
@@ -409,10 +400,10 @@ Withdraw ETH to caller&#39;s account in L1.
### FinalizeDepositERC20
```solidity
event FinalizeDepositERC20(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _amount, bytes _data)
event FinalizeDepositERC20(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 amount, bytes data)
```
Emitted when an ERC20 token is deposited from L1 to L2 and transferred to the recipient.
@@ -420,20 +411,20 @@ event FinalizeDepositERC20(address indexed _l1Token, address indexed _l2Token, a
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |
### FinalizeDepositETH
```solidity
event FinalizeDepositETH(address indexed _from, address indexed _to, uint256 _amount, bytes _data)
event FinalizeDepositETH(address indexed from, address indexed to, uint256 amount, bytes data)
```
Emitted when ETH is deposited from L1 to L2 and transferred to the recipient.
@@ -441,10 +432,10 @@ event FinalizeDepositETH(address indexed _from, address indexed _to, uint256 _am
| Name | Type | Description |
|---|---|---|
| _from `indexed` | address | undefined |
| _to `indexed` | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| from `indexed` | address | undefined |
| to `indexed` | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |
### OwnershipTransferred
@@ -466,10 +457,10 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
### SetDefaultERC20Gateway
```solidity
event SetDefaultERC20Gateway(address indexed _defaultERC20Gateway)
event SetDefaultERC20Gateway(address indexed defaultERC20Gateway)
```
Emitted when the address of default ERC20 Gateway is updated.
@@ -477,15 +468,15 @@ event SetDefaultERC20Gateway(address indexed _defaultERC20Gateway)
| Name | Type | Description |
|---|---|---|
| _defaultERC20Gateway `indexed` | address | undefined |
| defaultERC20Gateway `indexed` | address | undefined |
### SetERC20Gateway
```solidity
event SetERC20Gateway(address indexed _token, address indexed _gateway)
event SetERC20Gateway(address indexed token, address indexed gateway)
```
Emitted when the `gateway` for `token` is updated.
@@ -493,16 +484,32 @@ event SetERC20Gateway(address indexed _token, address indexed _gateway)
| Name | Type | Description |
|---|---|---|
| _token `indexed` | address | undefined |
| _gateway `indexed` | address | undefined |
| token `indexed` | address | undefined |
| gateway `indexed` | address | undefined |
### SetETHGateway
```solidity
event SetETHGateway(address indexed ethGateway)
```
Emitted when the address of ETH Gateway is updated.
#### Parameters
| Name | Type | Description |
|---|---|---|
| ethGateway `indexed` | address | undefined |
### WithdrawERC20
```solidity
event WithdrawERC20(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _amount, bytes _data)
event WithdrawERC20(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 amount, bytes data)
```
Emitted when someone withdraws ERC20 tokens from L2 to L1.
@@ -510,20 +517,20 @@ event WithdrawERC20(address indexed _l1Token, address indexed _l2Token, address
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |
### WithdrawETH
```solidity
event WithdrawETH(address indexed _from, address indexed _to, uint256 _amount, bytes _data)
event WithdrawETH(address indexed from, address indexed to, uint256 amount, bytes data)
```
Emitted when someone withdraws ETH from L2 to L1.
@@ -531,10 +538,10 @@ event WithdrawETH(address indexed _from, address indexed _to, uint256 _amount, b
| Name | Type | Description |
|---|---|---|
| _from `indexed` | address | undefined |
| _to `indexed` | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| from `indexed` | address | undefined |
| to `indexed` | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |

View File

@@ -10,13 +10,13 @@ The `L2ScrollMessenger` contract can: 1. send messages from layer 2 to layer 1;
## Methods
### dropDelayDuration
### blockContainer
```solidity
function dropDelayDuration() external view returns (uint256)
function blockContainer() external view returns (address)
```
The amount of seconds needed to wait if we want to drop message.
The contract that contains the list of L1 blocks.
@@ -25,30 +25,41 @@ The amount of seconds needed to wait if we want to drop message.
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
| _0 | address | undefined |
### dropMessage
### counterpart
```solidity
function dropMessage(address, address, uint256, uint256, uint256, uint256, bytes, uint256) external nonpayable
function counterpart() external view returns (address)
```
The address of counterpart ScrollMessenger contract in L1/L2.
#### Parameters
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### feeVault
```solidity
function feeVault() external view returns (address)
```
The address of fee vault, collecting cross domain messaging fee.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
| _1 | address | undefined |
| _2 | uint256 | undefined |
| _3 | uint256 | undefined |
| _4 | uint256 | undefined |
| _5 | uint256 | undefined |
| _6 | bytes | undefined |
| _7 | uint256 | undefined |
### gasOracle
@@ -56,7 +67,7 @@ function dropMessage(address, address, uint256, uint256, uint256, uint256, bytes
function gasOracle() external view returns (address)
```
The gas oracle used to estimate transaction fee on layer 2.
The address of L2MessageQueue.
@@ -67,13 +78,30 @@ The gas oracle used to estimate transaction fee on layer 2.
|---|---|---|
| _0 | address | undefined |
### isMessageExecuted
### initialize
```solidity
function isMessageExecuted(bytes32) external view returns (bool)
function initialize(address _counterpart, address _feeVault) external nonpayable
```
Mapping from message hash to execution status.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _counterpart | address | undefined |
| _feeVault | address | undefined |
### isL1MessageExecuted
```solidity
function isL1MessageExecuted(bytes32) external view returns (bool)
```
Mapping from L1 message hash to a boolean value indicating if the message has been successfully executed.
@@ -89,13 +117,13 @@ Mapping from message hash to execution status.
|---|---|---|
| _0 | bool | undefined |
### isMessageRelayed
### isL2MessageSent
```solidity
function isMessageRelayed(bytes32) external view returns (bool)
function isL2MessageSent(bytes32) external view returns (bool)
```
Mapping from relay id to relay status.
Mapping from L2 message hash to sent status.
@@ -111,13 +139,35 @@ Mapping from relay id to relay status.
|---|---|---|
| _0 | bool | undefined |
### messageNonce
### l1MessageFailedTimes
```solidity
function messageNonce() external view returns (uint256)
function l1MessageFailedTimes(bytes32) external view returns (uint256)
```
Message nonce, used to avoid relay attack.
Mapping from L1 message hash to the number of failure times.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### maxFailedExecutionTimes
```solidity
function maxFailedExecutionTimes() external view returns (uint256)
```
The maximum number of times each L1 message can fail on L2.
@@ -128,30 +178,13 @@ Message nonce, used to avoid relay attack.
|---|---|---|
| _0 | uint256 | undefined |
### messagePasser
### messageQueue
```solidity
function messagePasser() external view returns (contract L2ToL1MessagePasser)
function messageQueue() external view returns (address)
```
Contract to store the sent message.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | contract L2ToL1MessagePasser | undefined |
### owner
```solidity
function owner() external view returns (address)
```
The address of the current owner.
The address of L2MessageQueue.
@@ -162,10 +195,55 @@ The address of the current owner.
|---|---|---|
| _0 | address | undefined |
### owner
```solidity
function owner() external view returns (address)
```
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### pause
```solidity
function pause() external nonpayable
```
Pause the contract.
*This function can only be called by the contract owner.*
### paused
```solidity
function paused() external view returns (bool)
```
*Returns true if the contract is paused, and false otherwise.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### relayMessage
```solidity
function relayMessage(address _from, address _to, uint256 _value, uint256 _fee, uint256 _deadline, uint256 _nonce, bytes _message) external nonpayable
function relayMessage(address _from, address _to, uint256 _value, uint256 _nonce, bytes _message) external nonpayable
```
Execute an L1 => L2 message.
@@ -176,13 +254,11 @@ execute L1 =&gt; L2 message
| Name | Type | Description |
|---|---|---|
| _from | address | The address of the sender of the message. |
| _to | address | The address of the recipient of the message. |
| _value | uint256 | The msg.value passed to the message call. |
| _fee | uint256 | The amount of fee in ETH to charge. |
| _deadline | uint256 | The deadline of the message. |
| _nonce | uint256 | The nonce of the message to avoid replay attack. |
| _message | bytes | The content of the message. |
| _from | address | undefined |
| _to | address | undefined |
| _value | uint256 | undefined |
| _nonce | uint256 | undefined |
| _message | bytes | undefined |
### renounceOwnership
@@ -190,53 +266,74 @@ execute L1 =&gt; L2 message
function renounceOwnership() external nonpayable
```
Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner.
*Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
### retryMessageWithProof
```solidity
function retryMessageWithProof(address _from, address _to, uint256 _value, uint256 _nonce, bytes _message, IL2ScrollMessenger.L1MessageProof _proof) external nonpayable
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _from | address | undefined |
| _to | address | undefined |
| _value | uint256 | undefined |
| _nonce | uint256 | undefined |
| _message | bytes | undefined |
| _proof | IL2ScrollMessenger.L1MessageProof | undefined |
### sendMessage
```solidity
function sendMessage(address _to, uint256 _fee, bytes _message, uint256 _gasLimit) external payable
function sendMessage(address _to, uint256 _value, bytes _message, uint256 _gasLimit) external payable
```
Send cross chain message (L1 => L2 or L2 => L1)
Send cross chain message from L1 to L2 or L2 to L1.
*Currently, only privileged accounts can call this function for safety. Adding an extra `_fee` variable also makes it easier to upgrade to a decentralized version.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _to | address | The address of the account that receives the message. |
| _fee | uint256 | The amount of fee in Ether the caller would like to pay to the relayer. |
| _message | bytes | The content of the message. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _to | address | undefined |
| _value | uint256 | undefined |
| _message | bytes | undefined |
| _gasLimit | uint256 | undefined |
### transferOwnership
```solidity
function transferOwnership(address _newOwner) external nonpayable
function transferOwnership(address newOwner) external nonpayable
```
Transfers ownership of the contract to a new account (`newOwner`). Can only be called by the current owner.
*Transfers ownership of the contract to a new account (`newOwner`). Can only be called by the current owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newOwner | address | undefined |
| newOwner | address | undefined |
### updateDropDelayDuration
### updateFeeVault
```solidity
function updateDropDelayDuration(uint256 _newDuration) external nonpayable
function updateFeeVault(address _newFeeVault) external nonpayable
```
Update the drop delay duration.
Update fee vault contract.
*This function can only be called by the contract owner.*
@@ -244,23 +341,23 @@ Update the drop delay duration.
| Name | Type | Description |
|---|---|---|
| _newDuration | uint256 | The new delay duration to update. |
| _newFeeVault | address | The address of new fee vault contract. |
### updateGasOracle
### updateMaxFailedExecutionTimes
```solidity
function updateGasOracle(address _newGasOracle) external nonpayable
function updateMaxFailedExecutionTimes(uint256 _maxFailedExecutionTimes) external nonpayable
```
Update the address of gas oracle.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newGasOracle | address | The address to update. |
| _maxFailedExecutionTimes | uint256 | undefined |
### updateWhitelist
@@ -278,6 +375,54 @@ Update whitelist contract.
|---|---|---|
| _newWhitelist | address | The address of new whitelist contract. |
### verifyMessageExecutionStatus
```solidity
function verifyMessageExecutionStatus(bytes32 _blockHash, bytes32 _msgHash, bytes _proof) external view returns (bool)
```
Check whether the message is executed in the corresponding L1 block.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _blockHash | bytes32 | The hash of the block the message should be in. |
| _msgHash | bytes32 | The hash of the message to check. |
| _proof | bytes | The encoded storage proof from eth_getProof. |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | bool Returns true if the message is executed in L1, otherwise returns false. |
### verifyMessageInclusionStatus
```solidity
function verifyMessageInclusionStatus(bytes32 _blockHash, bytes32 _msgHash, bytes _proof) external view returns (bool)
```
Check whether the L1 message is included in the corresponding L1 block.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _blockHash | bytes32 | The hash of the block the message should be in. |
| _msgHash | bytes32 | The hash of the message to check. |
| _proof | bytes | The encoded storage proof from eth_getProof. |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | bool Returns true if the message is included in L1, otherwise returns false. |
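A small TypeScript sketch of how an off-chain client might call the two view functions above, assuming ethers v5; the `_proof` bytes are assumed to be built off-chain from an `eth_getProof` response, and since the exact encoding is contract-specific it is passed in here rather than constructed.
```typescript
import { ethers } from "ethers";

// Hedged sketch: proofBytes is assumed to be the contract's expected encoding of an
// eth_getProof storage proof; how to build it is not specified in this documentation.
const MESSENGER_VIEW_ABI = [
  "function verifyMessageExecutionStatus(bytes32 _blockHash, bytes32 _msgHash, bytes _proof) view returns (bool)",
  "function verifyMessageInclusionStatus(bytes32 _blockHash, bytes32 _msgHash, bytes _proof) view returns (bool)",
];

async function checkMessageStatus(
  provider: ethers.providers.Provider,
  messengerAddr: string,
  blockHash: string,
  msgHash: string,
  proofBytes: string
) {
  const messenger = new ethers.Contract(messengerAddr, MESSENGER_VIEW_ABI, provider);
  const executed = await messenger.verifyMessageExecutionStatus(blockHash, msgHash, proofBytes);
  const included = await messenger.verifyMessageInclusionStatus(blockHash, msgHash, proofBytes);
  return { executed, included };
}
```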
### whitelist
```solidity
@@ -319,10 +464,10 @@ See {IScrollMessenger-xDomainMessageSender}
### FailedRelayedMessage
```solidity
event FailedRelayedMessage(bytes32 indexed msgHash)
event FailedRelayedMessage(bytes32 indexed messageHash)
```
Emitted when a cross domain message fails to relay.
@@ -330,31 +475,15 @@ event FailedRelayedMessage(bytes32 indexed msgHash)
| Name | Type | Description |
|---|---|---|
| msgHash `indexed` | bytes32 | undefined |
### MessageDropped
```solidity
event MessageDropped(bytes32 indexed msgHash)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| msgHash `indexed` | bytes32 | undefined |
| messageHash `indexed` | bytes32 | undefined |
### OwnershipTransferred
```solidity
event OwnershipTransferred(address indexed _oldOwner, address indexed _newOwner)
event OwnershipTransferred(address indexed previousOwner, address indexed newOwner)
```
Emitted when the owner is changed by the current owner.
@@ -362,16 +491,32 @@ Emitted when owner is changed by current owner.
| Name | Type | Description |
|---|---|---|
| _oldOwner `indexed` | address | undefined |
| _newOwner `indexed` | address | undefined |
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### Paused
```solidity
event Paused(address account)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| account | address | undefined |
### RelayedMessage
```solidity
event RelayedMessage(bytes32 indexed msgHash)
event RelayedMessage(bytes32 indexed messageHash)
```
Emitted when a cross domain message is relayed successfully.
@@ -379,15 +524,15 @@ event RelayedMessage(bytes32 indexed msgHash)
| Name | Type | Description |
|---|---|---|
| msgHash `indexed` | bytes32 | undefined |
| messageHash `indexed` | bytes32 | undefined |
### SentMessage
```solidity
event SentMessage(address indexed target, address sender, uint256 value, uint256 fee, uint256 deadline, bytes message, uint256 messageNonce, uint256 gasLimit)
event SentMessage(address indexed sender, address indexed target, uint256 value, uint256 messageNonce, uint256 gasLimit, bytes message)
```
Emitted when a cross domain message is sent.
@@ -395,22 +540,20 @@ event SentMessage(address indexed target, address sender, uint256 value, uint256
| Name | Type | Description |
|---|---|---|
| sender `indexed` | address | undefined |
| target `indexed` | address | undefined |
| sender | address | undefined |
| value | uint256 | undefined |
| fee | uint256 | undefined |
| deadline | uint256 | undefined |
| message | bytes | undefined |
| messageNonce | uint256 | undefined |
| gasLimit | uint256 | undefined |
| message | bytes | undefined |
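As a usage sketch, an off-chain relayer or indexer could subscribe to this event roughly as below (TypeScript, assuming ethers v5 and the newer six-field signature shown above; the messenger address is a placeholder supplied by the caller).
```typescript
import { ethers } from "ethers";

// Hedged sketch: assumes the newer SentMessage signature from this diff.
const SENT_MESSAGE_ABI = [
  "event SentMessage(address indexed sender, address indexed target, uint256 value, uint256 messageNonce, uint256 gasLimit, bytes message)",
];

function watchSentMessages(provider: ethers.providers.Provider, messengerAddr: string) {
  const messenger = new ethers.Contract(messengerAddr, SENT_MESSAGE_ABI, provider);
  // Both indexed fields left as null, i.e. match any sender and any target.
  const filter = messenger.filters.SentMessage(null, null);
  messenger.on(filter, (sender, target, value, messageNonce, gasLimit, message) => {
    console.log(`message #${messageNonce.toString()}: ${sender} -> ${target}, value=${value.toString()}`);
  });
}
```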
### UpdateDropDelayDuration
### Unpaused
```solidity
event UpdateDropDelayDuration(uint256 _oldDuration, uint256 _newDuration)
event Unpaused(address account)
```
Emitted when the owner updates the drop delay duration.
@@ -418,16 +561,15 @@ Emitted when owner updates drop delay duration
| Name | Type | Description |
|---|---|---|
| _oldDuration | uint256 | undefined |
| _newDuration | uint256 | undefined |
| account | address | undefined |
### UpdateGasOracle
### UpdateFeeVault
```solidity
event UpdateGasOracle(address _oldGasOracle, address _newGasOracle)
event UpdateFeeVault(address _oldFeeVault, address _newFeeVault)
```
Emitted when the owner updates the gas oracle contract.
Emitted when the owner updates the fee vault contract.
@@ -435,8 +577,24 @@ Emitted when owner updates gas oracle contract.
| Name | Type | Description |
|---|---|---|
| _oldGasOracle | address | undefined |
| _newGasOracle | address | undefined |
| _oldFeeVault | address | undefined |
| _newFeeVault | address | undefined |
### UpdateMaxFailedExecutionTimes
```solidity
event UpdateMaxFailedExecutionTimes(uint256 maxFailedExecutionTimes)
```
Emitted when the maximum number of times each message can fail in L2 is updated.
#### Parameters
| Name | Type | Description |
|---|---|---|
| maxFailedExecutionTimes | uint256 | The new maximum number of times each message can fail in L2. |
### UpdateWhitelist

View File

@@ -41,23 +41,12 @@ Complete a deposit from L1 to L2 and send funds to recipient's account in L2.
| Name | Type | Description |
|---|---|---|
| _l1Token | address | The address of corresponding L1 token. |
| _l2Token | address | The address of corresponding L2 token. |
| _from | address | The address of account who deposits the token in L1. |
| _to | address | The address of recipient in L2 to receive the token. |
| _amount | uint256 | The amount of the token to deposit. |
| _data | bytes | Optional data to forward to recipient's account. |
### finalizeDropMessage
```solidity
function finalizeDropMessage() external payable
```
| _l1Token | address | undefined |
| _l2Token | address | undefined |
| _from | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
### getL1ERC20Address
@@ -73,7 +62,7 @@ Return the corresponding l1 token address given l2 token address.
| Name | Type | Description |
|---|---|---|
| _l2Token | address | The address of l2 token. |
| _l2Token | address | undefined |
#### Returns
@@ -95,7 +84,7 @@ Return the corresponding l2 token address given l1 token address.
| Name | Type | Description |
|---|---|---|
| _l1Token | address | The address of l1 token. |
| _l1Token | address | undefined |
#### Returns
@@ -128,7 +117,7 @@ function initialize(address _counterpart, address _router, address _messenger, a
function messenger() external view returns (address)
```
The address of L1ScrollMessenger/L2ScrollMessenger contract.
The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
@@ -187,9 +176,9 @@ Withdraw of some token to a caller's account on L1.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of token in L2. |
| _amount | uint256 | The amount of token to transfer. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _amount | uint256 | undefined |
| _gasLimit | uint256 | undefined |
### withdrawERC20
@@ -205,10 +194,10 @@ Withdraw of some token to a recipient's account on L1.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of token in L2. |
| _to | address | The address of recipient's account on L1. |
| _amount | uint256 | The amount of token to transfer. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _gasLimit | uint256 | undefined |
### withdrawERC20AndCall
@@ -224,11 +213,11 @@ Withdraw of some token to a recipient's account on L1 and call.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of token in L2. |
| _to | address | The address of recipient's account on L1. |
| _amount | uint256 | The amount of token to transfer. |
| _data | bytes | Optional data to forward to recipient's account. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| _gasLimit | uint256 | undefined |
@@ -237,10 +226,10 @@ Withdraw of some token to a recipient's account on L1 and call.
### FinalizeDepositERC20
```solidity
event FinalizeDepositERC20(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _amount, bytes _data)
event FinalizeDepositERC20(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 amount, bytes data)
```
Emitted when an ERC20 token is deposited from L1 to L2 and transferred to the recipient.
@@ -248,20 +237,20 @@ event FinalizeDepositERC20(address indexed _l1Token, address indexed _l2Token, a
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |
### WithdrawERC20
```solidity
event WithdrawERC20(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _amount, bytes _data)
event WithdrawERC20(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 amount, bytes data)
```
Emitted when someone withdraws an ERC20 token from L2 to L1.
@@ -269,12 +258,12 @@ event WithdrawERC20(address indexed _l1Token, address indexed _l2Token, address
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |

View File

@@ -58,23 +58,12 @@ Complete a deposit from L1 to L2 and send funds to recipient's account in L2.
| Name | Type | Description |
|---|---|---|
| _l1Token | address | The address of corresponding L1 token. |
| _l2Token | address | The address of corresponding L2 token. |
| _from | address | The address of account who deposits the token in L1. |
| _to | address | The address of recipient in L2 to receive the token. |
| _amount | uint256 | The amount of the token to deposit. |
| _data | bytes | Optional data to forward to recipient's account. |
### finalizeDropMessage
```solidity
function finalizeDropMessage() external payable
```
| _l1Token | address | undefined |
| _l2Token | address | undefined |
| _from | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
### getL1ERC20Address
@@ -123,7 +112,7 @@ Return the corresponding l2 token address given l1 token address.
### initialize
```solidity
function initialize(address _counterpart, address _router, address _messenger, address _WETH, address _l1WETH) external nonpayable
function initialize(address _counterpart, address _router, address _messenger) external nonpayable
```
@@ -137,8 +126,6 @@ function initialize(address _counterpart, address _router, address _messenger, a
| _counterpart | address | undefined |
| _router | address | undefined |
| _messenger | address | undefined |
| _WETH | address | undefined |
| _l1WETH | address | undefined |
### l1WETH
@@ -163,7 +150,7 @@ The address of L1 WETH address.
function messenger() external view returns (address)
```
The address of L1ScrollMessenger/L2ScrollMessenger contract.
The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
@@ -205,9 +192,9 @@ Withdraw of some token to a caller's account on L1.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of token in L2. |
| _amount | uint256 | The amount of token to transfer. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _amount | uint256 | undefined |
| _gasLimit | uint256 | undefined |
### withdrawERC20
@@ -223,10 +210,10 @@ Withdraw of some token to a recipient's account on L1.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of token in L2. |
| _to | address | The address of recipient's account on L1. |
| _amount | uint256 | The amount of token to transfer. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _gasLimit | uint256 | undefined |
### withdrawERC20AndCall
@@ -242,11 +229,11 @@ Withdraw of some token to a recipient's account on L1 and call.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of token in L2. |
| _to | address | The address of recipient's account on L1. |
| _amount | uint256 | The amount of token to transfer. |
| _data | bytes | Optional data to forward to recipient's account. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
| _token | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| _gasLimit | uint256 | undefined |
@@ -255,10 +242,10 @@ Withdraw of some token to a recipient's account on L1 and call.
### FinalizeDepositERC20
```solidity
event FinalizeDepositERC20(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _amount, bytes _data)
event FinalizeDepositERC20(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 amount, bytes data)
```
Emitted when an ERC20 token is deposited from L1 to L2 and transferred to the recipient.
@@ -266,20 +253,20 @@ event FinalizeDepositERC20(address indexed _l1Token, address indexed _l2Token, a
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |
### WithdrawERC20
```solidity
event WithdrawERC20(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _amount, bytes _data)
event WithdrawERC20(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 amount, bytes data)
```
Emitted when someone withdraws an ERC20 token from L2 to L1.
@@ -287,12 +274,12 @@ event WithdrawERC20(address indexed _l1Token, address indexed _l2Token, address
| Name | Type | Description |
|---|---|---|
| _l1Token `indexed` | address | undefined |
| _l2Token `indexed` | address | undefined |
| _from `indexed` | address | undefined |
| _to | address | undefined |
| _amount | uint256 | undefined |
| _data | bytes | undefined |
| l1Token `indexed` | address | undefined |
| l2Token `indexed` | address | undefined |
| from `indexed` | address | undefined |
| to | address | undefined |
| amount | uint256 | undefined |
| data | bytes | undefined |

View File

@@ -1,594 +0,0 @@
# ZKRollup
> ZKRollup
This contract maintains essential data for the zk rollup, including: 1. a list of pending messages, which will be relayed to layer 2; 2. the block tree generated by layer 2 and its status.
*the message queue is not used yet; the offline relayer only uses events in `L1ScrollMessenger`.*
## Methods
### appendMessage
```solidity
function appendMessage(address _sender, address _target, uint256 _value, uint256 _fee, uint256 _deadline, bytes _message, uint256 _gasLimit) external nonpayable returns (uint256)
```
Append a cross chain message to the message queue.
*This function should only be called by L1ScrollMessenger for safety.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _sender | address | The address of message sender in layer 1. |
| _target | address | The address of message recipient in layer 2. |
| _value | uint256 | The amount of ether sent to recipient in layer 2. |
| _fee | uint256 | The amount of ether paid to relayer in layer 2. |
| _deadline | uint256 | The deadline of the message. |
| _message | bytes | The content of the message. |
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### batches
```solidity
function batches(bytes32) external view returns (bytes32 batchHash, bytes32 parentHash, uint64 batchIndex, bool verified)
```
Mapping from batch id to batch struct.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| batchHash | bytes32 | undefined |
| parentHash | bytes32 | undefined |
| batchIndex | uint64 | undefined |
| verified | bool | undefined |
### blocks
```solidity
function blocks(bytes32) external view returns (bytes32 parentHash, bytes32 transactionRoot, uint64 blockHeight, uint64 batchIndex)
```
Mapping from block hash to block struct.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| parentHash | bytes32 | undefined |
| transactionRoot | bytes32 | undefined |
| blockHeight | uint64 | undefined |
| batchIndex | uint64 | undefined |
### commitBatch
```solidity
function commitBatch(IZKRollup.Layer2Batch _batch) external nonpayable
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _batch | IZKRollup.Layer2Batch | undefined |
### finalizeBatchWithProof
```solidity
function finalizeBatchWithProof(bytes32 _batchId, uint256[] _proof, uint256[] _instances) external nonpayable
```
Finalize a committed batch in layer 1.
*will add more parameters if needed.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _batchId | bytes32 | The identification of the committed batch. |
| _proof | uint256[] | The corresponding proof of the committed batch. |
| _instances | uint256[] | Instances used for verification, generated from the batch. |
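A hedged TypeScript sketch of the call itself (ethers v5); the proof and instance arrays come from the off-chain prover pipeline and are treated as opaque inputs here, and in practice only the rollup operator is expected to call this successfully.
```typescript
import { BigNumberish, ethers } from "ethers";

// Hedged sketch: rollupAddr, proof and instances are caller-supplied; this only
// illustrates the transaction shape, not how proofs are produced.
const ROLLUP_FINALIZE_ABI = [
  "function finalizeBatchWithProof(bytes32 _batchId, uint256[] _proof, uint256[] _instances)",
];

async function finalizeBatch(
  operator: ethers.Signer,
  rollupAddr: string,
  batchId: string,
  proof: BigNumberish[],
  instances: BigNumberish[]
) {
  const rollup = new ethers.Contract(rollupAddr, ROLLUP_FINALIZE_ABI, operator);
  const tx = await rollup.finalizeBatchWithProof(batchId, proof, instances);
  return tx.wait();
}
```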
### finalizedBatches
```solidity
function finalizedBatches(uint256) external view returns (bytes32)
```
Mapping from batch index to finalized batch id.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
### getMessageHashByIndex
```solidity
function getMessageHashByIndex(uint256 _index) external view returns (bytes32)
```
Return the message hash by index.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _index | uint256 | The index to query. |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
### getNextQueueIndex
```solidity
function getNextQueueIndex() external view returns (uint256)
```
Return the index of the first queue element not yet executed.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### getQeueuLength
```solidity
function getQeueuLength() external view returns (uint256)
```
Return the total number of appended messages.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
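Together with `getNextQueueIndex` and `getMessageHashByIndex`, this getter lets a client enumerate the pending part of the queue; a minimal TypeScript sketch (ethers v5, rollup address supplied by the caller) follows. Note the getter keeps its original `getQeueuLength` spelling, since that is the on-chain name.
```typescript
import { ethers } from "ethers";

// Hedged sketch: lists message hashes that were appended but not yet executed.
const QUEUE_ABI = [
  "function getQeueuLength() view returns (uint256)", // (sic) on-chain name
  "function getNextQueueIndex() view returns (uint256)",
  "function getMessageHashByIndex(uint256 _index) view returns (bytes32)",
];

async function pendingMessageHashes(provider: ethers.providers.Provider, rollupAddr: string) {
  const rollup = new ethers.Contract(rollupAddr, QUEUE_ABI, provider);
  const length = (await rollup.getQeueuLength()).toNumber();
  const next = (await rollup.getNextQueueIndex()).toNumber();
  const hashes: string[] = [];
  for (let i = next; i < length; i++) {
    hashes.push(await rollup.getMessageHashByIndex(i));
  }
  return hashes;
}
```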
### importGenesisBlock
```solidity
function importGenesisBlock(IZKRollup.Layer2BlockHeader _genesis) external nonpayable
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _genesis | IZKRollup.Layer2BlockHeader | undefined |
### initialize
```solidity
function initialize(uint256 _chainId) external nonpayable
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| _chainId | uint256 | undefined |
### isBlockFinalized
```solidity
function isBlockFinalized(bytes32 _blockHash) external view returns (bool)
```
Return whether the block is finalized by block hash.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _blockHash | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### isBlockFinalized
```solidity
function isBlockFinalized(uint256 _blockHeight) external view returns (bool)
```
Return whether the block is finalized by block height.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _blockHeight | uint256 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
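Since `isBlockFinalized` is overloaded, callers using ethers must disambiguate by the full signature, as the test suite later in this diff does; a brief TypeScript sketch (ethers v5, placeholder address):
```typescript
import { ethers } from "ethers";

// Hedged sketch: rollupAddr is a placeholder; both overloads are queried.
const FINALIZED_ABI = [
  "function isBlockFinalized(bytes32 _blockHash) view returns (bool)",
  "function isBlockFinalized(uint256 _blockHeight) view returns (bool)",
];

async function checkFinalized(
  provider: ethers.providers.Provider,
  rollupAddr: string,
  blockHash: string,
  blockHeight: number
) {
  const rollup = new ethers.Contract(rollupAddr, FINALIZED_ABI, provider);
  const byHash = await rollup["isBlockFinalized(bytes32)"](blockHash);
  const byHeight = await rollup["isBlockFinalized(uint256)"](blockHeight);
  return { byHash, byHeight };
}
```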
### lastFinalizedBatchID
```solidity
function lastFinalizedBatchID() external view returns (bytes32)
```
The latest finalized batch id.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
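Combined with the `batches` mapping above, this id gives quick access to the most recently finalized batch record; a short TypeScript sketch (ethers v5, rollup address supplied by the caller):
```typescript
import { ethers } from "ethers";

// Hedged sketch: reads the latest finalized batch id and its entry in `batches`.
const BATCH_ABI = [
  "function lastFinalizedBatchID() view returns (bytes32)",
  "function batches(bytes32) view returns (bytes32 batchHash, bytes32 parentHash, uint64 batchIndex, bool verified)",
];

async function latestFinalizedBatch(provider: ethers.providers.Provider, rollupAddr: string) {
  const rollup = new ethers.Contract(rollupAddr, BATCH_ABI, provider);
  const batchId = await rollup.lastFinalizedBatchID();
  const { batchHash, parentHash, batchIndex, verified } = await rollup.batches(batchId);
  return { batchId, batchHash, parentHash, batchIndex, verified };
}
```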
### layer2ChainId
```solidity
function layer2ChainId() external view returns (uint256)
```
The chain id of the corresponding layer 2 chain.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### layer2GasLimit
```solidity
function layer2GasLimit(uint256) external view returns (uint256)
```
Return the layer 2 block gas limit.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### messenger
```solidity
function messenger() external view returns (address)
```
The address of L1ScrollMessenger.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### operator
```solidity
function operator() external view returns (address)
```
The address of operator.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### owner
```solidity
function owner() external view returns (address)
```
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### renounceOwnership
```solidity
function renounceOwnership() external nonpayable
```
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
### revertBatch
```solidity
function revertBatch(bytes32 _batchId) external nonpayable
```
Revert a pending batch.
*one can only revert unfinalized batches.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _batchId | bytes32 | The identification of the batch. |
### transferOwnership
```solidity
function transferOwnership(address newOwner) external nonpayable
```
*Transfers ownership of the contract to a new account (`newOwner`). Can only be called by the current owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| newOwner | address | undefined |
### updateMessenger
```solidity
function updateMessenger(address _newMessenger) external nonpayable
```
Update the address of the messenger.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newMessenger | address | The new messenger address to update. |
### updateOperator
```solidity
function updateOperator(address _newOperator) external nonpayable
```
Update the address of the operator.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newOperator | address | The new operator address to update. |
### verifyMessageStateProof
```solidity
function verifyMessageStateProof(uint256 _batchIndex, uint256 _blockHeight) external view returns (bool)
```
Verify a state proof for message relay.
*add more fields.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _batchIndex | uint256 | undefined |
| _blockHeight | uint256 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
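For completeness, this view call is straightforward to use off-chain before attempting `relayMessageWithProof`; a minimal TypeScript sketch (ethers v5), with the batch index and block height taken from the same proof parameters the tests in this diff pass to the messenger:
```typescript
import { ethers } from "ethers";

// Hedged sketch: simply forwards the two indices to the view function.
const STATE_PROOF_ABI = [
  "function verifyMessageStateProof(uint256 _batchIndex, uint256 _blockHeight) view returns (bool)",
];

async function canRelayMessage(
  provider: ethers.providers.Provider,
  rollupAddr: string,
  batchIndex: number,
  blockHeight: number
) {
  const rollup = new ethers.Contract(rollupAddr, STATE_PROOF_ABI, provider);
  return rollup.verifyMessageStateProof(batchIndex, blockHeight);
}
```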
## Events
### CommitBatch
```solidity
event CommitBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash)
```
Emitted when a new batch is committed.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _batchId `indexed` | bytes32 | undefined |
| _batchHash | bytes32 | undefined |
| _batchIndex | uint256 | undefined |
| _parentHash | bytes32 | undefined |
### FinalizeBatch
```solidity
event FinalizeBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash)
```
Emitted when a batch is finalized.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _batchId `indexed` | bytes32 | undefined |
| _batchHash | bytes32 | undefined |
| _batchIndex | uint256 | undefined |
| _parentHash | bytes32 | undefined |
### OwnershipTransferred
```solidity
event OwnershipTransferred(address indexed previousOwner, address indexed newOwner)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### RevertBatch
```solidity
event RevertBatch(bytes32 indexed _batchId)
```
Emitted when a batch is reverted.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _batchId `indexed` | bytes32 | undefined |
### UpdateMesssenger
```solidity
event UpdateMesssenger(address _oldMesssenger, address _newMesssenger)
```
Emitted when the owner updates the address of the messenger.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldMesssenger | address | The address of old messenger contract. |
| _newMesssenger | address | The address of new messenger contract. |
### UpdateOperator
```solidity
event UpdateOperator(address _oldOperator, address _newOperator)
```
Emitted when the owner updates the address of the operator.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldOperator | address | The address of old operator. |
| _newOperator | address | The address of new operator. |

View File

@@ -1,826 +0,0 @@
/* eslint-disable node/no-unpublished-import */
/* eslint-disable node/no-missing-import */
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
import { expect } from "chai";
import { constants } from "ethers";
import { keccak256 } from "ethers/lib/utils";
import { ethers } from "hardhat";
import {
ZKRollup,
L1ScrollMessenger,
L2ScrollMessenger,
L1StandardERC20Gateway,
L2StandardERC20Gateway,
MockERC20,
ScrollStandardERC20Factory,
ScrollStandardERC20,
L1WETHGateway,
L2WETHGateway,
WETH9,
} from "../typechain";
describe("ERC20Gateway", async () => {
const layer1GasLimit = 12345;
const layer2GasLimit = 54321;
const DROP_DELAY_DURATION = 86400 * 7;
let deployer: SignerWithAddress;
let alice: SignerWithAddress;
let bob: SignerWithAddress;
let router: SignerWithAddress;
let rollup: ZKRollup;
let l1Messenger: L1ScrollMessenger;
let l2Messenger: L2ScrollMessenger;
beforeEach(async () => {
[deployer, alice, bob, router] = await ethers.getSigners();
const RollupVerifier = await ethers.getContractFactory("RollupVerifier", deployer);
const verifier = await RollupVerifier.deploy();
await verifier.deployed();
// deploy ZKRollup in layer 1
const ZKRollup = await ethers.getContractFactory("ZKRollup", {
signer: deployer,
libraries: { RollupVerifier: verifier.address },
});
rollup = (await ZKRollup.deploy()) as ZKRollup;
await rollup.initialize(233);
await rollup.importGenesisBlock({
blockHash: keccak256(constants.HashZero),
parentHash: constants.HashZero,
baseFee: 0,
stateRoot: constants.HashZero,
blockHeight: 0,
gasUsed: 0,
timestamp: 0,
extraData: "0x",
txs: [],
});
// deploy L1ScrollMessenger in layer 1
const L1ScrollMessenger = await ethers.getContractFactory("L1ScrollMessenger", deployer);
l1Messenger = await L1ScrollMessenger.deploy();
await l1Messenger.initialize(rollup.address);
await rollup.updateMessenger(l1Messenger.address);
// deploy L2ScrollMessenger in layer 2
const L2ScrollMessenger = await ethers.getContractFactory("L2ScrollMessenger", deployer);
l2Messenger = await L2ScrollMessenger.deploy(deployer.address);
});
context("StandardERC20Gateway", async () => {
let l1Gateway: L1StandardERC20Gateway;
let l2Gateway: L2StandardERC20Gateway;
let factory: ScrollStandardERC20Factory;
beforeEach(async () => {
// deploy token template in layer 2
const ScrollStandardERC20 = await ethers.getContractFactory("ScrollStandardERC20", deployer);
const tokenImpl = await ScrollStandardERC20.deploy();
// deploy token factory in layer 2
const ScrollStandardERC20Factory = await ethers.getContractFactory("ScrollStandardERC20Factory", deployer);
factory = await ScrollStandardERC20Factory.deploy(tokenImpl.address);
// deploy gateway in layer 1
const L1StandardERC20Gateway = await ethers.getContractFactory("L1StandardERC20Gateway", deployer);
l1Gateway = await L1StandardERC20Gateway.deploy();
// deploy gateway in layer 2
const L2StandardERC20Gateway = await ethers.getContractFactory("L2StandardERC20Gateway", deployer);
l2Gateway = await L2StandardERC20Gateway.deploy();
// initialize gateway in layer 1
await l1Gateway.initialize(
l2Gateway.address,
router.address,
l1Messenger.address,
tokenImpl.address,
factory.address
);
// initialize gateway in layer 2
await l2Gateway.initialize(l1Gateway.address, router.address, l2Messenger.address, factory.address);
await factory.transferOwnership(l2Gateway.address);
});
const run1to2 = async (decimals: number, sendToSelf: boolean) => {
context(`layer 1 to layer 2: decimals[${decimals}], sendToSelf[${sendToSelf}]`, async () => {
let l1Token: MockERC20;
let l2Token: ScrollStandardERC20;
let recipient: SignerWithAddress;
const amount1 = ethers.utils.parseUnits("1000", decimals);
const amount2 = ethers.utils.parseUnits("100", decimals);
beforeEach(async () => {
recipient = sendToSelf ? alice : bob;
// deploy mock token in layer 1
const MockERC20 = await ethers.getContractFactory("MockERC20", deployer);
l1Token = await MockERC20.deploy("XYZ", "ZYX", decimals);
await l1Token.mint(alice.address, amount1.add(amount2));
// calculate l2 token address
l2Token = await ethers.getContractAt(
"ScrollStandardERC20",
await l2Gateway.getL2ERC20Address(l1Token.address),
deployer
);
});
it("should succeed, when transfer on the first time", async () => {
// 1. approve
await l1Token.connect(alice).approve(l1Gateway.address, amount1);
// 2. do deposit
const nonce = await rollup.getQeueuLength();
const beforeBalanceLayer1 = await l1Token.balanceOf(l1Gateway.address);
const depositTx = sendToSelf
? await l1Gateway
.connect(alice)
["depositERC20(address,uint256,uint256)"](l1Token.address, amount1, layer1GasLimit)
: await l1Gateway
.connect(alice)
["depositERC20(address,address,uint256,uint256)"](
l1Token.address,
recipient.address,
amount1,
layer1GasLimit
);
await depositTx.wait();
const afterBalanceLayer1 = await l1Token.balanceOf(l1Gateway.address);
// should emit DepositERC20
await expect(depositTx)
.to.emit(l1Gateway, "DepositERC20")
.withArgs(l1Token.address, l2Token.address, alice.address, recipient.address, amount1, "0x");
// should emit SentMessage
const symbol = await l1Token.symbol();
const name = await l1Token.name();
const deployData = ethers.utils.defaultAbiCoder.encode(
["string", "string", "uint8"],
[symbol, name, decimals]
);
const deadline = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
const messageData = l2Gateway.interface.encodeFunctionData("finalizeDepositERC20", [
l1Token.address,
l2Token.address,
alice.address,
recipient.address,
amount1,
ethers.utils.defaultAbiCoder.encode(["bytes", "bytes"], ["0x", deployData]),
]);
await expect(depositTx)
.to.emit(l1Messenger, "SentMessage")
.withArgs(l2Gateway.address, l1Gateway.address, 0, 0, deadline, messageData, nonce, layer1GasLimit);
// should transfer token in gateway
expect(afterBalanceLayer1.sub(beforeBalanceLayer1)).to.eq(amount1);
// 3. do relay in layer 2
const beforeBalanceLayer2 = constants.Zero;
const relayTx = await l2Messenger.relayMessage(
l1Gateway.address,
l2Gateway.address,
0,
0,
deadline,
nonce,
messageData
);
await relayTx.wait();
const afterBalanceLayer2 = await l2Token.balanceOf(recipient.address);
// should emit RelayedMessage
await expect(relayTx).to.emit(l2Messenger, "RelayedMessage");
// should emit FinalizeDepositERC20
await expect(relayTx)
.to.emit(l2Gateway, "FinalizeDepositERC20")
.withArgs(l1Token.address, l2Token.address, alice.address, recipient.address, amount1, "0x");
// should deploy token in layer 2
expect(await l2Token.symbol()).to.eq(symbol);
expect(await l2Token.name()).to.eq(name);
expect(await l2Token.decimals()).to.eq(decimals);
// should mint in layer 2
expect(afterBalanceLayer2.sub(beforeBalanceLayer2)).to.eq(amount1);
});
it("should succeed, when transfer on the second time", async () => {
// 1. approve first time
await l1Token.connect(alice).approve(l1Gateway.address, amount1);
// 2. do deposit first time
const nonce1 = await rollup.getQeueuLength();
let beforeBalanceLayer1 = await l1Token.balanceOf(l1Gateway.address);
const depositTx1 = sendToSelf
? await l1Gateway
.connect(alice)
["depositERC20(address,uint256,uint256)"](l1Token.address, amount1, layer1GasLimit)
: await l1Gateway
.connect(alice)
["depositERC20(address,address,uint256,uint256)"](
l1Token.address,
recipient.address,
amount1,
layer1GasLimit
);
await depositTx1.wait();
let afterBalanceLayer1 = await l1Token.balanceOf(l1Gateway.address);
const symbol = await l1Token.symbol();
const name = await l1Token.name();
const deployData = ethers.utils.defaultAbiCoder.encode(
["string", "string", "uint8"],
[symbol, name, decimals]
);
const deadline1 = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
const messageData1 = l2Gateway.interface.encodeFunctionData("finalizeDepositERC20", [
l1Token.address,
l2Token.address,
alice.address,
recipient.address,
amount1,
ethers.utils.defaultAbiCoder.encode(["bytes", "bytes"], ["0x", deployData]),
]);
// should transfer token in gateway
expect(afterBalanceLayer1.sub(beforeBalanceLayer1)).to.eq(amount1);
// 3. do relay in layer 2 first time
let beforeBalanceLayer2 = constants.Zero;
const relayTx1 = await l2Messenger.relayMessage(
l1Gateway.address,
l2Gateway.address,
0,
0,
deadline1,
nonce1,
messageData1
);
await relayTx1.wait();
let afterBalanceLayer2 = await l2Token.balanceOf(recipient.address);
expect(afterBalanceLayer2.sub(beforeBalanceLayer2)).to.eq(amount1);
// 4. approve second time
await l1Token.connect(alice).approve(l1Gateway.address, amount2);
// 5. do deposit second time
const calldata = "0x000033";
const nonce2 = await rollup.getQeueuLength();
beforeBalanceLayer1 = await l1Token.balanceOf(l1Gateway.address);
const depositTx2 = await l1Gateway
.connect(alice)
.depositERC20AndCall(l1Token.address, recipient.address, amount2, calldata, layer1GasLimit);
await depositTx2.wait();
afterBalanceLayer1 = await l1Token.balanceOf(l1Gateway.address);
// should emit DepositERC20
await expect(depositTx2)
.to.emit(l1Gateway, "DepositERC20")
.withArgs(l1Token.address, l2Token.address, alice.address, recipient.address, amount2, calldata);
// should emit SentMessage
const deadline2 = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
const messageData2 = l2Gateway.interface.encodeFunctionData("finalizeDepositERC20", [
l1Token.address,
l2Token.address,
alice.address,
recipient.address,
amount2,
calldata,
]);
await expect(depositTx2)
.to.emit(l1Messenger, "SentMessage")
.withArgs(l2Gateway.address, l1Gateway.address, 0, 0, deadline2, messageData2, nonce2, layer1GasLimit);
// should transfer token in gateway
expect(afterBalanceLayer1.sub(beforeBalanceLayer1)).to.eq(amount2);
// 3. do relay in layer 2
beforeBalanceLayer2 = await l2Token.balanceOf(recipient.address);
const relayTx2 = await l2Messenger.relayMessage(
l1Gateway.address,
l2Gateway.address,
0,
0,
deadline2,
nonce2,
messageData2
);
await relayTx2.wait();
afterBalanceLayer2 = await l2Token.balanceOf(recipient.address);
// should emit RelayedMessage
await expect(relayTx2).to.emit(l2Messenger, "RelayedMessage");
// should emit FinalizeDepositERC20
await expect(relayTx2)
.to.emit(l2Gateway, "FinalizeDepositERC20")
.withArgs(l1Token.address, l2Token.address, alice.address, recipient.address, amount2, calldata);
// should mint in layer 2
expect(afterBalanceLayer2.sub(beforeBalanceLayer2)).to.eq(amount2);
});
});
};
const run2to1 = async (decimals: number, sendToSelf: boolean) => {
context(`layer 2 to layer 1: decimals[${decimals}], sendToSelf[${sendToSelf}]`, async () => {
let l1Token: MockERC20;
let l2Token: ScrollStandardERC20;
let recipient: SignerWithAddress;
const amount = ethers.utils.parseUnits("1000", decimals);
beforeEach(async () => {
recipient = sendToSelf ? alice : bob;
// deploy mock token in layer 1
const MockERC20 = await ethers.getContractFactory("MockERC20", deployer);
l1Token = await MockERC20.deploy("XYZ", "ZYX", decimals);
await l1Token.mint(alice.address, amount);
// calculate l2 token address
l2Token = await ethers.getContractAt(
"ScrollStandardERC20",
await l2Gateway.getL2ERC20Address(l1Token.address),
deployer
);
await l1Token.connect(alice).approve(l1Gateway.address, constants.MaxUint256);
const depositTx = await l1Gateway
.connect(alice)
["depositERC20(address,uint256,uint256)"](l1Token.address, amount, layer1GasLimit);
await depositTx.wait();
const symbol = await l1Token.symbol();
const name = await l1Token.name();
const deployData = ethers.utils.defaultAbiCoder.encode(
["string", "string", "uint8"],
[symbol, name, decimals]
);
const deadline = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
const nonce = await rollup.getQeueuLength();
const messageData = l2Gateway.interface.encodeFunctionData("finalizeDepositERC20", [
l1Token.address,
l2Token.address,
alice.address,
alice.address,
amount,
ethers.utils.defaultAbiCoder.encode(["bytes", "bytes"], ["0x", deployData]),
]);
const relayTx = await l2Messenger.relayMessage(
l1Gateway.address,
l2Gateway.address,
0,
0,
deadline,
nonce,
messageData
);
await relayTx.wait();
expect(await l2Token.balanceOf(alice.address)).to.eq(amount);
});
it("should succeed, when transfer without data", async () => {
// 1. approve
await l2Token.connect(alice).approve(l2Gateway.address, amount);
// 2. withdraw
const nonce = await l2Messenger.messageNonce();
const balanceBefore = await l2Token.balanceOf(alice.address);
const withdrawTx = sendToSelf
? await l2Gateway
.connect(alice)
["withdrawERC20(address,uint256,uint256)"](l2Token.address, amount, layer2GasLimit)
: await l2Gateway
.connect(alice)
["withdrawERC20(address,address,uint256,uint256)"](
l2Token.address,
recipient.address,
amount,
layer2GasLimit
);
await withdrawTx.wait();
const deadline = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
const balanceAfter = await l2Token.balanceOf(alice.address);
// should emit WithdrawERC20
await expect(withdrawTx)
.to.emit(l2Gateway, "WithdrawERC20")
.withArgs(l1Token.address, l2Token.address, alice.address, recipient.address, amount, "0x");
// should emit SentMessage
const messageData = l1Gateway.interface.encodeFunctionData("finalizeWithdrawERC20", [
l1Token.address,
l2Token.address,
alice.address,
recipient.address,
amount,
"0x",
]);
await expect(withdrawTx)
.to.emit(l2Messenger, "SentMessage")
.withArgs(l1Gateway.address, l2Gateway.address, 0, 0, deadline, messageData, nonce, layer2GasLimit);
// should transfer from alice
expect(balanceBefore.sub(balanceAfter)).to.eq(amount);
// 3. relay in layer 1
const relayTx = await l1Messenger.relayMessageWithProof(
l2Gateway.address,
l1Gateway.address,
0,
0,
deadline,
nonce,
messageData,
{ batchIndex: 0, blockHeight: 0, merkleProof: "0x" }
);
await relayTx.wait();
// should emit RelayedMessage
await expect(relayTx).to.emit(l1Messenger, "RelayedMessage");
// should emit FinalizeWithdrawERC20
await expect(relayTx)
.to.emit(l1Gateway, "FinalizeWithdrawERC20")
.withArgs(l1Token.address, l2Token.address, alice.address, recipient.address, amount, "0x");
// should transfer out
expect(await l1Token.balanceOf(l1Gateway.address)).to.eq(0);
expect(await l1Token.balanceOf(recipient.address)).to.eq(amount);
});
it("should succeed, when transfer with data", async () => {
const calldata = "0x3d4233433232";
// 1. approve
await l2Token.connect(alice).approve(l2Gateway.address, amount);
// 2. withdraw
const nonce = await l2Messenger.messageNonce();
const withdrawTx = await l2Gateway
.connect(alice)
.withdrawERC20AndCall(l2Token.address, recipient.address, amount, calldata, layer2GasLimit);
await withdrawTx.wait();
const deadline = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
// should emit WithdrawERC20
await expect(withdrawTx)
.to.emit(l2Gateway, "WithdrawERC20")
.withArgs(l1Token.address, l2Token.address, alice.address, recipient.address, amount, calldata);
// should emit SentMessage
const messageData = l1Gateway.interface.encodeFunctionData("finalizeWithdrawERC20", [
l1Token.address,
l2Token.address,
alice.address,
recipient.address,
amount,
calldata,
]);
await expect(withdrawTx)
.to.emit(l2Messenger, "SentMessage")
.withArgs(l1Gateway.address, l2Gateway.address, 0, 0, deadline, messageData, nonce, layer2GasLimit);
// 3. relay in layer 1
const relayTx = await l1Messenger.relayMessageWithProof(
l2Gateway.address,
l1Gateway.address,
0,
0,
deadline,
nonce,
messageData,
{ batchIndex: 0, blockHeight: 0, merkleProof: "0x" }
);
await relayTx.wait();
// should emit RelayedMessage
await expect(relayTx).to.emit(l1Messenger, "RelayedMessage");
// should emit FinalizeWithdrawERC20
await expect(relayTx)
.to.emit(l1Gateway, "FinalizeWithdrawERC20")
.withArgs(l1Token.address, l2Token.address, alice.address, recipient.address, amount, calldata);
// should transfer out
expect(await l1Token.balanceOf(l1Gateway.address)).to.eq(0);
expect(await l1Token.balanceOf(recipient.address)).to.eq(amount);
});
});
};
for (const decimals of [6, 18, 24]) {
for (const sendToSelf of [true, false]) {
run1to2(decimals, sendToSelf);
run2to1(decimals, sendToSelf);
}
}
});
context("WETHGateway", async () => {
let l1Gateway: L1WETHGateway;
let l2Gateway: L2WETHGateway;
let l1WETH: WETH9;
let l2WETH: WETH9;
beforeEach(async () => {
// deploy weth in layer 1 and layer 2
const WETH9 = await ethers.getContractFactory("WETH9", deployer);
l1WETH = await WETH9.deploy();
l2WETH = await WETH9.deploy();
// deploy gateway in layer 1
const L1WETHGateway = await ethers.getContractFactory("L1WETHGateway", deployer);
l1Gateway = await L1WETHGateway.deploy();
// deploy gateway in layer 2
const L2WETHGateway = await ethers.getContractFactory("L2WETHGateway", deployer);
l2Gateway = await L2WETHGateway.deploy();
// initialize gateway in layer 1
await l1Gateway.initialize(
l2Gateway.address,
router.address,
l1Messenger.address,
l1WETH.address,
l2WETH.address
);
// initialize gateway in layer 2
await l2Gateway.initialize(
l1Gateway.address,
router.address,
l2Messenger.address,
l2WETH.address,
l1WETH.address
);
});
const run1to2 = async (sendToSelf: boolean) => {
context(`layer 1 to layer 2: sendToSelf[${sendToSelf}]`, async () => {
const amount = ethers.utils.parseEther("100");
let recipient: SignerWithAddress;
beforeEach(async () => {
recipient = sendToSelf ? alice : bob;
if ((await ethers.provider.getBalance(l2Messenger.address)).eq(constants.Zero)) {
await deployer.sendTransaction({ to: l2Messenger.address, value: amount });
await l1WETH.connect(alice).deposit({ value: amount });
}
expect(await ethers.provider.getBalance(l2Messenger.address)).to.eq(amount);
});
it("should transfer to layer 2 without data", async () => {
// 1. deposit and approve
await l1WETH.connect(alice).approve(l1Gateway.address, amount);
// 2. do deposit
const nonce = await rollup.getQeueuLength();
const beforeBalanceLayer1 = await ethers.provider.getBalance(l1Messenger.address);
const depositTx = sendToSelf
? await l1Gateway
.connect(alice)
["depositERC20(address,uint256,uint256)"](l1WETH.address, amount, layer1GasLimit)
: await l1Gateway
.connect(alice)
["depositERC20(address,address,uint256,uint256)"](
l1WETH.address,
recipient.address,
amount,
layer1GasLimit
);
await depositTx.wait();
const afterBalanceLayer1 = await ethers.provider.getBalance(l1Messenger.address);
// should emit DepositERC20
await expect(depositTx)
.to.emit(l1Gateway, "DepositERC20")
.withArgs(l1WETH.address, l2WETH.address, alice.address, recipient.address, amount, "0x");
// should emit SentMessage
const deadline = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
const messageData = l2Gateway.interface.encodeFunctionData("finalizeDepositERC20", [
l1WETH.address,
l2WETH.address,
alice.address,
recipient.address,
amount,
"0x",
]);
await expect(depositTx)
.to.emit(l1Messenger, "SentMessage")
.withArgs(l2Gateway.address, l1Gateway.address, amount, 0, deadline, messageData, nonce, layer1GasLimit);
// should unwrap transfer to messenger
expect(afterBalanceLayer1.sub(beforeBalanceLayer1)).to.eq(amount);
// 3. do relay in layer 2
const beforeBalanceLayer2 = await l2WETH.balanceOf(recipient.address);
const relayTx = await l2Messenger.relayMessage(
l1Gateway.address,
l2Gateway.address,
amount,
0,
deadline,
nonce,
messageData
);
await relayTx.wait();
const afterBalanceLayer2 = await l2WETH.balanceOf(recipient.address);
// should emit RelayedMessage
await expect(relayTx).to.emit(l2Messenger, "RelayedMessage");
// should emit FinalizeDepositERC20
await expect(relayTx)
.to.emit(l2Gateway, "FinalizeDepositERC20")
.withArgs(l1WETH.address, l2WETH.address, alice.address, recipient.address, amount, "0x");
// should transfer and wrap weth in layer 2
expect(afterBalanceLayer2.sub(beforeBalanceLayer2)).to.eq(amount);
expect(await ethers.provider.getBalance(l2Messenger.address)).to.eq(constants.Zero);
});
it("should transfer to layer 2 data", async () => {
const calldata = "0x3333444555fdad";
// 1. deposit and approve
await l1WETH.connect(alice).approve(l1Gateway.address, amount);
// 2. do deposit
const nonce = await rollup.getQeueuLength();
const beforeBalanceLayer1 = await ethers.provider.getBalance(l1Messenger.address);
const depositTx = await l1Gateway
.connect(alice)
.depositERC20AndCall(l1WETH.address, recipient.address, amount, calldata, layer1GasLimit);
await depositTx.wait();
const afterBalanceLayer1 = await ethers.provider.getBalance(l1Messenger.address);
// should emit DepositERC20
await expect(depositTx)
.to.emit(l1Gateway, "DepositERC20")
.withArgs(l1WETH.address, l2WETH.address, alice.address, recipient.address, amount, calldata);
// should emit SentMessage
const deadline = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
const messageData = l2Gateway.interface.encodeFunctionData("finalizeDepositERC20", [
l1WETH.address,
l2WETH.address,
alice.address,
recipient.address,
amount,
calldata,
]);
await expect(depositTx)
.to.emit(l1Messenger, "SentMessage")
.withArgs(l2Gateway.address, l1Gateway.address, amount, 0, deadline, messageData, nonce, layer1GasLimit);
// should unwrap transfer to messenger
expect(afterBalanceLayer1.sub(beforeBalanceLayer1)).to.eq(amount);
// 3. do relay in layer 2
const beforeBalanceLayer2 = await l2WETH.balanceOf(recipient.address);
const relayTx = await l2Messenger.relayMessage(
l1Gateway.address,
l2Gateway.address,
amount,
0,
deadline,
nonce,
messageData
);
await relayTx.wait();
const afterBalanceLayer2 = await l2WETH.balanceOf(recipient.address);
// should emit RelayedMessage
await expect(relayTx).to.emit(l2Messenger, "RelayedMessage");
// should emit FinalizeDepositERC20
await expect(relayTx)
.to.emit(l2Gateway, "FinalizeDepositERC20")
.withArgs(l1WETH.address, l2WETH.address, alice.address, recipient.address, amount, calldata);
// should transfer and wrap weth in layer 2
expect(afterBalanceLayer2.sub(beforeBalanceLayer2)).to.eq(amount);
expect(await ethers.provider.getBalance(l2Messenger.address)).to.eq(constants.Zero);
});
});
};
const run2to1 = async (sendToSelf: boolean) => {
context(`layer 2 to layer 1: sendToSelf[${sendToSelf}]`, async () => {
const amount = ethers.utils.parseEther("100");
let recipient: SignerWithAddress;
beforeEach(async () => {
recipient = sendToSelf ? alice : bob;
await l1WETH.connect(alice).deposit({ value: amount });
await l1WETH.connect(alice).approve(l1Gateway.address, amount);
await l1Gateway.connect(alice)["depositERC20(address,uint256,uint256)"](l1WETH.address, amount, 0);
await l2WETH.connect(alice).deposit({ value: amount });
});
it("should transfer to layer 1 without data", async () => {
// 1. approve
await l2WETH.connect(alice).approve(l2Gateway.address, amount);
// 2. do withdraw in layer 2
const nonce = await l2Messenger.messageNonce();
const beforeBalanceLayer2 = await ethers.provider.getBalance(l2Messenger.address);
const withdrawTx = sendToSelf
? await l2Gateway
.connect(alice)
["withdrawERC20(address,uint256,uint256)"](l2WETH.address, amount, layer2GasLimit)
: await l2Gateway
.connect(alice)
["withdrawERC20(address,address,uint256,uint256)"](
l2WETH.address,
recipient.address,
amount,
layer2GasLimit
);
await withdrawTx.wait();
const afterBalanceLayer2 = await ethers.provider.getBalance(l2Messenger.address);
// should emit WithdrawERC20
await expect(withdrawTx)
.to.emit(l2Gateway, "WithdrawERC20")
.withArgs(l1WETH.address, l2WETH.address, alice.address, recipient.address, amount, "0x");
// should emit SentMessage
const deadline = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
const messageData = l1Gateway.interface.encodeFunctionData("finalizeWithdrawERC20", [
l1WETH.address,
l2WETH.address,
alice.address,
recipient.address,
amount,
"0x",
]);
await expect(withdrawTx)
.to.emit(l2Messenger, "SentMessage")
.withArgs(l1Gateway.address, l2Gateway.address, amount, 0, deadline, messageData, nonce, layer2GasLimit);
// should unwrap transfer to messenger
expect(afterBalanceLayer2.sub(beforeBalanceLayer2)).to.eq(amount);
// 3. do relay in layer 1
const beforeBalanceLayer1 = await l1WETH.balanceOf(recipient.address);
const relayTx = await l1Messenger.relayMessageWithProof(
l2Gateway.address,
l1Gateway.address,
amount,
0,
deadline,
nonce,
messageData,
{ batchIndex: 0, blockHeight: 0, merkleProof: "0x" }
);
await relayTx.wait();
const afterBalanceLayer1 = await l1WETH.balanceOf(recipient.address);
// should emit RelayedMessage
await expect(relayTx).to.emit(l1Messenger, "RelayedMessage");
// should emit FinalizeWithdrawERC20
await expect(relayTx)
.to.emit(l1Gateway, "FinalizeWithdrawERC20")
.withArgs(l1WETH.address, l2WETH.address, alice.address, recipient.address, amount, "0x");
// should transfer and wrap weth in layer 1
expect(afterBalanceLayer1.sub(beforeBalanceLayer1)).to.eq(amount);
});
it("should transfer to layer 1 with data", async () => {
const calldata = "0x33445566778899";
// 1. approve
await l2WETH.connect(alice).approve(l2Gateway.address, amount);
// 2. do withdraw in layer 2
const nonce = await l2Messenger.messageNonce();
const beforeBalanceLayer2 = await ethers.provider.getBalance(l2Messenger.address);
const withdrawTx = await l2Gateway
.connect(alice)
.withdrawERC20AndCall(l2WETH.address, recipient.address, amount, calldata, layer2GasLimit);
await withdrawTx.wait();
const afterBalanceLayer2 = await ethers.provider.getBalance(l2Messenger.address);
// should emit WithdrawERC20
await expect(withdrawTx)
.to.emit(l2Gateway, "WithdrawERC20")
.withArgs(l1WETH.address, l2WETH.address, alice.address, recipient.address, amount, calldata);
// should emit SentMessage
const deadline = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
const messageData = l1Gateway.interface.encodeFunctionData("finalizeWithdrawERC20", [
l1WETH.address,
l2WETH.address,
alice.address,
recipient.address,
amount,
calldata,
]);
await expect(withdrawTx)
.to.emit(l2Messenger, "SentMessage")
.withArgs(l1Gateway.address, l2Gateway.address, amount, 0, deadline, messageData, nonce, layer2GasLimit);
// should unwrap transfer to messenger
expect(afterBalanceLayer2.sub(beforeBalanceLayer2)).to.eq(amount);
// 3. do relay in layer 1
const beforeBalanceLayer1 = await l1WETH.balanceOf(recipient.address);
const relayTx = await l1Messenger.relayMessageWithProof(
l2Gateway.address,
l1Gateway.address,
amount,
0,
deadline,
nonce,
messageData,
{ batchIndex: 0, blockHeight: 0, merkleProof: "0x" }
);
await relayTx.wait();
const afterBalanceLayer1 = await l1WETH.balanceOf(recipient.address);
// should emit RelayedMessage
await expect(relayTx).to.emit(l1Messenger, "RelayedMessage");
// should emit FinalizeWithdrawERC20
await expect(relayTx)
.to.emit(l1Gateway, "FinalizeWithdrawERC20")
.withArgs(l1WETH.address, l2WETH.address, alice.address, recipient.address, amount, calldata);
// should transfer and wrap weth in layer 1
expect(afterBalanceLayer1.sub(beforeBalanceLayer1)).to.eq(amount);
});
});
};
for (const sendToSelf of [true, false]) {
run1to2(sendToSelf);
run2to1(sendToSelf);
}
});
});

View File

@@ -1,220 +0,0 @@
/* eslint-disable node/no-unpublished-import */
/* eslint-disable node/no-missing-import */
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
import { expect } from "chai";
import { constants } from "ethers";
import { keccak256 } from "ethers/lib/utils";
import { ethers } from "hardhat";
import { ZKRollup, L1ScrollMessenger, L2ScrollMessenger, L1GatewayRouter, L2GatewayRouter } from "../typechain";
describe("GatewayRouter", async () => {
const layer1GasLimit = 12345;
const layer2GasLimit = 54321;
const DROP_DELAY_DURATION = 86400 * 7;
let deployer: SignerWithAddress;
let alice: SignerWithAddress;
let bob: SignerWithAddress;
let rollup: ZKRollup;
let l1Messenger: L1ScrollMessenger;
let l2Messenger: L2ScrollMessenger;
beforeEach(async () => {
[deployer, alice, bob] = await ethers.getSigners();
const RollupVerifier = await ethers.getContractFactory("RollupVerifier", deployer);
const verifier = await RollupVerifier.deploy();
await verifier.deployed();
// deploy ZKRollup in layer 1
const ZKRollup = await ethers.getContractFactory("ZKRollup", {
signer: deployer,
libraries: { RollupVerifier: verifier.address },
});
rollup = (await ZKRollup.deploy()) as ZKRollup;
await rollup.initialize(233);
await rollup.importGenesisBlock({
blockHash: keccak256(constants.HashZero),
parentHash: constants.HashZero,
baseFee: 0,
stateRoot: constants.HashZero,
blockHeight: 0,
gasUsed: 0,
timestamp: 0,
extraData: "0x",
txs: []
});
// deploy L1ScrollMessenger in layer 1
const L1ScrollMessenger = await ethers.getContractFactory("L1ScrollMessenger", deployer);
l1Messenger = await L1ScrollMessenger.deploy();
await l1Messenger.initialize(rollup.address);
await rollup.updateMessenger(l1Messenger.address);
// deploy L2ScrollMessenger in layer 2
const L2ScrollMessenger = await ethers.getContractFactory("L2ScrollMessenger", deployer);
l2Messenger = await L2ScrollMessenger.deploy(deployer.address);
});
context("WETHGateway", async () => {
let l1Gateway: L1GatewayRouter;
let l2Gateway: L2GatewayRouter;
beforeEach(async () => {
// deploy gateway in layer 1
const L1GatewayRouter = await ethers.getContractFactory("L1GatewayRouter", deployer);
l1Gateway = await L1GatewayRouter.deploy();
// deploy gateway in layer 2
const L2GatewayRouter = await ethers.getContractFactory("L2GatewayRouter", deployer);
l2Gateway = await L2GatewayRouter.deploy();
// initialize gateway in layer 1
await l1Gateway.initialize(constants.AddressZero, l2Gateway.address, l1Messenger.address);
// initialize gateway in layer 2
await l2Gateway.initialize(constants.AddressZero, l1Gateway.address, l2Messenger.address);
});
const run1to2 = async (sendToSelf: boolean) => {
context(`layer 1 to layer 2: sendToSelf[${sendToSelf}]`, async () => {
const amount = ethers.utils.parseEther("100");
let recipient: SignerWithAddress;
beforeEach(async () => {
recipient = sendToSelf ? alice : bob;
if ((await ethers.provider.getBalance(l2Messenger.address)).eq(constants.Zero)) {
await deployer.sendTransaction({ to: l2Messenger.address, value: amount });
}
expect(await ethers.provider.getBalance(l2Messenger.address)).to.eq(amount);
});
it("should transfer to layer 2 without data", async () => {
// 2. do deposit
const nonce = await rollup.getQeueuLength();
const beforeBalanceLayer1 = await ethers.provider.getBalance(l1Messenger.address);
const depositTx = sendToSelf
? await l1Gateway.connect(alice)["depositETH(uint256)"](layer1GasLimit, { value: amount })
: await l1Gateway
.connect(alice)
["depositETH(address,uint256)"](recipient.address, layer1GasLimit, { value: amount });
await depositTx.wait();
const afterBalanceLayer1 = await ethers.provider.getBalance(l1Messenger.address);
// should emit DepositETH
await expect(depositTx)
.to.emit(l1Gateway, "DepositETH")
.withArgs(alice.address, recipient.address, amount, "0x");
// should emit SentMessage
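// the expected message deadline is the latest block timestamp plus the 7-day drop delay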
const deadline = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
const messageData = l2Gateway.interface.encodeFunctionData("finalizeDepositETH", [
alice.address,
recipient.address,
amount,
"0x",
]);
await expect(depositTx)
.to.emit(l1Messenger, "SentMessage")
.withArgs(l2Gateway.address, l1Gateway.address, amount, 0, deadline, messageData, nonce, layer1GasLimit);
// should lock the deposited ETH in the layer 1 messenger
expect(afterBalanceLayer1.sub(beforeBalanceLayer1)).to.eq(amount);
// 3. do relay in layer 2
const beforeBalanceLayer2 = await ethers.provider.getBalance(recipient.address);
const relayTx = await l2Messenger.relayMessage(
l1Gateway.address,
l2Gateway.address,
amount,
0,
deadline,
nonce,
messageData
);
await relayTx.wait();
const afterBalanceLayer2 = await ethers.provider.getBalance(recipient.address);
// should emit RelayedMessage
await expect(relayTx).to.emit(l2Messenger, "RelayedMessage");
// should emit FinalizeDepositETH
await expect(relayTx)
.to.emit(l2Gateway, "FinalizeDepositETH")
.withArgs(alice.address, recipient.address, amount, "0x");
// should transfer the ETH to the recipient in layer 2
expect(afterBalanceLayer2.sub(beforeBalanceLayer2)).to.eq(amount);
expect(await ethers.provider.getBalance(l2Messenger.address)).to.eq(constants.Zero);
});
});
};
const run2to1 = async (sendToSelf: boolean) => {
context(`layer 2 to layer 1: sendToSelf[${sendToSelf}]`, async () => {
const amount = ethers.utils.parseEther("100");
let recipient: SignerWithAddress;
beforeEach(async () => {
recipient = sendToSelf ? alice : bob;
await l1Gateway["depositETH(uint256)"](layer1GasLimit, { value: amount });
});
it("should transfer to layer 1 without data", async () => {
// 2. do withdraw in layer 2
const nonce = await l2Messenger.messageNonce();
const beforeBalanceLayer2 = await ethers.provider.getBalance(l2Messenger.address);
const withdrawTx = sendToSelf
? await l2Gateway.connect(alice)["withdrawETH(uint256)"](layer2GasLimit, { value: amount })
: await l2Gateway
.connect(alice)
["withdrawETH(address,uint256)"](recipient.address, layer2GasLimit, { value: amount });
await withdrawTx.wait();
const afterBalanceLayer2 = await ethers.provider.getBalance(l2Messenger.address);
// should emit WithdrawETH
await expect(withdrawTx)
.to.emit(l2Gateway, "WithdrawETH")
.withArgs(alice.address, recipient.address, amount, "0x");
// should emit SentMessage
const deadline = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
const messageData = l1Gateway.interface.encodeFunctionData("finalizeWithdrawETH", [
alice.address,
recipient.address,
amount,
"0x",
]);
await expect(withdrawTx)
.to.emit(l2Messenger, "SentMessage")
.withArgs(l1Gateway.address, l2Gateway.address, amount, 0, deadline, messageData, nonce, layer2GasLimit);
// should hold the withdrawn ETH in the layer 2 messenger
expect(afterBalanceLayer2.sub(beforeBalanceLayer2)).to.eq(amount);
// 3. do relay in layer 1
const beforeBalanceLayer1 = await ethers.provider.getBalance(recipient.address);
const relayTx = await l1Messenger.relayMessageWithProof(
l2Gateway.address,
l1Gateway.address,
amount,
0,
deadline,
nonce,
messageData,
{ batchIndex: 0, blockHeight: 0, merkleProof: "0x" }
);
await relayTx.wait();
const afterBalanceLayer1 = await ethers.provider.getBalance(recipient.address);
// should emit RelayedMessage
await expect(relayTx).to.emit(l1Messenger, "RelayedMessage");
// should emit FinalizeWithdrawETH
await expect(relayTx)
.to.emit(l1Gateway, "FinalizeWithdrawETH")
.withArgs(alice.address, recipient.address, amount, "0x");
// should transfer the ETH to the recipient in layer 1
expect(afterBalanceLayer1.sub(beforeBalanceLayer1)).to.eq(amount);
});
});
};
for (const sendToSelf of [true, false]) {
run1to2(sendToSelf);
run2to1(sendToSelf);
}
});
});

View File

@@ -0,0 +1,230 @@
/* eslint-disable node/no-unpublished-import */
/* eslint-disable node/no-missing-import */
import { expect } from "chai";
import { BigNumber, BigNumberish, constants } from "ethers";
import { concat, RLP } from "ethers/lib/utils";
import { ethers } from "hardhat";
import { L1BlockContainer } from "../typechain";
interface IImportTestConfig {
hash: string;
parentHash: string;
uncleHash: string;
coinbase: string;
stateRoot: string;
transactionsRoot: string;
receiptsRoot: string;
logsBloom: string;
difficulty: BigNumberish;
blockHeight: number;
gasLimit: BigNumberish;
gasUsed: BigNumberish;
blockTimestamp: number;
extraData: string;
mixHash: string;
blockNonce: string;
baseFee: BigNumberish;
}
const testcases: Array<IImportTestConfig> = [
{
hash: "0x02250e97ef862444dd1d70acbe925c289bb2acf20a808cb8f4d1409d3adcfa1b",
parentHash: "0x95e612b2a734f5a8c6aad3f6662b18f983ce8b653854d7c307bf999d9be323af",
uncleHash: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
coinbase: "0x690b9a9e9aa1c9db991c7721a92d351db4fac990",
stateRoot: "0x8d77db2a63cee63ae6d793f839a7513dfc50194f325b96a5326d724f5dc16320",
transactionsRoot: "0xe4ce5f0e2fc5fd8a7ad55c2a31c522ded4054b89065c627d26230b45cd585fed",
receiptsRoot: "0x10b2f34da3e6a1db9498ab36bb17b063763b8eb33492ccc621491b33bcb62bdd",
logsBloom:
"0x18b80159addab073ac340045c4ef982442653840c8074a50159bd9626ae0590740d07273d0c859005b634059c8ca9bb18364573e7ebe79a40aa08225942370c3dc6c0af2ea33cba07900961de2b011aabb8024270d4626d1028a2f0dcd780c60ce933b169b02c8c329c18b000aaf08c98245d8ad949e7d61102d5516489fa924f390c3a71642d7e6044c85a20952568d60cf24c38baff04c244b10eac87a6da8bb32c1535ea2613064a246d598c02444624a8d5a1b201a4270a7868a97aa4530838c2e7a192a88e329daf0334c728b7c057f684f1d28c07d0d2c1dc63868a1088010ae0b661073142e468ae062151e00e5108400e1a99c4111153828610874bb",
difficulty: "0x0",
blockHeight: 0xf766a8,
gasLimit: "0x1c9c380",
gasUsed: "0xe6f194",
blockTimestamp: 0x639f69e3,
extraData: "0x406275696c64657230783639",
mixHash: "0xc1e37ce2b7ece4556ec87ea6d420a1a3610d49c58dfccec6998222fbf9cd64a2",
blockNonce: "0x0000000000000000",
baseFee: "0x2b96fa5cc",
},
{
hash: "0x2da4bf7cef55d6207af2095db5543df16acbd95dc66eef02d9764277c5b0895d",
parentHash: "0xde18012932b21820fbb48ef85b46774873383e75b062bc0c6a4761fbe87bad13",
uncleHash: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
coinbase: "0x690b9a9e9aa1c9db991c7721a92d351db4fac990",
stateRoot: "0x1f101f54c3df5630c9d45224c95d71a57479992e174cdbda0c4ada30e657a465",
transactionsRoot: "0xc2b29438a5f55998879356cbc8006a90d2ba88a9841b3894c8da5840dd797f19",
receiptsRoot: "0xbd3608b6af5464b446db44fd289a980f417447b31ff15dd6d48c72fc8f4fef8d",
logsBloom:
"0xd9e5f4f1e559388eb8193295ab2d3aab30c588d31e381c4060715d0a7ce607360b15d7a0d88e406c60135e0abcecd1d816c11f8cbbb2a80a9b4a00375d6cf356cb78f2934261ab09ea03df29dab5dbe4aefea506f7fd0eaa1a8b1fc8db5079613a49d80ca7e7997a20c7158399022c1dc9853f5b401b86587249fc96ca6fbc2dab1fdeb203ca258c94dd0bc821b38f9f60128591f3cd224c5c207b76b754e537bef8ebe731effae356235dd71bd7b5494bead124a8b5bb0ba02e46721d3ec3c20608880b1d35a17f6a1027d20c7b902e5d7b2ec8177b1aff9dcfbb4729d1e3201e78fa1b3c30e66a590cb5a7cac7afe0b0b1a6c94d5e39c9a20908358b805c81",
difficulty: "0x0",
blockHeight: 0xf766d8,
gasLimit: "0x1c9c380",
gasUsed: "0xf8adad",
blockTimestamp: 0x639f6c23,
extraData: "0x6275696c64657230783639",
mixHash: "0x6066061b78b385483d960faa29ee40e79ea67769f5e697ecb70a0fce677804af",
blockNonce: "0x0000000000000000",
baseFee: "0x2aca8b608",
},
{
hash: "0x4ddeee3e8d62e961080711e48d8083f164789e78cc90e4362c133063b566d64a",
parentHash: "0x9d190c6d49352d628e321853967dd499d78c521daad73652ed1978db5652f58a",
uncleHash: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
coinbase: "0xcd458d7f11023556cc9058f729831a038cb8df9c",
stateRoot: "0x3620665f9d094aac16e0762b733e814f4e09177a232f85d406271b60e4f2b58f",
transactionsRoot: "0x200f5acb65631c48c32c94ae95afe095134132939a01422da5c7c6d0e7f62cb3",
receiptsRoot: "0xc140420782bc76ff326d18b13427c991e9434a554b9ae82bbf09cca7b6ae4036",
logsBloom:
"0x00a8cd20c1402037d2a51100c0895279410502288134d22313912bb7b42e504f850f417d9000000a41949b284b40210406019c0e28122d462c05c11120ac2c680800c0348066a23e7a9e042a9d20e4e0041114830d443160a46b5e02ec300d41330cf0652602140e1580b4c82d1228c000005be72c900f7152093d93ca4880062185952cacc6c8d1405a0c5823bb4284a04a44c92b41462c2420a870685438809a99850acc936c408c24e882a01517086a20a067a2e4e01a20e106078828706c7c00a0234e6830c80b911900291a134475208a4335ab0018a9048d4628186043303b722a79645a104c0e12a506404f45c428660a105d105010482852540b9a6b",
difficulty: "0x2ae28b0d3154b6",
blockHeight: 0xecb6fc,
gasLimit: "0x1c9c30d",
gasUsed: "0xb93955",
blockTimestamp: 0x631d8207,
extraData: "0x706f6f6c696e2e636f6d2050cabdd319bf3175",
mixHash: "0x18d61005875e902e1bbba1045fd6701df170230c0ffb37f2e77fbc2051b987cf",
blockNonce: "0xe8775f73466671e3",
baseFee: "0x18c9de157",
},
];
function encodeHeader(test: IImportTestConfig): string {
return RLP.encode([
test.parentHash,
test.uncleHash,
test.coinbase,
test.stateRoot,
test.transactionsRoot,
test.receiptsRoot,
test.logsBloom,
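// RLP encodes the integer zero as the empty byte string, hence "0x" for post-merge difficulty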
BigNumber.from(test.difficulty).isZero() ? "0x" : BigNumber.from(test.difficulty).toHexString(),
BigNumber.from(test.blockHeight).toHexString(),
BigNumber.from(test.gasLimit).toHexString(),
BigNumber.from(test.gasUsed).toHexString(),
BigNumber.from(test.blockTimestamp).toHexString(),
test.extraData,
test.mixHash,
test.blockNonce,
BigNumber.from(test.baseFee).toHexString(),
]);
}
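// A minimal off-chain sanity check for encodeHeader (a sketch, not part of the original
// test suite): an Ethereum block hash is the keccak256 of the RLP-encoded header, so the
// encoding above can be validated directly against the recorded hashes in `testcases`.
function checkHeaderEncoding(): void {
for (const test of testcases) {
expect(ethers.utils.keccak256(encodeHeader(test))).to.eq(test.hash);
}
}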
describe("L1BlockContainer", async () => {
let container: L1BlockContainer;
for (const test of testcases) {
context(`import block[${test.hash}] height[${test.blockHeight}]`, async () => {
beforeEach(async () => {
const [deployer] = await ethers.getSigners();
const L1BlockContainer = await ethers.getContractFactory("L1BlockContainer", deployer);
container = await L1BlockContainer.deploy(deployer.address);
});
it("should revert, when sender not allowed", async () => {
const [deployer] = await ethers.getSigners();
await container.initialize(
test.parentHash,
test.blockHeight - 1,
test.blockTimestamp - 1,
test.baseFee,
test.stateRoot
);
const Whitelist = await ethers.getContractFactory("Whitelist", deployer);
const whitelist = await Whitelist.deploy(deployer.address);
await container.updateWhitelist(whitelist.address);
await expect(container.importBlockHeader(constants.HashZero, [], false)).to.revertedWith(
"Not whitelisted sender"
);
});
it("should revert, when block hash mismatch", async () => {
await container.initialize(
test.parentHash,
test.blockHeight - 1,
test.blockTimestamp - 1,
test.baseFee,
test.stateRoot
);
const headerRLP = encodeHeader(test);
await expect(container.importBlockHeader(test.parentHash, headerRLP, false)).to.revertedWith(
"Block hash mismatch"
);
});
it("should revert, when has extra bytes", async () => {
await container.initialize(
test.parentHash,
test.blockHeight - 1,
test.blockTimestamp - 1,
test.baseFee,
test.stateRoot
);
const headerRLP = encodeHeader(test);
await expect(container.importBlockHeader(test.hash, concat([headerRLP, "0x00"]), false)).to.revertedWith(
"Header RLP length mismatch"
);
});
it("should revert, when parent not imported", async () => {
await container.initialize(
constants.HashZero,
test.blockHeight - 1,
test.blockTimestamp - 1,
test.baseFee,
test.stateRoot
);
const headerRLP = encodeHeader(test);
await expect(container.importBlockHeader(test.hash, headerRLP, false)).to.revertedWith("Parent not imported");
});
it("should revert, when block height mismatch", async () => {
await container.initialize(
test.parentHash,
test.blockHeight,
test.blockTimestamp - 1,
test.baseFee,
test.stateRoot
);
const headerRLP = encodeHeader(test);
await expect(container.importBlockHeader(test.hash, headerRLP, false)).to.revertedWith("Block height mismatch");
});
it("should revert, when parent block has larger timestamp", async () => {
await container.initialize(
test.parentHash,
test.blockHeight - 1,
test.blockTimestamp + 1,
test.baseFee,
test.stateRoot
);
const headerRLP = encodeHeader(test);
await expect(container.importBlockHeader(test.hash, headerRLP, false)).to.revertedWith(
"Parent block has larger timestamp"
);
});
it(`should succeed`, async () => {
await container.initialize(
test.parentHash,
test.blockHeight - 1,
test.blockTimestamp - 1,
test.baseFee,
test.stateRoot
);
expect(await container.latestBlockHash()).to.eq(test.parentHash);
const headerRLP = encodeHeader(test);
await expect(container.importBlockHeader(test.hash, headerRLP, false))
.to.emit(container, "ImportBlock")
.withArgs(test.hash, test.blockHeight, test.blockTimestamp, test.baseFee, test.stateRoot);
expect(await container.getStateRoot(test.hash)).to.eq(test.stateRoot);
expect(await container.getBlockTimestamp(test.hash)).to.eq(test.blockTimestamp);
expect(await container.latestBlockHash()).to.eq(test.hash);
expect(await container.latestBaseFee()).to.eq(test.baseFee);
expect(await container.latestBlockNumber()).to.eq(test.blockHeight);
expect(await container.latestBlockTimestamp()).to.eq(test.blockTimestamp);
});
});
}
});

View File

@@ -0,0 +1,120 @@
/* eslint-disable node/no-unpublished-import */
/* eslint-disable node/no-missing-import */
import { expect } from "chai";
import { concat } from "ethers/lib/utils";
import { ethers } from "hardhat";
import { MockPatriciaMerkleTrieVerifier } from "../typechain";
interface ITestConfig {
block: number;
account: string;
storage: string;
expectedRoot: string;
expectedValue: string;
accountProof: string[];
storageProof: string[];
}
const testcases: Array<ITestConfig> = [
{
block: 16212738,
account: "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
storage: "0xb17c5049c06186507ed9d55e735dc0342e08579866e7ed881de010624b3896dd",
expectedRoot: "0x5dd9637058e605949321a683ab1e6c56ae6041a05cdf97355696f93309799391",
expectedValue: "0x00000000000000000000000000000000000000000000000052ab3594ab17a60b",
accountProof: [
"0xf90211a04cfe239817b200a743356abfc6e5b08d9951e90f3932f57a7c12014d9968b040a02c94e10276ccd6a461e94da963f126e396d12f50a3389966705dbb0ece7f67aca0f28acd17ade90c99e92e3e155a46076ef89f51f22caf45ec8f5affc240073cf6a0f26e26128daf3ecbb7a37eb10afad22741725a1ce43819f1f573da6f1e6fc2c9a020e3325c4125cde3a948d7a68530a8f8979591c17f445bf96b4716d64833f6c8a0def41ac472c300aed57feb95cf7426fcca53d4c0007afabfb0d6c4d3b4ad95fea0a65435daeb1a371b29c3037a01230d19872e2bdb1a97aeafe610df01dd9937c3a0c4d93f1c9037597d4b07388551773f9578203a8abf4f3bfabd6eaf58070f32d5a0d008f86640c7313e00f897b2b9416da54ea2182fa98785e583367e42035fc0baa072981aa04d506601aeb2cf8689ff23dff82a52a29e1d401dfe96baa2550b977ea065a9e75f35c97436334ad2498ea3fe4296829ad7b005e65af34fd10ddb368631a0b326e41a44cadb3e78fd84571f5e0f9da6b5ee5dfcfb1c88a6b1fcdb13fe6beca0e32897d4de5966ed95729c2a68354d1ef7f27a9b8a5cdaec592965bcc5b339d3a0022b816b5afca265766e67427b45682ade004a780e7e3667b41930a1d087230ea0dc0eb205c8cc3642fe2f03252b238891819a8528f30fc0207c2271290e8de9a1a0554966428442b6b9188a4b4945aa550f32615e4b1b1d3776b07c9d18ba0146af80",
"0xf90211a00ee4696104bbdba0094ca30daa4eae871b1dc0c2ccb07d8f24c7c89193904607a080893f1dc4ded5ddfa49eb36a618a0c3525c58890ae7e4be006e6988980cd15ca04ad58fd70d3cabb3d59144f729105997d3f07c0223a41dbc9b1507d1aa7825cba03bbe2d774e64d6577f1e90ef42b20664b4707649b00e957e369e97a1f03dc762a0107ec21d49839dbbb267fe5ca58c615af81df1752b7f49c8ce2da952a132cebba0d4bd3d22a406960040f86aa8fff6c9e29a2337560145730f9e3612844d67dd1ea09b1edb047a63e19ba02f894a19bfe2d4fcb9052e0dddd6d40dfa52351b3e973ea0a397a48dcdbeef118776a2cbd06fa400d1bedc99ed9f61d4f4513cc6aa7c29daa031f5b24b9027eef2c12015db45ef79c6f57681a6818173e00ddb1050567be4aea035748b7d80884cd8ee2d53523e9aa49186091b28dadd09e1a4f94b8ba3e3c995a055f851741c59287e048a908539c348c6555c098ac16fa074395d530b67f076b9a0f189025cd5b04a3b73bcdbdfa0d9637a0ff389f7b9a481efc2cb984484cb106ea0d7e874ea3b71239bbdb6f01287f58508816b270a218293987e42618f6b982777a0447c72ec8a23e35ba10b61462c11c654653556682994de6ea7866a538320fd3ea0d52ef935a9abaa771614224e75c9d06b8d94270a5ab11b95635f3d646bc7f80fa020d93fff55bcd20b764b2a4261b448cac23fa19dd64dbb7d33345a27b1c02dce80",
"0xf90211a0aa579a2bef0815ecbe72dcc7a478494f4ddf6e6a821fed8b8e5e22f96be95fb1a0f7be1171a1b188f0159315731796369ea543043b3f2076ad688f2bda5315d4f6a0ac7901c3cece0eafdb607bf3f6981aac2741804c77b0db674d7bc69c6e0841d5a0c1bf87d0fc7ff63bc43bb453920d13b00ed2e126931fe431206519e47f2aff58a0fbb3f885d4e17a30daad80568b76ca24b70f95ddb3840598c9cbf5499caa13d2a009566520886f90ae776076398c3393585110ea164c8a1e6c47980ec67fbbbf9ea0709eec3f022710443237d2ee3d967abb9fe295b335dbc783768cc2396ba0b28ea02003180468280c9bf5819207be30c9f3176a0cd68a57b43fe353565a5d42b62aa09817a0745b614df9aa5268081c06eaa5d6c057c86e0253ee26f081b9fc5487a1a073265752f6c91428565dab106305f47b8c609522ee518b4f391c7f8951f5394fa03ef7529bb0ee4030c994910ba8d8cd0eafbfcc4d7f7a0fe9b528b09360ab12e0a093330c4eb263124f35f26572747b59957744f1c39cb91e413b599d27e07dcaf6a022dec6cd45c7db6901c364be4226d54fd74552af51d835d605d1efde50a374c0a0007c30f8707814de913a9edd9bf09fe614676b2ed5497ea06bd157e5ec1718c2a0e6d9335dee9c32e74ae736ddccb15bbbe3ca07c347e7d038a6103877d1cefd31a02d5576458404a2e48f2263741a2c5ff181ff03939e1952cd616412c98edacdae80",
"0xf90211a0f10b8f4ec168083a8021ac8201365f45d63ad461fdf4cf8c50342499f197f5f3a02341a492492fa8323462dad1af3ab7094b76ae3666c4d081ec8d24c9e0da451da0017ce2794246eda28f5b1b3fee05dd269dabb29f71799ca7c3dca67764132c82a02b629e4b9b699796651ad13840a0d06721041de42d09f22ddf3e0f7c89ade82aa076d2c3f907c842c8e76454503b7ef1c9f52e93fc3830b4b8cd63dadeefa8fd4da09284abd6431d107e527627dd78d3cc2a655f7e364470ef620fb7fada3fcece73a00afefb47543ea7c9866f274ab4aa14ee261ffcd0c8b7c8c4e66f1ff02eda6ed3a02045ebe244660a6cae3467637f3e0b27c003cefe72681c47acb2e9766c1f17c7a08fc1ee83563261f1104687cefe451fedcff6caf2dae3f3a2a382b1a0bad7109ba00afa5fe38079cb86511e0842b62af452a78ecd43dc6c54351ed3ec923769088ca0a9c36efeb72874a37dd282f309ff318b6e9464ece30c63ba80bfbc5e5f76f163a030b918045e6233a81833658889f54cedef0f1052aa56e673649a955bc8fee94aa0eae7097667819b85f90f7d4a72d9a8147dccf5fbd744050a743f029e0a79c725a0671e20fc1d94cdb487e0a8cb8c752fd3b4c2f759b9a905134f7a880e1dcdc96da0425857c455a0e10c7cae230d3b3a3309ff5732b55ca2186cc9ddaecff5460490a0b10db994f51f52b29f43281c561f2c62f2be921c5f585fb441760ce9aa4d3d1a80",
"0xf90211a0fd942eae2655a391e59dc2779f53209542fcc140b96e4b93cff3d8cb417e6efba0bd3535c9bfa5a7b939c7dff9307610a5958f8a785d2dcf7eeaf84624d0e457cca05ce0a4917922d7b302fca1badd446897f360b11d60be9802c45136166a81dc79a0731d140390c684a63ecf3ba9d392c73b8fb1bf2864d4b90eff813e953f66ac4aa010bb21166ea999880a179d6669704ecf6c50ea9e47eb674d9b077a7d4c4f9baba085dab7106099e19e2c978e8e814a7749af5bbdbe1131333713e612898a8d62c1a012720a68371573fe69f384950b871b09a44af5fe2c4870f231a58e07190c1b36a089e816024bd04ad03ca66e47323feaf5d975b3ec41b46fb124ba9a9299c26da7a0827ecf55875811b3b25696b3737ead4817641d29ed46d5c4892d098357b699e2a06450a823c9feb0adcd77aec2d3156057f2c93f83670da26afed344e2c6a8f5a7a045fd2f25ecd36a65186513e409fa3b3e3f3a0f7f60f5951c76d2ce10235db1bfa06819009da16eeacf224ce65fc7dc8052cc2f4dd32813441801ac3be9e9db98c5a0ae81fa6db4342f607a35aea6a10047c1848c9251d87140efd6c24685ab964b08a0ee867ebe92374b199244599920a3a0fd13ca24030ae6c1d1af1ac8523a8968faa007dcd579f048937f2bb7a388a158f565b3338e35d37f455d2d6861ca208183bea0dbc271c1b2865a38476161513c4a590807f8db6f2a4de8db1e9c142a8a15349580",
"0xf90211a02b207484d2fd6781a1e4ae62c2c4171477bd5b929df2b14904cd4f36c61363cba04cbd3a34c4d4f60bc5590d8b5859da8ac83ea7a8a0197dbbc528434651b0f748a0beafa9a7e0b2073100526355a341de7a1a839c7f7322a594bdc9ed4d73d72283a0249717659c4e7adda14416a804ba5c9b305f9da2531a3ff6e6d74fca6380f4c2a09b5d4bcf5c805d1c38f283bca39ce28077cbe0daed23312d666cde49134a4d2da03930a91cdfb11a85632972832202e0ab4027f78049f828a099327513be660ed0a0ec6a17d51d787c382575d6798093a015e8383bb276b6fb291d529498789ada09a0f54c88077fa118092db43a93d89c86ec879da12d33e6e5dd89b10b7fb115bc54a0e1a3af76bd6a0b1f4419a62bc73439c641c612a912dc8d190e8e81c8c15dd561a097934d75e361d115ea93e2fdc0c91a54d59414f0daa2ac1991b6651ae6571f9ca009abf1666d7d9202849314692d5ce1e51e5629727701044b37532ab3f9be50c0a094561fbec829ff4807911e0169bcb59159bf8d478fe7116cd652c179c28342f1a058ea9466450f42b25cc3298911ebeb081b6bc73f3c414f0d36244d331cc18c5da0697343bd56fce1c2d34ebb3baa06b3f5aba4851e3b60436e545a2616ef47cb73a06ef38fec665b8eb25934622af1112b9a9d52408c94d2c0124d6e24b7ff4296c0a0451066ddc0cd1a63e22d096eab595e74c8e8509616650d76a0eedd35f0c228b180",
"0xf8b1a02a85b6c4adf828a068d39f7bf4115a4544ebf32e007d63957a28ee21eb8dcd57a0344f34e01710ba897da06172844f373b281598b859086cf00c546594b955b870808080a0525e7dd1bf391cf7df9ffaaa07093363a2c7a1c7d467d01403e368bd8c1f4e5680808080808080a0235db60b9fecfc721d53cb6624da22433e765569a8312e86a6f0b47faf4a2a23a06c72cff8105f47b356034e5586745859f6290eb366bde35b9e819af9dcdfdd8d8080",
"0xf8719d3da65bd257638cf8cf09b8238888947cc3c0bea2aa2cc3f1c4ac7a3002b851f84f018b03235ac0b3723f4d6c6f61a0f3ea73ed7d35e887e1b2b8ac13e8645eeec0da8210c16da47b0f3b0894011c3fa0d0a06b12ac47863b5c7be4185c2deaad1c61557033f56c7d4ea74429cbb25e23",
],
storageProof: [
"0xf90211a04571622a123ea7cf0d9534115e5e6b2fd058f94306979a373b226979a8c83af3a0293a081f517366f69769840098d809396caf7ff3942c3b16aa641b23723301b4a0605ef8aa3eb98c75406d2781067f9d55804b4cd981614aa09f9f6cb0d87a91b0a09d7f20c3afe36c59119c1308a6d7a3efca7c6588acc14364c0e70b5f7f5ecf97a0ce1729eeec5fb5d9d3fed295e469da960bce62cbbd4540efbb0eaf470b0014a5a0a69bd31a7f4267359dd41b93f03b949bdf4de072651b6929ea4e756bc6f088b6a0801ba6fed2d48d4706569a62678fb93ca48dc159fd8659b7100bc4070e3f24f8a0a58273972230f9ef6f74f1d3d1baa8795f82d0bc2c2313b7522a35cfad25ca7aa0be46e098b427907021d82e9d1d45ca4ef6305e3adacb71683f94e4656718ba14a083808d1c8c0ca4a5668cbe6faba42d927ef8df07f3581d06a9381084f0590defa00b6eaadae4a3d219a0e090a56cfdb17e31326e9d60802cf3a36e8ed0f14490f0a00146a284e0a8245d2c1f51ee97fdf9f4231caee252aab01fcf3c4a619f39663fa00b68dbe3928080b43cfc2533fffee4ed91abff24109f08a3ba26e8aaae18c7cca0345de27acef95642cf996a0485bd0242281c7ed9fddd6bad6f55e6bff04588afa092099ec8d9e6dfea3ee5fe4ce7b18f9e513cd7229f7a8de6ebf93ff5ce757232a0963d3dcfec3a80dc1073eb2292be246d81b4462b8347511d335b4c537f87c29a80",
"0xf90211a089a4ed194eaf9e272c155d2e692b5585c6a38bd04ae96e487bcc231771701f98a07a7de6dadac670c4062757c16976c4fd98c587a47a687b32b640375fd7e825b8a0da765585e24133176d2b38376f362b666800735c46e6358bdb526d03f068f97fa08acba1cd699af52508c374da47250b1d2be1a43a7d25aff247ec717b8a534213a0e74be231dfa53a30bd3157e6f702f14619887946e2a447d31dcac87f391a50c9a0b8448e3cc5dd4e9728c7fff44ec252bdade1618a63d363e86e0e6dc4c77de5f2a0f95aadc2a07fb025f3492fa7d15224bab718a908b1fdecec39900f905273d8fea0b76a4d3edfbf657e6d87e2e3920b478fb8f4bdba7844a7ab23798e1bed4abccba0fd70d97eaebf9d1b9e65dcb960bc1b7e96b03a40dfcd490ebf8bc5bab8c413b6a0fb3fecd1f77557f554c6d22b86e9dfb27fe644d13c8e53c24b64e7b3f3791cd9a039cce3c9632ea42f008bb8fd3412e94dea053d4a2baa41c4a2517b34ba8e4405a066b4b4db0e22d9fa76395494b571b7c0cc1cd18ccd332e8a59bfa03b2be2889aa0a80a5acaeeb595a5740f1844d32eab4d56fffe53176c21a464ff34a8cda84101a0f454d635fa0657c436c5fc2b6a071c62e4c01c139dc2ee544dd8997f2ee9242aa07fa5c3c8e2be0f1255f49383046703291953d29debf61376f862edd3c5b4cf76a0a30f1b5c1c3c4b307a2ac472c81f79283803e88403a5ccee7750ce7175c0b0d380",
"0xf90211a083f3f2d187ac7939ccbb8690863f341b252909afec4dcce275a2e7318e1f15d2a08fdbf9e41ea870a7ec2aa31ce43a682b8e2fffd0988bb934c03dc14e1988952aa04b9e7db219d192320bfdac399670cff992e0aa5dc25d2f3de56f4f53e5373456a07f27f9e5efb3a92a1f2f3e6d8fd4bfaf9015b9fdad8715ba16d30c211aa0530aa07cc6af0533c32fe1af0e5d4b149186970040ac5c69c2db7805774a65532fa064a0f15e9c0dbdd4f935d3aa719506ae1fb7297258d18abe03111d9e5221d6bfb8cda04572757dae6365a28b493c63503809a9dd6927b6e6f11f791e9c2cec92b80513a0d1ac01dd696504ca20c087bea731dac1b8c48d26e5dad36d80e34496ee20b46fa02d879c981e1706e0720b3efa7093308a499d57ccbf9648cba78026b3e7883795a03f007ce733ee8a522776e46bbc5dd28ea33db0ae4702d733926d83b28c4d0181a01b1858a30125abe3a401112f676d6a4b669ac9495b34f89691c075ec7630a45da09d22b122a2fd0db8cc2397c0c8e05fe317e3bc8aa407af8b85ca300d9411dc0da04ad97d66e54c7a2a76bc6729384080115dc3ba5e6a7c5269470372ba6d22eeafa0dcfe09b848078f66db11284e093991436f85ef26ddb3dc2efcf56e4bf05e6101a0e641c7a710a5b8a3b465e05b09e4868d9e54353b50d29eeccc9e829ea314041da063ba309481ffd1118153e75496d66bc7a96d37f32c63f4e731e56abe4fa5f12880",
"0xf90211a00a62828ba9909a92bad0ddff29537a58e176fb8af1d76292813a72f5661ea282a0f037cbce7cbacb3343cdf899fd145917e7cf18deddf5b2d8a94027968f9f1624a064774630a8d992b0888514b5e1dc2fdd37b8a214e6bd39d3689eaf74bf65bf68a0b6ee7661ab782818ac639c03784ab65eecbb06d79d251cd8c25627e51ba5b94da0c1dfabca29a2ae57d88e29f0ea94bb3a825d4b884c7f088ab4261b5900635ecba01bf409b8577e89fe49afa62ec117c32a9beac5f8e8cce54adeb3bd501c15cb80a08d7b60700564e51011a00159786683d707b676f41214b3e538b074fc79484748a08e58472318ad40f9498b98a599d260a80298a2cba39cf45d0bff8d91ae2e4852a04443244bd4654d707e3700d112783b837070111ba8a2f0f11781d623c3990754a0750eac11d5f2be0746f87df3cf9849ccb8f13c831936a745abd37fc464d758eea06311c8c2cbdfc4ff1a7e550477cf38ddc35cf57579d0f842801a9ad6fe50c45da0c6ceee02d855cef0db230d186d9e37b8777b8313a22b3dd6946143da503919d4a08669ea1760b9551901c57fd56411368ed8de861bb4602d26f93005d0101fd195a0285993aee29c28d2239022fbda7df02d06082e0246431b7671edda601c6e5cc6a047bfd76124562bb812ec81f5b286e09907eba7e9b1efa72d4ac7a49b82eed957a054bf6597873bf09bfd3df04d4fdff771c02f9d728d51ed1ef00f6b053f3282f280",
"0xf901f1a0c5a1504268a750c1c90b7841d99e6934f977193c72d44ba456fc9a263fb3ea45a0924bbfcbd6d2e7a3f9bb5ec1898a1ec0b98880f747991e96696bd0b565e1f83aa07ccd4b2cea9ff079bea41f9d704c21e7f9d3fbaa83895f34970585873d5bd9e2a0b2e313a02508e8a0dfa115612c1400f8cf9d5cc23369b6aefd7c1fceca7dc943a0e19964c5618fe9f1f590eaddc17787071442649385109b9324beb8bf51a0d2d4a0b022d54d33a1c62278d7784996fddb4c7dcab2fc3c2287c6840edc3762e3d034a0a8381f53de80c0d06ca7288457d82fc1cef37af3e08abbed93a61d48d7c9ca1ba03f916faed29b999d16e22fcc2ad463681a42339b24fdca5a1323b5e55d5650f3a0eb6adbd0b998ec882b91b44ab6ccf20050962c45b68d4e42d2f0e3e1c9384952a009190c615b4dab60e7c1940f2b3b87e3636a655b29dd8b65b99f497ab4fbc395a0156deb01c2c14daf7c043555c077b4af3c5aac031d75cf9e4f704280983c67c8a09dd3b43b4514cfa57218538527defb69638f108383a9d95ad07a296d30bd5bbf80a01316d876cd6803dd122538f308cf116b79278393d979769a121f8354c925cda0a0324232c83f8194263838f7105b67fb93b805c027d6419a98f3c40937b9502132a0cf19102ca5c74f4e088ca39ded150e7a9d5d1bc5d9263012c7e843dfdec8386580",
"0xf8718080808080a0795b2bc0fec80623a0785ed76761d1e9abbf37b806b4b1664a22c1dac557d79080a09831b7f896628cd55e9cec00f168d92c748a1dae2fc55774f0fdc80ae64294a08080808080a020edc6edb75de3cfde19500957b220fffbfc581e93b5b6e307fac078a8b14783808080",
"0xe99e20e18d2fc45a3ea90621b218552f932e0a2a920a290d1c6bda98db9ab133898852ab3594ab17a60b",
],
},
{
block: 16212787,
account: "0x9467a2d9c07cebce3708ca32eeb2b9219aeb31b8",
storage: "0x000000000000000000000000000000000000000000000000000000000000000a",
expectedRoot: "0x16b9e5246ca2dad361d440d5524cb431ca30d0575fc21f4e4242f7611fa2a212",
expectedValue: "0x639f404f0000000000031d02a5d2b33515ec000000000000072629ee1252f3a0",
accountProof: [
"0xf90211a0aa686b484fd06fd6a76b4b37cbf3965553120d61b93dc354e1e32e3442fff947a0c8401b3aaef041fd79bcf69bc8eae7220b1932973d088c368422b43e7fa99d3ea03d14c01a86a93d483dae0f088ccd5f64ee3346bba6590bedcc6ed4975d36c0c6a0c64f3e49789294f22c3cb3bfdc78406933b8a47f743de5c999599f814cd8d166a080205a023284e4f9905946076d9dc0c029fca1452743becfba43ae49b0c09d18a04e13c9c6719f3519cb7828514f1b0e393398c7dfb0d703980062e52a3faffad1a0e806c685e60d3b312f1e740422728358f9992e4b7cf62c904c8c01265e88fac0a0f21e7ee12a407fe11cb0950f63ef5dcf62d26fa599f40136ec057c684ccaef73a0bde4594be3b1be7c4312c6ecf81ba8cd8057331563feddd4fdbabf3c67385fbba008ff9a89a68d8a8f6cec81a8553ff72043c4dcdc1ce784874c3fa5e76916f4eca01c5e489af3e55abfdee369a10075b761f58be65d5d589742ca8c6098db88e9c5a05b212b9a9b393541dec0d34c4908a194ccd8c6a21063429521308840c8b66d32a031052338c42361d910eee1c3ec4b7be3400c5cd97a7f8aabd3f5ac81da0c8395a0850317a18f8494eeab20c8015e5d863b43587a7dd3a7efd41a921ff62de926dda09e6e76b343415cf3105ecbd67e99f004b31eb7123f3e3a614ad808557d78c34fa030915874eb78ae682f3d74a727227fa86b204fa367256fd4a50767ed4c35bebb80",
"0xf90211a0122c3b5a88702fe6bc3d3464e903d0d1aababc35f259eac6b9111e5b753de6a0a0bf670757a4652ae24e5bd2fe9cacbdda79924bd6091330b950b1473dfec103f3a090ee1dba46441ba0126608d28b0023f0ae8401eda749e90d8550f2d3ca4ccf1ca0deb3887fd765e1c5db19b353dca2ece691dfc2f2c7c0a1c298635e3264d8a05ba06af91d067bdae7d64e34b2d654b08815fc43bdc4193482e9aa58e1fd852841e2a02518d875bdeea78fc832724ad33bbc66a654a1670c6bdf544a060941f90a31d1a0d7e69dfbfc026a105ec5ec68062c6affc1115ae3ad7a70e4ab854f9c914f2cfba0611e45cb73f473325c3d0ad494927e1d1053614c17cec3dd04161248305b3c9ca09767470f4299e3dbea4978fc989ca44abdef26602e3351cea0ef2885dc0e66baa060176e7f197f28205684e6b5ccbb83c5494ac86ef5483094fa3480728b11bf63a0c038f27c7e94887708465bf77ff37de506f5cb29e9a355d4b16d426e12f2bf59a080a4b6849ca41469ec77dba2d4d3ba0b0da9a36e5a6c0451e588a31af5981179a0b7fc37446eafbe040ba963a25e907af5a5d1c584d31198c12e28499a8377b249a0550e5984cd4ee2beb3b1d2af589e0a4954d8da7167896ac12985e1d781e3e98da098ea9d1574fc5431dd7342ea8467c5369ddee70b33ca37f30230e21d9a995d7da06cece45972cba1083ea30c7563c9639d398749575ec229e634f79e1ab637dd6c80",
"0xf90211a0a3afae41153cd80f43b9b413b8fb57481fac6882c1f6097117cde8f8aaed059ea0730760d301e2b18a9cd4b3f777d91bfff8424bf64c05adceb8160532728cb699a074588c944add6aba03154d7bd8b543f149dd9629f46d8da52abc9e41be988a74a0b8ae67ef514d0dad520cdc9103c2702ad40a7b0c343aab9be74d72d568902540a0345dfe1d6b3fbb5c9d0aa731fb083d5db76b4dfe22d5b1a789c78a921589082ea0cc5c0989644c549f573ead05887340e201e92f7a5bd9cfe7b57e3bb46d47613ba0002ae2795f3286b54b45e25fd66ed6173ba4bbe56393f7f27407cd559a2d259ea018cce2547825efce8cf5e6fe14d88cb7899a1d8768dae861c0e263a06640e5e0a05a6a075ccc448ab78a34ed3ee7d56a1b179a046be98a1831db18f43637638d04a0fe2b2ac494af3af2c28198dc97bfd165288108e0d2eff941cc5d115461c799fda02d1de5eb58ae72173353aa94335766bb360eef79b6925fe5f254f0e3caa8941ba0b63901c2fd1c61292d32f049dd699bf39c4019b1ac7ab12907804a1633d288b8a0071290317e54993ff32e0ab04d28b920105eeadc917e44449c4ca2fd80adf9aba0fd86afbc5d8ac6357d6ba6f13f0d08737d1f95d49bc1ef1d19ddee3dbc4188ffa0b1cf7db5488cd60ae077821f0aec741b51f8e8c553eaeed4524159373aa98d7fa0c695f9be60487243c29023e469d7af9e37661e325a577247516475e51d6757de80",
"0xf90211a090b58facddd3e83bdf8b1553a2c42b07fac5c1da069c73be25f30619088cb480a03867abbc8789869f4b7b5cc4799980299cc3012ec7fce70fb7dea2e5995a9a2ca00c3948797fbbfd4879bc72b5ec1eeba993bdbf4f8b39ae8f63c94cb2dfb89916a00796ca2b7894372e41a3331413a5e776eaaffad05ec03e240966e7ba8330f045a0713935c0c8cfc67afb8a35c948b4239710a5e7d61b5bf9d4e3d6e88e4e7aa28ca036caba99dee8e52ccd1ed12972e6c3ce4a28e160bd7542349338b692c27b5a51a02d1d87889d5e1c16690ac8b7ff3642f6814e42fe6cd6e00e108b759555f2cca0a0cc4be174afaf83b4b1d4fa64374817759956315fb684326fafeb238a41fb0ec8a09dabf40050d9ed69f994f8b82f14e037dec59c6a2a24a9879e184b546ea71448a0c77815db0d8d7eda3df1b8354ac007fd93f6190f20616e7b93259d89f1b0ac6ca09c105e9c25f2f480ef8a50c31bfdd0eef120741c9a1caa6f2278ab7fff0e4651a045ef65a0c419433050e6cc57892fac712cd3cb835da30f2f8cc249b872d6274ea0a457eef99c7beaf2b365cfac520db40b375a0707a0aa7bf234a04ec5746e7daea0e4d2b13f79715813fafb715534ed0d1474e044c7521694ae3bb1475e7d570f42a034143e125fb181ec980641ba63a9d19a005eca2081bf1e1e77572c172c8481cca04747c648752a28511842c2d63410bc6a554ca6d13aa3541edd6e7759ed62b2ac80",
"0xf90211a02cf6e48c3852fd7b3a31e6922cb756425da526a164faa2b32f19b21187503ce3a093f0f615e47ec246a5cae41dd6236374287e3efaa9c17611bed4f2621f5ea7e5a0d6c55b3818c48f66570964ab6f184094948ea1d808d26a66a6d0e8195674d143a0ac7dc18dead02fbd3763e5d5fee4d2c032ea207df6bdc26900f0d10ff2c47f8fa0c037ea2e7608348529093c9b9fec3b32d8288bd0b6ac3ae242443f4bda8e9eefa028ead29005c86ca93d969b2963b3eed06ec81dbe7c7c3064d79c6aa033de3246a0f24e9a73c866d6e7f1d411e98da53c76020db588f4b214d44ad6e536d2b7f1e7a0207fd73036d92ceddc5da5c0504448c6c2704735bc6470d10193861e15530708a020f669676f97c6585f7cbe5e405c4f9a4964fad36fe4dd6aa13c6b80a60d901ba061b56b1bcd12005d252197b44f28f611d2cf4448ca57784a8f17ac2b23cfd519a0aad0bfda854bfaef052cc6659d84e69e4b0325e6b8fa394961694e2c3b758203a09da958cb8bc74373e66cf40708a152f31d2c6ac305fcd1af07a25e3e34801227a0edfef4c130b1198a28da1ae2fd66c33d2d1e98725424b9383dee7136360c7036a04c64086b040c6a3701a1b2bedead55797c95c5d635699e66950fcf9c6215ee02a00320a92427efbd2cbe8f70c7c74aa5db0c145b75148808a317a2ccab2cf437f9a0884d942adaa313a922d0883e8139fc6a92acf16e95d2c7d06b4e53a08fdab69280",
"0xf90211a037049228c0254f0105b8f461536b772d38df8e4b8bd7f908be72982a86a35961a0d23d1b2a16afe975ac636a8720e5d9fe14dd999e47f5d9e43fe86b2907134705a086cf6044b7e6be2a9c312cf4bf438d464f111fc19fc0abf80c8ab31644bebd06a05bc25ec41da09b0c76b897525589bd03dc90b482ec59e6a1ff14102217f2cd6ea086e9e5952917cf0e054e0e00e0085d7d3bb6a704e55ec5739b6705e4e6539d9fa0148e465f1f1f6095bbcb2feafc49ffd5f604b7439f9b4ab0437f8cd7acf1adf6a0bd2bb1bb25bf43758ed57d63ead3a619cc3a94d47be1b84b4208b24f5b80094ca037ad5b50e846bb85482548cc5a99a03e1db02aadbf61f1380f61bd9ad7ac4704a0a0967620f115f194f7a0c16c7e13492646507ac7dd8553e97b7ebf416228e1f0a0f42e67ae7d57f618596858a5a7239a6039b0dc751d42dbf47bfad47a36a5a59da0efbe74b7c05b343f3e29d1fbfcaab58789c99cd301b87442363efa0a2c7a395ba0c8b4d32dce4b607dc21e9c4ac3ed9757640c760582cc1ffa4679c4dbc2b2e0bfa057addd95ffe7c0de9774f2e3790a52f262515fa6a2a65a9fb785451a6e3ad2f4a094d55a6f5ae979bfc6c6f59928f2850206c5af3caedf39386939a053a2c7b79ea075b8f0a832023c355b067f3786edbea9547211d8cf2dca5f89f9a413b9b525c0a0757df921602607a9115e97c1ca0e4acbf0a2d4ff3bc6e7ae2b151b88359f190c80",
"0xf9017180a051c6427ab0bc0d3b0db47b82e69a31fec1670e8ffe2ec57356a512c82083a6a5a0dd0af4a616a626aea8529e07f9017ae356087c45c92ef851aedd845987cccc46a0457441ca9402fb91326638832a9a169e021608db12c58d0e7778c1b13add1afea0db1a3351b7f76cb3170ecc91fd0c687ad46378dd392944612f4c68bb9fbe1050a000c1cb0f8f7bd89d04fe5ed1da96fc769c67a27b3c822a8653397e7da6a04730a0b63f0c4914683ae30b031264fef21806ac7a1a32ccfd05c011ddd0202e06b275a0ac66fa130cd31b0e4b15b08965686162a3efb93e3a07ce45859b34e9a2b4112e80a0dfbd89ded3590a54e3b47e540457b06c754b7b0d22cab361a79adbde4e3d96c980a0cb72d7bbf7aab515231c32e9399359c91aff95accd474e39a091fa2b9e71259b80a04a2be13b00b2032cc0c7112be04907d8d0fa0968932abe8dfdda6c6bb07813a680a010d29a9d3186ad1e4ad1c518be391c44180ba8ce1db0f09a2c9ed23ea017733980",
"0xf8669d33239d97e43f5062453663ffd198f40e6120b1057a77480a17b59f8d8cb846f8440180a07a5f002403d62f9d1ab5b4684459d1a2e5170075efb41f51f94fdb30b5e6d46aa073f5b0f762a0557ec4b135108e719884532887167fa14c0d6b7807943d70d96d",
],
storageProof: [
"0xf90171a0b5a85440d5fc74ec55facadb9dbc0cbf35ae1eacdb841b17d6943721a7028fe680a073d52ce999835ee363c087004b4de88b619f66f3dc94d35be5e0b17869d7ece2a042bf377671e60c1d6aad75c93a25c72f0a0c7c2fdaf732b1ae508dc937ebc0be8080a0a32f55598dbc06e6742074f3ad6812f923f9a9f991e597763520cb939c5440df80a01a7798f0e3bd3bcf90d8150e03e9220c1547aa70037856b2961b5fa8dcaaf974a0fed5524862371f728f0e99114f0a09685044436cf34c22cfe4401ec4ec03ffcda06bf06cedf7b90669bac0f199b18bceca612452bb315f1386645bfbd52205a476a04d2c61e0aa8cffbb121715c333a6289570d450cd77f44d327212f404bdc932b6a0af144ae5e9f31fe6da35eac694185fdf07aefb9da7f4c652645bd7f0c7253e85a014f49d31860c00b7dcc901e44c39f3050b2e3f3b8013c0af887778813da9b97b80a0ce3ae4b74569ec95d0d116928f28245839a0c0629d2ec86081ee4896f9a2785880",
"0xf8518080a08bafc792d182fe0cac5c7dfb236bbc88dfd0ecf5505b681d1c256d75aa6858fa808080a03315f891bf9433a5415e982ba0f5b3d4497a2a44cb9a958d0830fe301fecae4d80808080808080808080",
"0xf843a0205a7bb8d6351c1cf70c95a316cc6a92839c986682d98bc35f958f4883f9d2a8a1a0639f404f0000000000031d02a5d2b33515ec000000000000072629ee1252f3a0",
],
},
{
block: 16212808,
account: "0x55e617b7456abc2545cbb547e0102279a6c430c3",
storage: "0xa6bcd7cfb5e938d75f4330a273812b53f809408efd4332627beb0285fa4a8732",
expectedRoot: "0xcb31a10f1562c0c36bd4ceadaa95dd6234fdc02e8cb9357339e507e6b24584bf",
expectedValue: "0x0000000000000000000000000000000000000000000000000000000000000001",
accountProof: [
"0xf90211a000cc1eab958a3f1de15398fa2b27750166c045e8eceb809b9d4501b4e02b7ddfa04da48723172332e782e1bf0e4fd8b1e5f98401d48596abd9a9705496167d7027a0dac4dba16da7ed6cfcca82063ccfc4f47b6fc53451538f7d0d1c7938f038940ea046d8e39429c81dd993f99d3b11ff4cda11bb8c8696b9d93472e71356b8041832a00b940d003e8cf026d1ada0202aa2abcae53098c85486eb7a534af6b3004f8443a090045c5a2ffd3bd39f80bdfa1d6e5fcf75105c81484ddfbc5dd4b4d73f6b66a6a0e4832a0fde78437c8fbedb522d2910e707bd1cf80f2e10b9c6040da0b05105d6a0a0e93dc2989f5868d5cbdd62ec70157cfc4b2064daff79586208e60d68221aa4a0fc7fb69a47e3d6b002c776b83f17fe0c9afbd922544e49bc69118d4912f814d4a0521e58e73a9dbcef30b0c02af1283b7907abadad2531687321fe8ab200027879a04d23897e8ec61a693cab8d2252fa69ac45731de79e47e40711c363b1e7062fcea0290185b97c0ee9f00882f0c50b493205d1a22128338dbb6bc7aa6770c7badb8fa090b9f84dc7ffe1e953274bd3076e5ac2c6faa2ffe8335a394eafad186f9491a3a07e8faa0688cfc77a7cf1011832d69385499ece50ca53ed08fefcd4aa07ce4409a0dfc42b6dac4d479a49a3d2d18ccbc5bd9f98f055294ad62cab84cbc6267f85d8a0b2f0f894b7cfd6834f2c9b58cd5941c8477961169ff6579dbf026a28a871ae6080",
"0xf90211a0cc88a0d04fc4367ca945630ce6266a93178092ce38ebd0d3976125c80d9e638ca0110ac16b660ba8dfc12258f9ade49be6245e7d229c4b40ad947c2bfe584a5ddca0b7884a63fdd62c909b9325f29ba55203c8368eb48c420e950354387594c4667ea01079982e5d8a202ed036f8bdd6ddc48b2eaa7c3e8c6efe8ac3369b997ddf2179a0210f2400b4faac315689001a789a6db9313cd835cdcd31df96eb65c433b115e6a089bb0553166bcb16346053be885e361d2d67cab44f7b959aa9cd826dd45ac61aa03b87244622f87055fd869434afd48c8b3ad5a9607b747f6c0d2a5f294e362ff8a06fc34c186c23de6726ff12f731925d0c022b1e28099c6ef1fa3f19423e974396a074eb379a4d71fa80108c16d71c9c9987bfe618f2f9c46dc42db411aa42984b78a01c679db1c89150fdf004859b6a69bed2045f4b8731b221e4a07eecf35539eb5aa0e5d137fd960b8cbb20606bf9e7e9fafbde54c409d1c4613dd80a17de5b47bc5ca037f5902b0eefe2f4d3a498af280683cbb6e24a257548f49c8a5541269da4960aa03ea41d2be7cbacac4f8ca44bf03d0bcd589a01c10a0bffafb8fb1da5a70e3068a0bb980af567ae4b71f44121bc9041c5e32d971511629ab7a40609a36f45981ccea0ff394894d8deeda5f8548486bb5feb71b870a8a431c0841886a38de9914bf188a05afa6d271d39956efa0b6a37f849587fc9ffdd0b21b0f03a7475075f3db941be80",
"0xf90211a0074eb00b4c1e2c7113935bee95ed4348415dc85b368967d4d5c4d76196af424ca0813772f3b53979306c3b6099563da22debe31524b46c489b304f5dd00e38790aa06c3522ff7176e7802c0565d7c0861d3b3d84bd4cdc335af7515cff8d08ca7fb7a0c768ae9e22fa57009ecc980b0550fdc1c0b1b4347a505cc4f2305681480cc792a06163dac89c2f3a035f43a559f71c94c2ef275974bb653c51db896c3326c460e1a06c56dbb2e85d467a81935889edc1e1fbda078896de4e9c4ffd33ff780137ce24a0e0da97eccfc2ad11d6489af5e6c646bbe8fae20c85fd1614e84589ac69b7b110a03eb954e27f07ead9cc4013f091b2b3fca8163d3ff052c6a4741d7b652161e4b8a0d1318e44803ed8fb732a840a9cd71eb2d0dadf601f8b8b77251b6de06776513da0a14970d414825f655862751df3bbd7fcbe9903adb663690cee115b4fe880a7c2a02cbead1eab47e6575d0d9d488311b4f16199fb8acabc3bac662c698d471649bba06acca947c81f7bf0b05c8218e615ed3642d1e812c3c696b76d19d9e95207ff89a0486a337786d2e2c7b3e963a9efb9216a79ee5cf61675b9aacc9cf3f35c403559a0e3743c73438b616f23323e8722d90afed16956be9d9763d35968619ef644d893a0437723f6d8ed5906cfc7f1254f80504c15f394db51148357f4c7ef0ad01833cfa032e215e323cfca2dbdbcf7056012ddadaaf9f8b9a4269f451ec28a19018fc76d80",
"0xf90211a079d20a2fe4ae7cb0a24db95f0cbb8a32bee53ee9910a9fe8959cea9d6d584993a01a486e762c0b7f5e99f02596a5250acb7d6d54d2626b7ba6c23d97931cbe3296a07b56e82ab24b02849378030bdca3ae3bfa19af23363c731537da3b47dc64c299a0a3c947fac18c8907db5321a2cf28e6fa0205d074db959c12e25fd0c9c64f64a1a09f658920397018751aa15e1feec3ec81dfd19988e590b663c6efefde407e2a3da0e5dcbae4c0fc4c9a050afb5ab81b71a76cd201d9ad724894518aecb7ae557079a02eedf89c6286ad0d5a8f24ead4c300646452191765d86b7217b7c503c9d93bdfa006d8c0bbf530c40ed8bcd89dcdb4cba927d3363c4eeb3fd7452c82f986506630a0fe7850edfeabfb584ab06d0a1599d64ae60b143cfb3bd8bd1cecd8e918b0c7f2a0d09c9c7f8b8280ec01c8fabdc75a5abfb9600e5961b3f1a2e62313e81bac9d26a03c5944032432601c395a4a4c31c3feed3606881df80407e7c6e45a82455f85b2a02dea3f595f1b07648456f39b39a6b337d5903373c5c194da04ecdbde82b40b41a08cff9a982b8e0fff7d588cf0fa0116f039db921c13fae79a5f7e8333ad4d3a18a043dac2f48ff878530faf63341dbe6baef3faddfd049f7db4c183079c77916b17a05e15585f071142178813285614aa8abd4c65a95f516df528ae2d3c6ee08521d6a0b0dfa07c70efbcc57f2b6cb7f77a7a20fc9537c0d984a676ec9926e55f71c77880",
"0xf90211a0fb721ad628030689ef65168ed001f566cd190e9c4d0219c02afdaeb004d4e214a051531d55c79e21f006c4741486cce4d8a6e613d761b3bde9ae2c8040a76b307ea02213a6ad7395c88dbdf89f0b29acf67c6aa1dc0e374cc6a7d9b6ec3d9fb5f373a0686884cfda50455fdff45f4170feee963de6d591770c269f4241e34563d70f37a0bb97314a2c0642f5abc066b8d53634887234dc37d3f9538124fcd27225b75733a01a296ff7ac3bd812e706959caa4748f04cc0729fdfb14388e244347b4a3cb685a0888eadb1b0d48cf03e73398f2aaef32bb4ac265f10f76c504db269250e88fd55a0b9a2aca21bcd60c94c2a80fdade417a56fabd041b6c0379159f02ff2fec8ab87a0db49371306dcf9e9d4ae471429c2bd4affe09cfc065792ba6410595f8beea2c7a0bba4db70589218f90ea48d1f870db783f8aae6cda9dbf72235e1284b97313dc7a023658240a8e60035480607bd2e2c780d0a305909fb06e9c6befe7701e78b596fa0d6f93b95c1fe2238e73c72e96cece7d21189e5c8e839bf0d42bf226a105c08ada08ccf4640a918ae9ea713e739e62f7b5c5bee3864b57c32e29065593cc8457171a0ad5dd98fe06ccf15db195de3aa8d071be23965c64839e10d1e9721cd64f26382a0e5a987a544eac0a3abc6ef9ddd12d8450b3e11f36d6239cd799595535ea0431ea054db8f7995108a2c8bf8976723682ce513241f75da97518991c6b77c050d398280",
"0xf90211a00dffb8e3f1d162560bd7fa2851c9475cd30bf41c78017372b8f5d40360831308a03348160896064725af3bb64dcf86a8fab726fdf442007fd3825c689553a314f8a0794309daeb0692594a7bdd16e884ebd75d598db16cd1005a9176fce869b3580aa01150f6cb195cd24622f9440d2ea824b33601d7d7983db0d925d39daa1695c950a04b61b2cd4bf2ec97550c27656b784df083bac7653352920b3bb0404bb09f1971a040e09010e7c233361e66eefeba4c03cd551580bf394ed6fb6cb8f921184a9f95a05ba6ce8c14236c7b63aedcf3f85603846062c43d165c207cb5418cb7f06dfafca0fb016acbbd9d14aa6bdb9b01e802ffe495032e97d124c9a0b65d10d4e715f39ca039c0a367372d7a14a34ecdc997bf67bc72755b4c7270effee6d90824aa15b087a0c7ddc7fc8c0341c56fc7e6228469b25f7af926b3fd099530c6097a02c869c41fa0ad11f0c6cad46f32f1ce178820941063777e2871fa38d5adfbd91672479213afa03b11a8fbd61e8e0f3f18a7c3573ec7a792edd83d3a6b8efac10ebb8b157ac17ea0ab266aa84b02ef61bc71faf1d261e2bb90cc21d6cbdf951b122f596f5eb3d90ba069b80832c46cf88b0ca1e64054d87f2e33f27df1f322c70af89f4ec909313bcea075068d344e56b3eada6312fb613925c3c9de369b328ee666121dae6c052103c4a0c3b65e68859146733573b921e8fb036ca7c9434b0f52412ac0cd169950f95a5580",
"0xf8d1a0db401bdef3bd74dec5338135194e69ab43e15aa891e5de20ef3e57cde5366ab5808080a0621c1fbbb026eddb70a4c645e152dcf9b3f1b40b9a1bdc4398a22bee4a46aca380a05528de70186525019cdf0880afb77b33ae4871fbfaad3e8bccd7dcb6402d746580808080a020fa8ae1091998f03c979f94e94ff6c011427da2834f1dffaec815fd3c5fa6e080a0611ff1f45d926197480694e690227d603e84e7c44b520473b9786cd4fafaf613a012dfcd444d4948c86a3dbad8f4f1dad09c313a63e6f8bf0ecb7bd799908aa3248080",
"0xf8518080808080a0cf86ad50e7ed35be6080c4cd74d835e58867b2e2ec03198baf29962de46a8cbe808080808080a03a5ec92acf98ebef8eeb621707a501ed0fd95186282ab1dcf8e7286a9142b90480808080",
"0xf86e9d2019df8705960e4a0a7ac52ab662c57cddd5f60a7f75f0c117ae2e073fb84ef84c0188067eab853ae20000a0724a8bd0aaa1c991a445a1e974deecd8cfe4ba2040de2578e98238b9f963ba8aa01717795a0fbfac056a8306e5cb0ac160c3ad752357e0360a408e59acd35ebb1c",
],
storageProof: [
"0xf90211a06a128b938cf5a3be9f5c7a8944945258db0b7a939cab65a4bda8fc4a8a2bf16aa0c61e8e76eede0e8a446743dde629574cd69dfe612aa0d30c6c8cafdb7f445214a084c1c16c0f4fae18501251afbc28ef21caf9b2f1b5a8f2f0b6b87d076f44f7bfa04ace3470f520e28ebdfa4e98a5ef51af05f647a3d1585f0d98f3393098839f17a0d0628e1db39bef70e79ceb5860a14b34b78eca696ec7910f3bfc91631a0abd50a0b718050b33452d627f87f02ba8b05f976e7eeb2c81cdd445770eeeefba236fa9a0f0a8fb4ce1456839b267d76b94838113ea18600fefa3617733888b1ce7da7ef7a034dd7e5a07aff6c7d141c66b4aab81f3f31363a92d48a9bc1fe072b94d69bf63a049c1f246035c714f4d6e8d81e7a20aef93140d067012314b37489a66f4e19db4a070279280c8be3e03684124acd488d9611ea5dbf62512280eb352980ec8334436a04e7d88090f29162b58e6fdd44446f90c5bc1c39c377d7c757c6010e9a63c738ca05eaab99620fe77019cac5ea6854f3efda933ea60f1326ecd03a32494850556a4a06b4116e3177b3012c5e06ab564d1a0611140ee2b81d50c8fd8c5aef333296965a00874454cb37dd61c28f8bb7da5d905f5dacf0b813914099244b65f536561e22aa073f2018c86cfe905a5bb8f69b43395c949714183a829990e0e33630431af8f86a03f36076859c730c0f5851ea263b5650dab7235f3c8ce258f74a3ae3b7d38add780",
"0xf90211a036c69f765a83b393b27f21eeb941b8a2965ec7d436b3965a5bc40953a32884c9a04fd94c2ff0df3a8453b14a36a55ee9a15096180c12feaddb7c904016e0250491a00b3973a34de7950e6eed8e413dacd2414ef22a90ff9fd322501301e159a2c081a0f6926e67b5dbe04b3991297ca0bd8f1fe63b1f193e16621c901ac81ad9c25a85a023776d17051b8899483fa00c050cc50eae159dbbd7b59a35290b6d6b272e07c9a00900c56dcf2ae9bc0d19ec918cbd7fe63e7afb8aa2d962d1d5cf5886c763a7dfa01e52f9000865a4df376396fac674f061a61603647923a3a577387c54e1b32826a02373c893c5feb4c772f345f6609f9f9a6032c068f2453aad191626ce6a2d625ca09fdcde9e12f55bd9b3bfb323f1c9a7488f573e0b01d829d6f0ee716e92f0f248a0353975fd758f23275ce22c485c939c781ba31aa8c6026688931ac61d0f0d8013a04f0352b630e3ae315c64d02f85c4cfc255524b445046426c3e67f6608a9689f7a0d3b728caecb48e019db5f0144aa081ce5954c8acadeadd3df36d25b6e24a7e0fa0758800b10d88e8b477fc17a2094b5a41aa69c37740305e1956ed558dd5dcd86ba09f51f4aeb641e8c068dc1370a71942792b4d30a572ad0c09eeff206f7dbe3355a0c5d5fa6fa22f56ac27c0f6538f94615e0e7bb49243d888ec6b0c86f61dc6922ca03b7be4e1038893b7cfeab5a172995ac07e1e90142cc566ddc2b613e3b2a08c3880",
"0xf901d1a086ece613a3028576c5e26a4ff50a9c3311c3bb3ca3751b8e52d2667b18917f4f80a0c54874707f838e0a2abf666fd3c50f900c9c0c38e9a69b37551b1711acef7eb1a0b7e0dc7b68d45f0f52e17301906d038323861fdb60583c6f505f76d304c73ea5a061a5e1481d528ed55ed1dafcbbbecc99276220ccce4ce56f50a05853638a3c4da08db8ab11699112f1f4cebece052af297997fcd361f5ceb88db4a7168e0366cdda0b1c641b80c0e5642b33866b899afc25070277d24665b6d73bc542543592c6eb2a069ae6c87a4a8692ea804b51379521e3856a6a980a1f6143f19ffaaa397c1699fa005e0523d440c3fb4654841a3d8ccce6e5eec4cd5f145668c0e95e847c9c4c39fa06c86477d3592a33fea0e7e425cef6b79610cd32b3bd17fa1318a5e20c9feb02fa0fae05cf440cf5cbb96e83bdd1828f5a582aec03edbb87e5035fd08660f09691980a0d91b6dc415a8c148823a7f865963d2b527c6a93bf882cd29da46f9a9594b4c41a03537d0ab40aa8b56059d99680365cc017a26fdf155fd6f2a7788311723b80738a0e38ba0e1b4f98b4b9f1925ca952a6a9076eda1bad2e36dbc80bc5135372a3feca086f2109580fb4d1a26bb9101b88c407eb13fade29243d68b83764716dd450e3980",
"0xe19f36c2516eb411c7c89f75dcf98d8ff95555585215a5f6242b4f24adbcb7424901",
],
},
];
describe("PatriciaMerkleTrieVerifier", async () => {
let verifier: MockPatriciaMerkleTrieVerifier;
beforeEach(async () => {
const [deployer] = await ethers.getSigners();
const MockPatriciaMerkleTrieVerifier = await ethers.getContractFactory("MockPatriciaMerkleTrieVerifier", deployer);
verifier = await MockPatriciaMerkleTrieVerifier.deploy();
await verifier.deployed();
});
for (const test of testcases) {
it(`should succeed for block[${test.block}] account[${test.account}] storage[${test.storage}]`, async () => {
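// build the proof blob as [account-node count (1 byte)] ++ account nodes ++ [storage-node count (1 byte)] ++ storage nodes;
// the "0x0" + single-hex-digit prefix assumes fewer than 16 nodes per proof, which holds for these fixtures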
const proof = concat([
`0x0${test.accountProof.length.toString(16)}`,
...test.accountProof,
`0x0${test.storageProof.length.toString(16)}`,
...test.storageProof,
]);
const [root, value, gasUsed] = await verifier.verifyPatriciaProof(test.account, test.storage, proof);
expect(test.expectedRoot).to.eq(root);
expect(test.expectedValue).to.eq(value);
console.log("gas usage:", gasUsed.toString());
});
}
// @todo add tests with invalid inputs
});

View File

@@ -0,0 +1,108 @@
/* eslint-disable node/no-unpublished-import */
/* eslint-disable node/no-missing-import */
import { constants } from "ethers";
import { concat } from "ethers/lib/utils";
import { ethers } from "hardhat";
import { ScrollChain, L1MessageQueue } from "../typechain";
describe("ScrollChain", async () => {
let queue: L1MessageQueue;
let chain: ScrollChain;
beforeEach(async () => {
const [deployer] = await ethers.getSigners();
const L1MessageQueue = await ethers.getContractFactory("L1MessageQueue", deployer);
queue = await L1MessageQueue.deploy();
await queue.deployed();
const RollupVerifier = await ethers.getContractFactory("RollupVerifier", deployer);
const verifier = await RollupVerifier.deploy();
await verifier.deployed();
const ScrollChain = await ethers.getContractFactory("ScrollChain", {
signer: deployer,
libraries: { RollupVerifier: verifier.address },
});
chain = await ScrollChain.deploy(0, 25, "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6");
await chain.deployed();
await chain.initialize(queue.address);
await chain.updateSequencer(deployer.address, true);
await queue.initialize(constants.AddressZero, constants.AddressZero);
});
it("should succeed", async () => {
await chain.importGenesisBatch({
blocks: [
{
blockHash: "0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
parentHash: constants.HashZero,
blockNumber: 0,
timestamp: 1639724192,
baseFee: 1000000000,
gasLimit: 940000000,
numTransactions: 0,
numL1Messages: 0,
},
],
prevStateRoot: constants.HashZero,
newStateRoot: "0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5",
withdrawTrieRoot: constants.HashZero,
batchIndex: 0,
parentBatchHash: constants.HashZero,
l2Transactions: [],
});
const parentBatchHash = await chain.lastFinalizedBatchHash();
console.log("genesis batch hash:", parentBatchHash);
for (let numTx = 1; numTx <= 25; ++numTx) {
for (let txLength = 100; txLength <= 1000; txLength += 100) {
const txs: Array<Uint8Array> = [];
for (let i = 0; i < numTx; i++) {
const tx = new Uint8Array(4 + txLength);
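// each mock transaction is laid out as a 4-byte big-endian payload length followed by txLength bytes of 0x01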
let offset = 3;
for (let x = txLength; x > 0; x = Math.floor(x / 256)) {
tx[offset] = x % 256;
offset -= 1;
}
tx.fill(1, 4);
txs.push(tx);
}
const batch = {
blocks: [
{
blockHash: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
parentHash: "0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
blockNumber: 1,
timestamp: numTx * 100000 + txLength,
baseFee: 0,
gasLimit: 0,
numTransactions: 0,
numL1Messages: 0,
},
],
prevStateRoot: "0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5",
newStateRoot: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
withdrawTrieRoot: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
batchIndex: 1,
parentBatchHash: parentBatchHash,
l2Transactions: concat(txs),
};
const estimateGas = await chain.estimateGas.commitBatch(batch);
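// pad the gas estimate by 20% (x12/10) to leave headroom for the actual commit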
const tx = await chain.commitBatch(batch, { gasLimit: estimateGas.mul(12).div(10) });
const receipt = await tx.wait();
console.log(
"Commit batch with l2TransactionsBytes:",
numTx * (txLength + 4),
"gasLimit:",
tx.gasLimit.toString(),
"estimateGas:",
estimateGas.toString(),
"gasUsed:",
receipt.gasUsed.toString()
);
}
}
});
});

View File

@@ -11,13 +11,15 @@ export owner=0x0000000000000000000000000000000000000000 # change to actual owner
# deploy contracts in layer 1
npx hardhat --network $layer1 run scripts/deploy_proxy_admin.ts
npx hardhat --network $layer1 run scripts/deploy_zkrollup.ts
npx hardhat --network $layer1 run scripts/deploy_scroll_chain.ts
env CONTRACT_NAME=L1ScrollMessenger npx hardhat run --network $layer1 scripts/deploy_proxy_contract.ts
env CONTRACT_NAME=L1GatewayRouter npx hardhat run --network $layer1 scripts/deploy_proxy_contract.ts
env CONTRACT_NAME=L1StandardERC20Gateway npx hardhat run --network $layer1 scripts/deploy_proxy_contract.ts
env CONTRACT_NAME=L1CustomERC20Gateway npx hardhat run --network $layer1 scripts/deploy_proxy_contract.ts
env CONTRACT_NAME=L1ERC721Gateway npx hardhat run --network $layer1 scripts/deploy_proxy_contract.ts
env CONTRACT_NAME=L1ERC1155Gateway npx hardhat run --network $layer1 scripts/deploy_proxy_contract.ts
env CONTRACT_NAME=L1ETHGateway npx hardhat run --network $layer1 scripts/deploy_proxy_contract.ts
env CONTRACT_NAME=L1WETHGateway npx hardhat run --network $layer1 scripts/deploy_proxy_contract.ts
# deploy contracts in layer 2, note: l2_messenger is predeployed
npx hardhat --network $layer2 run scripts/deploy_proxy_admin.ts
@@ -28,11 +30,13 @@ env CONTRACT_NAME=L2StandardERC20Gateway npx hardhat run --network $layer2 scrip
env CONTRACT_NAME=L2CustomERC20Gateway npx hardhat run --network $layer2 scripts/deploy_proxy_contract.ts
env CONTRACT_NAME=L2ERC721Gateway npx hardhat run --network $layer2 scripts/deploy_proxy_contract.ts
env CONTRACT_NAME=L2ERC1155Gateway npx hardhat run --network $layer2 scripts/deploy_proxy_contract.ts
env CONTRACT_NAME=L2ETHGateway npx hardhat run --network $layer2 scripts/deploy_proxy_contract.ts
env CONTRACT_NAME=L2WETHGateway npx hardhat run --network $layer2 scripts/deploy_proxy_contract.ts
# initialize contracts in layer 1; set the required bash env variables first
npx hardhat --network $layer1 run scripts/initialize_l1_erc20_gateway.ts
npx hardhat --network $layer1 run scripts/initialize_l1_gateway_router.ts
npx hardhat --network $layer1 run scripts/initialize_zkrollup.ts
npx hardhat --network $layer1 run scripts/initialize_scroll_chain.ts
npx hardhat --network $layer1 run scripts/initialize_l1_messenger.ts
npx hardhat --network $layer1 run scripts/initialize_l1_custom_erc20_gateway.ts
npx hardhat --network $layer1 run scripts/initialize_l1_erc1155_gateway.ts

View File

@@ -1,48 +0,0 @@
/* eslint-disable node/no-missing-import */
import * as hre from "hardhat";
import { ethers } from "hardhat";
import { selectAddressFile } from "./utils";
async function main() {
const addressFile = selectAddressFile(hre.network.name);
const [deployer] = await ethers.getSigners();
const ProxyAdmin = await ethers.getContractAt("ProxyAdmin", addressFile.get("ProxyAdmin"), deployer);
if (!addressFile.get("L1StandardERC20Gateway.implementation")) {
console.log(">> Deploy L1StandardERC20Gateway implementation");
const L1StandardERC20Gateway = await ethers.getContractFactory("L1StandardERC20Gateway", deployer);
const impl = await L1StandardERC20Gateway.deploy();
console.log(`>> waiting for transaction: ${impl.deployTransaction.hash}`);
await impl.deployed();
console.log(`✅ L1StandardERC20Gateway implementation deployed at ${impl.address}`);
addressFile.set("L1StandardERC20Gateway.implementation", impl.address);
}
const impl = addressFile.get("L1StandardERC20Gateway.implementation") as string;
if (!addressFile.get("L1StandardERC20Gateway.proxy")) {
console.log(">> Deploy L1StandardERC20Gateway proxy");
const TransparentUpgradeableProxy = await ethers.getContractFactory("TransparentUpgradeableProxy", deployer);
const proxy = await TransparentUpgradeableProxy.deploy(impl, ProxyAdmin.address, "0x");
console.log(`>> waiting for transaction: ${proxy.deployTransaction.hash}`);
await proxy.deployed();
console.log(`✅ L1StandardERC20Gateway proxy deployed at ${proxy.address}`);
addressFile.set("L1StandardERC20Gateway.proxy", proxy.address);
}
// Export contract address to testnet.
console.log(
`testnet-export: ${addressFile.get("L1StandardERC20Gateway.implementation")};${addressFile.get(
"L1StandardERC20Gateway.proxy"
)}`
);
}
// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main().catch((error) => {
console.error(error);
process.exitCode = 1;
});

View File

@@ -1,46 +0,0 @@
/* eslint-disable node/no-missing-import */
import * as hre from "hardhat";
import { ethers } from "hardhat";
import { selectAddressFile } from "./utils";

async function main() {
  const addressFile = selectAddressFile(hre.network.name);
  const [deployer] = await ethers.getSigners();
  const ProxyAdmin = await ethers.getContractAt("ProxyAdmin", addressFile.get("ProxyAdmin"), deployer);
  if (!addressFile.get("L1GatewayRouter.implementation")) {
    console.log(">> Deploy L1GatewayRouter implementation");
    const L1GatewayRouter = await ethers.getContractFactory("L1GatewayRouter", deployer);
    const impl = await L1GatewayRouter.deploy();
    console.log(`>> waiting for transaction: ${impl.deployTransaction.hash}`);
    await impl.deployed();
    console.log(`✅ L1GatewayRouter implementation deployed at ${impl.address}`);
    addressFile.set("L1GatewayRouter.implementation", impl.address);
  }
  const impl = addressFile.get("L1GatewayRouter.implementation") as string;
  if (!addressFile.get("L1GatewayRouter.proxy")) {
    console.log(">> Deploy L1GatewayRouter proxy");
    const TransparentUpgradeableProxy = await ethers.getContractFactory("TransparentUpgradeableProxy", deployer);
    const proxy = await TransparentUpgradeableProxy.deploy(impl, ProxyAdmin.address, "0x");
    console.log(`>> waiting for transaction: ${proxy.deployTransaction.hash}`);
    await proxy.deployed();
    console.log(`✅ L1GatewayRouter proxy deployed at ${proxy.address}`);
    addressFile.set("L1GatewayRouter.proxy", proxy.address);
  }
  // Export contract address to testnet.
  console.log(
    `testnet-export: ${addressFile.get("L1GatewayRouter.implementation")};${addressFile.get("L1GatewayRouter.proxy")}`
  );
}

// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});


@@ -1,48 +0,0 @@
/* eslint-disable node/no-missing-import */
import * as hre from "hardhat";
import { ethers } from "hardhat";
import { selectAddressFile } from "./utils";

async function main() {
  const addressFile = selectAddressFile(hre.network.name);
  const [deployer] = await ethers.getSigners();
  const ProxyAdmin = await ethers.getContractAt("ProxyAdmin", addressFile.get("ProxyAdmin"), deployer);
  if (!addressFile.get("L1ScrollMessenger.implementation")) {
    console.log(">> Deploy L1ScrollMessenger implementation");
    const L1ScrollMessenger = await ethers.getContractFactory("L1ScrollMessenger", deployer);
    const impl = await L1ScrollMessenger.deploy();
    console.log(`>> waiting for transaction: ${impl.deployTransaction.hash}`);
    await impl.deployed();
    console.log(`✅ L1ScrollMessenger implementation deployed at ${impl.address}`);
    addressFile.set("L1ScrollMessenger.implementation", impl.address);
  }
  const impl = addressFile.get("L1ScrollMessenger.implementation") as string;
  if (!addressFile.get("L1ScrollMessenger.proxy")) {
    console.log(">> Deploy L1ScrollMessenger proxy");
    const TransparentUpgradeableProxy = await ethers.getContractFactory("TransparentUpgradeableProxy", deployer);
    const proxy = await TransparentUpgradeableProxy.deploy(impl, ProxyAdmin.address, "0x");
    console.log(`>> waiting for transaction: ${proxy.deployTransaction.hash}`);
    await proxy.deployed();
    console.log(`✅ L1ScrollMessenger proxy deployed at ${proxy.address}`);
    addressFile.set("L1ScrollMessenger.proxy", proxy.address);
  }
  // Export contract address to testnet.
  console.log(
    `testnet-export: ${addressFile.get("L1ScrollMessenger.implementation")};${addressFile.get(
      "L1ScrollMessenger.proxy"
    )}`
  );
}

// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});


@@ -1,48 +0,0 @@
/* eslint-disable node/no-missing-import */
import * as hre from "hardhat";
import { ethers } from "hardhat";
import { selectAddressFile } from "./utils";

async function main() {
  const addressFile = selectAddressFile(hre.network.name);
  const [deployer] = await ethers.getSigners();
  const ProxyAdmin = await ethers.getContractAt("ProxyAdmin", addressFile.get("ProxyAdmin"), deployer);
  if (!addressFile.get("L2StandardERC20Gateway.implementation")) {
    console.log(">> Deploy L2StandardERC20Gateway implementation");
    const L2StandardERC20Gateway = await ethers.getContractFactory("L2StandardERC20Gateway", deployer);
    const impl = await L2StandardERC20Gateway.deploy();
    console.log(`>> waiting for transaction: ${impl.deployTransaction.hash}`);
    await impl.deployed();
    console.log(`✅ L2StandardERC20Gateway implementation deployed at ${impl.address}`);
    addressFile.set("L2StandardERC20Gateway.implementation", impl.address);
  }
  const impl = addressFile.get("L2StandardERC20Gateway.implementation") as string;
  if (!addressFile.get("L2StandardERC20Gateway.proxy")) {
    console.log(">> Deploy L2StandardERC20Gateway proxy");
    const TransparentUpgradeableProxy = await ethers.getContractFactory("TransparentUpgradeableProxy", deployer);
    const proxy = await TransparentUpgradeableProxy.deploy(impl, ProxyAdmin.address, "0x");
    console.log(`>> waiting for transaction: ${proxy.deployTransaction.hash}`);
    await proxy.deployed();
    console.log(`✅ L2StandardERC20Gateway proxy deployed at ${proxy.address}`);
    addressFile.set("L2StandardERC20Gateway.proxy", proxy.address);
  }
  // Export contract address to testnet.
  console.log(
    `testnet-export: ${addressFile.get("L2StandardERC20Gateway.implementation")};${addressFile.get(
      "L2StandardERC20Gateway.proxy"
    )}`
  );
}

// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});


@@ -1,46 +0,0 @@
/* eslint-disable node/no-missing-import */
import * as hre from "hardhat";
import { ethers } from "hardhat";
import { selectAddressFile } from "./utils";

async function main() {
  const addressFile = selectAddressFile(hre.network.name);
  const [deployer] = await ethers.getSigners();
  const ProxyAdmin = await ethers.getContractAt("ProxyAdmin", addressFile.get("ProxyAdmin"), deployer);
  if (!addressFile.get("L2GatewayRouter.implementation")) {
    console.log(">> Deploy L2GatewayRouter implementation");
    const L2GatewayRouter = await ethers.getContractFactory("L2GatewayRouter", deployer);
    const impl = await L2GatewayRouter.deploy();
    console.log(`>> waiting for transaction: ${impl.deployTransaction.hash}`);
    await impl.deployed();
    console.log(`✅ L2GatewayRouter implementation deployed at ${impl.address}`);
    addressFile.set("L2GatewayRouter.implementation", impl.address);
  }
  const impl = addressFile.get("L2GatewayRouter.implementation") as string;
  if (!addressFile.get("L2GatewayRouter.proxy")) {
    console.log(">> Deploy L2GatewayRouter proxy");
    const TransparentUpgradeableProxy = await ethers.getContractFactory("TransparentUpgradeableProxy", deployer);
    const proxy = await TransparentUpgradeableProxy.deploy(impl, ProxyAdmin.address, "0x");
    console.log(`>> waiting for transaction: ${proxy.deployTransaction.hash}`);
    await proxy.deployed();
    console.log(`✅ L2GatewayRouter proxy deployed at ${proxy.address}`);
    addressFile.set("L2GatewayRouter.proxy", proxy.address);
  }
  // Export contract address to testnet.
  console.log(
    `testnet-export: ${addressFile.get("L2GatewayRouter.implementation")};${addressFile.get("L2GatewayRouter.proxy")}`
  );
}

// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});
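Each deleted script above follows the same two-step pattern: deploy the implementation once, then front it with a TransparentUpgradeableProxy initialized with empty calldata ("0x"), recording both addresses under the keys "<Name>.implementation" and "<Name>.proxy". One way to sanity-check the result after running such a script is to read the implementation back through the ProxyAdmin. The snippet below is a sketch under the assumptions that the same address-file keys are used and that ProxyAdmin is OpenZeppelin's standard contract (which exposes getProxyImplementation); it is not part of the diff.

/* Hypothetical post-deployment check; not part of the repository. */
import * as hre from "hardhat";
import { ethers } from "hardhat";
import { selectAddressFile } from "./utils";

async function checkProxy(name: string) {
  const addressFile = selectAddressFile(hre.network.name);
  const proxyAdmin = await ethers.getContractAt("ProxyAdmin", addressFile.get("ProxyAdmin") as string);
  const proxy = addressFile.get(`${name}.proxy`) as string;
  const expected = addressFile.get(`${name}.implementation`) as string;
  // OpenZeppelin's ProxyAdmin exposes getProxyImplementation(proxy) as a view call.
  const actual = await proxyAdmin.getProxyImplementation(proxy);
  console.log(`${name}: proxy ${proxy} -> implementation ${actual} (expected ${expected})`);
}

checkProxy("L2GatewayRouter").catch((error) => {
  console.error(error);
  process.exitCode = 1;
});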


@@ -8,19 +8,38 @@ async function main() {
const [deployer] = await ethers.getSigners();
const owner = process.env.CONTRACT_OWNER || deployer.address;
if (!addressFile.get("L2ScrollMessenger")) {
const ProxyAdmin = await ethers.getContractAt("ProxyAdmin", addressFile.get("ProxyAdmin"), deployer);
const container = process.env.L1_BLOCK_CONTAINER_ADDR!;
const queue = process.env.L2_MESSAGE_QUEUE_ADDR!;
if (!addressFile.get("L2ScrollMessenger.implementation")) {
console.log(">> Deploy L2ScrollMessenger implementation");
const L2ScrollMessenger = await ethers.getContractFactory("L2ScrollMessenger", deployer);
const impl = await L2ScrollMessenger.deploy(owner);
const impl = await L2ScrollMessenger.deploy(container, queue);
console.log(`>> waiting for transaction: ${impl.deployTransaction.hash}`);
await impl.deployed();
console.log(`✅ L2ScrollMessenger implementation deployed at ${impl.address}`);
addressFile.set("L2ScrollMessenger", impl.address);
addressFile.set("L2ScrollMessenger.implementation", impl.address);
}
const impl = addressFile.get("L2ScrollMessenger.implementation") as string;
if (!addressFile.get("L2ScrollMessenger.proxy")) {
console.log(">> Deploy L2ScrollMessenger proxy");
const TransparentUpgradeableProxy = await ethers.getContractFactory("TransparentUpgradeableProxy", deployer);
const proxy = await TransparentUpgradeableProxy.deploy(impl, ProxyAdmin.address, "0x");
console.log(`>> waiting for transaction: ${proxy.deployTransaction.hash}`);
await proxy.deployed();
console.log(`✅ L2ScrollMessenger proxy deployed at ${proxy.address}`);
addressFile.set(`L2ScrollMessenger.proxy`, proxy.address);
}
// Export contract address to testnet.
console.log(`testnet-export: ${addressFile.get("L2ScrollMessenger")}`);
console.log(
`testnet-export: ${addressFile.get("L2ScrollMessenger.implementation")};${addressFile.get(
"L2ScrollMessenger.proxy"
)}`
);
}
// We recommend this pattern to be able to use async/await everywhere
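The final hunk appears to bring the L2ScrollMessenger script in line with the same pattern: instead of deploying the messenger directly with an owner argument, it deploys an implementation whose constructor takes the L1 block container and L2 message queue addresses from the L1_BLOCK_CONTAINER_ADDR and L2_MESSAGE_QUEUE_ADDR environment variables, then wraps it in a TransparentUpgradeableProxy and records both "L2ScrollMessenger.implementation" and "L2ScrollMessenger.proxy". Because the diff repeats this implementation-plus-proxy recipe in every script, a hypothetical helper such as the one below captures the shared shape; the repository does not actually contain it and keeps one standalone script per contract instead.

/* Hypothetical refactoring sketch; the repository keeps one standalone script per contract. */
import { ethers } from "hardhat";

interface AddressFile {
  get(key: string): string | undefined;
  set(key: string, value: string): void;
}

async function deployProxied(addressFile: AddressFile, name: string, constructorArgs: unknown[] = []) {
  const [deployer] = await ethers.getSigners();
  const proxyAdminAddr = addressFile.get("ProxyAdmin") as string;

  if (!addressFile.get(`${name}.implementation`)) {
    const factory = await ethers.getContractFactory(name, deployer);
    const impl = await factory.deploy(...constructorArgs);
    await impl.deployed();
    addressFile.set(`${name}.implementation`, impl.address);
  }
  const impl = addressFile.get(`${name}.implementation`) as string;

  if (!addressFile.get(`${name}.proxy`)) {
    const Proxy = await ethers.getContractFactory("TransparentUpgradeableProxy", deployer);
    const proxy = await Proxy.deploy(impl, proxyAdminAddr, "0x"); // empty init data, as in the scripts above
    await proxy.deployed();
    addressFile.set(`${name}.proxy`, proxy.address);
  }
  console.log(`testnet-export: ${addressFile.get(`${name}.implementation`)};${addressFile.get(`${name}.proxy`)}`);
}

With the environment variables from the hunk, the messenger step would then read roughly deployProxied(addressFile, "L2ScrollMessenger", [process.env.L1_BLOCK_CONTAINER_ADDR, process.env.L2_MESSAGE_QUEUE_ADDR]).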

Some files were not shown because too many files have changed in this diff.