Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-13 16:08:04 -05:00)

Compare commits: feat/fix_p ... feat/enfor (14 commits)

| SHA1 |
|---|
| 86ad566dcb |
| 764cd19403 |
| 5b91abd2a7 |
| d45a2931f6 |
| 488d54c381 |
| 0430600d1e |
| 6b853f73eb |
| df6d8af499 |
| 1f17c50277 |
| aea2976969 |
| 7d664bb79f |
| 9230a9eb92 |
| 0af2fda0b8 |
| e1d7c94bfc |
File diff suppressed because one or more lines are too long
@@ -5,36 +5,55 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/utils"
|
||||
)
|
||||
|
||||
func TestPackRelayMessageWithProof(t *testing.T) {
|
||||
func TestEventSignature(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
l1MessengerABI, err := bridge_abi.L1MessengerMetaData.GetAbi()
|
||||
assert.NoError(err)
|
||||
assert.Equal(bridge_abi.L1SentMessageEventSignature, common.HexToHash("806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"))
|
||||
assert.Equal(bridge_abi.L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
|
||||
assert.Equal(bridge_abi.L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
|
||||
|
||||
assert.Equal(bridge_abi.L1CommitBatchEventSignature, common.HexToHash("a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"))
|
||||
assert.Equal(bridge_abi.L1FinalizeBatchEventSignature, common.HexToHash("e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"))
|
||||
|
||||
assert.Equal(bridge_abi.L1AppendMessageEventSignature, common.HexToHash("4e24f8e58edb75fdffd4bd6a38963c5bd49cdf3f7898748e48c58b2076cfe70f"))
|
||||
|
||||
assert.Equal(bridge_abi.L2SentMessageEventSignature, common.HexToHash("806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"))
|
||||
assert.Equal(bridge_abi.L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
|
||||
assert.Equal(bridge_abi.L2FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
|
||||
|
||||
assert.Equal(bridge_abi.L2ImportBlockEventSignature, common.HexToHash("fa1488a208a99e5ca060aff7763286188c6a5bdc43964fb76baf67b419450995"))
|
||||
|
||||
assert.Equal(bridge_abi.L2AppendMessageEventSignature, common.HexToHash("faa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693"))
|
||||
}
|
||||
|
||||
func TestPackRelayL2MessageWithProof(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
l1MessengerABI := bridge_abi.L1MessengerABI
|
||||
|
||||
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
|
||||
BlockHeight: big.NewInt(0),
|
||||
BatchIndex: big.NewInt(0),
|
||||
MerkleProof: make([]byte, 0),
|
||||
BlockHash: common.Hash{},
|
||||
MessageRootProof: make([]common.Hash, 10),
|
||||
}
|
||||
_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
|
||||
_, err := l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
|
||||
assert.NoError(err)
|
||||
}
|
||||
|
||||
func TestPackCommitBatch(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
|
||||
assert.NoError(err)
|
||||
l1RollupABI := bridge_abi.RollupABI
|
||||
|
||||
txns := make([]bridge_abi.IZKRollupLayer2Transaction, 5)
|
||||
for i := 0; i < 5; i++ {
|
||||
txns[i] = bridge_abi.IZKRollupLayer2Transaction{
|
||||
Caller: common.Address{},
|
||||
Target: common.Address{},
|
||||
Nonce: 0,
|
||||
Gas: 0,
|
||||
@@ -57,6 +76,7 @@ func TestPackCommitBatch(t *testing.T) {
|
||||
Timestamp: 0,
|
||||
ExtraData: make([]byte, 0),
|
||||
Txs: txns,
|
||||
MessageRoot: common.Hash{},
|
||||
}
|
||||
|
||||
batch := bridge_abi.IZKRollupLayer2Batch{
|
||||
@@ -65,15 +85,14 @@ func TestPackCommitBatch(t *testing.T) {
|
||||
Blocks: []bridge_abi.IZKRollupLayer2BlockHeader{header},
|
||||
}
|
||||
|
||||
_, err = l1RollupABI.Pack("commitBatch", batch)
|
||||
_, err := l1RollupABI.Pack("commitBatch", batch)
|
||||
assert.NoError(err)
|
||||
}
|
||||
|
||||
func TestPackFinalizeBatchWithProof(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
|
||||
assert.NoError(err)
|
||||
l1RollupABI := bridge_abi.RollupABI
|
||||
|
||||
proof := make([]*big.Int, 10)
|
||||
instance := make([]*big.Int, 10)
|
||||
@@ -82,16 +101,83 @@ func TestPackFinalizeBatchWithProof(t *testing.T) {
|
||||
instance[i] = big.NewInt(0)
|
||||
}
|
||||
|
||||
_, err = l1RollupABI.Pack("finalizeBatchWithProof", common.Hash{}, proof, instance)
|
||||
_, err := l1RollupABI.Pack("finalizeBatchWithProof", common.Hash{}, proof, instance)
|
||||
assert.NoError(err)
|
||||
}
|
||||
|
||||
func TestPackRelayMessage(t *testing.T) {
|
||||
func TestPackRelayL1MessageWithProof(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
|
||||
assert.NoError(err)
|
||||
l2MessengerABI := bridge_abi.L2MessengerABI
|
||||
|
||||
_, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), make([]byte, 0))
|
||||
proof := bridge_abi.IL2ScrollMessengerL1MessageProof{
|
||||
BlockHash: common.Hash{},
|
||||
StateRootProof: make([]byte, 10),
|
||||
}
|
||||
_, err := l2MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
|
||||
assert.NoError(err)
|
||||
}
|
||||
|
||||
func TestPackImportBlock(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
l1BlockContainerABI := bridge_abi.L1BlockContainerABI
|
||||
|
||||
_, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, make([]byte, 0), make([]byte, 0))
|
||||
assert.NoError(err)
|
||||
}
|
||||
|
||||
func TestUnpackL1Event_SentMessage(t *testing.T) {
|
||||
log := types.Log{
|
||||
Topics: []common.Hash{
|
||||
common.HexToHash("0x806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"),
|
||||
common.HexToHash("0x000000000000000000000000330a32fee6421b9c0b6cfaa6ddaa1ad6c8ed17f9"),
|
||||
},
|
||||
Data: common.Hex2Bytes("000000000000000000000000f8f8b383285cd23ed2cb9fdf9599b968ddafc3c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000063c63a8100000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000013f1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e48431f5c10000000000000000000000001838a36ab91900fa0e637006bb2faa6ef1a5f84100000000000000000000000038ba9a208f34ddc9332f6dfc0e9d567f098958a4000000000000000000000000bf290324852d86976e9982241723e53eaf2d29d0000000000000000000000000bf290324852d86976e9982241723e53eaf2d29d0000000000000000000000000000000000000000000000002b5e3af16b188000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||
}
|
||||
|
||||
event := bridge_abi.L1SentMessageEvent{}
|
||||
err := utils.UnpackLog(bridge_abi.L1MessengerABI, &event, "SentMessage", log)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, event.Target, common.HexToAddress("0x330a32fee6421b9c0b6cfaa6ddaa1ad6c8ed17f9"))
|
||||
assert.Equal(t, event.Sender, common.HexToAddress("0xf8f8b383285cd23ed2cb9fdf9599b968ddafc3c0"))
|
||||
|
||||
out := make(map[string]interface{})
|
||||
err = utils.UnpackLogIntoMap(bridge_abi.L1MessengerABI, out, "SentMessage", log)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, event.Target, out["target"])
|
||||
assert.Equal(t, event.Sender, out["sender"])
|
||||
assert.Equal(t, event.Value, out["value"])
|
||||
assert.Equal(t, event.Fee, out["fee"])
|
||||
assert.Equal(t, event.Deadline, out["deadline"])
|
||||
assert.Equal(t, event.Message, out["message"])
|
||||
assert.Equal(t, event.MessageNonce, out["messageNonce"])
|
||||
assert.Equal(t, event.GasLimit, out["gasLimit"])
|
||||
}
|
||||
|
||||
func TestUnpackL1Event_CommitBatch(t *testing.T) {
|
||||
log := types.Log{
|
||||
Topics: []common.Hash{
|
||||
common.HexToHash("0xa26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"),
|
||||
common.HexToHash("0x214875997226c54175df6dc97e1bce7f3d624263e1b33bf5daf37b3440d8ffbc"),
|
||||
},
|
||||
Data: common.Hex2Bytes("4476f580301f544e9e76d1c72037fdb53d83e43436639c69d472ad3be0afbdc000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000"),
|
||||
}
|
||||
|
||||
event := bridge_abi.L1CommitBatchEvent{}
|
||||
err := utils.UnpackLog(bridge_abi.RollupABI, &event, "CommitBatch", log)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, event.BatchId, common.HexToHash("0x214875997226c54175df6dc97e1bce7f3d624263e1b33bf5daf37b3440d8ffbc"))
|
||||
assert.Equal(t, event.BatchHash, common.HexToHash("0x4476f580301f544e9e76d1c72037fdb53d83e43436639c69d472ad3be0afbdc0"))
|
||||
|
||||
out := make(map[string]interface{})
|
||||
err = utils.UnpackLogIntoMap(bridge_abi.RollupABI, out, "CommitBatch", log)
|
||||
assert.NoError(t, err)
|
||||
batchID := out["_batchId"].([32]byte)
|
||||
batchHash := out["_batchHash"].([32]byte)
|
||||
parentHash := out["_parentHash"].([32]byte)
|
||||
assert.Equal(t, event.BatchId, common.BytesToHash(batchID[:]))
|
||||
assert.Equal(t, event.BatchHash, common.BytesToHash(batchHash[:]))
|
||||
assert.Equal(t, event.BatchIndex, out["_batchIndex"].(*big.Int))
|
||||
assert.Equal(t, event.ParentHash, common.BytesToHash(parentHash[:]))
|
||||
}
|
||||
|
||||
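The literal hashes asserted above are event topic signatures. As a cross-check, an `abi.ABI` value already knows each event's topic hash, so a constant like `L1SentMessageEventSignature` can be compared against it; this sketch assumes the generated bindings still expose `L1MessengerMetaData` alongside the new `L1MessengerABI` variable (both appear in this diff):

```go
package main

import (
	"fmt"

	bridge_abi "scroll-tech/bridge/abi"
)

func main() {
	// An abi.ABI carries the topic hash of every event it defines, so the
	// hard-coded constant can be checked against Events["SentMessage"].ID.
	l1MessengerABI, err := bridge_abi.L1MessengerMetaData.GetAbi()
	if err != nil {
		panic(err)
	}
	fmt.Println(l1MessengerABI.Events["SentMessage"].ID == bridge_abi.L1SentMessageEventSignature) // expected: true
}
```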
@@ -3,6 +3,7 @@
    "confirmations": 6,
    "endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
    "l1_messenger_address": "0x0000000000000000000000000000000000000000",
    "l1_message_queue_address": "0x0000000000000000000000000000000000000000",
    "rollup_contract_address": "0x0000000000000000000000000000000000000000",
    "start_height": 0,
    "relayer_config": {
@@ -20,6 +21,9 @@
      },
      "message_sender_private_keys": [
        "1212121212121212121212121212121212121212121212121212121212121212"
      ],
      "rollup_sender_private_keys": [
        "1212121212121212121212121212121212121212121212121212121212121212"
      ]
    }
  },
@@ -27,6 +31,7 @@
    "confirmations": 1,
    "endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
    "l2_messenger_address": "0x0000000000000000000000000000000000000000",
    "l2_message_queue_address": "0x0000000000000000000000000000000000000000",
    "relayer_config": {
      "rollup_contract_address": "0x0000000000000000000000000000000000000000",
      "messenger_contract_address": "0x0000000000000000000000000000000000000000",

@@ -14,6 +14,8 @@ type L1Config struct {
    L1MessengerAddress common.Address `json:"l1_messenger_address"`
    // The rollup contract address deployed on layer 1 chain.
    RollupContractAddress common.Address `json:"rollup_contract_address"`
    // The message queue contract address deployed on layer 1 chain.
    L1MessageQueueAddress common.Address `json:"l1_message_queue_address"`
    // The relayer config
    RelayerConfig *RelayerConfig `json:"relayer_config"`
}

@@ -13,7 +13,11 @@ type L2Config struct {
    // l2geth node url.
    Endpoint string `json:"endpoint"`
    // The messenger contract address deployed on layer 2 chain.
    L2MessengerAddress common.Address `json:"l2_messenger_address,omitempty"`
    L2MessengerAddress common.Address `json:"l2_messenger_address"`
    // The message queue contract address deployed on layer 2 chain.
    L2MessageQueueAddress common.Address `json:"l2_message_queue_address"`
    // The block container contract address deployed on layer 2 chain.
    L2BlockContainerAddress common.Address `json:"l2_block_container_address"`
    // The relayer config
    RelayerConfig *RelayerConfig `json:"relayer_config"`
    // The batch_proposer config

@@ -40,6 +40,8 @@ type RelayerConfig struct {
    RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
    // MessengerContractAddress store the scroll messenger contract address.
    MessengerContractAddress common.Address `json:"messenger_contract_address"`
    // ContrainerContractAddress store the l1 block container contract address.
    ContrainerContractAddress common.Address `json:"container_contract_address,omitempty"`
    // sender config
    SenderConfig *SenderConfig `json:"sender_config"`
    // The private key of the relayer
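Since the structs above only add and retag fields, a minimal, self-contained sketch of how such a config section unmarshals may help; the mirror struct and endpoint below are illustrative stand-ins, not the real `scroll-tech/bridge/config` types:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

// l1Config mirrors a subset of the L1Config fields shown in the diff above;
// the real type lives in the bridge's config package.
type l1Config struct {
	Confirmations         uint64         `json:"confirmations"`
	Endpoint              string         `json:"endpoint"`
	L1MessengerAddress    common.Address `json:"l1_messenger_address"`
	L1MessageQueueAddress common.Address `json:"l1_message_queue_address"`
	RollupContractAddress common.Address `json:"rollup_contract_address"`
	StartHeight           uint64         `json:"start_height"`
}

func main() {
	raw := []byte(`{
		"confirmations": 6,
		"endpoint": "https://example-l1-endpoint.invalid",
		"l1_messenger_address": "0x0000000000000000000000000000000000000000",
		"l1_message_queue_address": "0x0000000000000000000000000000000000000000",
		"rollup_contract_address": "0x0000000000000000000000000000000000000000",
		"start_height": 0
	}`)

	var cfg l1Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// common.Address unmarshals 0x-prefixed hex strings directly.
	fmt.Println(cfg.L1MessageQueueAddress.Hex(), cfg.StartHeight)
}
```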
@@ -4,6 +4,7 @@ go 1.18

require (
    github.com/iden3/go-iden3-crypto v0.0.13
    github.com/jmoiron/sqlx v1.3.5
    github.com/orcaman/concurrent-map v1.0.0
    github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
    github.com/stretchr/testify v1.8.0
@@ -21,9 +22,13 @@ require (
    github.com/ethereum/go-ethereum v1.10.26 // indirect
    github.com/go-ole/go-ole v1.2.6 // indirect
    github.com/go-stack/stack v1.8.0 // indirect
    github.com/golang/snappy v0.0.4 // indirect
    github.com/google/uuid v1.3.0 // indirect
    github.com/gorilla/websocket v1.5.0 // indirect
    github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
    github.com/holiman/uint256 v1.2.0 // indirect
    github.com/huin/goupnp v1.0.3 // indirect
    github.com/jackpal/go-nat-pmp v1.0.2 // indirect
    github.com/kr/pretty v0.3.0 // indirect
    github.com/mattn/go-isatty v0.0.14 // indirect
    github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -34,6 +39,7 @@ require (
    github.com/russross/blackfriday/v2 v2.1.0 // indirect
    github.com/scroll-tech/zktrie v0.3.1 // indirect
    github.com/shirou/gopsutil v3.21.11+incompatible // indirect
    github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
    github.com/tklauser/go-sysconf v0.3.10 // indirect
    github.com/tklauser/numcpus v0.4.0 // indirect
    github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
@@ -118,6 +118,7 @@ github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlK
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
@@ -139,6 +140,8 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
@@ -206,6 +209,7 @@ github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
|
||||
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
|
||||
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
|
||||
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/iden3/go-iden3-crypto v0.0.12/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo=
|
||||
@@ -225,10 +229,13 @@ github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mq
|
||||
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
|
||||
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
@@ -262,6 +269,8 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
|
||||
@@ -282,6 +291,8 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
|
||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
@@ -295,6 +306,7 @@ github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
|
||||
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
|
||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
@@ -302,9 +314,11 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
@@ -480,6 +494,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -591,6 +606,7 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
|
||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
|
||||
@@ -644,6 +660,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
|
||||
@@ -653,6 +670,7 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
    "context"

    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/ethclient/gethclient"
    "github.com/scroll-tech/go-ethereum/rpc"

    "scroll-tech/database"

@@ -21,17 +23,23 @@ type Backend struct {

// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*Backend, error) {
    client, err := ethclient.Dial(cfg.Endpoint)
    rawClient, err := rpc.DialContext(ctx, cfg.Endpoint)
    if err != nil {
        return nil, err
    }

    relayer, err := NewLayer1Relayer(ctx, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
    gethClient := gethclient.New(rawClient)
    ethClient := ethclient.NewClient(rawClient)

    relayer, err := NewLayer1Relayer(ctx, gethClient, ethClient, int64(cfg.Confirmations), cfg.L1MessageQueueAddress, orm, cfg.RelayerConfig)
    if err != nil {
        return nil, err
    }

    watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RollupContractAddress, orm)
    watcher, err := NewWatcher(ctx, gethClient, ethClient, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.L1MessageQueueAddress, cfg.RollupContractAddress, orm)
    if err != nil {
        return nil, err
    }

    return &Backend{
        cfg: cfg,
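The constructor above stops calling `ethclient.Dial` and instead dials the raw RPC endpoint once, wrapping it twice: `gethclient` for extended calls such as `eth_getProof`, `ethclient` for the standard API. A standalone sketch of that pattern (the endpoint URL is a placeholder):

```go
package main

import (
	"context"
	"log"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/ethclient/gethclient"
	"github.com/scroll-tech/go-ethereum/rpc"
)

func main() {
	ctx := context.Background()

	// Dial the node once; both client wrappers share this connection.
	rawClient, err := rpc.DialContext(ctx, "https://example-l1-endpoint.invalid") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer rawClient.Close()

	gethClient := gethclient.New(rawClient)     // extended APIs, e.g. GetProof (eth_getProof)
	ethClient := ethclient.NewClient(rawClient) // standard APIs, e.g. BlockNumber, FilterLogs

	number, err := ethClient.BlockNumber(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("latest block:", number, "geth client ready:", gethClient != nil)
}
```

Sharing one `rpc.Client` keeps both wrappers on the same underlying connection and lets them be closed together.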
@@ -11,13 +11,17 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient/gethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/orm"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/config"
|
||||
"scroll-tech/bridge/sender"
|
||||
"scroll-tech/bridge/utils"
|
||||
)
|
||||
|
||||
// Layer1Relayer is responsible for
|
||||
@@ -27,28 +31,42 @@ import (
|
||||
// Actions are triggered by new head from layer 1 geth node.
|
||||
// @todo It's better to be triggered by watcher.
|
||||
type Layer1Relayer struct {
|
||||
ctx context.Context
|
||||
sender *sender.Sender
|
||||
ctx context.Context
|
||||
|
||||
db orm.L1MessageOrm
|
||||
gethClient *gethclient.Client
|
||||
ethClient *ethclient.Client
|
||||
|
||||
// sender and channel used for relay message
|
||||
relaySender *sender.Sender
|
||||
relayConfirmationCh <-chan *sender.Confirmation
|
||||
|
||||
// sender and channel used for import blocks
|
||||
importSender *sender.Sender
|
||||
importConfirmationCh <-chan *sender.Confirmation
|
||||
|
||||
l1MessageQueueAddress common.Address
|
||||
|
||||
db database.OrmFactory
|
||||
cfg *config.RelayerConfig
|
||||
|
||||
// channel used to communicate with transaction sender
|
||||
confirmationCh <-chan *sender.Confirmation
|
||||
l2MessengerABI *abi.ABI
|
||||
l2MessengerABI *abi.ABI
|
||||
l1BlockContainerABI *abi.ABI
|
||||
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
|
||||
func NewLayer1Relayer(ctx context.Context, l1ConfirmNum int64, db orm.L1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
|
||||
l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
|
||||
func NewLayer1Relayer(ctx context.Context, gethClient *gethclient.Client, ethClient *ethclient.Client, l1ConfirmNum int64, l1MessageQueueAddress common.Address, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
|
||||
relaySender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
|
||||
if err != nil {
|
||||
log.Warn("new L2MessengerABI failed", "err", err)
|
||||
log.Error("new relayer sender failed", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
|
||||
if len(cfg.RollupSenderPrivateKeys) != 1 {
|
||||
return nil, errors.New("more than 1 private key for importing L1 block")
|
||||
}
|
||||
importSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.RollupSenderPrivateKeys)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
|
||||
log.Error("new sender failed", "main address", addr.String(), "err", err)
|
||||
@@ -56,20 +74,39 @@ func NewLayer1Relayer(ctx context.Context, l1ConfirmNum int64, db orm.L1MessageO
|
||||
}
|
||||
|
||||
return &Layer1Relayer{
|
||||
ctx: ctx,
|
||||
sender: sender,
|
||||
db: db,
|
||||
l2MessengerABI: l2MessengerABI,
|
||||
cfg: cfg,
|
||||
stopCh: make(chan struct{}),
|
||||
confirmationCh: sender.ConfirmChan(),
|
||||
ctx: ctx,
|
||||
|
||||
gethClient: gethClient,
|
||||
ethClient: ethClient,
|
||||
|
||||
relaySender: relaySender,
|
||||
relayConfirmationCh: relaySender.ConfirmChan(),
|
||||
|
||||
importSender: importSender,
|
||||
importConfirmationCh: importSender.ConfirmChan(),
|
||||
|
||||
l1MessageQueueAddress: l1MessageQueueAddress,
|
||||
|
||||
db: db,
|
||||
cfg: cfg,
|
||||
|
||||
l2MessengerABI: bridge_abi.L2MessengerABI,
|
||||
l1BlockContainerABI: bridge_abi.L1BlockContainerABI,
|
||||
|
||||
stopCh: make(chan struct{}),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
|
||||
func (r *Layer1Relayer) ProcessSavedEvents() {
|
||||
block, err := r.db.GetLatestImportedL1Block()
|
||||
if err != nil {
|
||||
log.Error("GetLatestImportedL1Block failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
// msgs are sorted by nonce in increasing order
|
||||
msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending, 100)
|
||||
msgs, err := r.db.GetL1MessagesByStatusUpToProofHeight(orm.MsgPending, block.Number, 100)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch unprocessed L1 messages", "err", err)
|
||||
return
|
||||
@@ -80,7 +117,7 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
|
||||
}
|
||||
|
||||
for _, msg := range msgs {
|
||||
if err = r.processSavedEvent(msg); err != nil {
|
||||
if err = r.processSavedEvent(msg, block); err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
|
||||
}
|
||||
@@ -89,28 +126,58 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
|
||||
func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message, block *orm.L1BlockInfo) error {
|
||||
// @todo add support to relay multiple messages
|
||||
from := common.HexToAddress(msg.Sender)
|
||||
target := common.HexToAddress(msg.Target)
|
||||
value, ok := big.NewInt(0).SetString(msg.Value, 10)
|
||||
if !ok {
|
||||
// @todo maybe panic?
|
||||
log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
|
||||
log.Error("Failed to parse message value", "nonce", msg.Nonce, "height", msg.Height, "value", msg.Value)
|
||||
// TODO: need to skip this message by changing its status to MsgError
|
||||
}
|
||||
|
||||
if len(msg.MessageProof) == 0 {
|
||||
// empty proof, we need to fetch storage proof from client
|
||||
hash := common.HexToHash(msg.MsgHash)
|
||||
proofs, err := utils.GetL1MessageProof(r.gethClient, r.l1MessageQueueAddress, []common.Hash{hash}, block.Number)
|
||||
if err != nil {
|
||||
log.Error("Failed to GetL1MessageProof", "msg.hash", msg.MsgHash, "height", block.Number, "err", err)
|
||||
return err
|
||||
}
|
||||
msg.MessageProof = common.Bytes2Hex(proofs[0])
|
||||
msg.ProofHeight = block.Number
|
||||
}
|
||||
|
||||
blocks, err := r.db.GetL1BlockInfos(map[string]interface{}{
|
||||
"number": msg.ProofHeight,
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("Failed to GetL1BlockInfos from db", "proof_height", msg.ProofHeight, "err", err)
|
||||
return err
|
||||
}
|
||||
if len(blocks) != 1 {
|
||||
log.Error("Block not exist", "height", msg.ProofHeight)
|
||||
return errors.New("block not exist")
|
||||
}
|
||||
|
||||
proof := bridge_abi.IL2ScrollMessengerL1MessageProof{
|
||||
BlockHash: common.HexToHash(blocks[0].Hash),
|
||||
StateRootProof: common.Hex2Bytes(msg.MessageProof),
|
||||
}
|
||||
|
||||
fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
|
||||
deadline := big.NewInt(int64(msg.Deadline))
|
||||
msgNonce := big.NewInt(int64(msg.Nonce))
|
||||
calldata := common.Hex2Bytes(msg.Calldata)
|
||||
data, err := r.l2MessengerABI.Pack("relayMessage", from, target, value, fee, deadline, msgNonce, calldata)
|
||||
data, err := r.l2MessengerABI.Pack("relayMessageWithProof", from, target, value, fee, deadline, msgNonce, calldata, proof)
|
||||
if err != nil {
|
||||
log.Error("Failed to pack relayMessage", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
|
||||
log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
|
||||
// TODO: need to skip this message by changing its status to MsgError
|
||||
return err
|
||||
}
|
||||
|
||||
hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
|
||||
hash, err := r.relaySender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
|
||||
if err != nil && err.Error() == "execution reverted: Message expired" {
|
||||
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgExpired)
|
||||
}
|
||||
@@ -129,6 +196,70 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// ProcessPendingBlocks imports failed/pending block headers to layer2
|
||||
func (r *Layer1Relayer) ProcessPendingBlocks() {
|
||||
// handle failed block first since we need to import sequentially
|
||||
failedBlocks, err := r.db.GetL1BlockInfos(map[string]interface{}{
|
||||
"block_status": orm.L1BlockFailed,
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch failed L1 Blocks from db", "err", err)
|
||||
return
|
||||
}
|
||||
for _, block := range failedBlocks {
|
||||
if err = r.importBlock(block); err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("failed to retry failed L1 block", "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If there are failed blocks, we don't handle pending blocks. This is
|
||||
// because if there are `importing`` blocks after `failed`` blocks, the
|
||||
// `importing` block will fail eventually. If we send `pending` blocks
|
||||
// immediately, they will also fail eventually.
|
||||
if len(failedBlocks) > 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// handle pending blocks
|
||||
pendingBlocks, err := r.db.GetL1BlockInfos(map[string]interface{}{
|
||||
"block_status": orm.L1BlockPending,
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch pending L1 Blocks from db", "err", err)
|
||||
return
|
||||
}
|
||||
for _, block := range pendingBlocks {
|
||||
if err = r.importBlock(block); err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("failed to import pending L1 block", "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer1Relayer) importBlock(block *orm.L1BlockInfo) error {
|
||||
data, err := r.l1BlockContainerABI.Pack("importBlockHeader", common.HexToHash(block.Hash), common.Hex2Bytes(block.HeaderRLP), make([]byte, 0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash, err := r.importSender.SendTransaction(block.Hash, &r.cfg.ContrainerContractAddress, big.NewInt(0), data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info("import block to layer2", "height", block.Number, "hash", block.Hash, "tx hash", hash)
|
||||
|
||||
err = r.db.UpdateL1BlockStatusAndImportTxHash(r.ctx, block.Hash, orm.L1BlockImporting, hash.String())
|
||||
if err != nil {
|
||||
log.Error("UpdateL1BlockStatusAndImportTxHash failed", "height", block.Number, "hash", block.Hash, "err", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Start the relayer process
|
||||
func (r *Layer1Relayer) Start() {
|
||||
go func() {
|
||||
@@ -139,19 +270,39 @@ func (r *Layer1Relayer) Start() {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
// number, err := r.client.BlockNumber(r.ctx)
|
||||
// log.Info("receive header", "height", number)
|
||||
r.ProcessPendingBlocks()
|
||||
r.ProcessSavedEvents()
|
||||
case cfm := <-r.confirmationCh:
|
||||
case cfm := <-r.relayConfirmationCh:
|
||||
if !cfm.IsSuccessful {
|
||||
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
|
||||
// @todo handle db error
|
||||
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, orm.MsgFailed, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
|
||||
}
|
||||
log.Warn("relay transaction confirmed but failed in layer2", "confirmation", cfm)
|
||||
} else {
|
||||
// @todo handle db error
|
||||
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, orm.MsgConfirmed, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
|
||||
}
|
||||
log.Info("transaction confirmed in layer2", "confirmation", cfm)
|
||||
log.Info("relay transaction confirmed in layer2", "confirmation", cfm)
|
||||
}
|
||||
case cfm := <-r.importConfirmationCh:
|
||||
if !cfm.IsSuccessful {
|
||||
// @todo handle db error
|
||||
err := r.db.UpdateL1BlockStatusAndImportTxHash(r.ctx, cfm.ID, orm.L1BlockFailed, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateL1BlockStatusAndImportTxHash failed", "err", err)
|
||||
}
|
||||
log.Warn("import transaction confirmed but failed in layer2", "confirmation", cfm)
|
||||
} else {
|
||||
// @todo handle db error
|
||||
err := r.db.UpdateL1BlockStatusAndImportTxHash(r.ctx, cfm.ID, orm.L1BlockImported, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateL1BlockStatusAndImportTxHash failed", "err", err)
|
||||
}
|
||||
log.Info("import transaction confirmed in layer2", "confirmation", cfm)
|
||||
}
|
||||
case <-r.stopCh:
|
||||
return
|
||||
|
||||
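In `importBlock` above, the calldata for `importBlockHeader` is packed from values the watcher stored earlier: the block hash and the hex-encoded header RLP. A minimal sketch of just that packing step, with placeholder inputs instead of real database rows:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"

	bridge_abi "scroll-tech/bridge/abi"
)

func main() {
	// Placeholder values; the relayer reads these from the L1 block table.
	blockHash := common.HexToHash("0x01")
	headerRLP := common.Hex2Bytes("c0") // stand-in for the stored header RLP

	// Same argument shape as in importBlock: hash, header bytes, empty proof.
	calldata, err := bridge_abi.L1BlockContainerABI.Pack("importBlockHeader", blockHash, headerRLP, make([]byte, 0))
	if err != nil {
		panic(err)
	}
	fmt.Println("calldata length:", len(calldata))
}
```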
@@ -4,6 +4,10 @@ import (
    "context"
    "testing"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/ethclient/gethclient"
    "github.com/scroll-tech/go-ethereum/rpc"
    "github.com/stretchr/testify/assert"

    "scroll-tech/database/migrate"

@@ -19,7 +23,12 @@ func testCreateNewL1Relayer(t *testing.T) {
    assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
    defer db.Close()

    relayer, err := NewLayer1Relayer(context.Background(), 1, db, cfg.L2Config.RelayerConfig)
    rawClient, err := rpc.DialContext(context.Background(), l1gethImg.Endpoint())
    assert.NoError(t, err)
    gethClient := gethclient.New(rawClient)
    ethClient := ethclient.NewClient(rawClient)

    relayer, err := NewLayer1Relayer(context.Background(), gethClient, ethClient, 1, common.Address{}, db, cfg.L2Config.RelayerConfig)
    assert.NoError(t, err)
    defer relayer.Stop()
@@ -2,15 +2,19 @@ package l1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
geth "github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient/gethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rlp"
|
||||
"github.com/scroll-tech/go-ethereum/metrics"
|
||||
|
||||
"scroll-tech/database"
|
||||
@@ -38,50 +42,78 @@ type rollupEvent struct {
|
||||
|
||||
// Watcher will listen for smart contract events from Eth L1.
|
||||
type Watcher struct {
|
||||
ctx context.Context
|
||||
client *ethclient.Client
|
||||
db database.OrmFactory
|
||||
ctx context.Context
|
||||
|
||||
gethClient *gethclient.Client
|
||||
ethClient *ethclient.Client
|
||||
|
||||
db database.OrmFactory
|
||||
|
||||
// The number of new blocks to wait for a block to be confirmed
|
||||
confirmations uint64
|
||||
confirmations uint64
|
||||
|
||||
messengerAddress common.Address
|
||||
messengerABI *abi.ABI
|
||||
|
||||
messageQueueAddress common.Address
|
||||
messageQueueABI *abi.ABI
|
||||
|
||||
rollupAddress common.Address
|
||||
rollupABI *abi.ABI
|
||||
|
||||
// The height of the block that the watcher has retrieved event logs
|
||||
processedMsgHeight uint64
|
||||
// The height of the block that the watcher has retrieved header rlp
|
||||
processedBlockHeight uint64
|
||||
|
||||
stop chan bool
|
||||
}
|
||||
|
||||
// NewWatcher returns a new instance of Watcher. The instance will be not fully prepared,
|
||||
// and still needs to be finalized and ran by calling `watcher.Start`.
|
||||
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations uint64, messengerAddress common.Address, rollupAddress common.Address, db database.OrmFactory) *Watcher {
|
||||
savedHeight, err := db.GetLayer1LatestWatchedHeight()
|
||||
func NewWatcher(ctx context.Context, gethClient *gethclient.Client, ethClient *ethclient.Client, startHeight uint64, confirmations uint64, messengerAddress common.Address, messageQueueAddress common.Address, rollupAddress common.Address, db database.OrmFactory) (*Watcher, error) {
|
||||
savedMsgHeight, err := db.GetLayer1LatestWatchedHeight()
|
||||
if err != nil {
|
||||
log.Warn("Failed to fetch height from db", "err", err)
|
||||
savedHeight = 0
|
||||
log.Warn("Failed to fetch L1 watched message height from db", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
if savedHeight < int64(startHeight) {
|
||||
savedHeight = int64(startHeight)
|
||||
if savedMsgHeight < int64(startHeight) {
|
||||
savedMsgHeight = int64(startHeight)
|
||||
}
|
||||
savedBlockHeight, err := db.GetLatestL1BlockHeight()
|
||||
if err != nil {
|
||||
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
if savedBlockHeight < startHeight {
|
||||
savedBlockHeight = startHeight
|
||||
}
|
||||
|
||||
stop := make(chan bool)
|
||||
|
||||
return &Watcher{
|
||||
ctx: ctx,
|
||||
client: client,
|
||||
db: db,
|
||||
confirmations: confirmations,
|
||||
messengerAddress: messengerAddress,
|
||||
messengerABI: bridge_abi.L1MessengerMetaABI,
|
||||
rollupAddress: rollupAddress,
|
||||
rollupABI: bridge_abi.RollupMetaABI,
|
||||
processedMsgHeight: uint64(savedHeight),
|
||||
stop: stop,
|
||||
}
|
||||
ctx: ctx,
|
||||
|
||||
gethClient: gethClient,
|
||||
ethClient: ethClient,
|
||||
|
||||
db: db,
|
||||
confirmations: confirmations,
|
||||
|
||||
messengerAddress: messengerAddress,
|
||||
messengerABI: bridge_abi.L1MessengerABI,
|
||||
|
||||
messageQueueAddress: messageQueueAddress,
|
||||
messageQueueABI: bridge_abi.L1MessageQueueABI,
|
||||
|
||||
rollupAddress: rollupAddress,
|
||||
rollupABI: bridge_abi.RollupABI,
|
||||
|
||||
processedMsgHeight: uint64(savedMsgHeight),
|
||||
processedBlockHeight: savedBlockHeight,
|
||||
|
||||
stop: stop,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start the Watcher module.
|
||||
@@ -96,13 +128,18 @@ func (w *Watcher) Start() {
|
||||
return
|
||||
|
||||
default:
|
||||
blockNumber, err := w.client.BlockNumber(w.ctx)
|
||||
blockNumber, err := w.ethClient.BlockNumber(w.ctx)
|
||||
if err != nil {
|
||||
log.Error("Failed to get block number", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := w.fetchBlockHeader(blockNumber); err != nil {
|
||||
log.Error("Failed to fetch L1 block header", "lastest", blockNumber, "err", err)
|
||||
}
|
||||
|
||||
if err := w.FetchContractEvent(blockNumber); err != nil {
|
||||
log.Error("Failed to fetch bridge contract", "err", err)
|
||||
log.Error("Failed to fetch L1 bridge event", "lastest", blockNumber, "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -116,12 +153,74 @@ func (w *Watcher) Stop() {
|
||||
|
||||
const contractEventsBlocksFetchLimit = int64(10)
|
||||
|
||||
// fetchBlockHeader pull latest L1 blocks and save in DB
|
||||
func (w *Watcher) fetchBlockHeader(blockHeight uint64) error {
|
||||
fromBlock := int64(w.processedBlockHeight) + 1
|
||||
toBlock := int64(blockHeight) - int64(w.confirmations)
|
||||
if toBlock < fromBlock {
|
||||
return nil
|
||||
}
|
||||
if toBlock > fromBlock+contractEventsBlocksFetchLimit {
|
||||
toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
|
||||
}
|
||||
|
||||
var blocks []*orm.L1BlockInfo
|
||||
var err error
|
||||
height := fromBlock
|
||||
for ; height <= toBlock; height++ {
|
||||
var block *types.Block
|
||||
block, err = w.ethClient.BlockByNumber(w.ctx, big.NewInt(height))
|
||||
if err != nil {
|
||||
log.Warn("Failed to get block", "height", height, "err", err)
|
||||
break
|
||||
}
|
||||
var headerRLPBytes []byte
|
||||
headerRLPBytes, err = rlp.EncodeToBytes(block.Header())
|
||||
if err != nil {
|
||||
log.Warn("Failed to rlp encode header", "height", height, "err", err)
|
||||
break
|
||||
}
|
||||
blocks = append(blocks, &orm.L1BlockInfo{
|
||||
Number: uint64(height),
|
||||
Hash: block.Hash().String(),
|
||||
HeaderRLP: common.Bytes2Hex(headerRLPBytes),
|
||||
})
|
||||
}
|
||||
|
||||
// failed at first block, return with the error
|
||||
if height == fromBlock {
|
||||
return err
|
||||
}
|
||||
toBlock = height - 1
|
||||
|
||||
// insert succeed blocks
|
||||
err = w.db.InsertL1Blocks(w.ctx, blocks)
|
||||
if err != nil {
|
||||
log.Warn("Failed to insert L1 block to db", "fromBlock", fromBlock, "toBlock", toBlock, "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// update processed height
|
||||
w.processedBlockHeight = uint64(toBlock)
|
||||
return nil
|
||||
}
|
||||
|
||||
// FetchContractEvent pull latest event logs from given contract address and save in DB
|
||||
func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
|
||||
defer func() {
|
||||
log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
|
||||
}()
|
||||
|
||||
var dbTx *sqlx.Tx
|
||||
var dbTxErr error
|
||||
defer func() {
|
||||
if dbTxErr != nil {
|
||||
if err := dbTx.Rollback(); err != nil {
|
||||
log.Error("dbTx.Rollback()", "err", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
fromBlock := int64(w.processedMsgHeight) + 1
|
||||
toBlock := int64(blockHeight) - int64(w.confirmations)
|
||||
|
||||
@@ -138,18 +237,20 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
|
||||
ToBlock: big.NewInt(to), // inclusive
|
||||
Addresses: []common.Address{
|
||||
w.messengerAddress,
|
||||
w.messageQueueAddress,
|
||||
w.rollupAddress,
|
||||
},
|
||||
Topics: make([][]common.Hash, 1),
|
||||
}
|
||||
query.Topics[0] = make([]common.Hash, 5)
|
||||
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
|
||||
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
|
||||
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
|
||||
query.Topics[0][3] = common.HexToHash(bridge_abi.CommitBatchEventSignature)
|
||||
query.Topics[0][4] = common.HexToHash(bridge_abi.FinalizedBatchEventSignature)
|
||||
query.Topics[0] = make([]common.Hash, 6)
|
||||
query.Topics[0][0] = bridge_abi.L1SentMessageEventSignature
|
||||
query.Topics[0][1] = bridge_abi.L1RelayedMessageEventSignature
|
||||
query.Topics[0][2] = bridge_abi.L1FailedRelayedMessageEventSignature
|
||||
query.Topics[0][3] = bridge_abi.L1CommitBatchEventSignature
|
||||
query.Topics[0][4] = bridge_abi.L1FinalizeBatchEventSignature
|
||||
query.Topics[0][5] = bridge_abi.L1AppendMessageEventSignature
|
||||
|
||||
logs, err := w.client.FilterLogs(w.ctx, query)
|
||||
logs, err := w.ethClient.FilterLogs(w.ctx, query)
|
||||
if err != nil {
|
||||
log.Warn("Failed to get event logs", "err", err)
|
||||
return err
|
||||
@@ -216,9 +317,38 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
|
||||
}
|
||||
}
|
||||
|
||||
if err = w.db.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
|
||||
// group messages by height
|
||||
dbTx, err = w.db.Beginx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i := 0; i < len(sentMessageEvents); {
|
||||
j := i
|
||||
var messages []*orm.L1Message
|
||||
for ; j < len(sentMessageEvents) && sentMessageEvents[i].Height == sentMessageEvents[j].Height; j++ {
|
||||
messages = append(messages, sentMessageEvents[j])
|
||||
}
|
||||
i = j
|
||||
err = w.fillMessageProof(messages)
|
||||
if err != nil {
|
||||
log.Error("Failed to fillMessageProof", "err", err)
|
||||
// make sure we will rollback
|
||||
dbTxErr = err
|
||||
return err
|
||||
}
|
||||
|
||||
dbTxErr = w.db.SaveL1MessagesInDbTx(w.ctx, dbTx, messages)
|
||||
if dbTxErr != nil {
|
||||
log.Error("SaveL1MessagesInDbTx failed", "error", dbTxErr)
|
||||
return dbTxErr
|
||||
}
|
||||
}
|
||||
|
||||
dbTxErr = dbTx.Commit()
|
||||
if dbTxErr != nil {
|
||||
log.Error("dbTx.Commit failed", "error", dbTxErr)
|
||||
return dbTxErr
|
||||
}
|
||||
|
||||
w.processedMsgHeight = uint64(to)
|
||||
bridgeL1MsgSyncHeightGauge.Update(to)
|
||||
@@ -234,30 +364,33 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
|
||||
var l1Messages []*orm.L1Message
|
||||
var relayedMessages []relayedMessage
|
||||
var rollupEvents []rollupEvent
|
||||
var lastAppendMsgHash common.Hash
|
||||
for _, vLog := range logs {
|
||||
switch vLog.Topics[0] {
|
||||
case common.HexToHash(bridge_abi.SentMessageEventSignature):
|
||||
event := struct {
|
||||
Target common.Address
|
||||
Sender common.Address
|
||||
Value *big.Int // uint256
|
||||
Fee *big.Int // uint256
|
||||
Deadline *big.Int // uint256
|
||||
Message []byte
|
||||
MessageNonce *big.Int // uint256
|
||||
GasLimit *big.Int // uint256
|
||||
}{}
|
||||
|
||||
err := w.messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
|
||||
case bridge_abi.L1SentMessageEventSignature:
|
||||
event := bridge_abi.L1SentMessageEvent{}
|
||||
err := utils.UnpackLog(w.messengerABI, &event, "SentMessage", vLog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack layer1 SentMessage event", "err", err)
|
||||
return l1Messages, relayedMessages, rollupEvents, err
|
||||
}
|
||||
// target is in topics[1]
|
||||
event.Target = common.HexToAddress(vLog.Topics[1].String())
|
||||
computedMsgHash := utils.ComputeMessageHash(
|
||||
event.Sender,
|
||||
event.Target,
|
||||
event.Value,
|
||||
event.Fee,
|
||||
event.Deadline,
|
||||
event.Message,
|
||||
event.MessageNonce,
|
||||
)
|
||||
// they should always match, just double check
|
||||
if computedMsgHash != lastAppendMsgHash {
|
||||
return l1Messages, relayedMessages, rollupEvents, errors.New("l1 message hash mismatch")
|
||||
}
|
||||
|
||||
l1Messages = append(l1Messages, &orm.L1Message{
|
||||
Nonce: event.MessageNonce.Uint64(),
|
||||
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
|
||||
MsgHash: computedMsgHash.String(),
|
||||
Height: vLog.BlockNumber,
|
||||
Sender: event.Sender.String(),
|
||||
Value: event.Value.String(),
|
||||
@@ -268,68 +401,64 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
|
||||
Calldata: common.Bytes2Hex(event.Message),
|
||||
Layer1Hash: vLog.TxHash.Hex(),
|
||||
})
|
||||
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
|
||||
event := struct {
|
||||
MsgHash common.Hash
|
||||
}{}
|
||||
// MsgHash is in topics[1]
|
||||
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
|
||||
case bridge_abi.L1RelayedMessageEventSignature:
|
||||
event := bridge_abi.L1RelayedMessageEvent{}
|
||||
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack layer1 RelayedMessage event", "err", err)
|
||||
return l1Messages, relayedMessages, rollupEvents, err
|
||||
}
|
||||
relayedMessages = append(relayedMessages, relayedMessage{
|
||||
msgHash: event.MsgHash,
|
||||
txHash: vLog.TxHash,
|
||||
isSuccessful: true,
|
||||
})
|
||||
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
|
||||
event := struct {
|
||||
MsgHash common.Hash
|
||||
}{}
|
||||
// MsgHash is in topics[1]
|
||||
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
|
||||
case bridge_abi.L1FailedRelayedMessageEventSignature:
|
||||
event := bridge_abi.L1FailedRelayedMessageEvent{}
|
||||
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack layer1 FailedRelayedMessage event", "err", err)
|
||||
return l1Messages, relayedMessages, rollupEvents, err
|
||||
}
|
||||
relayedMessages = append(relayedMessages, relayedMessage{
|
||||
msgHash: event.MsgHash,
|
||||
txHash: vLog.TxHash,
|
||||
isSuccessful: false,
|
||||
})
|
||||
case common.HexToHash(bridge_abi.CommitBatchEventSignature):
|
||||
event := struct {
|
||||
BatchID common.Hash
|
||||
BatchHash common.Hash
|
||||
BatchIndex *big.Int
|
||||
ParentHash common.Hash
|
||||
}{}
|
||||
// BatchID is in topics[1]
|
||||
event.BatchID = common.HexToHash(vLog.Topics[1].String())
|
||||
err := w.rollupABI.UnpackIntoInterface(&event, "CommitBatch", vLog.Data)
|
||||
case bridge_abi.L1CommitBatchEventSignature:
|
||||
event := bridge_abi.L1CommitBatchEvent{}
|
||||
err := utils.UnpackLog(w.rollupABI, &event, "CommitBatch", vLog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack layer1 CommitBatch event", "err", err)
|
||||
return l1Messages, relayedMessages, rollupEvents, err
|
||||
}
|
||||
|
||||
rollupEvents = append(rollupEvents, rollupEvent{
|
||||
batchID: event.BatchID,
|
||||
batchID: event.BatchId,
|
||||
txHash: vLog.TxHash,
|
||||
status: orm.RollupCommitted,
|
||||
})
|
||||
case common.HexToHash(bridge_abi.FinalizedBatchEventSignature):
|
||||
event := struct {
|
||||
BatchID common.Hash
|
||||
BatchHash common.Hash
|
||||
BatchIndex *big.Int
|
||||
ParentHash common.Hash
|
||||
}{}
|
||||
// BatchID is in topics[1]
|
||||
event.BatchID = common.HexToHash(vLog.Topics[1].String())
|
||||
err := w.rollupABI.UnpackIntoInterface(&event, "FinalizeBatch", vLog.Data)
|
||||
case bridge_abi.L1FinalizeBatchEventSignature:
|
||||
event := bridge_abi.L1FinalizeBatchEvent{}
|
||||
err := utils.UnpackLog(w.rollupABI, &event, "FinalizeBatch", vLog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err)
|
||||
return l1Messages, relayedMessages, rollupEvents, err
|
||||
}
|
||||
|
||||
rollupEvents = append(rollupEvents, rollupEvent{
|
||||
batchID: event.BatchID,
|
||||
batchID: event.BatchId,
|
||||
txHash: vLog.TxHash,
|
||||
status: orm.RollupFinalized,
|
||||
})
|
||||
case bridge_abi.L1AppendMessageEventSignature:
|
||||
event := bridge_abi.L1AppendMessageEvent{}
|
||||
err := utils.UnpackLog(w.messageQueueABI, &event, "AppendMessage", vLog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack layer1 AppendMessage event", "err", err)
|
||||
return l1Messages, relayedMessages, rollupEvents, err
|
||||
}
|
||||
lastAppendMsgHash = event.MsgHash
|
||||
default:
|
||||
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
|
||||
}
|
||||
@@ -337,3 +466,25 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
|
||||
|
||||
return l1Messages, relayedMessages, rollupEvents, nil
|
||||
}
|
||||
|
||||
// fillMessageProof fetches storage proofs for msgs.
// The caller should make sure the heights of all msgs are the same.
func (w *Watcher) fillMessageProof(msgs []*orm.L1Message) error {
|
||||
var hashes []common.Hash
|
||||
for _, msg := range msgs {
|
||||
hashes = append(hashes, common.HexToHash(msg.MsgHash))
|
||||
}
|
||||
|
||||
height := msgs[0].Height
|
||||
proofs, err := utils.GetL1MessageProof(w.gethClient, w.messageQueueAddress, hashes, height)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := 0; i < len(msgs); i++ {
|
||||
msgs[i].ProofHeight = height
|
||||
msgs[i].MessageProof = common.Bytes2Hex(proofs[i])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient/gethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/database"
|
||||
@@ -18,12 +20,15 @@ func testStartWatcher(t *testing.T) {
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
client, err := ethclient.Dial(l1gethImg.Endpoint())
|
||||
rawClient, err := rpc.DialContext(context.Background(), l1gethImg.Endpoint())
|
||||
assert.NoError(t, err)
|
||||
gethClient := gethclient.New(rawClient)
|
||||
ethClient := ethclient.NewClient(rawClient)
|
||||
|
||||
l1Cfg := cfg.L1Config
|
||||
|
||||
watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
|
||||
watcher, err := NewWatcher(context.Background(), gethClient, ethClient, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RollupContractAddress, db)
|
||||
assert.NoError(t, err)
|
||||
watcher.Start()
|
||||
defer watcher.Stop()
|
||||
}
|
||||
|
||||
@@ -32,7 +32,10 @@ func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*B
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.BatchProposerConfig, cfg.L2MessengerAddress, orm)
|
||||
l2Watcher, err := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.BatchProposerConfig, cfg.L2MessengerAddress, cfg.L2MessageQueueAddress, cfg.L2BlockContainerAddress, orm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Backend{
|
||||
cfg: cfg,
|
||||
|
||||
@@ -98,7 +98,7 @@ func (w *batchProposer) tryProposeBatch() {
|
||||
}
|
||||
}
|
||||
|
||||
func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {
|
||||
func (w *batchProposer) createBatchForBlocks(blocks []*orm.L2BlockInfo) error {
|
||||
dbTx, err := w.orm.Beginx()
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -40,6 +40,14 @@ func testBatchProposer(t *testing.T) {
|
||||
// Insert traces into db.
|
||||
assert.NoError(t, db.InsertBlockTraces([]*types.BlockTrace{trace2, trace3}))
|
||||
|
||||
// update message root
|
||||
dbTx, err := db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
err = db.SetMessageRootForBlocksInDBTx(dbTx, []uint64{trace2.Header.Number.Uint64(), trace3.Header.Number.Uint64()}, "xx")
|
||||
assert.NoError(t, err)
|
||||
err = dbTx.Commit()
|
||||
assert.NoError(t, err)
|
||||
|
||||
id := utils.ComputeBatchID(trace3.Header.Hash(), trace2.Header.ParentHash, big.NewInt(1))
|
||||
|
||||
proposer := newBatchProposer(&config.BatchProposerConfig{
|
||||
|
||||
@@ -81,10 +81,10 @@ func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
|
||||
db: db,
|
||||
messageSender: messageSender,
|
||||
messageCh: messageSender.ConfirmChan(),
|
||||
l1MessengerABI: bridge_abi.L1MessengerMetaABI,
|
||||
l1MessengerABI: bridge_abi.L1MessengerABI,
|
||||
rollupSender: rollupSender,
|
||||
rollupCh: rollupSender.ConfirmChan(),
|
||||
l1RollupABI: bridge_abi.RollupMetaABI,
|
||||
l1RollupABI: bridge_abi.RollupABI,
|
||||
cfg: cfg,
|
||||
processingMessage: sync.Map{},
|
||||
processingCommitment: sync.Map{},
|
||||
@@ -126,7 +126,7 @@ func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
|
||||
for _, msg := range msgs[:size] {
|
||||
msg := msg
|
||||
g.Go(func() error {
|
||||
return r.processSavedEvent(msg, batch.Index)
|
||||
return r.processSavedEvent(msg, batch)
|
||||
})
|
||||
}
|
||||
if err := g.Wait(); err != nil {
|
||||
@@ -138,14 +138,12 @@ func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
|
||||
// @todo fetch merkle proof from l2geth
|
||||
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, batch *orm.BlockBatch) error {
|
||||
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
|
||||
|
||||
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
|
||||
BlockHeight: big.NewInt(int64(msg.Height)),
|
||||
BatchIndex: big.NewInt(0).SetUint64(index),
|
||||
MerkleProof: make([]byte, 0),
|
||||
BlockHash: common.HexToHash(batch.EndBlockHash),
|
||||
MessageRootProof: DecodeBytesToMerkleProof(common.Hex2Bytes(msg.Proof)),
|
||||
}
|
||||
from := common.HexToAddress(msg.Sender)
|
||||
target := common.HexToAddress(msg.Target)
|
||||
@@ -241,7 +239,6 @@ func (r *Layer2Relayer) ProcessPendingBatches(wg *sync.WaitGroup) {
|
||||
}
|
||||
for j, tx := range trace.Transactions {
|
||||
layer2Batch.Blocks[i].Txs[j] = bridge_abi.IZKRollupLayer2Transaction{
|
||||
Caller: tx.From,
|
||||
Nonce: tx.Nonce,
|
||||
Gas: tx.Gas,
|
||||
GasPrice: tx.GasPrice.ToInt(),
|
||||
|
||||
@@ -81,8 +81,8 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
|
||||
dbTx, err := db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
batchID, err := db.NewBatchInDBTx(dbTx,
|
||||
&orm.BlockInfo{Number: templateL2Message[0].Height},
|
||||
&orm.BlockInfo{Number: templateL2Message[0].Height + 1},
|
||||
&orm.L2BlockInfo{Number: templateL2Message[0].Height},
|
||||
&orm.L2BlockInfo{Number: templateL2Message[0].Height + 1},
|
||||
"0f", 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
|
||||
assert.NoError(t, err)
|
||||
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
|
||||
@@ -140,8 +140,8 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
dbTx, err := db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
batchID, err := db.NewBatchInDBTx(dbTx,
|
||||
&orm.BlockInfo{Number: traces[0].Header.Number.Uint64()},
|
||||
&orm.BlockInfo{Number: traces[1].Header.Number.Uint64()},
|
||||
&orm.L2BlockInfo{Number: traces[0].Header.Number.Uint64()},
|
||||
&orm.L2BlockInfo{Number: traces[1].Header.Number.Uint64()},
|
||||
"ff", 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
|
||||
assert.NoError(t, err)
|
||||
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
|
||||
@@ -179,7 +179,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
|
||||
|
||||
dbTx, err := db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
|
||||
batchID, err := db.NewBatchInDBTx(dbTx, &orm.L2BlockInfo{}, &orm.L2BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
|
||||
assert.NoError(t, err)
|
||||
err = dbTx.Commit()
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -2,11 +2,13 @@ package l2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
geth "github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
@@ -36,6 +38,12 @@ type relayedMessage struct {
|
||||
isSuccessful bool
|
||||
}
|
||||
|
||||
type importedBlock struct {
|
||||
blockHeight uint64
|
||||
blockHash common.Hash
|
||||
txHash common.Hash
|
||||
}
|
||||
|
||||
// WatcherClient provides APIs for subscribing to various events from l2geth
type WatcherClient struct {
|
||||
ctx context.Context
|
||||
@@ -45,10 +53,19 @@ type WatcherClient struct {
|
||||
|
||||
orm database.OrmFactory
|
||||
|
||||
confirmations uint64
|
||||
confirmations uint64
|
||||
|
||||
messengerAddress common.Address
|
||||
messengerABI *abi.ABI
|
||||
|
||||
messageQueueAddress common.Address
|
||||
messageQueueABI *abi.ABI
|
||||
|
||||
blockContainerAddress common.Address
|
||||
blockContainerABI *abi.ABI
|
||||
|
||||
withdrawTrie *WithdrawTrie
|
||||
|
||||
// The height of the block up to which the watcher has retrieved event logs
processedMsgHeight uint64
|
||||
|
||||
@@ -59,25 +76,61 @@ type WatcherClient struct {
|
||||
}
|
||||
|
||||
// NewL2WatcherClient takes an l2geth client and generates an L2 watcher client instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
|
||||
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, bpCfg *config.BatchProposerConfig, messengerAddress, messageQueueAddress, blockContainerAddress common.Address, orm database.OrmFactory) (*WatcherClient, error) {
|
||||
savedHeight, err := orm.GetLayer2LatestWatchedHeight()
|
||||
if err != nil {
|
||||
log.Warn("fetch height from db failed", "err", err)
|
||||
savedHeight = 0
|
||||
}
|
||||
|
||||
withdrawTrie := NewWithdrawTrie()
|
||||
currentMessageNonce, err := orm.GetLayer2LatestMessageNonce()
|
||||
if err != nil {
|
||||
log.Warn("fetch message nonce from db failed", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if currentMessageNonce != -1 {
|
||||
msg, err := orm.GetL2MessageByNonce(uint64(currentMessageNonce))
|
||||
if err != nil {
|
||||
log.Warn("fetch message by nonce from db failed", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
// fetch and rebuild from message db
|
||||
proofBytes, err := orm.GetMessageProofByNonce(uint64(currentMessageNonce))
|
||||
if err != nil {
|
||||
log.Warn("fetch message proof from db failed", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
if len(proofBytes)%32 != 0 {
|
||||
log.Warn("proof string has wrong length", "length", len(proofBytes))
|
||||
return nil, errors.New("proof string with wrong length")
|
||||
}
|
||||
withdrawTrie.Initialize(uint64(currentMessageNonce), common.HexToHash(msg.MsgHash), proofBytes)
|
||||
}
|
||||
|
||||
return &WatcherClient{
|
||||
ctx: ctx,
|
||||
Client: client,
|
||||
orm: orm,
|
||||
processedMsgHeight: uint64(savedHeight),
|
||||
confirmations: confirmations,
|
||||
messengerAddress: messengerAddress,
|
||||
messengerABI: bridge_abi.L2MessengerMetaABI,
|
||||
stopCh: make(chan struct{}),
|
||||
stopped: 0,
|
||||
batchProposer: newBatchProposer(bpCfg, orm),
|
||||
}
|
||||
|
||||
messengerAddress: messengerAddress,
|
||||
messengerABI: bridge_abi.L2MessengerABI,
|
||||
|
||||
messageQueueAddress: messageQueueAddress,
|
||||
messageQueueABI: bridge_abi.L2MessageQueueABI,
|
||||
|
||||
blockContainerAddress: blockContainerAddress,
|
||||
blockContainerABI: bridge_abi.L1BlockContainerABI,
|
||||
|
||||
withdrawTrie: withdrawTrie,
|
||||
|
||||
stopCh: make(chan struct{}),
|
||||
stopped: 0,
|
||||
batchProposer: newBatchProposer(bpCfg, orm),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start the Listening process
|
||||
@@ -238,6 +291,16 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
|
||||
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
|
||||
}()
|
||||
|
||||
var dbTx *sqlx.Tx
|
||||
var dbTxErr error
|
||||
defer func() {
|
||||
if dbTxErr != nil {
|
||||
if err := dbTx.Rollback(); err != nil {
|
||||
log.Error("dbTx.Rollback()", "err", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
fromBlock := int64(w.processedMsgHeight) + 1
|
||||
toBlock := int64(blockHeight)
|
||||
|
||||
@@ -254,13 +317,17 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
|
||||
ToBlock: big.NewInt(to), // inclusive
|
||||
Addresses: []common.Address{
|
||||
w.messengerAddress,
|
||||
w.messageQueueAddress,
|
||||
w.blockContainerAddress,
|
||||
},
|
||||
Topics: make([][]common.Hash, 1),
|
||||
}
|
||||
query.Topics[0] = make([]common.Hash, 3)
|
||||
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
|
||||
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
|
||||
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
|
||||
query.Topics[0] = make([]common.Hash, 5)
|
||||
query.Topics[0][0] = bridge_abi.L2SentMessageEventSignature
|
||||
query.Topics[0][1] = bridge_abi.L2RelayedMessageEventSignature
|
||||
query.Topics[0][2] = bridge_abi.L2FailedRelayedMessageEventSignature
|
||||
query.Topics[0][3] = bridge_abi.L2AppendMessageEventSignature
|
||||
query.Topics[0][4] = bridge_abi.L2ImportBlockEventSignature
|
||||
|
||||
logs, err := w.FilterLogs(w.ctx, query)
|
||||
if err != nil {
|
||||
@@ -272,14 +339,24 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
|
||||
bridgeL2MsgSyncHeightGauge.Update(to)
|
||||
continue
|
||||
}
|
||||
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
|
||||
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to,
|
||||
"cnt", len(logs))
|
||||
|
||||
sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
|
||||
sentMessageEvents, relayedMessageEvents, importedBlockEvents, err := w.parseBridgeEventLogs(logs)
|
||||
if err != nil {
|
||||
log.Error("failed to parse emitted event log", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Update imported block first to make sure we don't forget to update importing blocks.
|
||||
for _, block := range importedBlockEvents {
|
||||
err = w.orm.UpdateL1BlockStatusAndImportTxHash(w.ctx, block.blockHash.String(), orm.L1BlockImported, block.txHash.String())
|
||||
if err != nil {
|
||||
log.Error("Failed to update l1 block status and import tx hash", "err", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
|
||||
@@ -296,8 +373,49 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
|
||||
}
|
||||
}
|
||||
|
||||
if err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
|
||||
log.Error("failed to save l2 messages", "err", err)
|
||||
dbTx, err = w.orm.Beginx()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// group sentMessageEvents by block height
|
||||
index := 0
|
||||
nonce := w.withdrawTrie.NextMessageNonce
|
||||
for height := from; height <= to; height++ {
|
||||
var hashes []common.Hash
|
||||
var msgs []*orm.L2Message
|
||||
for ; index < len(sentMessageEvents) && sentMessageEvents[index].Height == uint64(height); index++ {
|
||||
if nonce != sentMessageEvents[index].Nonce {
|
||||
log.Error("nonce mismatch", "expected", nonce, "found", sentMessageEvents[index].Nonce)
|
||||
return
|
||||
}
|
||||
hashes = append(hashes, common.HexToHash(sentMessageEvents[index].MsgHash))
|
||||
msgs = append(msgs, sentMessageEvents[index])
|
||||
nonce++
|
||||
}
|
||||
proofBytes := w.withdrawTrie.AppendMessages(hashes)
|
||||
for i := 0; i < len(hashes); i++ {
|
||||
msgs[i].Proof = common.Bytes2Hex(proofBytes[i])
|
||||
}
|
||||
|
||||
// save message root in block
|
||||
dbTxErr = w.orm.SetMessageRootForBlocksInDBTx(dbTx, []uint64{uint64(height)}, w.withdrawTrie.MessageRoot().String())
|
||||
if dbTxErr != nil {
|
||||
log.Error("SetBatchIDForBlocksInDBTx failed", "error", dbTxErr)
|
||||
return
|
||||
}
|
||||
|
||||
// save l2 messages
|
||||
dbTxErr = w.orm.SaveL2MessagesInDbTx(w.ctx, dbTx, msgs)
|
||||
if dbTxErr != nil {
|
||||
log.Error("SaveL2MessagesInDbTx failed", "error", dbTxErr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
dbTxErr = dbTx.Commit()
|
||||
if dbTxErr != nil {
|
||||
log.Error("dbTx.Commit failed", "error", dbTxErr)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -306,36 +424,43 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, error) {
|
||||
func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, []importedBlock, error) {
|
||||
// Need to use the contract ABI to parse the event log
// Can only be tested after we have our contracts set up
|
||||
|
||||
var l2Messages []*orm.L2Message
|
||||
var relayedMessages []relayedMessage
|
||||
var importedBlocks []importedBlock
|
||||
var lastAppendMsgHash common.Hash
|
||||
var lastAppendMsgNonce uint64
|
||||
for _, vLog := range logs {
|
||||
switch vLog.Topics[0] {
|
||||
case common.HexToHash(bridge_abi.SentMessageEventSignature):
|
||||
event := struct {
|
||||
Target common.Address
|
||||
Sender common.Address
|
||||
Value *big.Int // uint256
|
||||
Fee *big.Int // uint256
|
||||
Deadline *big.Int // uint256
|
||||
Message []byte
|
||||
MessageNonce *big.Int // uint256
|
||||
GasLimit *big.Int // uint256
|
||||
}{}
|
||||
|
||||
err := w.messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
|
||||
case bridge_abi.L2SentMessageEventSignature:
|
||||
event := bridge_abi.L2SentMessageEvent{}
|
||||
err := utils.UnpackLog(w.messengerABI, &event, "SentMessage", vLog)
|
||||
if err != nil {
|
||||
log.Error("failed to unpack layer2 SentMessage event", "err", err)
|
||||
return l2Messages, relayedMessages, err
|
||||
return l2Messages, relayedMessages, importedBlocks, err
|
||||
}
|
||||
computedMsgHash := utils.ComputeMessageHash(
|
||||
event.Sender,
|
||||
event.Target,
|
||||
event.Value,
|
||||
event.Fee,
|
||||
event.Deadline,
|
||||
event.Message,
|
||||
event.MessageNonce,
|
||||
)
|
||||
// they should always match; this is just a sanity check
if event.MessageNonce.Uint64() != lastAppendMsgNonce {
|
||||
return l2Messages, relayedMessages, importedBlocks, errors.New("l2 message nonce mismatch")
|
||||
}
|
||||
if computedMsgHash != lastAppendMsgHash {
|
||||
return l2Messages, relayedMessages, importedBlocks, errors.New("l2 message hash mismatch")
|
||||
}
|
||||
// target is in topics[1]
|
||||
event.Target = common.HexToAddress(vLog.Topics[1].String())
|
||||
l2Messages = append(l2Messages, &orm.L2Message{
|
||||
Nonce: event.MessageNonce.Uint64(),
|
||||
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
|
||||
MsgHash: computedMsgHash.String(),
|
||||
Height: vLog.BlockNumber,
|
||||
Sender: event.Sender.String(),
|
||||
Value: event.Value.String(),
|
||||
@@ -346,32 +471,55 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
|
||||
Calldata: common.Bytes2Hex(event.Message),
|
||||
Layer2Hash: vLog.TxHash.Hex(),
|
||||
})
|
||||
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
|
||||
event := struct {
|
||||
MsgHash common.Hash
|
||||
}{}
|
||||
// MsgHash is in topics[1]
|
||||
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
|
||||
case bridge_abi.L2RelayedMessageEventSignature:
|
||||
event := bridge_abi.L2RelayedMessageEvent{}
|
||||
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err)
|
||||
return l2Messages, relayedMessages, importedBlocks, err
|
||||
}
|
||||
relayedMessages = append(relayedMessages, relayedMessage{
|
||||
msgHash: event.MsgHash,
|
||||
txHash: vLog.TxHash,
|
||||
isSuccessful: true,
|
||||
})
|
||||
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
|
||||
event := struct {
|
||||
MsgHash common.Hash
|
||||
}{}
|
||||
// MsgHash is in topics[1]
|
||||
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
|
||||
case bridge_abi.L2FailedRelayedMessageEventSignature:
|
||||
event := bridge_abi.L2FailedRelayedMessageEvent{}
|
||||
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err)
|
||||
return l2Messages, relayedMessages, importedBlocks, err
|
||||
}
|
||||
relayedMessages = append(relayedMessages, relayedMessage{
|
||||
msgHash: event.MsgHash,
|
||||
txHash: vLog.TxHash,
|
||||
isSuccessful: false,
|
||||
})
|
||||
case bridge_abi.L2ImportBlockEventSignature:
|
||||
event := bridge_abi.L2ImportBlockEvent{}
|
||||
err := utils.UnpackLog(w.blockContainerABI, &event, "ImportBlock", vLog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack layer2 ImportBlock event", "err", err)
|
||||
return l2Messages, relayedMessages, importedBlocks, err
|
||||
}
|
||||
importedBlocks = append(importedBlocks, importedBlock{
|
||||
blockHeight: event.BlockHeight.Uint64(),
|
||||
blockHash: event.BlockHash,
|
||||
txHash: vLog.TxHash,
|
||||
})
|
||||
case bridge_abi.L2AppendMessageEventSignature:
|
||||
event := bridge_abi.L2AppendMessageEvent{}
|
||||
err := utils.UnpackLog(w.messageQueueABI, &event, "AppendMessage", vLog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack layer2 AppendMessage event", "err", err)
|
||||
return l2Messages, relayedMessages, importedBlocks, err
|
||||
}
|
||||
lastAppendMsgHash = event.MessageHash
|
||||
lastAppendMsgNonce = event.Index.Uint64()
|
||||
default:
|
||||
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
|
||||
}
|
||||
}
|
||||
|
||||
return l2Messages, relayedMessages, nil
|
||||
return l2Messages, relayedMessages, importedBlocks, nil
|
||||
}
|
||||
|
||||
@@ -31,7 +31,8 @@ func testCreateNewWatcherAndStop(t *testing.T) {
|
||||
defer l2db.Close()
|
||||
|
||||
l2cfg := cfg.L2Config
|
||||
rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
|
||||
rc, err := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.L2BlockContainerAddress, l2db)
|
||||
assert.NoError(t, err)
|
||||
rc.Start()
|
||||
defer rc.Stop()
|
||||
|
||||
@@ -72,7 +73,8 @@ func testMonitorBridgeContract(t *testing.T) {
|
||||
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
rc := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
|
||||
rc, err := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
|
||||
assert.NoError(t, err)
|
||||
rc.Start()
|
||||
defer rc.Stop()
|
||||
|
||||
@@ -134,7 +136,8 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
|
||||
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
rc := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
|
||||
rc, err := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
|
||||
assert.NoError(t, err)
|
||||
rc.Start()
|
||||
defer rc.Stop()
|
||||
|
||||
@@ -189,8 +192,8 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
|
||||
assert.Equal(t, 5, len(msgs))
|
||||
}
|
||||
|
||||
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, db)
|
||||
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) (*WatcherClient, error) {
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, contractAddr, contractAddr, db)
|
||||
}
|
||||
|
||||
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
|
||||
|
||||
bridge/l2/withdraw_trie.go (new file, 194 lines)
@@ -0,0 +1,194 @@
|
||||
package l2
|
||||
|
||||
import (
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
|
||||
"scroll-tech/bridge/utils"
|
||||
)
|
||||
|
||||
// MaxHeight is the maximum possible height of the withdraw trie
const MaxHeight = 40
|
||||
|
||||
// WithdrawTrie is an append-only merkle trie
type WithdrawTrie struct {
|
||||
// used to rebuild the merkle tree
|
||||
NextMessageNonce uint64
|
||||
|
||||
height int // current height of withdraw trie
|
||||
|
||||
branches []common.Hash
|
||||
zeroes []common.Hash
|
||||
}
|
||||
|
||||
// NewWithdrawTrie will return a new instance of WithdrawTrie
|
||||
func NewWithdrawTrie() *WithdrawTrie {
|
||||
zeroes := make([]common.Hash, MaxHeight)
|
||||
branches := make([]common.Hash, MaxHeight)
|
||||
|
||||
zeroes[0] = common.Hash{}
|
||||
for i := 1; i < MaxHeight; i++ {
|
||||
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
|
||||
}
|
||||
|
||||
for i := 0; i < MaxHeight; i++ {
|
||||
branches[i] = common.Hash{}
|
||||
}
|
||||
|
||||
return &WithdrawTrie{
|
||||
zeroes: zeroes,
|
||||
branches: branches,
|
||||
height: -1,
|
||||
NextMessageNonce: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize initializes the merkle trie with the rightmost leaf node
func (w *WithdrawTrie) Initialize(currentMessageNonce uint64, msgHash common.Hash, proofBytes []byte) {
|
||||
proof := DecodeBytesToMerkleProof(proofBytes)
|
||||
branches := RecoverBranchFromProof(proof, currentMessageNonce, msgHash)
|
||||
|
||||
w.height = len(proof)
|
||||
w.branches = branches
|
||||
w.NextMessageNonce = currentMessageNonce + 1
|
||||
}
|
||||
|
||||
// AppendMessages appends a list of new messages as the rightmost leaf nodes and returns their encoded merkle proofs.
func (w *WithdrawTrie) AppendMessages(hashes []common.Hash) [][]byte {
|
||||
length := len(hashes)
|
||||
if length == 0 {
|
||||
return make([][]byte, 0)
|
||||
}
|
||||
|
||||
cache := make([]map[uint64]common.Hash, MaxHeight)
|
||||
for h := 0; h < MaxHeight; h++ {
|
||||
cache[h] = make(map[uint64]common.Hash)
|
||||
}
|
||||
|
||||
// cache all branches that will be used later.
if w.NextMessageNonce != 0 {
|
||||
index := w.NextMessageNonce
|
||||
for h := 0; h <= w.height; h++ {
|
||||
if index%2 == 1 {
|
||||
// right child, `w.branches[h]` is the corresponding left child
|
||||
// the index of left child should be `index ^ 1`.
|
||||
cache[h][index^1] = w.branches[h]
|
||||
}
|
||||
index >>= 1
|
||||
}
|
||||
}
|
||||
// cache all new leaves
|
||||
for i := 0; i < length; i++ {
|
||||
cache[0][w.NextMessageNonce+uint64(i)] = hashes[i]
|
||||
}
|
||||
|
||||
// build withdraw trie with new hashes
|
||||
minIndex := w.NextMessageNonce
|
||||
maxIndex := w.NextMessageNonce + uint64(length) - 1
|
||||
for h := 0; maxIndex > 0; h++ {
|
||||
if minIndex%2 == 1 {
|
||||
minIndex--
|
||||
}
|
||||
if maxIndex%2 == 0 {
|
||||
cache[h][maxIndex^1] = w.zeroes[h]
|
||||
}
|
||||
for i := minIndex; i <= maxIndex; i += 2 {
|
||||
cache[h+1][i>>1] = utils.Keccak2(cache[h][i], cache[h][i^1])
|
||||
}
|
||||
minIndex >>= 1
|
||||
maxIndex >>= 1
|
||||
}
|
||||
|
||||
// update branches using hashes one by one
|
||||
for i := 0; i < length; i++ {
|
||||
proof := UpdateBranchWithNewMessage(w.zeroes, w.branches, w.NextMessageNonce, hashes[i])
|
||||
w.NextMessageNonce++
|
||||
w.height = len(proof)
|
||||
}
|
||||
|
||||
proofs := make([][]byte, length)
|
||||
// retrieve merkle proof from cache
|
||||
for i := 0; i < length; i++ {
|
||||
index := w.NextMessageNonce + uint64(i) - uint64(length)
|
||||
var merkleProof []common.Hash
|
||||
for h := 0; h < w.height; h++ {
|
||||
merkleProof = append(merkleProof, cache[h][index^1])
|
||||
index >>= 1
|
||||
}
|
||||
proofs[i] = EncodeMerkleProofToBytes(merkleProof)
|
||||
}
|
||||
|
||||
return proofs
|
||||
}
|
||||
|
||||
// MessageRoot returns the current root hash of the withdraw trie.
func (w *WithdrawTrie) MessageRoot() common.Hash {
|
||||
if w.height == -1 {
|
||||
return common.Hash{}
|
||||
}
|
||||
return w.branches[w.height]
|
||||
}
|
||||
|
||||
// DecodeBytesToMerkleProof converts a byte array into a bytes32 array. The caller should make sure the length is a multiple of 32.
func DecodeBytesToMerkleProof(proofBytes []byte) []common.Hash {
|
||||
proof := make([]common.Hash, len(proofBytes)/32)
|
||||
for i := 0; i < len(proofBytes); i += 32 {
|
||||
proof[i/32] = common.BytesToHash(proofBytes[i : i+32])
|
||||
}
|
||||
return proof
|
||||
}
|
||||
|
||||
// EncodeMerkleProofToBytes converts a bytes32 array into a byte array by concatenation.
func EncodeMerkleProofToBytes(proof []common.Hash) []byte {
|
||||
var proofBytes []byte
|
||||
for i := 0; i < len(proof); i++ {
|
||||
proofBytes = append(proofBytes, proof[i][:]...)
|
||||
}
|
||||
return proofBytes
|
||||
}
|
||||
|
||||
// UpdateBranchWithNewMessage updates the branches with the new message and returns the merkle proof for that message.
func UpdateBranchWithNewMessage(zeroes []common.Hash, branches []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
|
||||
root := msgHash
|
||||
var merkleProof []common.Hash
|
||||
var height uint64
|
||||
for height = 0; index > 0; height++ {
|
||||
if index%2 == 0 {
|
||||
// it may be used in next round.
|
||||
branches[height] = root
|
||||
merkleProof = append(merkleProof, zeroes[height])
|
||||
// it's a left child, the right child must be null
|
||||
root = utils.Keccak2(root, zeroes[height])
|
||||
} else {
|
||||
// it's a right child, use previously computed hash
|
||||
root = utils.Keccak2(branches[height], root)
|
||||
merkleProof = append(merkleProof, branches[height])
|
||||
}
|
||||
index >>= 1
|
||||
}
|
||||
branches[height] = root
|
||||
return merkleProof
|
||||
}
|
||||
|
||||
// RecoverBranchFromProof recovers the latest branches from a merkle proof and the message hash.
func RecoverBranchFromProof(proof []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
|
||||
branches := make([]common.Hash, 64)
|
||||
root := msgHash
|
||||
var height uint64
|
||||
for height = 0; index > 0; height++ {
|
||||
if index%2 == 0 {
|
||||
branches[height] = root
|
||||
// it's a left child, the right child must be null
|
||||
root = utils.Keccak2(root, proof[height])
|
||||
} else {
|
||||
// it's a right child, use previously computed hash
|
||||
branches[height] = proof[height]
|
||||
root = utils.Keccak2(proof[height], root)
|
||||
}
|
||||
index >>= 1
|
||||
}
|
||||
branches[height] = root
|
||||
for height++; height < 64; height++ {
|
||||
branches[height] = common.Hash{}
|
||||
}
|
||||
return branches
|
||||
}
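A minimal usage sketch of the new append-only trie, for readers skimming the diff: it only calls the exported functions added in this file (NewWithdrawTrie, AppendMessages, MessageRoot, DecodeBytesToMerkleProof). The module path scroll-tech/bridge/l2 is taken from the test file below; the standalone main package is illustrative and not part of this change.

package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"

	"scroll-tech/bridge/l2"
)

func main() {
	trie := l2.NewWithdrawTrie()

	// Append two message hashes; AppendMessages returns one encoded proof per hash.
	hashes := []common.Hash{
		common.BigToHash(big.NewInt(1)),
		common.BigToHash(big.NewInt(2)),
	}
	proofs := trie.AppendMessages(hashes)

	// Each proof is a concatenation of 32-byte sibling hashes from leaf to root.
	siblings := l2.DecodeBytesToMerkleProof(proofs[1])
	fmt.Println("root:", trie.MessageRoot().Hex(), "siblings:", len(siblings))
}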
bridge/l2/withdraw_trie_test.go (new file, 213 lines)
@@ -0,0 +1,213 @@
|
||||
package l2_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/bridge/l2"
|
||||
"scroll-tech/bridge/utils"
|
||||
)
|
||||
|
||||
func TestUpdateBranchWithNewMessage(t *testing.T) {
|
||||
zeroes := make([]common.Hash, 64)
|
||||
branches := make([]common.Hash, 64)
|
||||
zeroes[0] = common.Hash{}
|
||||
for i := 1; i < 64; i++ {
|
||||
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
|
||||
}
|
||||
|
||||
l2.UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if branches[0] != common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001") {
|
||||
t.Fatalf("Invalid root, want %s, got %s", "0x0000000000000000000000000000000000000000000000000000000000000001", branches[0].Hex())
|
||||
}
|
||||
|
||||
l2.UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
if branches[1] != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
|
||||
t.Fatalf("Invalid root, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", branches[1].Hex())
|
||||
}
|
||||
|
||||
l2.UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
if branches[2] != common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c") {
|
||||
t.Fatalf("Invalid root, want %s, got %s", "0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c", branches[2].Hex())
|
||||
}
|
||||
|
||||
l2.UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
|
||||
if branches[2] != common.HexToHash("0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36") {
|
||||
t.Fatalf("Invalid root, want %s, got %s", "0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36", branches[2].Hex())
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeEncodeMerkleProof(t *testing.T) {
|
||||
proof := l2.DecodeBytesToMerkleProof(common.Hex2Bytes("2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904"))
|
||||
if len(proof) != 4 {
|
||||
t.Fatalf("proof length mismatch, want %d, got %d", 4, len(proof))
|
||||
}
|
||||
if proof[0] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4901") {
|
||||
t.Fatalf("proof[0] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4901", proof[0].Hex())
|
||||
}
|
||||
if proof[1] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4902") {
|
||||
t.Fatalf("proof[1] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4902", proof[0].Hex())
|
||||
}
|
||||
if proof[2] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4903") {
|
||||
t.Fatalf("proof[2] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4903", proof[0].Hex())
|
||||
}
|
||||
if proof[3] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904") {
|
||||
t.Fatalf("proof[3] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904", proof[0].Hex())
|
||||
}
|
||||
|
||||
bytes := l2.EncodeMerkleProofToBytes(proof)
|
||||
if common.Bytes2Hex(bytes) != "2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904" {
|
||||
t.Fatalf("wrong encoded bytes")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRecoverBranchFromProof(t *testing.T) {
|
||||
zeroes := make([]common.Hash, 64)
|
||||
branches := make([]common.Hash, 64)
|
||||
zeroes[0] = common.Hash{}
|
||||
for i := 1; i < 64; i++ {
|
||||
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
|
||||
}
|
||||
|
||||
proof := l2.UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
tmpBranches := l2.RecoverBranchFromProof(proof, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
for i := 0; i < 64; i++ {
|
||||
if tmpBranches[i] != branches[i] {
|
||||
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
|
||||
}
|
||||
}
|
||||
|
||||
proof = l2.UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
tmpBranches = l2.RecoverBranchFromProof(proof, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
for i := 0; i < 64; i++ {
|
||||
if tmpBranches[i] != branches[i] {
|
||||
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
|
||||
}
|
||||
}
|
||||
|
||||
proof = l2.UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
tmpBranches = l2.RecoverBranchFromProof(proof, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
for i := 0; i < 64; i++ {
|
||||
if tmpBranches[i] != branches[i] {
|
||||
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
|
||||
}
|
||||
}
|
||||
|
||||
proof = l2.UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
|
||||
tmpBranches = l2.RecoverBranchFromProof(proof, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
|
||||
for i := 0; i < 64; i++ {
|
||||
if tmpBranches[i] != branches[i] {
|
||||
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithdrawTrieOneByOne(t *testing.T) {
|
||||
for initial := 0; initial < 128; initial++ {
|
||||
withdrawTrie := l2.NewWithdrawTrie()
|
||||
var hashes []common.Hash
|
||||
for i := 0; i < initial; i++ {
|
||||
hash := common.BigToHash(big.NewInt(int64(i + 1)))
|
||||
hashes = append(hashes, hash)
|
||||
withdrawTrie.AppendMessages([]common.Hash{
|
||||
hash,
|
||||
})
|
||||
}
|
||||
|
||||
for i := initial; i < 128; i++ {
|
||||
hash := common.BigToHash(big.NewInt(int64(i + 1)))
|
||||
hashes = append(hashes, hash)
|
||||
expectedRoot := computeMerkleRoot(hashes)
|
||||
proofBytes := withdrawTrie.AppendMessages([]common.Hash{
|
||||
hash,
|
||||
})
|
||||
assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(i+1))
|
||||
assert.Equal(t, expectedRoot.String(), withdrawTrie.MessageRoot().String())
|
||||
proof := l2.DecodeBytesToMerkleProof(proofBytes[0])
|
||||
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
|
||||
assert.Equal(t, expectedRoot.String(), verifiedRoot.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithdrawTrieMultiple(t *testing.T) {
|
||||
var expectedRoots []common.Hash
|
||||
|
||||
{
|
||||
var hashes []common.Hash
|
||||
for i := 0; i < 128; i++ {
|
||||
hash := common.BigToHash(big.NewInt(int64(i + 1)))
|
||||
hashes = append(hashes, hash)
|
||||
expectedRoots = append(expectedRoots, computeMerkleRoot(hashes))
|
||||
}
|
||||
}
|
||||
|
||||
for initial := 0; initial < 100; initial++ {
|
||||
var hashes []common.Hash
|
||||
for i := 0; i < initial; i++ {
|
||||
hash := common.BigToHash(big.NewInt(int64(i + 1)))
|
||||
hashes = append(hashes, hash)
|
||||
}
|
||||
|
||||
for finish := initial; finish < 100; finish++ {
|
||||
withdrawTrie := l2.NewWithdrawTrie()
|
||||
withdrawTrie.AppendMessages(hashes)
|
||||
|
||||
var newHashes []common.Hash
|
||||
for i := initial; i <= finish; i++ {
|
||||
hash := common.BigToHash(big.NewInt(int64(i + 1)))
|
||||
newHashes = append(newHashes, hash)
|
||||
}
|
||||
proofBytes := withdrawTrie.AppendMessages(newHashes)
|
||||
assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(finish+1))
|
||||
assert.Equal(t, expectedRoots[finish].String(), withdrawTrie.MessageRoot().String())
|
||||
|
||||
for i := initial; i <= finish; i++ {
|
||||
hash := common.BigToHash(big.NewInt(int64(i + 1)))
|
||||
proof := l2.DecodeBytesToMerkleProof(proofBytes[i-initial])
|
||||
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
|
||||
assert.Equal(t, expectedRoots[finish].String(), verifiedRoot.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func verifyMerkleProof(index uint64, leaf common.Hash, proof []common.Hash) common.Hash {
|
||||
root := leaf
|
||||
for _, h := range proof {
|
||||
if index%2 == 0 {
|
||||
root = utils.Keccak2(root, h)
|
||||
} else {
|
||||
root = utils.Keccak2(h, root)
|
||||
}
|
||||
index >>= 1
|
||||
}
|
||||
return root
|
||||
}
|
||||
|
||||
func computeMerkleRoot(hashes []common.Hash) common.Hash {
|
||||
if len(hashes) == 0 {
|
||||
return common.Hash{}
|
||||
}
|
||||
|
||||
zeroHash := common.Hash{}
|
||||
for {
|
||||
if len(hashes) == 1 {
|
||||
break
|
||||
}
|
||||
var newHashes []common.Hash
|
||||
for i := 0; i < len(hashes); i += 2 {
|
||||
if i+1 < len(hashes) {
|
||||
newHashes = append(newHashes, utils.Keccak2(hashes[i], hashes[i+1]))
|
||||
} else {
|
||||
newHashes = append(newHashes, utils.Keccak2(hashes[i], zeroHash))
|
||||
}
|
||||
}
|
||||
hashes = newHashes
|
||||
zeroHash = utils.Keccak2(zeroHash, zeroHash)
|
||||
}
|
||||
return hashes[0]
|
||||
}
|
||||
@@ -23,6 +23,14 @@ contract MockBridgeL1 {
|
||||
|
||||
event FailedRelayedMessage(bytes32 indexed msgHash);
|
||||
|
||||
/******************************
|
||||
* Events from L1MessageQueue *
|
||||
******************************/
|
||||
|
||||
/// @notice Emitted when a L1 to L2 message is appended.
|
||||
/// @param msgHash The hash of the appended message.
|
||||
event AppendMessage(bytes32 indexed msgHash);
|
||||
|
||||
/************************
|
||||
* Events from ZKRollup *
|
||||
************************/
|
||||
@@ -48,14 +56,12 @@ contract MockBridgeL1 {
|
||||
***********/
|
||||
|
||||
struct L2MessageProof {
|
||||
uint256 batchIndex;
|
||||
uint256 blockHeight;
|
||||
bytes merkleProof;
|
||||
bytes32 blockHash;
|
||||
bytes32[] messageRootProof;
|
||||
}
|
||||
|
||||
/// @dev The transaction struct
struct Layer2Transaction {
|
||||
address caller;
|
||||
uint64 nonce;
|
||||
address target;
|
||||
uint64 gas;
|
||||
@@ -79,6 +85,7 @@ contract MockBridgeL1 {
|
||||
uint64 timestamp;
|
||||
bytes extraData;
|
||||
Layer2Transaction[] txs;
|
||||
bytes32 messageRoot;
|
||||
}
|
||||
|
||||
/// @dev The batch struct, the batch hash is always the last block hash of `blocks`.
|
||||
@@ -123,6 +130,8 @@ contract MockBridgeL1 {
|
||||
_value = msg.value - _fee;
|
||||
}
|
||||
uint256 _nonce = messageNonce;
|
||||
bytes32 _msghash = keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message));
|
||||
emit AppendMessage(_msghash);
|
||||
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
|
||||
messageNonce += 1;
|
||||
}
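As a cross-check on the AppendMessage/SentMessage pairing above, the sketch below recomputes the message hash the way MockBridgeL1.sendMessage packs it: keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message)). It is only an illustration under that assumption; the helper name is hypothetical, and whether utils.ComputeMessageHash on the Go side uses this exact packing is not shown in this diff.

package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// mockBridgeMessageHash mirrors the Solidity packing above (an assumption, not the
// bridge's canonical implementation): addresses are packed as 20 bytes, uint256
// values as 32 bytes, and the message bytes are appended without a length prefix.
func mockBridgeMessageHash(sender, to common.Address, value, fee, deadline, nonce *big.Int, message []byte) common.Hash {
	var buf []byte
	buf = append(buf, sender.Bytes()...)
	buf = append(buf, to.Bytes()...)
	buf = append(buf, common.LeftPadBytes(value.Bytes(), 32)...)
	buf = append(buf, common.LeftPadBytes(fee.Bytes(), 32)...)
	buf = append(buf, common.LeftPadBytes(deadline.Bytes(), 32)...)
	buf = append(buf, common.LeftPadBytes(nonce.Bytes(), 32)...)
	buf = append(buf, message...)
	return crypto.Keccak256Hash(buf)
}

func main() {
	h := mockBridgeMessageHash(common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil)
	fmt.Println(h.Hex())
}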
|
||||
|
||||
@@ -23,6 +23,35 @@ contract MockBridgeL2 {
|
||||
|
||||
event FailedRelayedMessage(bytes32 indexed msgHash);
|
||||
|
||||
/******************************
|
||||
* Events from L2MessageQueue *
|
||||
******************************/
|
||||
|
||||
/// @notice Emitted when a new message is added to the merkle tree.
|
||||
/// @param index The index of the corresponding message.
|
||||
/// @param messageHash The hash of the corresponding message.
|
||||
event AppendMessage(uint256 index, bytes32 messageHash);
|
||||
|
||||
/********************************
|
||||
* Events from L1BlockContainer *
|
||||
********************************/
|
||||
|
||||
/// @notice Emitted when a block is imported.
|
||||
/// @param blockHash The hash of the imported block.
|
||||
/// @param blockHeight The height of the imported block.
|
||||
/// @param blockTimestamp The timestamp of the imported block.
|
||||
/// @param stateRoot The state root of the imported block.
|
||||
event ImportBlock(bytes32 indexed blockHash, uint256 blockHeight, uint256 blockTimestamp, bytes32 stateRoot);
|
||||
|
||||
/***********
|
||||
* Structs *
|
||||
***********/
|
||||
|
||||
struct L1MessageProof {
|
||||
bytes32 blockHash;
|
||||
bytes stateRootProof;
|
||||
}
|
||||
|
||||
/*************
|
||||
* Variables *
|
||||
*************/
|
||||
@@ -48,6 +77,7 @@ contract MockBridgeL2 {
|
||||
_value = msg.value - _fee;
|
||||
}
|
||||
bytes32 _msghash = keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message));
|
||||
emit AppendMessage(_nonce, _msghash);
|
||||
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
|
||||
messageNonce = _nonce + 1;
|
||||
}
|
||||
@@ -59,9 +89,148 @@ contract MockBridgeL2 {
|
||||
uint256 _fee,
|
||||
uint256 _deadline,
|
||||
uint256 _nonce,
|
||||
bytes memory _message
|
||||
bytes memory _message,
|
||||
L1MessageProof calldata
|
||||
) external {
|
||||
bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
|
||||
emit RelayedMessage(_msghash);
|
||||
}
|
||||
|
||||
/***********************************
|
||||
* Functions from L1BlockContainer *
|
||||
***********************************/
|
||||
|
||||
function importBlockHeader(
|
||||
bytes32 _blockHash,
|
||||
bytes calldata _blockHeaderRLP,
|
||||
bytes calldata
|
||||
) external {
|
||||
bytes32 _stateRoot;
|
||||
uint64 _height;
|
||||
uint64 _timestamp;
|
||||
|
||||
assembly {
|
||||
// reverts with error `msg`.
|
||||
// make sure the length of error string <= 32
|
||||
function revertWith(msg) {
|
||||
// keccak("Error(string)")
|
||||
mstore(0x00, shl(224, 0x08c379a0))
|
||||
mstore(0x04, 0x20) // str.offset
|
||||
mstore(0x44, msg)
|
||||
let msgLen
|
||||
for {} msg {} {
|
||||
msg := shl(8, msg)
|
||||
msgLen := add(msgLen, 1)
|
||||
}
|
||||
mstore(0x24, msgLen) // str.length
|
||||
revert(0x00, 0x64)
|
||||
}
|
||||
// reverts with `msg` when condition is not matched.
|
||||
// make sure the length of error string <= 32
|
||||
function require(cond, msg) {
|
||||
if iszero(cond) {
|
||||
revertWith(msg)
|
||||
}
|
||||
}
|
||||
// returns the calldata offset of the value and the length in bytes
|
||||
// for the RLP encoded data item at `ptr`. used in `decodeFlat`
|
||||
function decodeValue(ptr) -> dataLen, valueOffset {
|
||||
let b0 := byte(0, calldataload(ptr))
|
||||
|
||||
// 0x00 - 0x7f, single byte
|
||||
if lt(b0, 0x80) {
|
||||
// for a single byte whose value is in the [0x00, 0x7f] range,
|
||||
// that byte is its own RLP encoding.
|
||||
dataLen := 1
|
||||
valueOffset := ptr
|
||||
leave
|
||||
}
|
||||
|
||||
// 0x80 - 0xb7, short string/bytes, length <= 55
|
||||
if lt(b0, 0xb8) {
|
||||
// the RLP encoding consists of a single byte with value 0x80
|
||||
// plus the length of the string followed by the string.
|
||||
dataLen := sub(b0, 0x80)
|
||||
valueOffset := add(ptr, 1)
|
||||
leave
|
||||
}
|
||||
|
||||
// 0xb8 - 0xbf, long string/bytes, length > 55
|
||||
if lt(b0, 0xc0) {
|
||||
// the RLP encoding consists of a single byte with value 0xb7
|
||||
// plus the length in bytes of the length of the string in binary form,
|
||||
// followed by the length of the string, followed by the string.
|
||||
let lengthBytes := sub(b0, 0xb7)
|
||||
if gt(lengthBytes, 4) {
|
||||
invalid()
|
||||
}
|
||||
|
||||
// load the extended length
|
||||
valueOffset := add(ptr, 1)
|
||||
let extendedLen := calldataload(valueOffset)
|
||||
let bits := sub(256, mul(lengthBytes, 8))
|
||||
extendedLen := shr(bits, extendedLen)
|
||||
|
||||
dataLen := extendedLen
|
||||
valueOffset := add(valueOffset, lengthBytes)
|
||||
leave
|
||||
}
|
||||
|
||||
revertWith("Not value")
|
||||
}
|
||||
|
||||
let ptr := _blockHeaderRLP.offset
|
||||
let headerPayloadLength
|
||||
{
|
||||
let b0 := byte(0, calldataload(ptr))
|
||||
// the input should be a long list
|
||||
if lt(b0, 0xf8) {
|
||||
invalid()
|
||||
}
|
||||
let lengthBytes := sub(b0, 0xf7)
|
||||
if gt(lengthBytes, 32) {
|
||||
invalid()
|
||||
}
|
||||
// load the extended length
|
||||
ptr := add(ptr, 1)
|
||||
headerPayloadLength := calldataload(ptr)
|
||||
let bits := sub(256, mul(lengthBytes, 8))
|
||||
// compute payload length: extended length + length bytes + 1
|
||||
headerPayloadLength := shr(bits, headerPayloadLength)
|
||||
headerPayloadLength := add(headerPayloadLength, lengthBytes)
|
||||
headerPayloadLength := add(headerPayloadLength, 1)
|
||||
ptr := add(ptr, lengthBytes)
|
||||
}
|
||||
|
||||
let memPtr := mload(0x40)
|
||||
calldatacopy(memPtr, _blockHeaderRLP.offset, headerPayloadLength)
|
||||
let _computedBlockHash := keccak256(memPtr, headerPayloadLength)
|
||||
require(eq(_blockHash, _computedBlockHash), "Block hash mismatch")
|
||||
|
||||
// load 16 values
for { let i := 0 } lt(i, 16) { i := add(i, 1) } {
|
||||
let len, offset := decodeValue(ptr)
|
||||
// the value we care must have at most 32 bytes
|
||||
if lt(len, 33) {
|
||||
let bits := mul(sub(32, len), 8)
|
||||
let value := calldataload(offset)
|
||||
value := shr(bits, value)
|
||||
mstore(memPtr, value)
|
||||
}
|
||||
memPtr := add(memPtr, 0x20)
|
||||
ptr := add(len, offset)
|
||||
}
|
||||
require(eq(ptr, add(_blockHeaderRLP.offset, _blockHeaderRLP.length)), "Header RLP length mismatch")
|
||||
|
||||
memPtr := mload(0x40)
|
||||
// load state root, 4-th entry
|
||||
_stateRoot := mload(add(memPtr, 0x60))
|
||||
// load block height, 9-th entry
|
||||
_height := mload(add(memPtr, 0x100))
|
||||
// load block timestamp, 12-th entry
|
||||
_timestamp := mload(add(memPtr, 0x160))
|
||||
}
|
||||
|
||||
emit ImportBlock(_blockHash, _height, _timestamp, _stateRoot);
|
||||
}
|
||||
}
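The assembly above pulls the state root, block height, and timestamp straight out of the RLP-encoded header on-chain. For reference only (not part of this change), the same fields can be read off-chain with go-ethereum's RLP decoder; the snippet assumes the scroll-tech/go-ethereum fork exposes the standard rlp and core/types packages, as the imports elsewhere in this diff suggest.

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// decodeHeader decodes the same bytes that importBlockHeader receives as _blockHeaderRLP.
func decodeHeader(headerRLP []byte) (*types.Header, error) {
	header := new(types.Header)
	if err := rlp.DecodeBytes(headerRLP, header); err != nil {
		return nil, err
	}
	return header, nil
}

func main() {
	var headerRLP []byte // fill with an RLP-encoded block header
	if header, err := decodeHeader(headerRLP); err == nil {
		// Root, Number, and Time correspond to the state root, height, and
		// timestamp loaded by the assembly above.
		fmt.Println(header.Root, header.Number, header.Time)
	}
}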
|
||||
|
||||
@@ -15,6 +15,8 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient/gethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -31,8 +33,10 @@ var (
|
||||
dbImg docker.ImgInstance
|
||||
|
||||
// clients
|
||||
l1Client *ethclient.Client
|
||||
l2Client *ethclient.Client
|
||||
l1ethClient *ethclient.Client
|
||||
l2ethClient *ethclient.Client
|
||||
l1gethClient *gethclient.Client
|
||||
l2gethClient *gethclient.Client
|
||||
|
||||
// auth
|
||||
l1Auth *bind.TransactOpts
|
||||
@@ -85,20 +89,25 @@ func setupEnv(t *testing.T) {
|
||||
cfg.DBConfig.DSN = dbImg.Endpoint()
|
||||
|
||||
// Create l1geth and l2geth client.
|
||||
l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)
|
||||
l1rawClient, err := rpc.DialContext(context.Background(), cfg.L1Config.Endpoint)
|
||||
assert.NoError(t, err)
|
||||
l2Client, err = ethclient.Dial(cfg.L2Config.Endpoint)
|
||||
l1ethClient = ethclient.NewClient(l1rawClient)
|
||||
l1gethClient = gethclient.New(l1rawClient)
|
||||
|
||||
l2rawClient, err := rpc.DialContext(context.Background(), cfg.L2Config.Endpoint)
|
||||
assert.NoError(t, err)
|
||||
l2ethClient = ethclient.NewClient(l2rawClient)
|
||||
l2gethClient = gethclient.New(l2rawClient)
|
||||
|
||||
// Create l1 and l2 auth
|
||||
l1Auth = prepareAuth(t, l1Client, privateKey)
|
||||
l2Auth = prepareAuth(t, l2Client, privateKey)
|
||||
l1Auth = prepareAuth(t, l1ethClient, privateKey)
|
||||
l2Auth = prepareAuth(t, l2ethClient, privateKey)
|
||||
|
||||
// send some balance to message and rollup sender
|
||||
transferEther(t, l1Auth, l1Client, messagePrivateKey)
|
||||
transferEther(t, l1Auth, l1Client, rollupPrivateKey)
|
||||
transferEther(t, l2Auth, l2Client, messagePrivateKey)
|
||||
transferEther(t, l2Auth, l2Client, rollupPrivateKey)
|
||||
transferEther(t, l1Auth, l1ethClient, messagePrivateKey)
|
||||
transferEther(t, l1Auth, l1ethClient, rollupPrivateKey)
|
||||
transferEther(t, l2Auth, l2ethClient, messagePrivateKey)
|
||||
transferEther(t, l2Auth, l2ethClient, rollupPrivateKey)
|
||||
}
|
||||
|
||||
func transferEther(t *testing.T, auth *bind.TransactOpts, client *ethclient.Client, privateKey *ecdsa.PrivateKey) {
|
||||
@@ -153,21 +162,21 @@ func prepareContracts(t *testing.T) {
|
||||
var tx *types.Transaction
|
||||
|
||||
// L1 messenger contract
|
||||
_, tx, l1MessengerInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
|
||||
_, tx, l1MessengerInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1ethClient)
|
||||
assert.NoError(t, err)
|
||||
l1MessengerAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
|
||||
l1MessengerAddress, err = bind.WaitDeployed(context.Background(), l1ethClient, tx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// L1 rollup contract
|
||||
_, tx, l1RollupInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
|
||||
_, tx, l1RollupInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1ethClient)
|
||||
assert.NoError(t, err)
|
||||
l1RollupAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
|
||||
l1RollupAddress, err = bind.WaitDeployed(context.Background(), l1ethClient, tx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// L2 messenger contract
|
||||
_, tx, l2MessengerInstance, err = mock_bridge.DeployMockBridgeL2(l2Auth, l2Client)
|
||||
_, tx, l2MessengerInstance, err = mock_bridge.DeployMockBridgeL2(l2Auth, l2ethClient)
|
||||
assert.NoError(t, err)
|
||||
l2MessengerAddress, err = bind.WaitDeployed(context.Background(), l2Client, tx)
|
||||
l2MessengerAddress, err = bind.WaitDeployed(context.Background(), l2ethClient, tx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cfg.L1Config.L1MessengerAddress = l1MessengerAddress
|
||||
|
||||
@@ -37,18 +37,20 @@ func testRelayL2MessageSucceed(t *testing.T) {
|
||||
defer l2Relayer.Stop()
|
||||
|
||||
// Create L2Watcher
|
||||
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, 0, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)
|
||||
l2Watcher, err := l2.NewL2WatcherClient(context.Background(), l2ethClient, 0, l2Cfg.BatchProposerConfig, l2Cfg.L2MessageQueueAddress, l2Cfg.L2MessengerAddress, l2Cfg.L2BlockContainerAddress, db)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create L1Watcher
|
||||
l1Cfg := cfg.L1Config
|
||||
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
|
||||
l1Watcher, err := l1.NewWatcher(context.Background(), l1gethClient, l1ethClient, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RollupContractAddress, db)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// send message through l2 messenger contract
|
||||
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
|
||||
assert.NoError(t, err)
|
||||
sendTx, err := l2MessengerInstance.SendMessage(l2Auth, l1Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
|
||||
assert.NoError(t, err)
|
||||
sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
|
||||
sendReceipt, err := bind.WaitMined(context.Background(), l2ethClient, sendTx)
|
||||
assert.NoError(t, err)
|
||||
if sendReceipt.Status != types.ReceiptStatusSuccessful || err != nil {
|
||||
t.Fatalf("Call failed")
|
||||
@@ -83,12 +85,12 @@ func testRelayL2MessageSucceed(t *testing.T) {
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
&orm.L2BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
&orm.L2BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
@@ -118,9 +120,9 @@ func testRelayL2MessageSucceed(t *testing.T) {
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
commitTx, _, err := l1ethClient.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1ethClient, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)

@@ -139,9 +141,9 @@ func testRelayL2MessageSucceed(t *testing.T) {
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
finalizeTx, _, err := l1ethClient.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1ethClient, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
@@ -156,13 +158,13 @@ func testRelayL2MessageSucceed(t *testing.T) {
l2Relayer.ProcessSavedEvents(&wg)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgSubmitted)
assert.Equal(t, orm.MsgSubmitted, msg.Status)
relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, true, relayTxHash.Valid)
relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
relayTx, _, err := l1ethClient.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx)
relayTxReceipt, err := bind.WaitMined(context.Background(), l1ethClient, relayTx)
assert.NoError(t, err)
assert.Equal(t, len(relayTxReceipt.Logs), 1)

@@ -171,5 +173,5 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgConfirmed)
assert.Equal(t, orm.MsgConfirmed, msg.Status)
}
@@ -35,7 +35,8 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {

// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
l1Watcher, err := l1.NewWatcher(context.Background(), l1gethClient, l1ethClient, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RollupContractAddress, db)
assert.NoError(t, err)

// add some blocks to db
var traces []*types.BlockTrace
@@ -60,12 +61,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
&orm.L2BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
&orm.L2BlockInfo{
Number: traces[1].Header.Number.Uint64(),
Hash: traces[1].Header.Hash().String(),
ParentHash: traces[1].Header.ParentHash.String(),
@@ -91,9 +92,9 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
commitTx, _, err := l1ethClient.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1ethClient, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)

@@ -123,9 +124,9 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
finalizeTx, _, err := l1ethClient.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1ethClient, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
@@ -2,11 +2,16 @@ package utils

import (
	"bytes"
	"context"
	"fmt"
	"math/big"

	"github.com/iden3/go-iden3-crypto/keccak256"
	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/math"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/ethclient/gethclient"
)

// Keccak2 computes the keccak256 hash of the concatenation of two bytes32 values
@@ -67,3 +72,77 @@ func BufferToUint256Le(buffer []byte) []*big.Int {
	}
	return buffer256
}

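For orientation, a minimal sketch of what the `Keccak2` helper described by the doc comment above plausibly looks like, using the imported `keccak256` package. This is an assumption based on the comment and imports only; the real body sits outside this hunk.

```go
// Sketch only: concatenate two 32-byte values and hash them with keccak256.
func Keccak2(a common.Hash, b common.Hash) common.Hash {
	return common.BytesToHash(keccak256.Hash(append(a.Bytes(), b.Bytes()...)))
}
```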
// UnpackLog unpacks a retrieved log into the provided output structure.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
	if log.Topics[0] != c.Events[event].ID {
		return fmt.Errorf("event signature mismatch")
	}
	if len(log.Data) > 0 {
		if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
			return err
		}
	}
	var indexed abi.Arguments
	for _, arg := range c.Events[event].Inputs {
		if arg.Indexed {
			indexed = append(indexed, arg)
		}
	}
	return abi.ParseTopics(out, indexed, log.Topics[1:])
}

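A hypothetical usage sketch of `UnpackLog`: decoding a receipt log into a caller-defined struct. The event name and struct fields below are illustrative only and are not taken from the bridge ABI.

```go
// Illustrative only: the struct must mirror the event's non-indexed and
// indexed arguments for UnpackLog / abi.ParseTopics to fill it correctly.
type exampleSentMessage struct {
	Target  common.Address
	Sender  common.Address
	Value   *big.Int
	Message []byte
}

func decodeExampleEvent(messengerABI *abi.ABI, log types.Log) (*exampleSentMessage, error) {
	out := new(exampleSentMessage)
	if err := UnpackLog(messengerABI, out, "SentMessage", log); err != nil {
		return nil, err
	}
	return out, nil
}
```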
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
func UnpackLogIntoMap(c *abi.ABI, out map[string]interface{}, event string, log types.Log) error {
	if log.Topics[0] != c.Events[event].ID {
		return fmt.Errorf("event signature mismatch")
	}
	if len(log.Data) > 0 {
		if err := c.UnpackIntoMap(out, event, log.Data); err != nil {
			return err
		}
	}
	var indexed abi.Arguments
	for _, arg := range c.Events[event].Inputs {
		if arg.Indexed {
			indexed = append(indexed, arg)
		}
	}
	return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:])
}

// GetL1MessageProof fetches storage proofs for the given message hashes from the geth client.
func GetL1MessageProof(client *gethclient.Client, account common.Address, hashes []common.Hash, height uint64) ([][]byte, error) {
	slot := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
	var keys []string
	for _, hash := range hashes {
		keys = append(keys, Keccak2(hash, slot).String())
	}
	results, err := client.GetProof(context.Background(), account, keys, big.NewInt(int64(height)))
	if err != nil {
		return make([][]byte, 0), err
	}

	accountProof := results.AccountProof
	var proofs [][]byte
	for i := 0; i < len(hashes); i++ {
		var proof []byte
		proof = append(proof, big.NewInt(int64(len(accountProof))).Bytes()...)
		for _, item := range results.AccountProof {
			// remove 0x prefix
			proof = append(proof, common.Hex2Bytes(item[2:])...)
		}

		// the storage proofs are expected to be in the same order as `hashes`
		storageProof := results.StorageProof[i]
		proof = append(proof, big.NewInt(int64(len(storageProof.Proof))).Bytes()...)
		for _, item := range storageProof.Proof {
			// remove 0x prefix
			proof = append(proof, common.Hex2Bytes(item[2:])...)
		}

		proofs = append(proofs, proof)
	}

	return proofs, nil
}

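To make the key derivation above concrete: `GetL1MessageProof` appears to assume the messenger keeps message hashes in a Solidity mapping at storage slot 0, so the key proved for each message is `keccak256(messageHash ++ slot)`. A small sketch with a placeholder hash:

```go
// Example storage key for a mapping entry at slot 0; the message hash below
// is a placeholder, not a real bridge message.
func exampleProofKey() string {
	slot := common.HexToHash("0x00")
	msgHash := common.HexToHash("0x00000000000000000000000000000000000000000000000000000000deadbeef")
	return Keccak2(msgHash, slot).String()
}
```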
@@ -3,3 +3,5 @@ artifacts
cache
coverage*
gasReporterOutput.json
src/L2/predeploys/L1BlockContainer.sol
src/libraries/verifier/PatriciaMerkleTrieVerifier.sol

@@ -45,31 +45,16 @@ yarn install

## Build

+ Run `git submodule update --init --recursive` to initialise git submodules.
+ Run `yarn prettier:solidity` to run linting in fix mode, will auto-format all solidity codes.
+ Run `yarn prettier` to run linting in fix mode, will auto-format all typescript codes.
+ Run `forge build` to compile contracts with foundry.
+ Run `npx hardhat compile` to compile with hardhat.
+ Run `forge test -vvv` to run foundry units tests. It will compile all contracts before running the unit tests.
+ Run `npx hardhat test` to run integration tests. It may not compile all contracts before running, it's better to run `npx hardhat compile` first.
- Run `git submodule update --init --recursive` to initialise git submodules.
- Run `yarn prettier:solidity` to run linting in fix mode, will auto-format all solidity codes.
- Run `yarn prettier` to run linting in fix mode, will auto-format all typescript codes.
- Run `forge build` to compile contracts with foundry.
- Run `npx hardhat compile` to compile with hardhat.
- Run `forge test -vvv` to run foundry units tests. It will compile all contracts before running the unit tests.
- Run `npx hardhat test` to run integration tests. It may not compile all contracts before running, it's better to run `npx hardhat compile` first.

## TODO

- [ ] unit tests
- [ ] L1 Messenger
- [x] L1 Gateways
- [x] L1 Gateway Router
- [ ] L2 Messenger
- [x] L2 Gateways
- [x] L2 Gateway Router
- [x] ScrollStandardERC20Factory
- [x] Whitelist
- [ ] SimpleGasOracle
- [ ] integration tests
- [x] ERC20Gateway
- [x] GatewayRouter
- [ ] ZKRollup contracts
- [x] Gas Oracle contracts for cross chain message call
- [ ] ERC721/ERC115 interface design
- [ ] add proof verification codes
- [ ] layer1 to layer2 proof
- [ ] security analysis

@@ -366,6 +366,23 @@ Update layer 2 to layer 2 token mapping.
|
||||
| _l1Token | address | The address of corresponding ERC1155 token in layer 2. |
|
||||
| _l2Token | address | undefined |
|
||||
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
the current contract version.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | string | undefined |
|
||||
|
||||
|
||||
|
||||
## Events
|
||||
|
||||
@@ -311,6 +311,23 @@ Update layer 2 to layer 2 token mapping.
|
||||
| _l1Token | address | The address of corresponding ERC721 token in layer 2. |
|
||||
| _l2Token | address | undefined |
|
||||
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
the current contract version.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | string | undefined |
|
||||
|
||||
|
||||
|
||||
## Events
|
||||
|
||||
@@ -380,6 +380,23 @@ function transferOwnership(address newOwner) external nonpayable
|
||||
|---|---|---|
|
||||
| newOwner | address | undefined |
|
||||
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
the current contract version.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | string | undefined |
|
||||
|
||||
|
||||
|
||||
## Events
|
||||
|
||||
@@ -30,7 +30,7 @@ The amount of seconds needed to wait if we want to drop message.
|
||||
### dropMessage
|
||||
|
||||
```solidity
|
||||
function dropMessage(address _from, address _to, uint256 _value, uint256 _fee, uint256 _deadline, uint256 _nonce, bytes _message, uint256 _gasLimit) external nonpayable
|
||||
function dropMessage(address, address, uint256, uint256, uint256, uint256, bytes, uint256) external nonpayable
|
||||
```
|
||||
|
||||
|
||||
@@ -41,14 +41,14 @@ function dropMessage(address _from, address _to, uint256 _value, uint256 _fee, u
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _from | address | undefined |
|
||||
| _to | address | undefined |
|
||||
| _value | uint256 | undefined |
|
||||
| _fee | uint256 | undefined |
|
||||
| _deadline | uint256 | undefined |
|
||||
| _nonce | uint256 | undefined |
|
||||
| _message | bytes | undefined |
|
||||
| _gasLimit | uint256 | undefined |
|
||||
| _0 | address | undefined |
|
||||
| _1 | address | undefined |
|
||||
| _2 | uint256 | undefined |
|
||||
| _3 | uint256 | undefined |
|
||||
| _4 | uint256 | undefined |
|
||||
| _5 | uint256 | undefined |
|
||||
| _6 | bytes | undefined |
|
||||
| _7 | uint256 | undefined |
|
||||
|
||||
### gasOracle
|
||||
|
||||
@@ -149,6 +149,23 @@ Mapping from relay id to relay status.
|
||||
|---|---|---|
|
||||
| _0 | bool | undefined |
|
||||
|
||||
### messageQueue
|
||||
|
||||
```solidity
|
||||
function messageQueue() external view returns (contract L1MessageQueue)
|
||||
```
|
||||
|
||||
The address of L1MessageQueue contract.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | contract L1MessageQueue | undefined |
|
||||
|
||||
### owner
|
||||
|
||||
```solidity
|
||||
@@ -352,6 +369,23 @@ Update whitelist contract.
|
||||
|---|---|---|
|
||||
| _newWhitelist | address | The address of new whitelist contract. |
|
||||
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
the current contract version.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | string | undefined |
|
||||
|
||||
### whitelist
|
||||
|
||||
```solidity
|
||||
|
||||
@@ -226,6 +226,23 @@ The address of L1GatewayRouter/L2GatewayRouter contract.
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
the current contract version.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | string | undefined |
|
||||
|
||||
|
||||
|
||||
## Events
|
||||
|
||||
@@ -226,6 +226,23 @@ The address of L1GatewayRouter/L2GatewayRouter contract.
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
the current contract version.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | string | undefined |
|
||||
|
||||
|
||||
|
||||
## Events
|
||||
|
||||
@@ -327,6 +327,23 @@ Update layer 2 to layer 1 token mapping.
|
||||
| _l2Token | address | undefined |
|
||||
| _l1Token | address | The address of ERC1155 token in layer 1. |
|
||||
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
the current contract version.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | string | undefined |
|
||||
|
||||
### withdrawERC1155
|
||||
|
||||
```solidity
|
||||
|
||||
@@ -274,6 +274,23 @@ Update layer 2 to layer 1 token mapping.
|
||||
| _l2Token | address | undefined |
|
||||
| _l1Token | address | The address of ERC721 token in layer 1. |
|
||||
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
the current contract version.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | string | undefined |
|
||||
|
||||
### withdrawERC721
|
||||
|
||||
```solidity
|
||||
|
||||
@@ -312,6 +312,23 @@ function transferOwnership(address newOwner) external nonpayable
|
||||
|---|---|---|
|
||||
| newOwner | address | undefined |
|
||||
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
the current contract version.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | string | undefined |
|
||||
|
||||
### withdrawERC20
|
||||
|
||||
```solidity
|
||||
|
||||
@@ -10,6 +10,23 @@ The `L2ScrollMessenger` contract can: 1. send messages from layer 2 to layer 1;
|
||||
|
||||
## Methods
|
||||
|
||||
### blockContainer
|
||||
|
||||
```solidity
|
||||
function blockContainer() external view returns (contract IL1BlockContainer)
|
||||
```
|
||||
|
||||
The contract contains the list of L1 blocks.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | contract IL1BlockContainer | undefined |
|
||||
|
||||
### dropDelayDuration
|
||||
|
||||
```solidity
|
||||
@@ -111,27 +128,10 @@ Mapping from relay id to relay status.
|
||||
|---|---|---|
|
||||
| _0 | bool | undefined |
|
||||
|
||||
### messageNonce
|
||||
### messageQueue
|
||||
|
||||
```solidity
|
||||
function messageNonce() external view returns (uint256)
|
||||
```
|
||||
|
||||
Message nonce, used to avoid relay attack.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | uint256 | undefined |
|
||||
|
||||
### messagePasser
|
||||
|
||||
```solidity
|
||||
function messagePasser() external view returns (contract L2ToL1MessagePasser)
|
||||
function messageQueue() external view returns (contract L2MessageQueue)
|
||||
```
|
||||
|
||||
Contract to store the sent message.
|
||||
@@ -143,7 +143,7 @@ Contract to store the sent message.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | contract L2ToL1MessagePasser | undefined |
|
||||
| _0 | contract L2MessageQueue | undefined |
|
||||
|
||||
### owner
|
||||
|
||||
@@ -162,27 +162,28 @@ The address of the current owner.
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### relayMessage
|
||||
### relayMessageWithProof
|
||||
|
||||
```solidity
|
||||
function relayMessage(address _from, address _to, uint256 _value, uint256 _fee, uint256 _deadline, uint256 _nonce, bytes _message) external nonpayable
|
||||
function relayMessageWithProof(address _from, address _to, uint256 _value, uint256 _fee, uint256 _deadline, uint256 _nonce, bytes _message, IL2ScrollMessenger.L1MessageProof _proof) external nonpayable
|
||||
```
|
||||
|
||||
execute L1 => L2 message
|
||||
|
||||
*Make sure this is only called by privileged accounts.*
|
||||
|
||||
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _from | address | The address of the sender of the message. |
|
||||
| _to | address | The address of the recipient of the message. |
|
||||
| _value | uint256 | The msg.value passed to the message call. |
|
||||
| _fee | uint256 | The amount of fee in ETH to charge. |
|
||||
| _deadline | uint256 | The deadline of the message. |
|
||||
| _nonce | uint256 | The nonce of the message to avoid replay attack. |
|
||||
| _message | bytes | The content of the message. |
|
||||
| _from | address | undefined |
|
||||
| _to | address | undefined |
|
||||
| _value | uint256 | undefined |
|
||||
| _fee | uint256 | undefined |
|
||||
| _deadline | uint256 | undefined |
|
||||
| _nonce | uint256 | undefined |
|
||||
| _message | bytes | undefined |
|
||||
| _proof | IL2ScrollMessenger.L1MessageProof | undefined |
|
||||
|
||||
### renounceOwnership
|
||||
|
||||
@@ -214,6 +215,22 @@ Send cross chain message (L1 => L2 or L2 => L1)
|
||||
| _message | bytes | The content of the message. |
|
||||
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
|
||||
|
||||
### setBlockContainer
|
||||
|
||||
```solidity
|
||||
function setBlockContainer(address _blockContainer) external nonpayable
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _blockContainer | address | undefined |
|
||||
|
||||
### transferOwnership
|
||||
|
||||
```solidity
|
||||
@@ -278,6 +295,23 @@ Update whitelist contract.
|
||||
|---|---|---|
|
||||
| _newWhitelist | address | The address of new whitelist contract. |
|
||||
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
the current contract version.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | string | undefined |
|
||||
|
||||
### whitelist
|
||||
|
||||
```solidity
|
||||
|
||||
@@ -173,6 +173,23 @@ The address of ScrollStandardERC20Factory.
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
the current contract version.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | string | undefined |
|
||||
|
||||
### withdrawERC20
|
||||
|
||||
```solidity
|
||||
|
||||
@@ -191,6 +191,23 @@ The address of L1GatewayRouter/L2GatewayRouter contract.
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
the current contract version.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | string | undefined |
|
||||
|
||||
### withdrawERC20
|
||||
|
||||
```solidity
|
||||
|
||||
@@ -10,34 +10,6 @@ This contract maintains essential data for zk rollup, including: 1. a list of pe
|
||||
|
||||
## Methods
|
||||
|
||||
### appendMessage
|
||||
|
||||
```solidity
|
||||
function appendMessage(address _sender, address _target, uint256 _value, uint256 _fee, uint256 _deadline, bytes _message, uint256 _gasLimit) external nonpayable returns (uint256)
|
||||
```
|
||||
|
||||
Append a cross chain message to message queue.
|
||||
|
||||
*This function should only be called by L1ScrollMessenger for safety.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _sender | address | The address of message sender in layer 1. |
|
||||
| _target | address | The address of message recipient in layer 2. |
|
||||
| _value | uint256 | The amount of ether sent to recipient in layer 2. |
|
||||
| _fee | uint256 | The amount of ether paid to relayer in layer 2. |
|
||||
| _deadline | uint256 | The deadline of the message. |
|
||||
| _message | bytes | The content of the message. |
|
||||
| _gasLimit | uint256 | Unused, but included for potential forward compatibility considerations. |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | uint256 | undefined |
|
||||
|
||||
### batches
|
||||
|
||||
```solidity
|
||||
@@ -66,7 +38,7 @@ Mapping from batch id to batch struct.
|
||||
### blocks
|
||||
|
||||
```solidity
|
||||
function blocks(bytes32) external view returns (bytes32 parentHash, bytes32 transactionRoot, uint64 blockHeight, uint64 batchIndex)
|
||||
function blocks(bytes32) external view returns (bytes32 parentHash, bytes32 transactionRoot, uint64 blockHeight, uint64 batchIndex, bytes32 messageRoot)
|
||||
```
|
||||
|
||||
Mapping from block hash to block struct.
|
||||
@@ -87,6 +59,7 @@ Mapping from block hash to block struct.
|
||||
| transactionRoot | bytes32 | undefined |
|
||||
| blockHeight | uint64 | undefined |
|
||||
| batchIndex | uint64 | undefined |
|
||||
| messageRoot | bytes32 | undefined |
|
||||
|
||||
### commitBatch
|
||||
|
||||
@@ -144,13 +117,13 @@ Mapping from batch index to finalized batch id.
|
||||
|---|---|---|
|
||||
| _0 | bytes32 | undefined |
|
||||
|
||||
### getMessageHashByIndex
|
||||
### getL2MessageRoot
|
||||
|
||||
```solidity
|
||||
function getMessageHashByIndex(uint256 _index) external view returns (bytes32)
|
||||
function getL2MessageRoot(bytes32 _blockHash) external view returns (bytes32)
|
||||
```
|
||||
|
||||
Return the message hash by index.
|
||||
Return the merkle root of L2 message tree.
|
||||
|
||||
|
||||
|
||||
@@ -158,7 +131,7 @@ Return the message hash by index.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _index | uint256 | The index to query. |
|
||||
| _blockHash | bytes32 | undefined |
|
||||
|
||||
#### Returns
|
||||
|
||||
@@ -166,40 +139,6 @@ Return the message hash by index.
|
||||
|---|---|---|
|
||||
| _0 | bytes32 | undefined |
|
||||
|
||||
### getNextQueueIndex
|
||||
|
||||
```solidity
|
||||
function getNextQueueIndex() external view returns (uint256)
|
||||
```
|
||||
|
||||
Return the index of the first queue element not yet executed.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | uint256 | undefined |
|
||||
|
||||
### getQeueuLength
|
||||
|
||||
```solidity
|
||||
function getQeueuLength() external view returns (uint256)
|
||||
```
|
||||
|
||||
Return the total number of appended messages.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | uint256 | undefined |
|
||||
|
||||
### importGenesisBlock
|
||||
|
||||
```solidity
|
||||
@@ -332,23 +271,6 @@ Return the layer 2 block gas limit.
|
||||
|---|---|---|
|
||||
| _0 | uint256 | undefined |
|
||||
|
||||
### messenger
|
||||
|
||||
```solidity
|
||||
function messenger() external view returns (address)
|
||||
```
|
||||
|
||||
The address of L1ScrollMessenger.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### operator
|
||||
|
||||
```solidity
|
||||
@@ -426,22 +348,6 @@ function transferOwnership(address newOwner) external nonpayable
|
||||
|---|---|---|
|
||||
| newOwner | address | undefined |
|
||||
|
||||
### updateMessenger
|
||||
|
||||
```solidity
|
||||
function updateMessenger(address _newMessenger) external nonpayable
|
||||
```
|
||||
|
||||
Update the address of messenger.
|
||||
|
||||
*This function can only be called by the contract owner.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _newMessenger | address | The new messenger address to update. |
|
||||
|
||||
### updateOperator
|
||||
|
||||
```solidity
|
||||
@@ -458,28 +364,22 @@ Update the address of operator.
|
||||
|---|---|---|
|
||||
| _newOperator | address | The new operator address to update. |
|
||||
|
||||
### verifyMessageStateProof
|
||||
### version
|
||||
|
||||
```solidity
|
||||
function verifyMessageStateProof(uint256 _batchIndex, uint256 _blockHeight) external view returns (bool)
|
||||
function version() external view returns (string)
|
||||
```
|
||||
|
||||
Verify a state proof for message relay.
|
||||
the current contract version.
|
||||
|
||||
*add more fields.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _batchIndex | uint256 | undefined |
|
||||
| _blockHeight | uint256 | undefined |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | bool | undefined |
|
||||
| _0 | string | undefined |
|
||||
|
||||
|
||||
|
||||
@@ -556,23 +456,6 @@ Emitted when a batch is reverted.
|
||||
|---|---|---|
|
||||
| _batchId `indexed` | bytes32 | undefined |
|
||||
|
||||
### UpdateMesssenger
|
||||
|
||||
```solidity
|
||||
event UpdateMesssenger(address _oldMesssenger, address _newMesssenger)
|
||||
```
|
||||
|
||||
Emitted when the owner updates the address of the messenger.
|
||||
|
||||
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _oldMesssenger | address | The address of old messenger contract. |
|
||||
| _newMesssenger | address | The address of new messenger contract. |
|
||||
|
||||
### UpdateOperator
|
||||
|
||||
```solidity
|
||||
|
||||
@@ -116,6 +116,7 @@ const config: HardhatUserConfig = {
|
||||
"IL2ERC1155Gateway",
|
||||
"IScrollStandardERC20Factory",
|
||||
"IZKRollup",
|
||||
"MockL1ScrollMessenger",
|
||||
"WETH9",
|
||||
],
|
||||
},
|
||||
|
||||
@@ -3,11 +3,11 @@
|
||||
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
|
||||
import { expect } from "chai";
|
||||
import { constants } from "ethers";
|
||||
import { keccak256 } from "ethers/lib/utils";
|
||||
import { concat, keccak256 } from "ethers/lib/utils";
|
||||
import { ethers } from "hardhat";
|
||||
import {
|
||||
ZKRollup,
|
||||
L1ScrollMessenger,
|
||||
MockL1ScrollMessenger,
|
||||
L2ScrollMessenger,
|
||||
L1StandardERC20Gateway,
|
||||
L2StandardERC20Gateway,
|
||||
@@ -17,6 +17,8 @@ import {
|
||||
L1WETHGateway,
|
||||
L2WETHGateway,
|
||||
WETH9,
|
||||
L2MessageQueue,
|
||||
L1MessageQueue,
|
||||
} from "../typechain";
|
||||
|
||||
describe("ERC20Gateway", async () => {
|
||||
@@ -30,8 +32,10 @@ describe("ERC20Gateway", async () => {
|
||||
let router: SignerWithAddress;
|
||||
|
||||
let rollup: ZKRollup;
|
||||
let l1Messenger: L1ScrollMessenger;
|
||||
let l1Messenger: MockL1ScrollMessenger;
|
||||
let l2Messenger: L2ScrollMessenger;
|
||||
let l1MessageQueue: L1MessageQueue;
|
||||
let l2MessageQueue: L2MessageQueue;
|
||||
|
||||
beforeEach(async () => {
|
||||
[deployer, alice, bob, router] = await ethers.getSigners();
|
||||
@@ -57,17 +61,21 @@ describe("ERC20Gateway", async () => {
|
||||
timestamp: 0,
|
||||
extraData: "0x",
|
||||
txs: [],
|
||||
messageRoot: constants.HashZero,
|
||||
});
|
||||
|
||||
// deploy L1ScrollMessenger in layer 1
|
||||
const L1ScrollMessenger = await ethers.getContractFactory("L1ScrollMessenger", deployer);
|
||||
l1Messenger = await L1ScrollMessenger.deploy();
|
||||
// deploy MockL1ScrollMessenger in layer 1
|
||||
const MockL1ScrollMessenger = await ethers.getContractFactory("MockL1ScrollMessenger", deployer);
|
||||
l1Messenger = await MockL1ScrollMessenger.deploy();
|
||||
await l1Messenger.initialize(rollup.address);
|
||||
await rollup.updateMessenger(l1Messenger.address);
|
||||
await rollup.updateOperator(deployer.address);
|
||||
|
||||
// deploy L2ScrollMessenger in layer 2
|
||||
const L2ScrollMessenger = await ethers.getContractFactory("L2ScrollMessenger", deployer);
|
||||
l2Messenger = await L2ScrollMessenger.deploy(deployer.address);
|
||||
|
||||
l1MessageQueue = await ethers.getContractAt("L1MessageQueue", await l1Messenger.messageQueue(), deployer);
|
||||
l2MessageQueue = await ethers.getContractAt("L2MessageQueue", await l2Messenger.messageQueue(), deployer);
|
||||
});
|
||||
|
||||
context("StandardERC20Gateway", async () => {
|
||||
@@ -138,7 +146,7 @@ describe("ERC20Gateway", async () => {
|
||||
await l1Token.connect(alice).approve(l1Gateway.address, amount1);
|
||||
|
||||
// 2. do deposit
|
||||
const nonce = await rollup.getQeueuLength();
|
||||
const nonce = await l1MessageQueue.nextMessageIndex();
|
||||
const beforeBalanceLayer1 = await l1Token.balanceOf(l1Gateway.address);
|
||||
const depositTx = sendToSelf
|
||||
? await l1Gateway
|
||||
@@ -182,14 +190,18 @@ describe("ERC20Gateway", async () => {
|
||||
|
||||
// 3. do relay in layer 2
|
||||
const beforeBalanceLayer2 = constants.Zero;
|
||||
const relayTx = await l2Messenger.relayMessage(
|
||||
const relayTx = await l2Messenger.relayMessageWithProof(
|
||||
l1Gateway.address,
|
||||
l2Gateway.address,
|
||||
0,
|
||||
0,
|
||||
deadline,
|
||||
nonce,
|
||||
messageData
|
||||
messageData,
|
||||
{
|
||||
blockHash: constants.HashZero,
|
||||
stateRootProof: "0x",
|
||||
}
|
||||
);
|
||||
await relayTx.wait();
|
||||
const afterBalanceLayer2 = await l2Token.balanceOf(recipient.address);
|
||||
@@ -212,7 +224,7 @@ describe("ERC20Gateway", async () => {
|
||||
await l1Token.connect(alice).approve(l1Gateway.address, amount1);
|
||||
|
||||
// 2. do deposit first time
|
||||
const nonce1 = await rollup.getQeueuLength();
|
||||
const nonce1 = await l1MessageQueue.nextMessageIndex();
|
||||
let beforeBalanceLayer1 = await l1Token.balanceOf(l1Gateway.address);
|
||||
const depositTx1 = sendToSelf
|
||||
? await l1Gateway
|
||||
@@ -248,14 +260,18 @@ describe("ERC20Gateway", async () => {
|
||||
|
||||
// 3. do relay in layer 2 first time
|
||||
let beforeBalanceLayer2 = constants.Zero;
|
||||
const relayTx1 = await l2Messenger.relayMessage(
|
||||
const relayTx1 = await l2Messenger.relayMessageWithProof(
|
||||
l1Gateway.address,
|
||||
l2Gateway.address,
|
||||
0,
|
||||
0,
|
||||
deadline1,
|
||||
nonce1,
|
||||
messageData1
|
||||
messageData1,
|
||||
{
|
||||
blockHash: constants.HashZero,
|
||||
stateRootProof: "0x",
|
||||
}
|
||||
);
|
||||
await relayTx1.wait();
|
||||
let afterBalanceLayer2 = await l2Token.balanceOf(recipient.address);
|
||||
@@ -266,7 +282,7 @@ describe("ERC20Gateway", async () => {
|
||||
|
||||
// 5. do deposit second time
|
||||
const calldata = "0x000033";
|
||||
const nonce2 = await rollup.getQeueuLength();
|
||||
const nonce2 = await l1MessageQueue.nextMessageIndex();
|
||||
beforeBalanceLayer1 = await l1Token.balanceOf(l1Gateway.address);
|
||||
const depositTx2 = await l1Gateway
|
||||
.connect(alice)
|
||||
@@ -295,14 +311,18 @@ describe("ERC20Gateway", async () => {
|
||||
|
||||
// 3. do relay in layer 2
|
||||
beforeBalanceLayer2 = await l2Token.balanceOf(recipient.address);
|
||||
const relayTx2 = await l2Messenger.relayMessage(
|
||||
const relayTx2 = await l2Messenger.relayMessageWithProof(
|
||||
l1Gateway.address,
|
||||
l2Gateway.address,
|
||||
0,
|
||||
0,
|
||||
deadline2,
|
||||
nonce2,
|
||||
messageData2
|
||||
messageData2,
|
||||
{
|
||||
blockHash: constants.HashZero,
|
||||
stateRootProof: "0x",
|
||||
}
|
||||
);
|
||||
await relayTx2.wait();
|
||||
afterBalanceLayer2 = await l2Token.balanceOf(recipient.address);
|
||||
@@ -353,7 +373,7 @@ describe("ERC20Gateway", async () => {
|
||||
[symbol, name, decimals]
|
||||
);
|
||||
const deadline = (await ethers.provider.getBlock("latest")).timestamp + DROP_DELAY_DURATION;
|
||||
const nonce = await rollup.getQeueuLength();
|
||||
const nonce = await l1MessageQueue.nextMessageIndex();
|
||||
const messageData = l2Gateway.interface.encodeFunctionData("finalizeDepositERC20", [
|
||||
l1Token.address,
|
||||
l2Token.address,
|
||||
@@ -362,14 +382,18 @@ describe("ERC20Gateway", async () => {
|
||||
amount,
|
||||
ethers.utils.defaultAbiCoder.encode(["bytes", "bytes"], ["0x", deployData]),
|
||||
]);
|
||||
const relayTx = await l2Messenger.relayMessage(
|
||||
const relayTx = await l2Messenger.relayMessageWithProof(
|
||||
l1Gateway.address,
|
||||
l2Gateway.address,
|
||||
0,
|
||||
0,
|
||||
deadline,
|
||||
nonce,
|
||||
messageData
|
||||
messageData,
|
||||
{
|
||||
blockHash: constants.HashZero,
|
||||
stateRootProof: "0x",
|
||||
}
|
||||
);
|
||||
await relayTx.wait();
|
||||
|
||||
@@ -381,7 +405,7 @@ describe("ERC20Gateway", async () => {
|
||||
await l2Token.connect(alice).approve(l2Gateway.address, amount);
|
||||
|
||||
// 2. withdraw
|
||||
const nonce = await l2Messenger.messageNonce();
|
||||
const nonce = await l2MessageQueue.nextMessageIndex();
|
||||
const balanceBefore = await l2Token.balanceOf(alice.address);
|
||||
const withdrawTx = sendToSelf
|
||||
? await l2Gateway
|
||||
@@ -411,13 +435,48 @@ describe("ERC20Gateway", async () => {
|
||||
amount,
|
||||
"0x",
|
||||
]);
|
||||
|
||||
const messageHash = keccak256(
|
||||
concat([
|
||||
l2Gateway.address,
|
||||
l1Gateway.address,
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [0]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [0]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [deadline]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [nonce]),
|
||||
messageData,
|
||||
])
|
||||
);
|
||||
await expect(withdrawTx)
|
||||
.to.emit(l2Messenger, "SentMessage")
|
||||
.withArgs(l1Gateway.address, l2Gateway.address, 0, 0, deadline, messageData, nonce, layer2GasLimit);
|
||||
.withArgs(l1Gateway.address, l2Gateway.address, 0, 0, deadline, messageData, nonce, layer2GasLimit)
|
||||
.to.emit(l2MessageQueue, "AppendMessage")
|
||||
.withArgs(0, messageHash);
|
||||
// should transfer from alice
|
||||
expect(balanceBefore.sub(balanceAfter)).to.eq(amount);
|
||||
|
||||
// 3. relay in layer 1
|
||||
// 3. import block to rollup contract
|
||||
const blockHash = (await ethers.provider.getBlock("latest")).hash;
|
||||
await rollup.commitBatch({
|
||||
batchIndex: 1,
|
||||
parentHash: keccak256(constants.HashZero),
|
||||
blocks: [
|
||||
{
|
||||
blockHash,
|
||||
parentHash: keccak256(constants.HashZero),
|
||||
baseFee: 0,
|
||||
stateRoot: constants.HashZero,
|
||||
blockHeight: 1,
|
||||
gasUsed: 0,
|
||||
timestamp: 0,
|
||||
extraData: [],
|
||||
txs: [],
|
||||
messageRoot: await l2MessageQueue.messageRoot(),
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
// 4. relay in layer 1
|
||||
const relayTx = await l1Messenger.relayMessageWithProof(
|
||||
l2Gateway.address,
|
||||
l1Gateway.address,
|
||||
@@ -426,7 +485,7 @@ describe("ERC20Gateway", async () => {
|
||||
deadline,
|
||||
nonce,
|
||||
messageData,
|
||||
{ batchIndex: 0, blockHeight: 0, merkleProof: "0x" }
|
||||
{ blockHash, messageRootProof: [] }
|
||||
);
|
||||
await relayTx.wait();
|
||||
// should emit RelayedMessage
|
||||
@@ -446,7 +505,7 @@ describe("ERC20Gateway", async () => {
|
||||
await l2Token.connect(alice).approve(l2Gateway.address, amount);
|
||||
|
||||
// 2. withdraw
|
||||
const nonce = await l2Messenger.messageNonce();
|
||||
const nonce = await l2MessageQueue.nextMessageIndex();
|
||||
const withdrawTx = await l2Gateway
|
||||
.connect(alice)
|
||||
.withdrawERC20AndCall(l2Token.address, recipient.address, amount, calldata, layer2GasLimit);
|
||||
@@ -465,11 +524,45 @@ describe("ERC20Gateway", async () => {
|
||||
amount,
|
||||
calldata,
|
||||
]);
|
||||
const messageHash = keccak256(
|
||||
concat([
|
||||
l2Gateway.address,
|
||||
l1Gateway.address,
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [0]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [0]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [deadline]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [nonce]),
|
||||
messageData,
|
||||
])
|
||||
);
|
||||
await expect(withdrawTx)
|
||||
.to.emit(l2Messenger, "SentMessage")
|
||||
.withArgs(l1Gateway.address, l2Gateway.address, 0, 0, deadline, messageData, nonce, layer2GasLimit);
|
||||
.withArgs(l1Gateway.address, l2Gateway.address, 0, 0, deadline, messageData, nonce, layer2GasLimit)
|
||||
.to.emit(l2MessageQueue, "AppendMessage")
|
||||
.withArgs(0, messageHash);
|
||||
|
||||
// 3. relay in layer 1
|
||||
// 3. import block to rollup contract
|
||||
const blockHash = (await ethers.provider.getBlock("latest")).hash;
|
||||
await rollup.commitBatch({
|
||||
batchIndex: 1,
|
||||
parentHash: keccak256(constants.HashZero),
|
||||
blocks: [
|
||||
{
|
||||
blockHash,
|
||||
parentHash: keccak256(constants.HashZero),
|
||||
baseFee: 0,
|
||||
stateRoot: constants.HashZero,
|
||||
blockHeight: 1,
|
||||
gasUsed: 0,
|
||||
timestamp: 0,
|
||||
extraData: [],
|
||||
txs: [],
|
||||
messageRoot: await l2MessageQueue.messageRoot(),
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
// 4. relay in layer 1
|
||||
const relayTx = await l1Messenger.relayMessageWithProof(
|
||||
l2Gateway.address,
|
||||
l1Gateway.address,
|
||||
@@ -478,7 +571,7 @@ describe("ERC20Gateway", async () => {
|
||||
deadline,
|
||||
nonce,
|
||||
messageData,
|
||||
{ batchIndex: 0, blockHeight: 0, merkleProof: "0x" }
|
||||
{ blockHash, messageRootProof: [] }
|
||||
);
|
||||
await relayTx.wait();
|
||||
// should emit RelayedMessage
|
||||
@@ -562,7 +655,7 @@ describe("ERC20Gateway", async () => {
|
||||
await l1WETH.connect(alice).approve(l1Gateway.address, amount);
|
||||
|
||||
// 2. do deposit
|
||||
const nonce = await rollup.getQeueuLength();
|
||||
const nonce = await l1MessageQueue.nextMessageIndex();
|
||||
const beforeBalanceLayer1 = await ethers.provider.getBalance(l1Messenger.address);
|
||||
const depositTx = sendToSelf
|
||||
? await l1Gateway
|
||||
@@ -600,14 +693,18 @@ describe("ERC20Gateway", async () => {
|
||||
|
||||
// 3. do relay in layer 2
|
||||
const beforeBalanceLayer2 = await l2WETH.balanceOf(recipient.address);
|
||||
const relayTx = await l2Messenger.relayMessage(
|
||||
const relayTx = await l2Messenger.relayMessageWithProof(
|
||||
l1Gateway.address,
|
||||
l2Gateway.address,
|
||||
amount,
|
||||
0,
|
||||
deadline,
|
||||
nonce,
|
||||
messageData
|
||||
messageData,
|
||||
{
|
||||
blockHash: constants.HashZero,
|
||||
stateRootProof: "0x",
|
||||
}
|
||||
);
|
||||
await relayTx.wait();
|
||||
const afterBalanceLayer2 = await l2WETH.balanceOf(recipient.address);
|
||||
@@ -628,7 +725,7 @@ describe("ERC20Gateway", async () => {
|
||||
await l1WETH.connect(alice).approve(l1Gateway.address, amount);
|
||||
|
||||
// 2. do deposit
|
||||
const nonce = await rollup.getQeueuLength();
|
||||
const nonce = await l1MessageQueue.nextMessageIndex();
|
||||
const beforeBalanceLayer1 = await ethers.provider.getBalance(l1Messenger.address);
|
||||
const depositTx = await l1Gateway
|
||||
.connect(alice)
|
||||
@@ -657,14 +754,18 @@ describe("ERC20Gateway", async () => {
|
||||
|
||||
// 3. do relay in layer 2
|
||||
const beforeBalanceLayer2 = await l2WETH.balanceOf(recipient.address);
|
||||
const relayTx = await l2Messenger.relayMessage(
|
||||
const relayTx = await l2Messenger.relayMessageWithProof(
|
||||
l1Gateway.address,
|
||||
l2Gateway.address,
|
||||
amount,
|
||||
0,
|
||||
deadline,
|
||||
nonce,
|
||||
messageData
|
||||
messageData,
|
||||
{
|
||||
blockHash: constants.HashZero,
|
||||
stateRootProof: "0x",
|
||||
}
|
||||
);
|
||||
await relayTx.wait();
|
||||
const afterBalanceLayer2 = await l2WETH.balanceOf(recipient.address);
|
||||
@@ -699,7 +800,7 @@ describe("ERC20Gateway", async () => {
|
||||
await l2WETH.connect(alice).approve(l2Gateway.address, amount);
|
||||
|
||||
// 2. do withdraw in layer 2
|
||||
const nonce = await l2Messenger.messageNonce();
|
||||
const nonce = await l2MessageQueue.nextMessageIndex();
|
||||
const beforeBalanceLayer2 = await ethers.provider.getBalance(l2Messenger.address);
|
||||
const withdrawTx = sendToSelf
|
||||
? await l2Gateway
|
||||
@@ -729,13 +830,47 @@ describe("ERC20Gateway", async () => {
|
||||
amount,
|
||||
"0x",
|
||||
]);
|
||||
const messageHash = keccak256(
|
||||
concat([
|
||||
l2Gateway.address,
|
||||
l1Gateway.address,
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [amount]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [0]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [deadline]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [nonce]),
|
||||
messageData,
|
||||
])
|
||||
);
|
||||
await expect(withdrawTx)
|
||||
.to.emit(l2Messenger, "SentMessage")
|
||||
.withArgs(l1Gateway.address, l2Gateway.address, amount, 0, deadline, messageData, nonce, layer2GasLimit);
|
||||
.withArgs(l1Gateway.address, l2Gateway.address, amount, 0, deadline, messageData, nonce, layer2GasLimit)
|
||||
.to.emit(l2MessageQueue, "AppendMessage")
|
||||
.withArgs(0, messageHash);
|
||||
// should unwrap transfer to messenger
|
||||
expect(afterBalanceLayer2.sub(beforeBalanceLayer2)).to.eq(amount);
|
||||
|
||||
// 3. do relay in layer 1
|
||||
// 3. import block to rollup contract
|
||||
const blockHash = (await ethers.provider.getBlock("latest")).hash;
|
||||
await rollup.commitBatch({
|
||||
batchIndex: 1,
|
||||
parentHash: keccak256(constants.HashZero),
|
||||
blocks: [
|
||||
{
|
||||
blockHash,
|
||||
parentHash: keccak256(constants.HashZero),
|
||||
baseFee: 0,
|
||||
stateRoot: constants.HashZero,
|
||||
blockHeight: 1,
|
||||
gasUsed: 0,
|
||||
timestamp: 0,
|
||||
extraData: [],
|
||||
txs: [],
|
||||
messageRoot: await l2MessageQueue.messageRoot(),
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
// 4. do relay in layer 1
|
||||
const beforeBalanceLayer1 = await l1WETH.balanceOf(recipient.address);
|
||||
const relayTx = await l1Messenger.relayMessageWithProof(
|
||||
l2Gateway.address,
|
||||
@@ -745,7 +880,7 @@ describe("ERC20Gateway", async () => {
|
||||
deadline,
|
||||
nonce,
|
||||
messageData,
|
||||
{ batchIndex: 0, blockHeight: 0, merkleProof: "0x" }
|
||||
{ blockHash, messageRootProof: [] }
|
||||
);
|
||||
await relayTx.wait();
|
||||
const afterBalanceLayer1 = await l1WETH.balanceOf(recipient.address);
|
||||
@@ -765,7 +900,7 @@ describe("ERC20Gateway", async () => {
|
||||
await l2WETH.connect(alice).approve(l2Gateway.address, amount);
|
||||
|
||||
// 2. do withdraw in layer 2
|
||||
const nonce = await l2Messenger.messageNonce();
|
||||
const nonce = await l2MessageQueue.nextMessageIndex();
|
||||
const beforeBalanceLayer2 = await ethers.provider.getBalance(l2Messenger.address);
|
||||
const withdrawTx = await l2Gateway
|
||||
.connect(alice)
|
||||
@@ -786,13 +921,48 @@ describe("ERC20Gateway", async () => {
|
||||
amount,
|
||||
calldata,
|
||||
]);
|
||||
|
||||
const messageHash = keccak256(
|
||||
concat([
|
||||
l2Gateway.address,
|
||||
l1Gateway.address,
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [amount]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [0]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [deadline]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [nonce]),
|
||||
messageData,
|
||||
])
|
||||
);
|
||||
await expect(withdrawTx)
|
||||
.to.emit(l2Messenger, "SentMessage")
|
||||
.withArgs(l1Gateway.address, l2Gateway.address, amount, 0, deadline, messageData, nonce, layer2GasLimit);
|
||||
.withArgs(l1Gateway.address, l2Gateway.address, amount, 0, deadline, messageData, nonce, layer2GasLimit)
|
||||
.to.emit(l2MessageQueue, "AppendMessage")
|
||||
.withArgs(0, messageHash);
|
||||
// should unwrap transfer to messenger
|
||||
expect(afterBalanceLayer2.sub(beforeBalanceLayer2)).to.eq(amount);
|
||||
|
||||
// 3. do relay in layer 1
|
||||
// 3. import block to rollup contract
|
||||
const blockHash = (await ethers.provider.getBlock("latest")).hash;
|
||||
await rollup.commitBatch({
|
||||
batchIndex: 1,
|
||||
parentHash: keccak256(constants.HashZero),
|
||||
blocks: [
|
||||
{
|
||||
blockHash,
|
||||
parentHash: keccak256(constants.HashZero),
|
||||
baseFee: 0,
|
||||
stateRoot: constants.HashZero,
|
||||
blockHeight: 1,
|
||||
gasUsed: 0,
|
||||
timestamp: 0,
|
||||
extraData: [],
|
||||
txs: [],
|
||||
messageRoot: await l2MessageQueue.messageRoot(),
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
// 4. do relay in layer 1
|
||||
const beforeBalanceLayer1 = await l1WETH.balanceOf(recipient.address);
|
||||
const relayTx = await l1Messenger.relayMessageWithProof(
|
||||
l2Gateway.address,
|
||||
@@ -802,7 +972,7 @@ describe("ERC20Gateway", async () => {
|
||||
deadline,
|
||||
nonce,
|
||||
messageData,
|
||||
{ batchIndex: 0, blockHeight: 0, merkleProof: "0x" }
|
||||
{ blockHash, messageRootProof: [] }
|
||||
);
|
||||
await relayTx.wait();
|
||||
const afterBalanceLayer1 = await l1WETH.balanceOf(recipient.address);
|
||||
|
||||
@@ -3,9 +3,17 @@
|
||||
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
|
||||
import { expect } from "chai";
|
||||
import { constants } from "ethers";
|
||||
import { keccak256 } from "ethers/lib/utils";
|
||||
import { concat, keccak256 } from "ethers/lib/utils";
|
||||
import { ethers } from "hardhat";
|
||||
import { ZKRollup, L1ScrollMessenger, L2ScrollMessenger, L1GatewayRouter, L2GatewayRouter } from "../typechain";
|
||||
import {
|
||||
ZKRollup,
|
||||
MockL1ScrollMessenger,
|
||||
L2ScrollMessenger,
|
||||
L1GatewayRouter,
|
||||
L2GatewayRouter,
|
||||
L2MessageQueue,
|
||||
L1MessageQueue,
|
||||
} from "../typechain";
|
||||
|
||||
describe("GatewayRouter", async () => {
|
||||
const layer1GasLimit = 12345;
|
||||
@@ -17,8 +25,10 @@ describe("GatewayRouter", async () => {
|
||||
let bob: SignerWithAddress;
|
||||
|
||||
let rollup: ZKRollup;
|
||||
let l1Messenger: L1ScrollMessenger;
|
||||
let l1Messenger: MockL1ScrollMessenger;
|
||||
let l2Messenger: L2ScrollMessenger;
|
||||
let l1MessageQueue: L1MessageQueue;
|
||||
let l2MessageQueue: L2MessageQueue;
|
||||
|
||||
beforeEach(async () => {
|
||||
[deployer, alice, bob] = await ethers.getSigners();
|
||||
@@ -43,18 +53,22 @@ describe("GatewayRouter", async () => {
|
||||
gasUsed: 0,
|
||||
timestamp: 0,
|
||||
extraData: "0x",
|
||||
txs: []
|
||||
txs: [],
|
||||
messageRoot: constants.HashZero,
|
||||
});
|
||||
|
||||
// deploy L1ScrollMessenger in layer 1
|
||||
const L1ScrollMessenger = await ethers.getContractFactory("L1ScrollMessenger", deployer);
|
||||
l1Messenger = await L1ScrollMessenger.deploy();
|
||||
// deploy MockL1ScrollMessenger in layer 1
|
||||
const MockL1ScrollMessenger = await ethers.getContractFactory("MockL1ScrollMessenger", deployer);
|
||||
l1Messenger = await MockL1ScrollMessenger.deploy();
|
||||
await l1Messenger.initialize(rollup.address);
|
||||
await rollup.updateMessenger(l1Messenger.address);
|
||||
await rollup.updateOperator(deployer.address);
|
||||
|
||||
// deploy L2ScrollMessenger in layer 2
|
||||
const L2ScrollMessenger = await ethers.getContractFactory("L2ScrollMessenger", deployer);
|
||||
l2Messenger = await L2ScrollMessenger.deploy(deployer.address);
|
||||
|
||||
l1MessageQueue = await ethers.getContractAt("L1MessageQueue", await l1Messenger.messageQueue(), deployer);
|
||||
l2MessageQueue = await ethers.getContractAt("L2MessageQueue", await l2Messenger.messageQueue(), deployer);
|
||||
});
|
||||
|
||||
context("WETHGateway", async () => {
|
||||
@@ -94,7 +108,7 @@ describe("GatewayRouter", async () => {
|
||||
|
||||
it("should transfer to layer 2 without data", async () => {
|
||||
// 2. do deposit
|
||||
const nonce = await rollup.getQeueuLength();
|
||||
const nonce = await l1MessageQueue.nextMessageIndex();
|
||||
const beforeBalanceLayer1 = await ethers.provider.getBalance(l1Messenger.address);
|
||||
const depositTx = sendToSelf
|
||||
? await l1Gateway.connect(alice)["depositETH(uint256)"](layer1GasLimit, { value: amount })
|
||||
@@ -123,14 +137,15 @@ describe("GatewayRouter", async () => {
|
||||
|
||||
// 3. do relay in layer 2
|
||||
const beforeBalanceLayer2 = await ethers.provider.getBalance(recipient.address);
|
||||
const relayTx = await l2Messenger.relayMessage(
|
||||
const relayTx = await l2Messenger.relayMessageWithProof(
|
||||
l1Gateway.address,
|
||||
l2Gateway.address,
|
||||
amount,
|
||||
0,
|
||||
deadline,
|
||||
nonce,
|
||||
messageData
|
||||
messageData,
|
||||
{ blockHash: constants.HashZero, stateRootProof: "0x" }
|
||||
);
|
||||
await relayTx.wait();
|
||||
const afterBalanceLayer2 = await ethers.provider.getBalance(recipient.address);
|
||||
@@ -159,7 +174,7 @@ describe("GatewayRouter", async () => {
|
||||
|
||||
it("should transfer to layer 1 without data", async () => {
|
||||
// 2. do withdraw in layer 2
|
||||
const nonce = await l2Messenger.messageNonce();
|
||||
const nonce = await l2MessageQueue.nextMessageIndex();
|
||||
const beforeBalanceLayer2 = await ethers.provider.getBalance(l2Messenger.address);
|
||||
const withdrawTx = sendToSelf
|
||||
? await l2Gateway.connect(alice)["withdrawETH(uint256)"](layer2GasLimit, { value: amount })
|
||||
@@ -180,13 +195,47 @@ describe("GatewayRouter", async () => {
|
||||
amount,
|
||||
"0x",
|
||||
]);
|
||||
const messageHash = keccak256(
|
||||
concat([
|
||||
l2Gateway.address,
|
||||
l1Gateway.address,
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [amount]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [0]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [deadline]),
|
||||
ethers.utils.defaultAbiCoder.encode(["uint256"], [nonce]),
|
||||
messageData,
|
||||
])
|
||||
);
|
||||
await expect(withdrawTx)
|
||||
.to.emit(l2Messenger, "SentMessage")
|
||||
.withArgs(l1Gateway.address, l2Gateway.address, amount, 0, deadline, messageData, nonce, layer2GasLimit);
|
||||
.withArgs(l1Gateway.address, l2Gateway.address, amount, 0, deadline, messageData, nonce, layer2GasLimit)
|
||||
.to.emit(l2MessageQueue, "AppendMessage")
|
||||
.withArgs(0, messageHash);
|
||||
// should unwrap transfer to messenger
|
||||
expect(afterBalanceLayer2.sub(beforeBalanceLayer2)).to.eq(amount);
|
||||
|
||||
// 3. do relay in layer 1
|
||||
// 3. import block to rollup contract
|
||||
const blockHash = (await ethers.provider.getBlock("latest")).hash;
|
||||
await rollup.commitBatch({
|
||||
batchIndex: 1,
|
||||
parentHash: keccak256(constants.HashZero),
|
||||
blocks: [
|
||||
{
|
||||
blockHash,
|
||||
parentHash: keccak256(constants.HashZero),
|
||||
baseFee: 0,
|
||||
stateRoot: constants.HashZero,
|
||||
blockHeight: 1,
|
||||
gasUsed: 0,
|
||||
timestamp: 0,
|
||||
extraData: [],
|
||||
txs: [],
|
||||
messageRoot: await l2MessageQueue.messageRoot(),
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
// 4. do relay in layer 1
|
||||
const beforeBalanceLayer1 = await ethers.provider.getBalance(recipient.address);
|
||||
const relayTx = await l1Messenger.relayMessageWithProof(
|
||||
l2Gateway.address,
|
||||
@@ -196,7 +245,7 @@ describe("GatewayRouter", async () => {
|
||||
deadline,
|
||||
nonce,
|
||||
messageData,
|
||||
{ batchIndex: 0, blockHeight: 0, merkleProof: "0x" }
|
||||
{ blockHash, messageRootProof: [] }
|
||||
);
|
||||
await relayTx.wait();
|
||||
const afterBalanceLayer1 = await ethers.provider.getBalance(recipient.address);
|
||||
|
||||
contracts/integration-test/L1BlockContainer.spec.ts (new file, 267 lines)
@@ -0,0 +1,267 @@
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
|
||||
import { expect } from "chai";
|
||||
import { BigNumber, BigNumberish, constants } from "ethers";
|
||||
import { concat, RLP } from "ethers/lib/utils";
|
||||
import { ethers } from "hardhat";
|
||||
import { L1BlockContainer, L1MessageQueue } from "../typechain";
|
||||
|
||||
interface IImportTestConfig {
|
||||
hash: string;
|
||||
parentHash: string;
|
||||
uncleHash: string;
|
||||
coinbase: string;
|
||||
stateRoot: string;
|
||||
transactionsRoot: string;
|
||||
receiptsRoot: string;
|
||||
logsBloom: string;
|
||||
difficulty: BigNumberish;
|
||||
blockHeight: number;
|
||||
gasLimit: BigNumberish;
|
||||
gasUsed: BigNumberish;
|
||||
blockTimestamp: number;
|
||||
extraData: string;
|
||||
mixHash: string;
|
||||
blockNonce: string;
|
||||
baseFee: BigNumberish;
|
||||
}
|
||||
|
||||
const testcases: Array<IImportTestConfig> = [
|
||||
{
|
||||
hash: "0x02250e97ef862444dd1d70acbe925c289bb2acf20a808cb8f4d1409d3adcfa1b",
|
||||
parentHash: "0x95e612b2a734f5a8c6aad3f6662b18f983ce8b653854d7c307bf999d9be323af",
|
||||
uncleHash: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
coinbase: "0x690b9a9e9aa1c9db991c7721a92d351db4fac990",
|
||||
stateRoot: "0x8d77db2a63cee63ae6d793f839a7513dfc50194f325b96a5326d724f5dc16320",
|
||||
transactionsRoot: "0xe4ce5f0e2fc5fd8a7ad55c2a31c522ded4054b89065c627d26230b45cd585fed",
|
||||
receiptsRoot: "0x10b2f34da3e6a1db9498ab36bb17b063763b8eb33492ccc621491b33bcb62bdd",
|
||||
logsBloom:
|
||||
"0x18b80159addab073ac340045c4ef982442653840c8074a50159bd9626ae0590740d07273d0c859005b634059c8ca9bb18364573e7ebe79a40aa08225942370c3dc6c0af2ea33cba07900961de2b011aabb8024270d4626d1028a2f0dcd780c60ce933b169b02c8c329c18b000aaf08c98245d8ad949e7d61102d5516489fa924f390c3a71642d7e6044c85a20952568d60cf24c38baff04c244b10eac87a6da8bb32c1535ea2613064a246d598c02444624a8d5a1b201a4270a7868a97aa4530838c2e7a192a88e329daf0334c728b7c057f684f1d28c07d0d2c1dc63868a1088010ae0b661073142e468ae062151e00e5108400e1a99c4111153828610874bb",
|
||||
difficulty: "0x0",
|
||||
blockHeight: 0xf766a8,
|
||||
gasLimit: "0x1c9c380",
|
||||
gasUsed: "0xe6f194",
|
||||
blockTimestamp: 0x639f69e3,
|
||||
extraData: "0x406275696c64657230783639",
|
||||
mixHash: "0xc1e37ce2b7ece4556ec87ea6d420a1a3610d49c58dfccec6998222fbf9cd64a2",
|
||||
blockNonce: "0x0000000000000000",
|
||||
baseFee: "0x2b96fa5cc",
|
||||
},
|
||||
{
|
||||
hash: "0x2da4bf7cef55d6207af2095db5543df16acbd95dc66eef02d9764277c5b0895d",
|
||||
parentHash: "0xde18012932b21820fbb48ef85b46774873383e75b062bc0c6a4761fbe87bad13",
|
||||
uncleHash: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
coinbase: "0x690b9a9e9aa1c9db991c7721a92d351db4fac990",
|
||||
stateRoot: "0x1f101f54c3df5630c9d45224c95d71a57479992e174cdbda0c4ada30e657a465",
|
||||
transactionsRoot: "0xc2b29438a5f55998879356cbc8006a90d2ba88a9841b3894c8da5840dd797f19",
|
||||
receiptsRoot: "0xbd3608b6af5464b446db44fd289a980f417447b31ff15dd6d48c72fc8f4fef8d",
|
||||
logsBloom:
|
||||
"0xd9e5f4f1e559388eb8193295ab2d3aab30c588d31e381c4060715d0a7ce607360b15d7a0d88e406c60135e0abcecd1d816c11f8cbbb2a80a9b4a00375d6cf356cb78f2934261ab09ea03df29dab5dbe4aefea506f7fd0eaa1a8b1fc8db5079613a49d80ca7e7997a20c7158399022c1dc9853f5b401b86587249fc96ca6fbc2dab1fdeb203ca258c94dd0bc821b38f9f60128591f3cd224c5c207b76b754e537bef8ebe731effae356235dd71bd7b5494bead124a8b5bb0ba02e46721d3ec3c20608880b1d35a17f6a1027d20c7b902e5d7b2ec8177b1aff9dcfbb4729d1e3201e78fa1b3c30e66a590cb5a7cac7afe0b0b1a6c94d5e39c9a20908358b805c81",
|
||||
difficulty: "0x0",
|
||||
blockHeight: 0xf766d8,
|
||||
gasLimit: "0x1c9c380",
|
||||
gasUsed: "0xf8adad",
|
||||
blockTimestamp: 0x639f6c23,
|
||||
extraData: "0x6275696c64657230783639",
|
||||
mixHash: "0x6066061b78b385483d960faa29ee40e79ea67769f5e697ecb70a0fce677804af",
|
||||
blockNonce: "0x0000000000000000",
|
||||
baseFee: "0x2aca8b608",
|
||||
},
|
||||
{
|
||||
hash: "0x4ddeee3e8d62e961080711e48d8083f164789e78cc90e4362c133063b566d64a",
|
||||
parentHash: "0x9d190c6d49352d628e321853967dd499d78c521daad73652ed1978db5652f58a",
|
||||
uncleHash: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
coinbase: "0xcd458d7f11023556cc9058f729831a038cb8df9c",
|
||||
stateRoot: "0x3620665f9d094aac16e0762b733e814f4e09177a232f85d406271b60e4f2b58f",
|
||||
transactionsRoot: "0x200f5acb65631c48c32c94ae95afe095134132939a01422da5c7c6d0e7f62cb3",
|
||||
receiptsRoot: "0xc140420782bc76ff326d18b13427c991e9434a554b9ae82bbf09cca7b6ae4036",
|
||||
logsBloom:
|
||||
"0x00a8cd20c1402037d2a51100c0895279410502288134d22313912bb7b42e504f850f417d9000000a41949b284b40210406019c0e28122d462c05c11120ac2c680800c0348066a23e7a9e042a9d20e4e0041114830d443160a46b5e02ec300d41330cf0652602140e1580b4c82d1228c000005be72c900f7152093d93ca4880062185952cacc6c8d1405a0c5823bb4284a04a44c92b41462c2420a870685438809a99850acc936c408c24e882a01517086a20a067a2e4e01a20e106078828706c7c00a0234e6830c80b911900291a134475208a4335ab0018a9048d4628186043303b722a79645a104c0e12a506404f45c428660a105d105010482852540b9a6b",
|
||||
difficulty: "0x2ae28b0d3154b6",
|
||||
blockHeight: 0xecb6fc,
|
||||
gasLimit: "0x1c9c30d",
|
||||
gasUsed: "0xb93955",
|
||||
blockTimestamp: 0x631d8207,
|
||||
extraData: "0x706f6f6c696e2e636f6d2050cabdd319bf3175",
|
||||
mixHash: "0x18d61005875e902e1bbba1045fd6701df170230c0ffb37f2e77fbc2051b987cf",
|
||||
blockNonce: "0xe8775f73466671e3",
|
||||
baseFee: "0x18c9de157",
|
||||
},
|
||||
];
|
||||
|
||||
function encodeHeader(test: IImportTestConfig): string {
|
||||
return RLP.encode([
|
||||
test.parentHash,
|
||||
test.uncleHash,
|
||||
test.coinbase,
|
||||
test.stateRoot,
|
||||
test.transactionsRoot,
|
||||
test.receiptsRoot,
|
||||
test.logsBloom,
|
||||
BigNumber.from(test.difficulty).isZero() ? "0x" : BigNumber.from(test.difficulty).toHexString(),
|
||||
BigNumber.from(test.blockHeight).toHexString(),
|
||||
BigNumber.from(test.gasLimit).toHexString(),
|
||||
BigNumber.from(test.gasUsed).toHexString(),
|
||||
BigNumber.from(test.blockTimestamp).toHexString(),
|
||||
test.extraData,
|
||||
test.mixHash,
|
||||
test.blockNonce,
|
||||
BigNumber.from(test.baseFee).toHexString(),
|
||||
]);
|
||||
}
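A quick local sanity check on these fixtures: an Ethereum block hash is the keccak256 of the RLP-encoded header, so `encodeHeader` can be validated against each testcase before the contract tests run. A minimal sketch, assuming `keccak256` is additionally imported from `ethers/lib/utils` alongside `concat` and `RLP`:

import { keccak256 } from "ethers/lib/utils";

// If the fixtures are genuine mainnet headers, re-encoding and hashing them
// reproduces the recorded block hash; a mismatch means a field is mis-ordered.
for (const test of testcases) {
  if (keccak256(encodeHeader(test)) !== test.hash) {
    throw new Error(`header re-encoding mismatch at height ${test.blockHeight}`);
  }
}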
|
||||
|
||||
describe("L1BlockContainer", async () => {
|
||||
let container: L1BlockContainer;
|
||||
|
||||
for (const test of testcases) {
|
||||
context(`import block[${test.hash}] height[${test.blockHeight}]`, async () => {
|
||||
beforeEach(async () => {
|
||||
const [deployer] = await ethers.getSigners();
|
||||
const L1BlockContainer = await ethers.getContractFactory("L1BlockContainer", deployer);
|
||||
container = await L1BlockContainer.deploy(constants.AddressZero, constants.AddressZero);
|
||||
});
|
||||
|
||||
it("should revert, when sender not allowed", async () => {
|
||||
const [deployer] = await ethers.getSigners();
|
||||
await container.initialize(
|
||||
deployer.address,
|
||||
test.parentHash,
|
||||
test.blockHeight - 1,
|
||||
test.blockTimestamp - 1,
|
||||
test.baseFee,
|
||||
test.stateRoot
|
||||
);
|
||||
const Whitelist = await ethers.getContractFactory("Whitelist", deployer);
|
||||
const whitelist = await Whitelist.deploy(deployer.address);
|
||||
await container.updateWhitelist(whitelist.address);
|
||||
|
||||
await expect(container.importBlockHeader(constants.HashZero, [], [])).to.revertedWith("Not whitelist sender");
|
||||
});
|
||||
|
||||
it("should revert, when block hash mismatch", async () => {
|
||||
await container.initialize(
|
||||
constants.AddressZero,
|
||||
test.parentHash,
|
||||
test.blockHeight - 1,
|
||||
test.blockTimestamp - 1,
|
||||
test.baseFee,
|
||||
test.stateRoot
|
||||
);
|
||||
const headerRLP = encodeHeader(test);
|
||||
await expect(container.importBlockHeader(test.parentHash, headerRLP, "0x")).to.revertedWith(
|
||||
"Block hash mismatch"
|
||||
);
|
||||
});
|
||||
|
||||
it("should revert, when has extra bytes", async () => {
|
||||
await container.initialize(
|
||||
constants.AddressZero,
|
||||
test.parentHash,
|
||||
test.blockHeight - 1,
|
||||
test.blockTimestamp - 1,
|
||||
test.baseFee,
|
||||
test.stateRoot
|
||||
);
|
||||
const headerRLP = encodeHeader(test);
|
||||
await expect(container.importBlockHeader(test.hash, concat([headerRLP, "0x00"]), "0x")).to.revertedWith(
|
||||
"Header RLP length mismatch"
|
||||
);
|
||||
});
|
||||
|
||||
it("should revert, when parent not imported", async () => {
|
||||
await container.initialize(
|
||||
constants.AddressZero,
|
||||
constants.HashZero,
|
||||
test.blockHeight - 1,
|
||||
test.blockTimestamp - 1,
|
||||
test.baseFee,
|
||||
test.stateRoot
|
||||
);
|
||||
const headerRLP = encodeHeader(test);
|
||||
await expect(container.importBlockHeader(test.hash, headerRLP, "0x")).to.revertedWith("Parent not imported");
|
||||
});
|
||||
|
||||
it("should revert, when block height mismatch", async () => {
|
||||
await container.initialize(
|
||||
constants.AddressZero,
|
||||
test.parentHash,
|
||||
test.blockHeight,
|
||||
test.blockTimestamp - 1,
|
||||
test.baseFee,
|
||||
test.stateRoot
|
||||
);
|
||||
const headerRLP = encodeHeader(test);
|
||||
await expect(container.importBlockHeader(test.hash, headerRLP, "0x")).to.revertedWith("Block height mismatch");
|
||||
});
|
||||
|
||||
it("should revert, when parent block has larger timestamp", async () => {
|
||||
await container.initialize(
|
||||
constants.AddressZero,
|
||||
test.parentHash,
|
||||
test.blockHeight - 1,
|
||||
test.blockTimestamp + 1,
|
||||
test.baseFee,
|
||||
test.stateRoot
|
||||
);
|
||||
const headerRLP = encodeHeader(test);
|
||||
await expect(container.importBlockHeader(test.hash, headerRLP, "0x")).to.revertedWith(
|
||||
"Parent block has larger timestamp"
|
||||
);
|
||||
});
|
||||
|
||||
it(`should succeed`, async () => {
|
||||
await container.initialize(
|
||||
constants.AddressZero,
|
||||
test.parentHash,
|
||||
test.blockHeight - 1,
|
||||
test.blockTimestamp - 1,
|
||||
test.baseFee,
|
||||
test.stateRoot
|
||||
);
|
||||
expect(await container.latestBlockHash()).to.eq(test.parentHash);
|
||||
const headerRLP = encodeHeader(test);
|
||||
await expect(container.importBlockHeader(test.hash, headerRLP, "0x"))
|
||||
.to.emit(container, "ImportBlock")
|
||||
.withArgs(test.hash, test.blockHeight, test.blockTimestamp, test.baseFee, test.stateRoot);
|
||||
expect(await container.getStateRoot(test.hash)).to.eq(test.stateRoot);
|
||||
expect(await container.getBlockTimestamp(test.hash)).to.eq(test.blockTimestamp);
|
||||
expect(await container.latestBlockHash()).to.eq(test.hash);
|
||||
expect(await container.latestBaseFee()).to.eq(test.baseFee);
|
||||
expect(await container.latestBlockNumber()).to.eq(test.blockHeight);
|
||||
expect(await container.latestBlockTimestamp()).to.eq(test.blockTimestamp);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
context("message inclusion", async () => {
|
||||
let deployer: SignerWithAddress;
|
||||
let messageQueue: L1MessageQueue;
|
||||
|
||||
beforeEach(async () => {
|
||||
[deployer] = await ethers.getSigners();
|
||||
const L1MessageQueue = await ethers.getContractFactory("L1MessageQueue", deployer);
|
||||
messageQueue = await L1MessageQueue.deploy(deployer.address);
|
||||
|
||||
const L1BlockContainer = await ethers.getContractFactory("L1BlockContainer", deployer);
|
||||
container = await L1BlockContainer.deploy(messageQueue.address, constants.AddressZero);
|
||||
await container.deployed();
|
||||
|
||||
const block = await ethers.provider.getBlock("latest");
|
||||
await container.initialize(
|
||||
deployer.address,
|
||||
block.hash,
|
||||
block.number,
|
||||
block.timestamp,
|
||||
block.baseFeePerGas!,
|
||||
block.hash
|
||||
);
|
||||
});
|
||||
|
||||
it("should revert, when block not imported", async () => {
|
||||
await expect(
|
||||
container.verifyMessageExecutionStatus(constants.HashZero, constants.HashZero, "0x")
|
||||
).to.revertedWith("Block not imported");
|
||||
});
|
||||
});
|
||||
});
|
||||
120 contracts/integration-test/PatriciaMerkleTrieVerifier.spec.ts Normal file
@@ -0,0 +1,120 @@
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import { expect } from "chai";
|
||||
import { concat } from "ethers/lib/utils";
|
||||
import { ethers } from "hardhat";
|
||||
import { MockPatriciaMerkleTrieVerifier } from "../typechain";
|
||||
|
||||
interface ITestConfig {
|
||||
block: number;
|
||||
account: string;
|
||||
storage: string;
|
||||
expectedRoot: string;
|
||||
expectedValue: string;
|
||||
accountProof: string[];
|
||||
storageProof: string[];
|
||||
}
|
||||
|
||||
const testcases: Array<ITestConfig> = [
|
||||
{
|
||||
block: 16212738,
|
||||
account: "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
|
||||
storage: "0xb17c5049c06186507ed9d55e735dc0342e08579866e7ed881de010624b3896dd",
|
||||
expectedRoot: "0x5dd9637058e605949321a683ab1e6c56ae6041a05cdf97355696f93309799391",
|
||||
expectedValue: "0x00000000000000000000000000000000000000000000000052ab3594ab17a60b",
|
||||
accountProof: [
|
||||
"0xf90211a04cfe239817b200a743356abfc6e5b08d9951e90f3932f57a7c12014d9968b040a02c94e10276ccd6a461e94da963f126e396d12f50a3389966705dbb0ece7f67aca0f28acd17ade90c99e92e3e155a46076ef89f51f22caf45ec8f5affc240073cf6a0f26e26128daf3ecbb7a37eb10afad22741725a1ce43819f1f573da6f1e6fc2c9a020e3325c4125cde3a948d7a68530a8f8979591c17f445bf96b4716d64833f6c8a0def41ac472c300aed57feb95cf7426fcca53d4c0007afabfb0d6c4d3b4ad95fea0a65435daeb1a371b29c3037a01230d19872e2bdb1a97aeafe610df01dd9937c3a0c4d93f1c9037597d4b07388551773f9578203a8abf4f3bfabd6eaf58070f32d5a0d008f86640c7313e00f897b2b9416da54ea2182fa98785e583367e42035fc0baa072981aa04d506601aeb2cf8689ff23dff82a52a29e1d401dfe96baa2550b977ea065a9e75f35c97436334ad2498ea3fe4296829ad7b005e65af34fd10ddb368631a0b326e41a44cadb3e78fd84571f5e0f9da6b5ee5dfcfb1c88a6b1fcdb13fe6beca0e32897d4de5966ed95729c2a68354d1ef7f27a9b8a5cdaec592965bcc5b339d3a0022b816b5afca265766e67427b45682ade004a780e7e3667b41930a1d087230ea0dc0eb205c8cc3642fe2f03252b238891819a8528f30fc0207c2271290e8de9a1a0554966428442b6b9188a4b4945aa550f32615e4b1b1d3776b07c9d18ba0146af80",
|
||||
"0xf90211a00ee4696104bbdba0094ca30daa4eae871b1dc0c2ccb07d8f24c7c89193904607a080893f1dc4ded5ddfa49eb36a618a0c3525c58890ae7e4be006e6988980cd15ca04ad58fd70d3cabb3d59144f729105997d3f07c0223a41dbc9b1507d1aa7825cba03bbe2d774e64d6577f1e90ef42b20664b4707649b00e957e369e97a1f03dc762a0107ec21d49839dbbb267fe5ca58c615af81df1752b7f49c8ce2da952a132cebba0d4bd3d22a406960040f86aa8fff6c9e29a2337560145730f9e3612844d67dd1ea09b1edb047a63e19ba02f894a19bfe2d4fcb9052e0dddd6d40dfa52351b3e973ea0a397a48dcdbeef118776a2cbd06fa400d1bedc99ed9f61d4f4513cc6aa7c29daa031f5b24b9027eef2c12015db45ef79c6f57681a6818173e00ddb1050567be4aea035748b7d80884cd8ee2d53523e9aa49186091b28dadd09e1a4f94b8ba3e3c995a055f851741c59287e048a908539c348c6555c098ac16fa074395d530b67f076b9a0f189025cd5b04a3b73bcdbdfa0d9637a0ff389f7b9a481efc2cb984484cb106ea0d7e874ea3b71239bbdb6f01287f58508816b270a218293987e42618f6b982777a0447c72ec8a23e35ba10b61462c11c654653556682994de6ea7866a538320fd3ea0d52ef935a9abaa771614224e75c9d06b8d94270a5ab11b95635f3d646bc7f80fa020d93fff55bcd20b764b2a4261b448cac23fa19dd64dbb7d33345a27b1c02dce80",
|
||||
"0xf90211a0aa579a2bef0815ecbe72dcc7a478494f4ddf6e6a821fed8b8e5e22f96be95fb1a0f7be1171a1b188f0159315731796369ea543043b3f2076ad688f2bda5315d4f6a0ac7901c3cece0eafdb607bf3f6981aac2741804c77b0db674d7bc69c6e0841d5a0c1bf87d0fc7ff63bc43bb453920d13b00ed2e126931fe431206519e47f2aff58a0fbb3f885d4e17a30daad80568b76ca24b70f95ddb3840598c9cbf5499caa13d2a009566520886f90ae776076398c3393585110ea164c8a1e6c47980ec67fbbbf9ea0709eec3f022710443237d2ee3d967abb9fe295b335dbc783768cc2396ba0b28ea02003180468280c9bf5819207be30c9f3176a0cd68a57b43fe353565a5d42b62aa09817a0745b614df9aa5268081c06eaa5d6c057c86e0253ee26f081b9fc5487a1a073265752f6c91428565dab106305f47b8c609522ee518b4f391c7f8951f5394fa03ef7529bb0ee4030c994910ba8d8cd0eafbfcc4d7f7a0fe9b528b09360ab12e0a093330c4eb263124f35f26572747b59957744f1c39cb91e413b599d27e07dcaf6a022dec6cd45c7db6901c364be4226d54fd74552af51d835d605d1efde50a374c0a0007c30f8707814de913a9edd9bf09fe614676b2ed5497ea06bd157e5ec1718c2a0e6d9335dee9c32e74ae736ddccb15bbbe3ca07c347e7d038a6103877d1cefd31a02d5576458404a2e48f2263741a2c5ff181ff03939e1952cd616412c98edacdae80",
|
||||
"0xf90211a0f10b8f4ec168083a8021ac8201365f45d63ad461fdf4cf8c50342499f197f5f3a02341a492492fa8323462dad1af3ab7094b76ae3666c4d081ec8d24c9e0da451da0017ce2794246eda28f5b1b3fee05dd269dabb29f71799ca7c3dca67764132c82a02b629e4b9b699796651ad13840a0d06721041de42d09f22ddf3e0f7c89ade82aa076d2c3f907c842c8e76454503b7ef1c9f52e93fc3830b4b8cd63dadeefa8fd4da09284abd6431d107e527627dd78d3cc2a655f7e364470ef620fb7fada3fcece73a00afefb47543ea7c9866f274ab4aa14ee261ffcd0c8b7c8c4e66f1ff02eda6ed3a02045ebe244660a6cae3467637f3e0b27c003cefe72681c47acb2e9766c1f17c7a08fc1ee83563261f1104687cefe451fedcff6caf2dae3f3a2a382b1a0bad7109ba00afa5fe38079cb86511e0842b62af452a78ecd43dc6c54351ed3ec923769088ca0a9c36efeb72874a37dd282f309ff318b6e9464ece30c63ba80bfbc5e5f76f163a030b918045e6233a81833658889f54cedef0f1052aa56e673649a955bc8fee94aa0eae7097667819b85f90f7d4a72d9a8147dccf5fbd744050a743f029e0a79c725a0671e20fc1d94cdb487e0a8cb8c752fd3b4c2f759b9a905134f7a880e1dcdc96da0425857c455a0e10c7cae230d3b3a3309ff5732b55ca2186cc9ddaecff5460490a0b10db994f51f52b29f43281c561f2c62f2be921c5f585fb441760ce9aa4d3d1a80",
|
||||
"0xf90211a0fd942eae2655a391e59dc2779f53209542fcc140b96e4b93cff3d8cb417e6efba0bd3535c9bfa5a7b939c7dff9307610a5958f8a785d2dcf7eeaf84624d0e457cca05ce0a4917922d7b302fca1badd446897f360b11d60be9802c45136166a81dc79a0731d140390c684a63ecf3ba9d392c73b8fb1bf2864d4b90eff813e953f66ac4aa010bb21166ea999880a179d6669704ecf6c50ea9e47eb674d9b077a7d4c4f9baba085dab7106099e19e2c978e8e814a7749af5bbdbe1131333713e612898a8d62c1a012720a68371573fe69f384950b871b09a44af5fe2c4870f231a58e07190c1b36a089e816024bd04ad03ca66e47323feaf5d975b3ec41b46fb124ba9a9299c26da7a0827ecf55875811b3b25696b3737ead4817641d29ed46d5c4892d098357b699e2a06450a823c9feb0adcd77aec2d3156057f2c93f83670da26afed344e2c6a8f5a7a045fd2f25ecd36a65186513e409fa3b3e3f3a0f7f60f5951c76d2ce10235db1bfa06819009da16eeacf224ce65fc7dc8052cc2f4dd32813441801ac3be9e9db98c5a0ae81fa6db4342f607a35aea6a10047c1848c9251d87140efd6c24685ab964b08a0ee867ebe92374b199244599920a3a0fd13ca24030ae6c1d1af1ac8523a8968faa007dcd579f048937f2bb7a388a158f565b3338e35d37f455d2d6861ca208183bea0dbc271c1b2865a38476161513c4a590807f8db6f2a4de8db1e9c142a8a15349580",
|
||||
"0xf90211a02b207484d2fd6781a1e4ae62c2c4171477bd5b929df2b14904cd4f36c61363cba04cbd3a34c4d4f60bc5590d8b5859da8ac83ea7a8a0197dbbc528434651b0f748a0beafa9a7e0b2073100526355a341de7a1a839c7f7322a594bdc9ed4d73d72283a0249717659c4e7adda14416a804ba5c9b305f9da2531a3ff6e6d74fca6380f4c2a09b5d4bcf5c805d1c38f283bca39ce28077cbe0daed23312d666cde49134a4d2da03930a91cdfb11a85632972832202e0ab4027f78049f828a099327513be660ed0a0ec6a17d51d787c382575d6798093a015e8383bb276b6fb291d529498789ada09a0f54c88077fa118092db43a93d89c86ec879da12d33e6e5dd89b10b7fb115bc54a0e1a3af76bd6a0b1f4419a62bc73439c641c612a912dc8d190e8e81c8c15dd561a097934d75e361d115ea93e2fdc0c91a54d59414f0daa2ac1991b6651ae6571f9ca009abf1666d7d9202849314692d5ce1e51e5629727701044b37532ab3f9be50c0a094561fbec829ff4807911e0169bcb59159bf8d478fe7116cd652c179c28342f1a058ea9466450f42b25cc3298911ebeb081b6bc73f3c414f0d36244d331cc18c5da0697343bd56fce1c2d34ebb3baa06b3f5aba4851e3b60436e545a2616ef47cb73a06ef38fec665b8eb25934622af1112b9a9d52408c94d2c0124d6e24b7ff4296c0a0451066ddc0cd1a63e22d096eab595e74c8e8509616650d76a0eedd35f0c228b180",
|
||||
"0xf8b1a02a85b6c4adf828a068d39f7bf4115a4544ebf32e007d63957a28ee21eb8dcd57a0344f34e01710ba897da06172844f373b281598b859086cf00c546594b955b870808080a0525e7dd1bf391cf7df9ffaaa07093363a2c7a1c7d467d01403e368bd8c1f4e5680808080808080a0235db60b9fecfc721d53cb6624da22433e765569a8312e86a6f0b47faf4a2a23a06c72cff8105f47b356034e5586745859f6290eb366bde35b9e819af9dcdfdd8d8080",
|
||||
"0xf8719d3da65bd257638cf8cf09b8238888947cc3c0bea2aa2cc3f1c4ac7a3002b851f84f018b03235ac0b3723f4d6c6f61a0f3ea73ed7d35e887e1b2b8ac13e8645eeec0da8210c16da47b0f3b0894011c3fa0d0a06b12ac47863b5c7be4185c2deaad1c61557033f56c7d4ea74429cbb25e23",
|
||||
],
|
||||
storageProof: [
|
||||
"0xf90211a04571622a123ea7cf0d9534115e5e6b2fd058f94306979a373b226979a8c83af3a0293a081f517366f69769840098d809396caf7ff3942c3b16aa641b23723301b4a0605ef8aa3eb98c75406d2781067f9d55804b4cd981614aa09f9f6cb0d87a91b0a09d7f20c3afe36c59119c1308a6d7a3efca7c6588acc14364c0e70b5f7f5ecf97a0ce1729eeec5fb5d9d3fed295e469da960bce62cbbd4540efbb0eaf470b0014a5a0a69bd31a7f4267359dd41b93f03b949bdf4de072651b6929ea4e756bc6f088b6a0801ba6fed2d48d4706569a62678fb93ca48dc159fd8659b7100bc4070e3f24f8a0a58273972230f9ef6f74f1d3d1baa8795f82d0bc2c2313b7522a35cfad25ca7aa0be46e098b427907021d82e9d1d45ca4ef6305e3adacb71683f94e4656718ba14a083808d1c8c0ca4a5668cbe6faba42d927ef8df07f3581d06a9381084f0590defa00b6eaadae4a3d219a0e090a56cfdb17e31326e9d60802cf3a36e8ed0f14490f0a00146a284e0a8245d2c1f51ee97fdf9f4231caee252aab01fcf3c4a619f39663fa00b68dbe3928080b43cfc2533fffee4ed91abff24109f08a3ba26e8aaae18c7cca0345de27acef95642cf996a0485bd0242281c7ed9fddd6bad6f55e6bff04588afa092099ec8d9e6dfea3ee5fe4ce7b18f9e513cd7229f7a8de6ebf93ff5ce757232a0963d3dcfec3a80dc1073eb2292be246d81b4462b8347511d335b4c537f87c29a80",
|
||||
"0xf90211a089a4ed194eaf9e272c155d2e692b5585c6a38bd04ae96e487bcc231771701f98a07a7de6dadac670c4062757c16976c4fd98c587a47a687b32b640375fd7e825b8a0da765585e24133176d2b38376f362b666800735c46e6358bdb526d03f068f97fa08acba1cd699af52508c374da47250b1d2be1a43a7d25aff247ec717b8a534213a0e74be231dfa53a30bd3157e6f702f14619887946e2a447d31dcac87f391a50c9a0b8448e3cc5dd4e9728c7fff44ec252bdade1618a63d363e86e0e6dc4c77de5f2a0f95aadc2a07fb025f3492fa7d15224bab718a908b1fdecec39900f905273d8fea0b76a4d3edfbf657e6d87e2e3920b478fb8f4bdba7844a7ab23798e1bed4abccba0fd70d97eaebf9d1b9e65dcb960bc1b7e96b03a40dfcd490ebf8bc5bab8c413b6a0fb3fecd1f77557f554c6d22b86e9dfb27fe644d13c8e53c24b64e7b3f3791cd9a039cce3c9632ea42f008bb8fd3412e94dea053d4a2baa41c4a2517b34ba8e4405a066b4b4db0e22d9fa76395494b571b7c0cc1cd18ccd332e8a59bfa03b2be2889aa0a80a5acaeeb595a5740f1844d32eab4d56fffe53176c21a464ff34a8cda84101a0f454d635fa0657c436c5fc2b6a071c62e4c01c139dc2ee544dd8997f2ee9242aa07fa5c3c8e2be0f1255f49383046703291953d29debf61376f862edd3c5b4cf76a0a30f1b5c1c3c4b307a2ac472c81f79283803e88403a5ccee7750ce7175c0b0d380",
|
||||
"0xf90211a083f3f2d187ac7939ccbb8690863f341b252909afec4dcce275a2e7318e1f15d2a08fdbf9e41ea870a7ec2aa31ce43a682b8e2fffd0988bb934c03dc14e1988952aa04b9e7db219d192320bfdac399670cff992e0aa5dc25d2f3de56f4f53e5373456a07f27f9e5efb3a92a1f2f3e6d8fd4bfaf9015b9fdad8715ba16d30c211aa0530aa07cc6af0533c32fe1af0e5d4b149186970040ac5c69c2db7805774a65532fa064a0f15e9c0dbdd4f935d3aa719506ae1fb7297258d18abe03111d9e5221d6bfb8cda04572757dae6365a28b493c63503809a9dd6927b6e6f11f791e9c2cec92b80513a0d1ac01dd696504ca20c087bea731dac1b8c48d26e5dad36d80e34496ee20b46fa02d879c981e1706e0720b3efa7093308a499d57ccbf9648cba78026b3e7883795a03f007ce733ee8a522776e46bbc5dd28ea33db0ae4702d733926d83b28c4d0181a01b1858a30125abe3a401112f676d6a4b669ac9495b34f89691c075ec7630a45da09d22b122a2fd0db8cc2397c0c8e05fe317e3bc8aa407af8b85ca300d9411dc0da04ad97d66e54c7a2a76bc6729384080115dc3ba5e6a7c5269470372ba6d22eeafa0dcfe09b848078f66db11284e093991436f85ef26ddb3dc2efcf56e4bf05e6101a0e641c7a710a5b8a3b465e05b09e4868d9e54353b50d29eeccc9e829ea314041da063ba309481ffd1118153e75496d66bc7a96d37f32c63f4e731e56abe4fa5f12880",
|
||||
"0xf90211a00a62828ba9909a92bad0ddff29537a58e176fb8af1d76292813a72f5661ea282a0f037cbce7cbacb3343cdf899fd145917e7cf18deddf5b2d8a94027968f9f1624a064774630a8d992b0888514b5e1dc2fdd37b8a214e6bd39d3689eaf74bf65bf68a0b6ee7661ab782818ac639c03784ab65eecbb06d79d251cd8c25627e51ba5b94da0c1dfabca29a2ae57d88e29f0ea94bb3a825d4b884c7f088ab4261b5900635ecba01bf409b8577e89fe49afa62ec117c32a9beac5f8e8cce54adeb3bd501c15cb80a08d7b60700564e51011a00159786683d707b676f41214b3e538b074fc79484748a08e58472318ad40f9498b98a599d260a80298a2cba39cf45d0bff8d91ae2e4852a04443244bd4654d707e3700d112783b837070111ba8a2f0f11781d623c3990754a0750eac11d5f2be0746f87df3cf9849ccb8f13c831936a745abd37fc464d758eea06311c8c2cbdfc4ff1a7e550477cf38ddc35cf57579d0f842801a9ad6fe50c45da0c6ceee02d855cef0db230d186d9e37b8777b8313a22b3dd6946143da503919d4a08669ea1760b9551901c57fd56411368ed8de861bb4602d26f93005d0101fd195a0285993aee29c28d2239022fbda7df02d06082e0246431b7671edda601c6e5cc6a047bfd76124562bb812ec81f5b286e09907eba7e9b1efa72d4ac7a49b82eed957a054bf6597873bf09bfd3df04d4fdff771c02f9d728d51ed1ef00f6b053f3282f280",
|
||||
"0xf901f1a0c5a1504268a750c1c90b7841d99e6934f977193c72d44ba456fc9a263fb3ea45a0924bbfcbd6d2e7a3f9bb5ec1898a1ec0b98880f747991e96696bd0b565e1f83aa07ccd4b2cea9ff079bea41f9d704c21e7f9d3fbaa83895f34970585873d5bd9e2a0b2e313a02508e8a0dfa115612c1400f8cf9d5cc23369b6aefd7c1fceca7dc943a0e19964c5618fe9f1f590eaddc17787071442649385109b9324beb8bf51a0d2d4a0b022d54d33a1c62278d7784996fddb4c7dcab2fc3c2287c6840edc3762e3d034a0a8381f53de80c0d06ca7288457d82fc1cef37af3e08abbed93a61d48d7c9ca1ba03f916faed29b999d16e22fcc2ad463681a42339b24fdca5a1323b5e55d5650f3a0eb6adbd0b998ec882b91b44ab6ccf20050962c45b68d4e42d2f0e3e1c9384952a009190c615b4dab60e7c1940f2b3b87e3636a655b29dd8b65b99f497ab4fbc395a0156deb01c2c14daf7c043555c077b4af3c5aac031d75cf9e4f704280983c67c8a09dd3b43b4514cfa57218538527defb69638f108383a9d95ad07a296d30bd5bbf80a01316d876cd6803dd122538f308cf116b79278393d979769a121f8354c925cda0a0324232c83f8194263838f7105b67fb93b805c027d6419a98f3c40937b9502132a0cf19102ca5c74f4e088ca39ded150e7a9d5d1bc5d9263012c7e843dfdec8386580",
|
||||
"0xf8718080808080a0795b2bc0fec80623a0785ed76761d1e9abbf37b806b4b1664a22c1dac557d79080a09831b7f896628cd55e9cec00f168d92c748a1dae2fc55774f0fdc80ae64294a08080808080a020edc6edb75de3cfde19500957b220fffbfc581e93b5b6e307fac078a8b14783808080",
|
||||
"0xe99e20e18d2fc45a3ea90621b218552f932e0a2a920a290d1c6bda98db9ab133898852ab3594ab17a60b",
|
||||
],
|
||||
},
|
||||
{
|
||||
block: 16212787,
|
||||
account: "0x9467a2d9c07cebce3708ca32eeb2b9219aeb31b8",
|
||||
storage: "0x000000000000000000000000000000000000000000000000000000000000000a",
|
||||
expectedRoot: "0x16b9e5246ca2dad361d440d5524cb431ca30d0575fc21f4e4242f7611fa2a212",
|
||||
expectedValue: "0x639f404f0000000000031d02a5d2b33515ec000000000000072629ee1252f3a0",
|
||||
accountProof: [
|
||||
"0xf90211a0aa686b484fd06fd6a76b4b37cbf3965553120d61b93dc354e1e32e3442fff947a0c8401b3aaef041fd79bcf69bc8eae7220b1932973d088c368422b43e7fa99d3ea03d14c01a86a93d483dae0f088ccd5f64ee3346bba6590bedcc6ed4975d36c0c6a0c64f3e49789294f22c3cb3bfdc78406933b8a47f743de5c999599f814cd8d166a080205a023284e4f9905946076d9dc0c029fca1452743becfba43ae49b0c09d18a04e13c9c6719f3519cb7828514f1b0e393398c7dfb0d703980062e52a3faffad1a0e806c685e60d3b312f1e740422728358f9992e4b7cf62c904c8c01265e88fac0a0f21e7ee12a407fe11cb0950f63ef5dcf62d26fa599f40136ec057c684ccaef73a0bde4594be3b1be7c4312c6ecf81ba8cd8057331563feddd4fdbabf3c67385fbba008ff9a89a68d8a8f6cec81a8553ff72043c4dcdc1ce784874c3fa5e76916f4eca01c5e489af3e55abfdee369a10075b761f58be65d5d589742ca8c6098db88e9c5a05b212b9a9b393541dec0d34c4908a194ccd8c6a21063429521308840c8b66d32a031052338c42361d910eee1c3ec4b7be3400c5cd97a7f8aabd3f5ac81da0c8395a0850317a18f8494eeab20c8015e5d863b43587a7dd3a7efd41a921ff62de926dda09e6e76b343415cf3105ecbd67e99f004b31eb7123f3e3a614ad808557d78c34fa030915874eb78ae682f3d74a727227fa86b204fa367256fd4a50767ed4c35bebb80",
|
||||
"0xf90211a0122c3b5a88702fe6bc3d3464e903d0d1aababc35f259eac6b9111e5b753de6a0a0bf670757a4652ae24e5bd2fe9cacbdda79924bd6091330b950b1473dfec103f3a090ee1dba46441ba0126608d28b0023f0ae8401eda749e90d8550f2d3ca4ccf1ca0deb3887fd765e1c5db19b353dca2ece691dfc2f2c7c0a1c298635e3264d8a05ba06af91d067bdae7d64e34b2d654b08815fc43bdc4193482e9aa58e1fd852841e2a02518d875bdeea78fc832724ad33bbc66a654a1670c6bdf544a060941f90a31d1a0d7e69dfbfc026a105ec5ec68062c6affc1115ae3ad7a70e4ab854f9c914f2cfba0611e45cb73f473325c3d0ad494927e1d1053614c17cec3dd04161248305b3c9ca09767470f4299e3dbea4978fc989ca44abdef26602e3351cea0ef2885dc0e66baa060176e7f197f28205684e6b5ccbb83c5494ac86ef5483094fa3480728b11bf63a0c038f27c7e94887708465bf77ff37de506f5cb29e9a355d4b16d426e12f2bf59a080a4b6849ca41469ec77dba2d4d3ba0b0da9a36e5a6c0451e588a31af5981179a0b7fc37446eafbe040ba963a25e907af5a5d1c584d31198c12e28499a8377b249a0550e5984cd4ee2beb3b1d2af589e0a4954d8da7167896ac12985e1d781e3e98da098ea9d1574fc5431dd7342ea8467c5369ddee70b33ca37f30230e21d9a995d7da06cece45972cba1083ea30c7563c9639d398749575ec229e634f79e1ab637dd6c80",
|
||||
"0xf90211a0a3afae41153cd80f43b9b413b8fb57481fac6882c1f6097117cde8f8aaed059ea0730760d301e2b18a9cd4b3f777d91bfff8424bf64c05adceb8160532728cb699a074588c944add6aba03154d7bd8b543f149dd9629f46d8da52abc9e41be988a74a0b8ae67ef514d0dad520cdc9103c2702ad40a7b0c343aab9be74d72d568902540a0345dfe1d6b3fbb5c9d0aa731fb083d5db76b4dfe22d5b1a789c78a921589082ea0cc5c0989644c549f573ead05887340e201e92f7a5bd9cfe7b57e3bb46d47613ba0002ae2795f3286b54b45e25fd66ed6173ba4bbe56393f7f27407cd559a2d259ea018cce2547825efce8cf5e6fe14d88cb7899a1d8768dae861c0e263a06640e5e0a05a6a075ccc448ab78a34ed3ee7d56a1b179a046be98a1831db18f43637638d04a0fe2b2ac494af3af2c28198dc97bfd165288108e0d2eff941cc5d115461c799fda02d1de5eb58ae72173353aa94335766bb360eef79b6925fe5f254f0e3caa8941ba0b63901c2fd1c61292d32f049dd699bf39c4019b1ac7ab12907804a1633d288b8a0071290317e54993ff32e0ab04d28b920105eeadc917e44449c4ca2fd80adf9aba0fd86afbc5d8ac6357d6ba6f13f0d08737d1f95d49bc1ef1d19ddee3dbc4188ffa0b1cf7db5488cd60ae077821f0aec741b51f8e8c553eaeed4524159373aa98d7fa0c695f9be60487243c29023e469d7af9e37661e325a577247516475e51d6757de80",
|
||||
"0xf90211a090b58facddd3e83bdf8b1553a2c42b07fac5c1da069c73be25f30619088cb480a03867abbc8789869f4b7b5cc4799980299cc3012ec7fce70fb7dea2e5995a9a2ca00c3948797fbbfd4879bc72b5ec1eeba993bdbf4f8b39ae8f63c94cb2dfb89916a00796ca2b7894372e41a3331413a5e776eaaffad05ec03e240966e7ba8330f045a0713935c0c8cfc67afb8a35c948b4239710a5e7d61b5bf9d4e3d6e88e4e7aa28ca036caba99dee8e52ccd1ed12972e6c3ce4a28e160bd7542349338b692c27b5a51a02d1d87889d5e1c16690ac8b7ff3642f6814e42fe6cd6e00e108b759555f2cca0a0cc4be174afaf83b4b1d4fa64374817759956315fb684326fafeb238a41fb0ec8a09dabf40050d9ed69f994f8b82f14e037dec59c6a2a24a9879e184b546ea71448a0c77815db0d8d7eda3df1b8354ac007fd93f6190f20616e7b93259d89f1b0ac6ca09c105e9c25f2f480ef8a50c31bfdd0eef120741c9a1caa6f2278ab7fff0e4651a045ef65a0c419433050e6cc57892fac712cd3cb835da30f2f8cc249b872d6274ea0a457eef99c7beaf2b365cfac520db40b375a0707a0aa7bf234a04ec5746e7daea0e4d2b13f79715813fafb715534ed0d1474e044c7521694ae3bb1475e7d570f42a034143e125fb181ec980641ba63a9d19a005eca2081bf1e1e77572c172c8481cca04747c648752a28511842c2d63410bc6a554ca6d13aa3541edd6e7759ed62b2ac80",
|
||||
"0xf90211a02cf6e48c3852fd7b3a31e6922cb756425da526a164faa2b32f19b21187503ce3a093f0f615e47ec246a5cae41dd6236374287e3efaa9c17611bed4f2621f5ea7e5a0d6c55b3818c48f66570964ab6f184094948ea1d808d26a66a6d0e8195674d143a0ac7dc18dead02fbd3763e5d5fee4d2c032ea207df6bdc26900f0d10ff2c47f8fa0c037ea2e7608348529093c9b9fec3b32d8288bd0b6ac3ae242443f4bda8e9eefa028ead29005c86ca93d969b2963b3eed06ec81dbe7c7c3064d79c6aa033de3246a0f24e9a73c866d6e7f1d411e98da53c76020db588f4b214d44ad6e536d2b7f1e7a0207fd73036d92ceddc5da5c0504448c6c2704735bc6470d10193861e15530708a020f669676f97c6585f7cbe5e405c4f9a4964fad36fe4dd6aa13c6b80a60d901ba061b56b1bcd12005d252197b44f28f611d2cf4448ca57784a8f17ac2b23cfd519a0aad0bfda854bfaef052cc6659d84e69e4b0325e6b8fa394961694e2c3b758203a09da958cb8bc74373e66cf40708a152f31d2c6ac305fcd1af07a25e3e34801227a0edfef4c130b1198a28da1ae2fd66c33d2d1e98725424b9383dee7136360c7036a04c64086b040c6a3701a1b2bedead55797c95c5d635699e66950fcf9c6215ee02a00320a92427efbd2cbe8f70c7c74aa5db0c145b75148808a317a2ccab2cf437f9a0884d942adaa313a922d0883e8139fc6a92acf16e95d2c7d06b4e53a08fdab69280",
|
||||
"0xf90211a037049228c0254f0105b8f461536b772d38df8e4b8bd7f908be72982a86a35961a0d23d1b2a16afe975ac636a8720e5d9fe14dd999e47f5d9e43fe86b2907134705a086cf6044b7e6be2a9c312cf4bf438d464f111fc19fc0abf80c8ab31644bebd06a05bc25ec41da09b0c76b897525589bd03dc90b482ec59e6a1ff14102217f2cd6ea086e9e5952917cf0e054e0e00e0085d7d3bb6a704e55ec5739b6705e4e6539d9fa0148e465f1f1f6095bbcb2feafc49ffd5f604b7439f9b4ab0437f8cd7acf1adf6a0bd2bb1bb25bf43758ed57d63ead3a619cc3a94d47be1b84b4208b24f5b80094ca037ad5b50e846bb85482548cc5a99a03e1db02aadbf61f1380f61bd9ad7ac4704a0a0967620f115f194f7a0c16c7e13492646507ac7dd8553e97b7ebf416228e1f0a0f42e67ae7d57f618596858a5a7239a6039b0dc751d42dbf47bfad47a36a5a59da0efbe74b7c05b343f3e29d1fbfcaab58789c99cd301b87442363efa0a2c7a395ba0c8b4d32dce4b607dc21e9c4ac3ed9757640c760582cc1ffa4679c4dbc2b2e0bfa057addd95ffe7c0de9774f2e3790a52f262515fa6a2a65a9fb785451a6e3ad2f4a094d55a6f5ae979bfc6c6f59928f2850206c5af3caedf39386939a053a2c7b79ea075b8f0a832023c355b067f3786edbea9547211d8cf2dca5f89f9a413b9b525c0a0757df921602607a9115e97c1ca0e4acbf0a2d4ff3bc6e7ae2b151b88359f190c80",
|
||||
"0xf9017180a051c6427ab0bc0d3b0db47b82e69a31fec1670e8ffe2ec57356a512c82083a6a5a0dd0af4a616a626aea8529e07f9017ae356087c45c92ef851aedd845987cccc46a0457441ca9402fb91326638832a9a169e021608db12c58d0e7778c1b13add1afea0db1a3351b7f76cb3170ecc91fd0c687ad46378dd392944612f4c68bb9fbe1050a000c1cb0f8f7bd89d04fe5ed1da96fc769c67a27b3c822a8653397e7da6a04730a0b63f0c4914683ae30b031264fef21806ac7a1a32ccfd05c011ddd0202e06b275a0ac66fa130cd31b0e4b15b08965686162a3efb93e3a07ce45859b34e9a2b4112e80a0dfbd89ded3590a54e3b47e540457b06c754b7b0d22cab361a79adbde4e3d96c980a0cb72d7bbf7aab515231c32e9399359c91aff95accd474e39a091fa2b9e71259b80a04a2be13b00b2032cc0c7112be04907d8d0fa0968932abe8dfdda6c6bb07813a680a010d29a9d3186ad1e4ad1c518be391c44180ba8ce1db0f09a2c9ed23ea017733980",
|
||||
"0xf8669d33239d97e43f5062453663ffd198f40e6120b1057a77480a17b59f8d8cb846f8440180a07a5f002403d62f9d1ab5b4684459d1a2e5170075efb41f51f94fdb30b5e6d46aa073f5b0f762a0557ec4b135108e719884532887167fa14c0d6b7807943d70d96d",
|
||||
],
|
||||
storageProof: [
|
||||
"0xf90171a0b5a85440d5fc74ec55facadb9dbc0cbf35ae1eacdb841b17d6943721a7028fe680a073d52ce999835ee363c087004b4de88b619f66f3dc94d35be5e0b17869d7ece2a042bf377671e60c1d6aad75c93a25c72f0a0c7c2fdaf732b1ae508dc937ebc0be8080a0a32f55598dbc06e6742074f3ad6812f923f9a9f991e597763520cb939c5440df80a01a7798f0e3bd3bcf90d8150e03e9220c1547aa70037856b2961b5fa8dcaaf974a0fed5524862371f728f0e99114f0a09685044436cf34c22cfe4401ec4ec03ffcda06bf06cedf7b90669bac0f199b18bceca612452bb315f1386645bfbd52205a476a04d2c61e0aa8cffbb121715c333a6289570d450cd77f44d327212f404bdc932b6a0af144ae5e9f31fe6da35eac694185fdf07aefb9da7f4c652645bd7f0c7253e85a014f49d31860c00b7dcc901e44c39f3050b2e3f3b8013c0af887778813da9b97b80a0ce3ae4b74569ec95d0d116928f28245839a0c0629d2ec86081ee4896f9a2785880",
|
||||
"0xf8518080a08bafc792d182fe0cac5c7dfb236bbc88dfd0ecf5505b681d1c256d75aa6858fa808080a03315f891bf9433a5415e982ba0f5b3d4497a2a44cb9a958d0830fe301fecae4d80808080808080808080",
|
||||
"0xf843a0205a7bb8d6351c1cf70c95a316cc6a92839c986682d98bc35f958f4883f9d2a8a1a0639f404f0000000000031d02a5d2b33515ec000000000000072629ee1252f3a0",
|
||||
],
|
||||
},
|
||||
{
|
||||
block: 16212808,
|
||||
account: "0x55e617b7456abc2545cbb547e0102279a6c430c3",
|
||||
storage: "0xa6bcd7cfb5e938d75f4330a273812b53f809408efd4332627beb0285fa4a8732",
|
||||
expectedRoot: "0xcb31a10f1562c0c36bd4ceadaa95dd6234fdc02e8cb9357339e507e6b24584bf",
|
||||
expectedValue: "0x0000000000000000000000000000000000000000000000000000000000000001",
|
||||
accountProof: [
|
||||
"0xf90211a000cc1eab958a3f1de15398fa2b27750166c045e8eceb809b9d4501b4e02b7ddfa04da48723172332e782e1bf0e4fd8b1e5f98401d48596abd9a9705496167d7027a0dac4dba16da7ed6cfcca82063ccfc4f47b6fc53451538f7d0d1c7938f038940ea046d8e39429c81dd993f99d3b11ff4cda11bb8c8696b9d93472e71356b8041832a00b940d003e8cf026d1ada0202aa2abcae53098c85486eb7a534af6b3004f8443a090045c5a2ffd3bd39f80bdfa1d6e5fcf75105c81484ddfbc5dd4b4d73f6b66a6a0e4832a0fde78437c8fbedb522d2910e707bd1cf80f2e10b9c6040da0b05105d6a0a0e93dc2989f5868d5cbdd62ec70157cfc4b2064daff79586208e60d68221aa4a0fc7fb69a47e3d6b002c776b83f17fe0c9afbd922544e49bc69118d4912f814d4a0521e58e73a9dbcef30b0c02af1283b7907abadad2531687321fe8ab200027879a04d23897e8ec61a693cab8d2252fa69ac45731de79e47e40711c363b1e7062fcea0290185b97c0ee9f00882f0c50b493205d1a22128338dbb6bc7aa6770c7badb8fa090b9f84dc7ffe1e953274bd3076e5ac2c6faa2ffe8335a394eafad186f9491a3a07e8faa0688cfc77a7cf1011832d69385499ece50ca53ed08fefcd4aa07ce4409a0dfc42b6dac4d479a49a3d2d18ccbc5bd9f98f055294ad62cab84cbc6267f85d8a0b2f0f894b7cfd6834f2c9b58cd5941c8477961169ff6579dbf026a28a871ae6080",
|
||||
"0xf90211a0cc88a0d04fc4367ca945630ce6266a93178092ce38ebd0d3976125c80d9e638ca0110ac16b660ba8dfc12258f9ade49be6245e7d229c4b40ad947c2bfe584a5ddca0b7884a63fdd62c909b9325f29ba55203c8368eb48c420e950354387594c4667ea01079982e5d8a202ed036f8bdd6ddc48b2eaa7c3e8c6efe8ac3369b997ddf2179a0210f2400b4faac315689001a789a6db9313cd835cdcd31df96eb65c433b115e6a089bb0553166bcb16346053be885e361d2d67cab44f7b959aa9cd826dd45ac61aa03b87244622f87055fd869434afd48c8b3ad5a9607b747f6c0d2a5f294e362ff8a06fc34c186c23de6726ff12f731925d0c022b1e28099c6ef1fa3f19423e974396a074eb379a4d71fa80108c16d71c9c9987bfe618f2f9c46dc42db411aa42984b78a01c679db1c89150fdf004859b6a69bed2045f4b8731b221e4a07eecf35539eb5aa0e5d137fd960b8cbb20606bf9e7e9fafbde54c409d1c4613dd80a17de5b47bc5ca037f5902b0eefe2f4d3a498af280683cbb6e24a257548f49c8a5541269da4960aa03ea41d2be7cbacac4f8ca44bf03d0bcd589a01c10a0bffafb8fb1da5a70e3068a0bb980af567ae4b71f44121bc9041c5e32d971511629ab7a40609a36f45981ccea0ff394894d8deeda5f8548486bb5feb71b870a8a431c0841886a38de9914bf188a05afa6d271d39956efa0b6a37f849587fc9ffdd0b21b0f03a7475075f3db941be80",
|
||||
"0xf90211a0074eb00b4c1e2c7113935bee95ed4348415dc85b368967d4d5c4d76196af424ca0813772f3b53979306c3b6099563da22debe31524b46c489b304f5dd00e38790aa06c3522ff7176e7802c0565d7c0861d3b3d84bd4cdc335af7515cff8d08ca7fb7a0c768ae9e22fa57009ecc980b0550fdc1c0b1b4347a505cc4f2305681480cc792a06163dac89c2f3a035f43a559f71c94c2ef275974bb653c51db896c3326c460e1a06c56dbb2e85d467a81935889edc1e1fbda078896de4e9c4ffd33ff780137ce24a0e0da97eccfc2ad11d6489af5e6c646bbe8fae20c85fd1614e84589ac69b7b110a03eb954e27f07ead9cc4013f091b2b3fca8163d3ff052c6a4741d7b652161e4b8a0d1318e44803ed8fb732a840a9cd71eb2d0dadf601f8b8b77251b6de06776513da0a14970d414825f655862751df3bbd7fcbe9903adb663690cee115b4fe880a7c2a02cbead1eab47e6575d0d9d488311b4f16199fb8acabc3bac662c698d471649bba06acca947c81f7bf0b05c8218e615ed3642d1e812c3c696b76d19d9e95207ff89a0486a337786d2e2c7b3e963a9efb9216a79ee5cf61675b9aacc9cf3f35c403559a0e3743c73438b616f23323e8722d90afed16956be9d9763d35968619ef644d893a0437723f6d8ed5906cfc7f1254f80504c15f394db51148357f4c7ef0ad01833cfa032e215e323cfca2dbdbcf7056012ddadaaf9f8b9a4269f451ec28a19018fc76d80",
|
||||
"0xf90211a079d20a2fe4ae7cb0a24db95f0cbb8a32bee53ee9910a9fe8959cea9d6d584993a01a486e762c0b7f5e99f02596a5250acb7d6d54d2626b7ba6c23d97931cbe3296a07b56e82ab24b02849378030bdca3ae3bfa19af23363c731537da3b47dc64c299a0a3c947fac18c8907db5321a2cf28e6fa0205d074db959c12e25fd0c9c64f64a1a09f658920397018751aa15e1feec3ec81dfd19988e590b663c6efefde407e2a3da0e5dcbae4c0fc4c9a050afb5ab81b71a76cd201d9ad724894518aecb7ae557079a02eedf89c6286ad0d5a8f24ead4c300646452191765d86b7217b7c503c9d93bdfa006d8c0bbf530c40ed8bcd89dcdb4cba927d3363c4eeb3fd7452c82f986506630a0fe7850edfeabfb584ab06d0a1599d64ae60b143cfb3bd8bd1cecd8e918b0c7f2a0d09c9c7f8b8280ec01c8fabdc75a5abfb9600e5961b3f1a2e62313e81bac9d26a03c5944032432601c395a4a4c31c3feed3606881df80407e7c6e45a82455f85b2a02dea3f595f1b07648456f39b39a6b337d5903373c5c194da04ecdbde82b40b41a08cff9a982b8e0fff7d588cf0fa0116f039db921c13fae79a5f7e8333ad4d3a18a043dac2f48ff878530faf63341dbe6baef3faddfd049f7db4c183079c77916b17a05e15585f071142178813285614aa8abd4c65a95f516df528ae2d3c6ee08521d6a0b0dfa07c70efbcc57f2b6cb7f77a7a20fc9537c0d984a676ec9926e55f71c77880",
|
||||
"0xf90211a0fb721ad628030689ef65168ed001f566cd190e9c4d0219c02afdaeb004d4e214a051531d55c79e21f006c4741486cce4d8a6e613d761b3bde9ae2c8040a76b307ea02213a6ad7395c88dbdf89f0b29acf67c6aa1dc0e374cc6a7d9b6ec3d9fb5f373a0686884cfda50455fdff45f4170feee963de6d591770c269f4241e34563d70f37a0bb97314a2c0642f5abc066b8d53634887234dc37d3f9538124fcd27225b75733a01a296ff7ac3bd812e706959caa4748f04cc0729fdfb14388e244347b4a3cb685a0888eadb1b0d48cf03e73398f2aaef32bb4ac265f10f76c504db269250e88fd55a0b9a2aca21bcd60c94c2a80fdade417a56fabd041b6c0379159f02ff2fec8ab87a0db49371306dcf9e9d4ae471429c2bd4affe09cfc065792ba6410595f8beea2c7a0bba4db70589218f90ea48d1f870db783f8aae6cda9dbf72235e1284b97313dc7a023658240a8e60035480607bd2e2c780d0a305909fb06e9c6befe7701e78b596fa0d6f93b95c1fe2238e73c72e96cece7d21189e5c8e839bf0d42bf226a105c08ada08ccf4640a918ae9ea713e739e62f7b5c5bee3864b57c32e29065593cc8457171a0ad5dd98fe06ccf15db195de3aa8d071be23965c64839e10d1e9721cd64f26382a0e5a987a544eac0a3abc6ef9ddd12d8450b3e11f36d6239cd799595535ea0431ea054db8f7995108a2c8bf8976723682ce513241f75da97518991c6b77c050d398280",
|
||||
"0xf90211a00dffb8e3f1d162560bd7fa2851c9475cd30bf41c78017372b8f5d40360831308a03348160896064725af3bb64dcf86a8fab726fdf442007fd3825c689553a314f8a0794309daeb0692594a7bdd16e884ebd75d598db16cd1005a9176fce869b3580aa01150f6cb195cd24622f9440d2ea824b33601d7d7983db0d925d39daa1695c950a04b61b2cd4bf2ec97550c27656b784df083bac7653352920b3bb0404bb09f1971a040e09010e7c233361e66eefeba4c03cd551580bf394ed6fb6cb8f921184a9f95a05ba6ce8c14236c7b63aedcf3f85603846062c43d165c207cb5418cb7f06dfafca0fb016acbbd9d14aa6bdb9b01e802ffe495032e97d124c9a0b65d10d4e715f39ca039c0a367372d7a14a34ecdc997bf67bc72755b4c7270effee6d90824aa15b087a0c7ddc7fc8c0341c56fc7e6228469b25f7af926b3fd099530c6097a02c869c41fa0ad11f0c6cad46f32f1ce178820941063777e2871fa38d5adfbd91672479213afa03b11a8fbd61e8e0f3f18a7c3573ec7a792edd83d3a6b8efac10ebb8b157ac17ea0ab266aa84b02ef61bc71faf1d261e2bb90cc21d6cbdf951b122f596f5eb3d90ba069b80832c46cf88b0ca1e64054d87f2e33f27df1f322c70af89f4ec909313bcea075068d344e56b3eada6312fb613925c3c9de369b328ee666121dae6c052103c4a0c3b65e68859146733573b921e8fb036ca7c9434b0f52412ac0cd169950f95a5580",
|
||||
"0xf8d1a0db401bdef3bd74dec5338135194e69ab43e15aa891e5de20ef3e57cde5366ab5808080a0621c1fbbb026eddb70a4c645e152dcf9b3f1b40b9a1bdc4398a22bee4a46aca380a05528de70186525019cdf0880afb77b33ae4871fbfaad3e8bccd7dcb6402d746580808080a020fa8ae1091998f03c979f94e94ff6c011427da2834f1dffaec815fd3c5fa6e080a0611ff1f45d926197480694e690227d603e84e7c44b520473b9786cd4fafaf613a012dfcd444d4948c86a3dbad8f4f1dad09c313a63e6f8bf0ecb7bd799908aa3248080",
|
||||
"0xf8518080808080a0cf86ad50e7ed35be6080c4cd74d835e58867b2e2ec03198baf29962de46a8cbe808080808080a03a5ec92acf98ebef8eeb621707a501ed0fd95186282ab1dcf8e7286a9142b90480808080",
|
||||
"0xf86e9d2019df8705960e4a0a7ac52ab662c57cddd5f60a7f75f0c117ae2e073fb84ef84c0188067eab853ae20000a0724a8bd0aaa1c991a445a1e974deecd8cfe4ba2040de2578e98238b9f963ba8aa01717795a0fbfac056a8306e5cb0ac160c3ad752357e0360a408e59acd35ebb1c",
|
||||
],
|
||||
storageProof: [
|
||||
"0xf90211a06a128b938cf5a3be9f5c7a8944945258db0b7a939cab65a4bda8fc4a8a2bf16aa0c61e8e76eede0e8a446743dde629574cd69dfe612aa0d30c6c8cafdb7f445214a084c1c16c0f4fae18501251afbc28ef21caf9b2f1b5a8f2f0b6b87d076f44f7bfa04ace3470f520e28ebdfa4e98a5ef51af05f647a3d1585f0d98f3393098839f17a0d0628e1db39bef70e79ceb5860a14b34b78eca696ec7910f3bfc91631a0abd50a0b718050b33452d627f87f02ba8b05f976e7eeb2c81cdd445770eeeefba236fa9a0f0a8fb4ce1456839b267d76b94838113ea18600fefa3617733888b1ce7da7ef7a034dd7e5a07aff6c7d141c66b4aab81f3f31363a92d48a9bc1fe072b94d69bf63a049c1f246035c714f4d6e8d81e7a20aef93140d067012314b37489a66f4e19db4a070279280c8be3e03684124acd488d9611ea5dbf62512280eb352980ec8334436a04e7d88090f29162b58e6fdd44446f90c5bc1c39c377d7c757c6010e9a63c738ca05eaab99620fe77019cac5ea6854f3efda933ea60f1326ecd03a32494850556a4a06b4116e3177b3012c5e06ab564d1a0611140ee2b81d50c8fd8c5aef333296965a00874454cb37dd61c28f8bb7da5d905f5dacf0b813914099244b65f536561e22aa073f2018c86cfe905a5bb8f69b43395c949714183a829990e0e33630431af8f86a03f36076859c730c0f5851ea263b5650dab7235f3c8ce258f74a3ae3b7d38add780",
|
||||
"0xf90211a036c69f765a83b393b27f21eeb941b8a2965ec7d436b3965a5bc40953a32884c9a04fd94c2ff0df3a8453b14a36a55ee9a15096180c12feaddb7c904016e0250491a00b3973a34de7950e6eed8e413dacd2414ef22a90ff9fd322501301e159a2c081a0f6926e67b5dbe04b3991297ca0bd8f1fe63b1f193e16621c901ac81ad9c25a85a023776d17051b8899483fa00c050cc50eae159dbbd7b59a35290b6d6b272e07c9a00900c56dcf2ae9bc0d19ec918cbd7fe63e7afb8aa2d962d1d5cf5886c763a7dfa01e52f9000865a4df376396fac674f061a61603647923a3a577387c54e1b32826a02373c893c5feb4c772f345f6609f9f9a6032c068f2453aad191626ce6a2d625ca09fdcde9e12f55bd9b3bfb323f1c9a7488f573e0b01d829d6f0ee716e92f0f248a0353975fd758f23275ce22c485c939c781ba31aa8c6026688931ac61d0f0d8013a04f0352b630e3ae315c64d02f85c4cfc255524b445046426c3e67f6608a9689f7a0d3b728caecb48e019db5f0144aa081ce5954c8acadeadd3df36d25b6e24a7e0fa0758800b10d88e8b477fc17a2094b5a41aa69c37740305e1956ed558dd5dcd86ba09f51f4aeb641e8c068dc1370a71942792b4d30a572ad0c09eeff206f7dbe3355a0c5d5fa6fa22f56ac27c0f6538f94615e0e7bb49243d888ec6b0c86f61dc6922ca03b7be4e1038893b7cfeab5a172995ac07e1e90142cc566ddc2b613e3b2a08c3880",
|
||||
"0xf901d1a086ece613a3028576c5e26a4ff50a9c3311c3bb3ca3751b8e52d2667b18917f4f80a0c54874707f838e0a2abf666fd3c50f900c9c0c38e9a69b37551b1711acef7eb1a0b7e0dc7b68d45f0f52e17301906d038323861fdb60583c6f505f76d304c73ea5a061a5e1481d528ed55ed1dafcbbbecc99276220ccce4ce56f50a05853638a3c4da08db8ab11699112f1f4cebece052af297997fcd361f5ceb88db4a7168e0366cdda0b1c641b80c0e5642b33866b899afc25070277d24665b6d73bc542543592c6eb2a069ae6c87a4a8692ea804b51379521e3856a6a980a1f6143f19ffaaa397c1699fa005e0523d440c3fb4654841a3d8ccce6e5eec4cd5f145668c0e95e847c9c4c39fa06c86477d3592a33fea0e7e425cef6b79610cd32b3bd17fa1318a5e20c9feb02fa0fae05cf440cf5cbb96e83bdd1828f5a582aec03edbb87e5035fd08660f09691980a0d91b6dc415a8c148823a7f865963d2b527c6a93bf882cd29da46f9a9594b4c41a03537d0ab40aa8b56059d99680365cc017a26fdf155fd6f2a7788311723b80738a0e38ba0e1b4f98b4b9f1925ca952a6a9076eda1bad2e36dbc80bc5135372a3feca086f2109580fb4d1a26bb9101b88c407eb13fade29243d68b83764716dd450e3980",
|
||||
"0xe19f36c2516eb411c7c89f75dcf98d8ff95555585215a5f6242b4f24adbcb7424901",
|
||||
],
|
||||
},
|
||||
];

describe("PatriciaMerkleTrieVerifier", async () => {
let verifier: MockPatriciaMerkleTrieVerifier;

beforeEach(async () => {
const [deployer] = await ethers.getSigners();

const MockPatriciaMerkleTrieVerifier = await ethers.getContractFactory("MockPatriciaMerkleTrieVerifier", deployer);
verifier = await MockPatriciaMerkleTrieVerifier.deploy();
await verifier.deployed();
});

for (const test of testcases) {
it(`should succeed for block[${test.block}] account[${test.account}] storage[${test.storage}]`, async () => {
const proof = concat([
`0x0${test.accountProof.length.toString(16)}`,
...test.accountProof,
`0x0${test.storageProof.length.toString(16)}`,
...test.storageProof,
]);
const [root, value, gasUsed] = await verifier.verifyPatriciaProof(test.account, test.storage, proof);
expect(test.expectedRoot).to.eq(root);
expect(test.expectedValue).to.eq(value);
console.log("gas usage:", gasUsed.toString());
});
}

// @todo add tests with invalid inputs
});
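The fixture shape above (account, storage key, accountProof, storageProof) mirrors the response of the standard `eth_getProof` JSON-RPC call, so fresh testcases can be captured from an archive node that still serves the historical state. A minimal sketch for the first block, assuming the configured provider supports `eth_getProof`:

// Illustrative capture of the first fixture; field names follow the eth_getProof
// response shape (accountProof, storageProof[i].proof).
const resp = await ethers.provider.send("eth_getProof", [
  "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", // account
  ["0xb17c5049c06186507ed9d55e735dc0342e08579866e7ed881de010624b3896dd"], // storage slot
  ethers.utils.hexValue(16212738), // block number
]);
// resp.accountProof and resp.storageProof[0].proof are the node arrays stored in `testcases`.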
|
||||
@@ -7,6 +7,7 @@ import { console} from "forge-std/console.sol";
|
||||
import { ProxyAdmin } from "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol";
|
||||
import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";
|
||||
|
||||
import { L1BlockContainer } from "../../src/L2/predeploys/L1BlockContainer.sol";
|
||||
import { L2CustomERC20Gateway } from "../../src/L2/gateways/L2CustomERC20Gateway.sol";
|
||||
import { L2ERC1155Gateway } from "../../src/L2/gateways/L2ERC1155Gateway.sol";
|
||||
import { L2ERC721Gateway } from "../../src/L2/gateways/L2ERC721Gateway.sol";
|
||||
@@ -19,6 +20,7 @@ import { ScrollStandardERC20Factory } from "../../src/libraries/token/ScrollStan
|
||||
contract DeployL2BridgeContracts is Script {
|
||||
uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
|
||||
ProxyAdmin proxyAdmin;
|
||||
L2ScrollMessenger l2ScrollMessenger;
|
||||
|
||||
function run() external {
|
||||
vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
|
||||
@@ -31,13 +33,14 @@ contract DeployL2BridgeContracts is Script {
|
||||
deployL2CustomERC20Gateway();
|
||||
deployL2ERC721Gateway();
|
||||
deployL2ERC1155Gateway();
|
||||
deployL1BlockContainer();
|
||||
|
||||
vm.stopBroadcast();
|
||||
}
|
||||
|
||||
function deployL2ScrollMessenger() internal {
|
||||
address owner = vm.addr(L2_DEPLOYER_PRIVATE_KEY);
|
||||
L2ScrollMessenger l2ScrollMessenger = new L2ScrollMessenger(owner);
|
||||
l2ScrollMessenger = new L2ScrollMessenger(owner);
|
||||
|
||||
logAddress("L2_SCROLL_MESSENGER_ADDR", address(l2ScrollMessenger));
|
||||
}
|
||||
@@ -96,6 +99,14 @@ contract DeployL2BridgeContracts is Script {
|
||||
logAddress("L2_ERC1155_GATEWAY_PROXY_ADDR", address(proxy));
|
||||
}
|
||||
|
||||
function deployL1BlockContainer() internal {
|
||||
L1BlockContainer impl = new L1BlockContainer(address(l2ScrollMessenger.messageQueue()), address(l2ScrollMessenger));
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
|
||||
|
||||
logAddress("L1_BLOCK_CONTAINER_IMPLEMENTATION_ADDR", address(impl));
|
||||
logAddress("L1_BLOCK_CONTAINER_PROXY_ADDR", address(proxy));
|
||||
}
|
||||
|
||||
function logAddress(string memory name, address addr) internal {
|
||||
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
|
||||
}
|
||||
|
||||
@@ -54,7 +54,6 @@ contract InitializeL1BridgeContracts is Script {
|
||||
|
||||
// initialize ZKRollup
|
||||
ZKRollup(L1_ZK_ROLLUP_PROXY_ADDR).initialize(CHAIN_ID_L2);
|
||||
ZKRollup(L1_ZK_ROLLUP_PROXY_ADDR).updateMessenger(L1_SCROLL_MESSENGER_PROXY_ADDR);
|
||||
ZKRollup(L1_ZK_ROLLUP_PROXY_ADDR).updateOperator(L1_ROLLUP_OPERATOR_ADDR);
|
||||
|
||||
// initialize L1ScrollMessenger
|
||||
|
||||
@@ -3,10 +3,12 @@ pragma solidity ^0.8.10;
|
||||
|
||||
import { Script } from "forge-std/Script.sol";
|
||||
|
||||
import { L1BlockContainer } from "../../src/L2/predeploys/L1BlockContainer.sol";
|
||||
import { L2CustomERC20Gateway } from "../../src/L2/gateways/L2CustomERC20Gateway.sol";
|
||||
import { L2ERC1155Gateway } from "../../src/L2/gateways/L2ERC1155Gateway.sol";
|
||||
import { L2ERC721Gateway } from "../../src/L2/gateways/L2ERC721Gateway.sol";
|
||||
import { L2GatewayRouter } from "../../src/L2/gateways/L2GatewayRouter.sol";
|
||||
import { L2ScrollMessenger } from "../../src/L2/L2ScrollMessenger.sol";
|
||||
import { L2StandardERC20Gateway } from "../../src/L2/gateways/L2StandardERC20Gateway.sol";
|
||||
import { ScrollStandardERC20Factory } from "../../src/libraries/token/ScrollStandardERC20Factory.sol";
|
||||
|
||||
@@ -19,6 +21,7 @@ contract InitializeL2BridgeContracts is Script {
|
||||
address L1_ERC721_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ERC721_GATEWAY_PROXY_ADDR");
|
||||
address L1_ERC1155_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ERC1155_GATEWAY_PROXY_ADDR");
|
||||
|
||||
address L1_BLOCK_CONTAINER_PROXY_ADDR = vm.envAddress("L1_BLOCK_CONTAINER_PROXY_ADDR");
|
||||
address L2_SCROLL_MESSENGER_ADDR = vm.envAddress("L2_SCROLL_MESSENGER_ADDR");
|
||||
address L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
|
||||
address L2_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L2_GATEWAY_ROUTER_PROXY_ADDR");
|
||||
@@ -30,6 +33,11 @@ contract InitializeL2BridgeContracts is Script {
|
||||
function run() external {
|
||||
vm.startBroadcast(deployerPrivateKey);
|
||||
|
||||
// set block container for l2 scroll messenger
|
||||
L2ScrollMessenger(payable(L2_SCROLL_MESSENGER_ADDR)).setBlockContainer(L1_BLOCK_CONTAINER_PROXY_ADDR);
|
||||
|
||||
// @todo initialize L1BlockContainer
|
||||
|
||||
// initialize L2StandardERC20Gateway
|
||||
L2StandardERC20Gateway(L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR).initialize(
|
||||
L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR,
|
||||
|
||||
@@ -24,15 +24,6 @@ async function main() {
|
||||
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
|
||||
}
|
||||
|
||||
const L1ScrollMessengerAddress = addressFile.get("L1ScrollMessenger.proxy");
|
||||
// if ((await ZKRollup.messenger()) === constants.AddressZero) {
|
||||
{
|
||||
const tx = await ZKRollup.updateMessenger(L1ScrollMessengerAddress);
|
||||
console.log("updateMessenger ZKRollup, hash:", tx.hash);
|
||||
const receipt = await tx.wait();
|
||||
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
|
||||
}
|
||||
|
||||
const L1RollupOperatorAddress = process.env.L1_ROLLUP_OPERATOR_ADDR!;
|
||||
// if ((await ZKRollup.operator()) === constants.AddressZero)
|
||||
{
|
||||
|
||||
@@ -6,10 +6,8 @@ import { IScrollMessenger } from "../libraries/IScrollMessenger.sol";
|
||||
|
||||
interface IL1ScrollMessenger is IScrollMessenger {
|
||||
struct L2MessageProof {
|
||||
// @todo add more fields
|
||||
uint256 batchIndex;
|
||||
uint256 blockHeight;
|
||||
bytes merkleProof;
|
||||
bytes32 blockHash;
|
||||
bytes32[] messageRootProof;
|
||||
}
|
||||
|
||||
/**************************************** Mutated Functions ****************************************/
|
||||
|
||||
@@ -6,7 +6,9 @@ import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/O
|
||||
import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/security/PausableUpgradeable.sol";
|
||||
|
||||
import { IZKRollup } from "./rollup/IZKRollup.sol";
|
||||
import { L1MessageQueue } from "./rollup/L1MessageQueue.sol";
|
||||
import { IL1ScrollMessenger, IScrollMessenger } from "./IL1ScrollMessenger.sol";
|
||||
import { Version } from "../libraries/common/Version.sol";
|
||||
import { IGasOracle } from "../libraries/oracle/IGasOracle.sol";
|
||||
import { ScrollConstants } from "../libraries/ScrollConstants.sol";
|
||||
import { ScrollMessengerBase } from "../libraries/ScrollMessengerBase.sol";
|
||||
@@ -22,8 +24,10 @@ import { ZkTrieVerifier } from "../libraries/verifier/ZkTrieVerifier.sol";
|
||||
///
|
||||
/// @dev All deposited Ether (including `WETH` deposited through `L1WETHGateway`) will be locked in
|
||||
/// this contract.
|
||||
contract L1ScrollMessenger is OwnableUpgradeable, PausableUpgradeable, ScrollMessengerBase, IL1ScrollMessenger {
|
||||
/**************************************** Variables ****************************************/
|
||||
contract L1ScrollMessenger is Version, OwnableUpgradeable, PausableUpgradeable, ScrollMessengerBase, IL1ScrollMessenger {
|
||||
/*************
|
||||
* Variables *
|
||||
*************/
|
||||
|
||||
/// @notice Mapping from relay id to relay status.
|
||||
mapping(bytes32 => bool) public isMessageRelayed;
|
||||
@@ -37,6 +41,9 @@ contract L1ScrollMessenger is OwnableUpgradeable, PausableUpgradeable, ScrollMes
|
||||
/// @notice The address of Rollup contract.
|
||||
address public rollup;
|
||||
|
||||
/// @notice The address of L1MessageQueue contract.
|
||||
L1MessageQueue public messageQueue;
|
||||
|
||||
/**************************************** Constructor ****************************************/
|
||||
|
||||
function initialize(address _rollup) public initializer {
|
||||
@@ -44,7 +51,9 @@ contract L1ScrollMessenger is OwnableUpgradeable, PausableUpgradeable, ScrollMes
|
||||
PausableUpgradeable.__Pausable_init();
|
||||
ScrollMessengerBase._initialize();
|
||||
|
||||
messageQueue = new L1MessageQueue(address(this));
|
||||
rollup = _rollup;
|
||||
|
||||
// initialize to a nonzero value
|
||||
xDomainMessageSender = ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER;
|
||||
}
|
||||
@@ -70,7 +79,9 @@ contract L1ScrollMessenger is OwnableUpgradeable, PausableUpgradeable, ScrollMes
|
||||
_value = msg.value - _fee;
|
||||
}
|
||||
|
||||
uint256 _nonce = IZKRollup(rollup).appendMessage(msg.sender, _to, _value, _fee, _deadline, _message, _gasLimit);
|
||||
uint256 _nonce = messageQueue.nextMessageIndex();
|
||||
bytes32 _msghash = keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message));
|
||||
messageQueue.appendMessage(_msghash);
|
||||
|
||||
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
|
||||
}
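For reference, the packed hash appended to the queue here uses the same encoding the integration test reproduces off-chain with `concat`; in ethers it can also be computed directly with the packed-hash helper. A minimal sketch with placeholder values (`from`, `to`, `value`, `fee`, `deadline`, `nonce`, `message` are assumed to be in scope):

// TypeScript equivalent of keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message))
const msgHash = ethers.utils.solidityKeccak256(
  ["address", "address", "uint256", "uint256", "uint256", "uint256", "bytes"],
  [from, to, value, fee, deadline, nonce, message]
);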
|
||||
@@ -85,8 +96,8 @@ contract L1ScrollMessenger is OwnableUpgradeable, PausableUpgradeable, ScrollMes
|
||||
uint256 _nonce,
|
||||
bytes memory _message,
|
||||
L2MessageProof memory _proof
|
||||
) external override whenNotPaused onlyWhitelistedSender(msg.sender) {
|
||||
require(xDomainMessageSender == ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER, "already in execution");
|
||||
) external virtual override whenNotPaused {
|
||||
require(xDomainMessageSender == ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER, "Already in execution");
|
||||
|
||||
// solhint-disable-next-line not-rely-on-time
|
||||
// @note Disabled for now since we cannot generate the proof in time.
|
||||
@@ -96,16 +107,22 @@ contract L1ScrollMessenger is OwnableUpgradeable, PausableUpgradeable, ScrollMes
|
||||
|
||||
require(!isMessageExecuted[_msghash], "Message successfully executed");
|
||||
|
||||
// @todo check proof
|
||||
require(IZKRollup(rollup).isBlockFinalized(_proof.blockHeight), "invalid state proof");
|
||||
require(ZkTrieVerifier.verifyMerkleProof(_proof.merkleProof), "invalid proof");
|
||||
require(IZKRollup(rollup).isBlockFinalized(_proof.blockHash), "Block not finalized");
|
||||
|
||||
bytes32 _messageRoot = IZKRollup(rollup).getL2MessageRoot(_proof.blockHash);
|
||||
require(_messageRoot != bytes32(0), "Invalid L2 message root");
|
||||
|
||||
require(
|
||||
ZkTrieVerifier.verifyMerkleProof(_messageRoot, _msghash, _nonce, _proof.messageRootProof),
|
||||
"Invalid message proof"
|
||||
);
|
||||
|
||||
// @todo check `_to` address to avoid attack.
|
||||
|
||||
// @todo take fee and distribute to relayer later.
|
||||
|
||||
// @note This usually will never happen, just in case.
|
||||
require(_from != xDomainMessageSender, "invalid message sender");
|
||||
require(_from != xDomainMessageSender, "Invalid message sender");
|
||||
|
||||
xDomainMessageSender = _from;
|
||||
// solhint-disable-next-line avoid-low-level-calls
|
||||
@@ -142,40 +159,16 @@ contract L1ScrollMessenger is OwnableUpgradeable, PausableUpgradeable, ScrollMes
|
||||
|
||||
/// @inheritdoc IScrollMessenger
|
||||
function dropMessage(
|
||||
address _from,
|
||||
address _to,
|
||||
uint256 _value,
|
||||
uint256 _fee,
|
||||
uint256 _deadline,
|
||||
uint256 _nonce,
|
||||
bytes memory _message,
|
||||
uint256 _gasLimit
|
||||
address,
|
||||
address,
|
||||
uint256,
|
||||
uint256,
|
||||
uint256,
|
||||
uint256,
|
||||
bytes memory,
|
||||
uint256
|
||||
) external override whenNotPaused {
|
||||
// solhint-disable-next-line not-rely-on-time
|
||||
require(block.timestamp > _deadline, "message not expired");
|
||||
|
||||
// @todo The `queueIndex` is actually updated asynchronously; comparing against it directly is not good practice.
|
||||
address _rollup = rollup; // gas saving
|
||||
uint256 _queueIndex = IZKRollup(_rollup).getNextQueueIndex();
|
||||
require(_queueIndex <= _nonce, "message already executed");
|
||||
|
||||
bytes32 _expectedMessageHash = IZKRollup(_rollup).getMessageHashByIndex(_nonce);
|
||||
bytes32 _messageHash = keccak256(
|
||||
abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message, _gasLimit)
|
||||
);
|
||||
require(_messageHash == _expectedMessageHash, "message hash mismatched");
|
||||
|
||||
require(!isMessageDropped[_messageHash], "message already dropped");
|
||||
isMessageDropped[_messageHash] = true;
|
||||
|
||||
if (_from.code.length > 0) {
|
||||
// @todo call finalizeDropMessage of `_from`
|
||||
} else {
|
||||
// just do simple ether refund
|
||||
payable(_from).transfer(_value + _fee);
|
||||
}
|
||||
|
||||
emit MessageDropped(_messageHash);
|
||||
// @todo
|
||||
}
|
||||
|
||||
/**************************************** Restricted Functions ****************************************/
|
||||
|
||||
@@ -9,6 +9,7 @@ import { SafeERC20Upgradeable } from "@openzeppelin/contracts-upgradeable/token/
|
||||
import { IL1ERC20Gateway, L1ERC20Gateway } from "./L1ERC20Gateway.sol";
|
||||
import { IL1ScrollMessenger } from "../IL1ScrollMessenger.sol";
|
||||
import { IL2ERC20Gateway } from "../../L2/gateways/IL2ERC20Gateway.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
|
||||
/// @title L1CustomERC20Gateway
|
||||
@@ -16,7 +17,7 @@ import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/Scrol
|
||||
/// finalize withdraw the tokens from layer 2.
|
||||
/// @dev The deposited tokens are held in this gateway. On finalizing withdraw, the corresponding
|
||||
/// tokens will be transferred to the recipient directly.
|
||||
contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20Gateway {
|
||||
contract L1CustomERC20Gateway is Version, OwnableUpgradeable, ScrollGatewayBase, L1ERC20Gateway {
|
||||
using SafeERC20Upgradeable for IERC20Upgradeable;
|
||||
|
||||
/**************************************** Events ****************************************/
|
||||
|
||||
@@ -9,6 +9,7 @@ import { ERC1155HolderUpgradeable } from "@openzeppelin/contracts-upgradeable/to
|
||||
import { IL1ERC1155Gateway } from "./IL1ERC1155Gateway.sol";
|
||||
import { IL1ScrollMessenger } from "../IL1ScrollMessenger.sol";
|
||||
import { IL2ERC1155Gateway } from "../../L2/gateways/IL2ERC1155Gateway.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
|
||||
/// @title L1ERC1155Gateway
|
||||
@@ -19,7 +20,7 @@ import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/Scrol
|
||||
///
|
||||
/// This will be changed if we have more specific scenarios.
|
||||
// @todo Current implementation doesn't support calling from `L1GatewayRouter`.
|
||||
contract L1ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, ScrollGatewayBase, IL1ERC1155Gateway {
|
||||
contract L1ERC1155Gateway is Version, OwnableUpgradeable, ERC1155HolderUpgradeable, ScrollGatewayBase, IL1ERC1155Gateway {
|
||||
/**************************************** Events ****************************************/
|
||||
|
||||
/// @notice Emitted when token mapping for ERC1155 token is updated.
|
||||
|
||||
@@ -2,11 +2,13 @@
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
|
||||
import { IL1ERC20Gateway } from "./IL1ERC20Gateway.sol";
|
||||
|
||||
// solhint-disable no-empty-blocks
|
||||
|
||||
abstract contract L1ERC20Gateway is IL1ERC20Gateway {
|
||||
abstract contract L1ERC20Gateway is Version, IL1ERC20Gateway {
|
||||
/// @inheritdoc IL1ERC20Gateway
|
||||
function depositERC20(
|
||||
address _token,
|
||||
|
||||
@@ -9,6 +9,7 @@ import { ERC721HolderUpgradeable } from "@openzeppelin/contracts-upgradeable/tok
|
||||
import { IL1ERC721Gateway } from "./IL1ERC721Gateway.sol";
|
||||
import { IL1ScrollMessenger } from "../IL1ScrollMessenger.sol";
|
||||
import { IL2ERC721Gateway } from "../../L2/gateways/IL2ERC721Gateway.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
|
||||
/// @title L1ERC721Gateway
|
||||
@@ -19,7 +20,7 @@ import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/Scrol
|
||||
///
|
||||
/// This will be changed if we have more specific scenarios.
|
||||
// @todo Current implementation doesn't support calling from `L1GatewayRouter`.
|
||||
contract L1ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollGatewayBase, IL1ERC721Gateway {
|
||||
contract L1ERC721Gateway is Version, OwnableUpgradeable, ERC721HolderUpgradeable, ScrollGatewayBase, IL1ERC721Gateway {
|
||||
/**************************************** Events ****************************************/
|
||||
|
||||
/// @notice Emitted when token mapping for ERC721 token is updated.
|
||||
|
||||
@@ -8,6 +8,7 @@ import { IL1GatewayRouter } from "./IL1GatewayRouter.sol";
|
||||
import { IL1ERC20Gateway } from "./IL1ERC20Gateway.sol";
|
||||
import { IL1ScrollMessenger } from "../IL1ScrollMessenger.sol";
|
||||
import { IL2GatewayRouter } from "../../L2/gateways/IL2GatewayRouter.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { IScrollGateway } from "../../libraries/gateway/IScrollGateway.sol";
|
||||
import { ScrollGatewayBase } from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
|
||||
@@ -16,7 +17,7 @@ import { ScrollGatewayBase } from "../../libraries/gateway/ScrollGatewayBase.sol
|
||||
/// All deposited tokens are routed to corresponding gateways.
|
||||
/// @dev One can also use this contract to query L1/L2 token address mapping.
|
||||
/// In the future, ERC-721 and ERC-1155 tokens will be added to the router too.
|
||||
contract L1GatewayRouter is OwnableUpgradeable, ScrollGatewayBase, IL1GatewayRouter {
|
||||
contract L1GatewayRouter is Version, OwnableUpgradeable, ScrollGatewayBase, IL1GatewayRouter {
|
||||
/**************************************** Events ****************************************/
|
||||
|
||||
event SetDefaultERC20Gateway(address indexed _defaultERC20Gateway);
|
||||
|
||||
@@ -11,6 +11,7 @@ import { L1ERC20Gateway, IL1ERC20Gateway } from "./L1ERC20Gateway.sol";
|
||||
import { IL1ScrollMessenger } from "../IL1ScrollMessenger.sol";
|
||||
import { IERC20Metadata } from "../../interfaces/IERC20Metadata.sol";
|
||||
import { IL2ERC20Gateway } from "../../L2/gateways/IL2ERC20Gateway.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
|
||||
/// @title L1StandardERC20Gateway
|
||||
@@ -19,7 +20,7 @@ import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/Scrol
|
||||
/// @dev The deposited ERC20 tokens are held in this gateway. On finalizing withdraw, the corresponding
|
||||
/// token will be transferred to the recipient directly. Any ERC20 that requires non-standard functionality
|
||||
/// should use a separate gateway.
|
||||
contract L1StandardERC20Gateway is Initializable, ScrollGatewayBase, L1ERC20Gateway {
|
||||
contract L1StandardERC20Gateway is Version, Initializable, ScrollGatewayBase, L1ERC20Gateway {
|
||||
using SafeERC20 for IERC20;
|
||||
|
||||
/**************************************** Variables ****************************************/
|
||||
|
||||
@@ -10,6 +10,7 @@ import { L1ERC20Gateway, IL1ERC20Gateway } from "./L1ERC20Gateway.sol";
|
||||
import { IL1ScrollMessenger } from "../IL1ScrollMessenger.sol";
|
||||
import { IWETH } from "../../interfaces/IWETH.sol";
|
||||
import { IL2ERC20Gateway } from "../../L2/gateways/IL2ERC20Gateway.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
|
||||
/// @title L1WETHGateway
|
||||
@@ -19,7 +20,7 @@ import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/Scrol
|
||||
/// as Ether and then the Ether will be sent to the `L1ScrollMessenger` contract.
|
||||
/// On finalizing withdraw, the Ether will be transferred from `L1ScrollMessenger`, then
/// wrapped as WETH and finally transferred to the recipient.
|
||||
contract L1WETHGateway is Initializable, ScrollGatewayBase, L1ERC20Gateway {
|
||||
contract L1WETHGateway is Version, Initializable, ScrollGatewayBase, L1ERC20Gateway {
|
||||
using SafeERC20 for IERC20;
|
||||
|
||||
/**************************************** Variables ****************************************/
|
||||
|
||||
123
contracts/src/L1/rollup/EnforcedTransactionQueue.sol
Normal file
@@ -0,0 +1,123 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol";
|
||||
|
||||
import { IEnforcedTransactionQueue } from "./IEnforcedTransactionQueue.sol";
|
||||
|
||||
contract EnforcedTransactionQueue is OwnableUpgradeable, IEnforcedTransactionQueue {
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/*************
|
||||
* Constants *
|
||||
*************/
|
||||
|
||||
/// @dev The number of seconds a transaction can wait before it must be included in L2.
|
||||
uint256 private constant FORCE_INCLUSION_DELAY = 1 days;
|
||||
|
||||
/***********
|
||||
* Structs *
|
||||
***********/
|
||||
|
||||
struct Transaction {
|
||||
bytes32 hash;
|
||||
uint256 deadline;
|
||||
}
|
||||
|
||||
/*************
|
||||
* Variables *
|
||||
*************/
|
||||
|
||||
/// @notice The address of ZK Rollup contract.
|
||||
address public rollup;
|
||||
|
||||
/// @notice The list of enforced transactions.
|
||||
Transaction[] public transactionQueue;
|
||||
|
||||
/// @notice The index of the earliest unincluded enforced transaction.
|
||||
uint256 public nextUnincluedIndex;
|
||||
|
||||
/***************
|
||||
* Constructor *
|
||||
***************/
|
||||
|
||||
function initialize(address _rollup) external initializer {
|
||||
OwnableUpgradeable.__Ownable_init();
|
||||
|
||||
rollup = _rollup;
|
||||
}
|
||||
|
||||
/*************************
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @inheritdoc IEnforcedTransactionQueue
|
||||
function isTransactionExpired(
|
||||
uint256 _l2Timestamp,
|
||||
uint256 _index,
|
||||
bytes32 _transactionHash
|
||||
) external view override returns (bool) {
|
||||
return
|
||||
// transaction already included
|
||||
_index < nextUnincluedIndex ||
|
||||
// transaction hash mismatch
|
||||
transactionQueue[_index].hash != _transactionHash ||
|
||||
// transaction expired
|
||||
transactionQueue[_index].deadline < _l2Timestamp;
|
||||
}
|
||||
|
||||
/****************************
|
||||
* Public Mutated Functions *
|
||||
****************************/
|
||||
|
||||
/// @inheritdoc IEnforcedTransactionQueue
|
||||
function enqueueTransaction(bytes calldata _rawTx) external payable override {
|
||||
_validateRawTransaction(_rawTx);
|
||||
// @todo prevent spam attacks
|
||||
|
||||
bytes32 _hash = keccak256(_rawTx);
|
||||
transactionQueue.push(Transaction(_hash, block.timestamp + FORCE_INCLUSION_DELAY));
|
||||
|
||||
emit EnqueueTransaction(_hash, _rawTx);
|
||||
}
|
||||
|
||||
/// @inheritdoc IEnforcedTransactionQueue
|
||||
function includeTransaction(uint256 _nextIndex) external override {
|
||||
require(msg.sender == rollup, "sender not rollup");
|
||||
|
||||
uint256 _nextUnincluedIndex = nextUnincluedIndex;
|
||||
require(_nextIndex > _nextUnincluedIndex, "index too small");
|
||||
|
||||
nextUnincluedIndex = _nextIndex;
|
||||
|
||||
emit IncludeTransaction(_nextUnincluedIndex, _nextIndex);
|
||||
}
|
||||
|
||||
/************************
|
||||
* Restricted Functions *
|
||||
************************/
|
||||
|
||||
/// @notice Force include enforced transaction in L2 by owner
|
||||
/// @param _nextIndex The next unincluded transaction index.
|
||||
function forceIncludeTransaction(uint256 _nextIndex) external onlyOwner {
|
||||
uint256 _nextUnincluedIndex = nextUnincluedIndex;
|
||||
require(_nextIndex > _nextUnincluedIndex, "index too small");
|
||||
|
||||
nextUnincluedIndex = _nextIndex;
|
||||
|
||||
emit ForceIncludeTransaction(_nextUnincluedIndex, _nextIndex);
|
||||
}
|
||||
|
||||
/**********************
|
||||
* Internal Functions *
|
||||
**********************/
|
||||
|
||||
/// @dev Internal function to validate the transaction RLP encoding.
|
||||
/// @param _rawTx The RLP encoding of the enforced transaction.
|
||||
function _validateRawTransaction(bytes calldata _rawTx) internal view {
|
||||
// @todo finish logic
|
||||
}
|
||||
}
|
||||
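The flow intended by the new queue — a user enqueues a raw L2 transaction, the rollup later advances `nextUnincluedIndex`, and anyone can check whether a queued transaction was skipped past its deadline — can be sketched from the caller side as follows. This is only an illustration against the `IEnforcedTransactionQueue` interface added in this change set; the example contract, its names, and the import path are hypothetical and are not part of the diff.

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

import { IEnforcedTransactionQueue } from "./IEnforcedTransactionQueue.sol";

/// Hypothetical consumer, for illustration only.
contract EnforcedQueueCaller {
    IEnforcedTransactionQueue public immutable queue;

    constructor(IEnforcedTransactionQueue _queue) {
        queue = _queue;
    }

    /// A user submits the RLP encoding of an L2 transaction; the queue stores
    /// keccak256(_rawTx) with a deadline of block.timestamp + FORCE_INCLUSION_DELAY.
    function submit(bytes calldata _rawTx) external payable {
        queue.enqueueTransaction{ value: msg.value }(_rawTx);
    }

    /// Returns the result of the three checks in EnforcedTransactionQueue.isTransactionExpired:
    /// already included, hash mismatch, or deadline passed.
    function checkExpired(
        uint256 _l2Timestamp,
        uint256 _index,
        bytes calldata _rawTx
    ) external view returns (bool) {
        return queue.isTransactionExpired(_l2Timestamp, _index, keccak256(_rawTx));
    }
}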
50
contracts/src/L1/rollup/IEnforcedTransactionQueue.sol
Normal file
@@ -0,0 +1,50 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
interface IEnforcedTransactionQueue {
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when an enforced transaction is appended.
|
||||
/// @param transactionHash The hash of the appended transaction.
|
||||
/// @param rawTx The RLP encoding of the transaction to be submitted to L2.
|
||||
event EnqueueTransaction(bytes32 indexed transactionHash, bytes rawTx);
|
||||
|
||||
/// @notice Emitted when some transactions are included in L2.
|
||||
/// @param fromIndex The start index of `transactionQueue`, inclusive.
|
||||
/// @param toIndex The end index of `transactionQueue`, not inclusive.
|
||||
event IncludeTransaction(uint256 fromIndex, uint256 toIndex);
|
||||
|
||||
/// @notice Emitted when some transactions are included in L2 by owner.
|
||||
/// @param fromIndex The start index of `transactionQueue`, inclusive.
|
||||
/// @param toIndex The end index of `transactionQueue`, not inclusive.
|
||||
event ForceIncludeTransaction(uint256 fromIndex, uint256 toIndex);
|
||||
|
||||
/*************************
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @notice Return whether the transaction is expired.
|
||||
/// @param l2Timestamp The L2 block timestamp of the transaction.
|
||||
/// @param index The index of the transaction in `transactionQueue`.
|
||||
/// @param transactionHash The hash of the transaction.
|
||||
function isTransactionExpired(
|
||||
uint256 l2Timestamp,
|
||||
uint256 index,
|
||||
bytes32 transactionHash
|
||||
) external view returns (bool);
|
||||
|
||||
/****************************
|
||||
* Public Mutated Functions *
|
||||
****************************/
|
||||
|
||||
/// @notice Enqueue an enforced transaction.
/// @param rawTx The RLP encoding of the enforced transaction.
|
||||
function enqueueTransaction(bytes calldata rawTx) external payable;
|
||||
|
||||
/// @notice Include enforced transaction in L2 by ZK Rollup contract.
|
||||
/// @param nextIndex The next unincluded transaction index.
|
||||
function includeTransaction(uint256 nextIndex) external;
|
||||
}
|
||||
33
contracts/src/L1/rollup/IL1MessageQueue.sol
Normal file
@@ -0,0 +1,33 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
interface IL1MessageQueue {
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when an L1 to L2 message is appended.
|
||||
/// @param msgHash The hash of the appended message.
|
||||
event AppendMessage(bytes32 indexed msgHash);
|
||||
|
||||
/*************************
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @notice Return the index of next appended message.
|
||||
/// @dev Also the total number of appended messages.
|
||||
function nextMessageIndex() external view returns (uint256);
|
||||
|
||||
/// @notice Check whether the message with hash `_msgHash` exists.
|
||||
/// @param _msgHash The hash of the message to check.
|
||||
function hasMessage(bytes32 _msgHash) external view returns (bool);
|
||||
|
||||
/****************************
|
||||
* Public Mutated Functions *
|
||||
****************************/
|
||||
|
||||
/// @notice Append an L1 to L2 message into this contract.
|
||||
/// @param _msgHash The hash of the appended message.
|
||||
function appendMessage(bytes32 _msgHash) external;
|
||||
}
|
||||
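Both messengers in this diff derive the hash appended to the queue as `keccak256(abi.encodePacked(sender, to, value, fee, deadline, nonce, message))`, where `nonce` is `messageQueue.nextMessageIndex()` at send time. A minimal stand-alone restatement of that encoding; the library name is made up, while the packing itself is taken verbatim from the messenger code in this change set.

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

/// Illustrative helper only: reproduces the message hash that
/// L1ScrollMessenger/L2ScrollMessenger compute before calling appendMessage.
library ScrollMessageHash {
    function compute(
        address _sender,
        address _to,
        uint256 _value,
        uint256 _fee,
        uint256 _deadline,
        uint256 _nonce,
        bytes memory _message
    ) internal pure returns (bytes32) {
        return keccak256(abi.encodePacked(_sender, _to, _value, _fee, _deadline, _nonce, _message));
    }
}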
@@ -3,7 +3,9 @@
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
interface IZKRollup {
|
||||
/**************************************** Events ****************************************/
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when a new batch is committed.
|
||||
/// @param _batchHash The hash of the batch
|
||||
@@ -21,9 +23,12 @@ interface IZKRollup {
|
||||
/// @param _parentHash The hash of parent batch
|
||||
event FinalizeBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
|
||||
|
||||
/***********
|
||||
* Structs *
|
||||
***********/
|
||||
|
||||
/// @dev The transaction struct
|
||||
struct Layer2Transaction {
|
||||
address caller;
|
||||
uint64 nonce;
|
||||
address target;
|
||||
uint64 gas;
|
||||
@@ -47,6 +52,7 @@ interface IZKRollup {
|
||||
uint64 timestamp;
|
||||
bytes extraData;
|
||||
Layer2Transaction[] txs;
|
||||
bytes32 messageRoot;
|
||||
}
|
||||
|
||||
/// @dev The batch struct, the batch hash is always the last block hash of `blocks`.
|
||||
@@ -57,7 +63,9 @@ interface IZKRollup {
|
||||
Layer2BlockHeader[] blocks;
|
||||
}
|
||||
|
||||
/**************************************** View Functions ****************************************/
|
||||
/*************************
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @notice Return whether the block is finalized by block hash.
|
||||
/// @param blockHash The hash of the block to query.
|
||||
@@ -67,41 +75,17 @@ interface IZKRollup {
|
||||
/// @param blockHeight The height of the block to query.
|
||||
function isBlockFinalized(uint256 blockHeight) external view returns (bool);
|
||||
|
||||
/// @notice Return the message hash by index.
|
||||
/// @param _index The index to query.
|
||||
function getMessageHashByIndex(uint256 _index) external view returns (bytes32);
|
||||
|
||||
/// @notice Return the index of the first queue element not yet executed.
|
||||
function getNextQueueIndex() external view returns (uint256);
|
||||
|
||||
/// @notice Return the layer 2 block gas limit.
|
||||
/// @param _blockNumber The block number to query
|
||||
function layer2GasLimit(uint256 _blockNumber) external view returns (uint256);
|
||||
|
||||
/// @notice Verify a state proof for message relay.
|
||||
/// @dev add more fields.
|
||||
function verifyMessageStateProof(uint256 _batchIndex, uint256 _blockHeight) external view returns (bool);
|
||||
/// @notice Return the merkle root of L2 message tree.
|
||||
/// @param blockHash The hash of the block to query.
|
||||
function getL2MessageRoot(bytes32 blockHash) external view returns (bytes32);
|
||||
|
||||
/**************************************** Mutated Functions ****************************************/
|
||||
|
||||
/// @notice Append a cross chain message to message queue.
|
||||
/// @dev This function should only be called by L1ScrollMessenger for safety.
|
||||
/// @param _sender The address of message sender in layer 1.
|
||||
/// @param _target The address of message recipient in layer 2.
|
||||
/// @param _value The amount of ether sent to recipient in layer 2.
|
||||
/// @param _fee The amount of ether paid to relayer in layer 2.
|
||||
/// @param _deadline The deadline of the message.
|
||||
/// @param _message The content of the message.
|
||||
/// @param _gasLimit Unused, but included for potential forward compatibility considerations.
|
||||
function appendMessage(
|
||||
address _sender,
|
||||
address _target,
|
||||
uint256 _value,
|
||||
uint256 _fee,
|
||||
uint256 _deadline,
|
||||
bytes memory _message,
|
||||
uint256 _gasLimit
|
||||
) external returns (uint256);
|
||||
/****************************
|
||||
* Public Mutated Functions *
|
||||
****************************/
|
||||
|
||||
/// @notice commit a batch in layer 1
|
||||
/// @dev store in a more compacted form later.
|
||||
|
||||
62
contracts/src/L1/rollup/L1MessageQueue.sol
Normal file
@@ -0,0 +1,62 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { IL1MessageQueue } from "./IL1MessageQueue.sol";
|
||||
|
||||
/// @title L1MessageQueue
|
||||
/// @notice This contract will hold all L1 to L2 messages.
|
||||
/// Each appended message is assigned a unique, increasing `uint256` index denoting the message nonce.
|
||||
contract L1MessageQueue is Version, IL1MessageQueue {
|
||||
/*************
|
||||
* Constants *
|
||||
*************/
|
||||
|
||||
/// @notice The address of L1ScrollMessenger contract.
|
||||
address public immutable messenger;
|
||||
|
||||
/*************
|
||||
* Variables *
|
||||
*************/
|
||||
|
||||
/// @inheritdoc IL1MessageQueue
|
||||
uint256 public override nextMessageIndex;
|
||||
|
||||
/// @notice Mapping from message hash to message existence.
|
||||
mapping(bytes32 => bool) private isMessageSent;
|
||||
|
||||
/***************
|
||||
* Constructor *
|
||||
***************/
|
||||
|
||||
constructor(address _messenger) {
|
||||
messenger = _messenger;
|
||||
}
|
||||
|
||||
/*************************
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @inheritdoc IL1MessageQueue
|
||||
function hasMessage(bytes32 _msgHash) external view returns (bool) {
|
||||
return isMessageSent[_msgHash];
|
||||
}
|
||||
|
||||
/****************************
|
||||
* Public Mutated Functions *
|
||||
****************************/
|
||||
|
||||
/// @inheritdoc IL1MessageQueue
|
||||
function appendMessage(bytes32 _msgHash) external {
|
||||
require(msg.sender == messenger, "Only callable by the L1ScrollMessenger");
|
||||
|
||||
require(!isMessageSent[_msgHash], "Message is already appended.");
|
||||
isMessageSent[_msgHash] = true;
|
||||
emit AppendMessage(_msgHash);
|
||||
|
||||
unchecked {
|
||||
nextMessageIndex += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
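`L1BlockContainer.verifyMessageInclusionStatus` (later in this diff) proves `isMessageSent[_msgHash]` against an L1 state root by recomputing the mapping's storage slot in assembly, assuming the mapping occupies slot 1 of this contract. The equivalent high-level computation is shown below only to make that assembly readable; the library and function names are illustrative, not part of the change set.

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

/// Illustrative helper only: for a mapping(bytes32 => bool) declared in storage slot 1,
/// Solidity stores mapping[key] at keccak256(abi.encode(key, uint256(1))),
/// which is exactly what the assembly in L1BlockContainer reproduces with
/// mstore(0x00, _msgHash); mstore(0x20, 1); keccak256(0x00, 0x40).
library L1MessageQueueSlots {
    function isMessageSentSlot(bytes32 _msgHash) internal pure returns (bytes32) {
        return keccak256(abi.encode(_msgHash, uint256(1)));
    }
}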
@@ -5,6 +5,7 @@ pragma solidity ^0.8.0;
|
||||
import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol";
|
||||
|
||||
import { IZKRollup } from "./IZKRollup.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { RollupVerifier } from "../../libraries/verifier/RollupVerifier.sol";
|
||||
|
||||
// solhint-disable reason-string
|
||||
@@ -16,26 +17,26 @@ import { RollupVerifier } from "../../libraries/verifier/RollupVerifier.sol";
|
||||
/// 2. the block tree generated by layer 2 and its status.
|
||||
///
|
||||
/// @dev The message queue is not used yet; the offline relayer only uses events in `L1ScrollMessenger`.
|
||||
contract ZKRollup is OwnableUpgradeable, IZKRollup {
|
||||
/**************************************** Events ****************************************/
|
||||
contract ZKRollup is Version, OwnableUpgradeable, IZKRollup {
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when owner updates address of operator
|
||||
/// @param _oldOperator The address of old operator.
|
||||
/// @param _newOperator The address of new operator.
|
||||
event UpdateOperator(address _oldOperator, address _newOperator);
|
||||
|
||||
/// @notice Emitted when owner updates address of messenger
|
||||
/// @param _oldMesssenger The address of old messenger contract.
|
||||
/// @param _newMesssenger The address of new messenger contract.
|
||||
event UpdateMesssenger(address _oldMesssenger, address _newMesssenger);
|
||||
|
||||
/**************************************** Variables ****************************************/
|
||||
/***********
|
||||
* Structs *
|
||||
***********/
|
||||
|
||||
struct Layer2BlockStored {
|
||||
bytes32 parentHash;
|
||||
bytes32 transactionRoot;
|
||||
uint64 blockHeight;
|
||||
uint64 batchIndex;
|
||||
bytes32 messageRoot;
|
||||
}
|
||||
|
||||
struct Layer2BatchStored {
|
||||
@@ -45,21 +46,17 @@ contract ZKRollup is OwnableUpgradeable, IZKRollup {
|
||||
bool verified;
|
||||
}
|
||||
|
||||
/*************
|
||||
* Variables *
|
||||
*************/
|
||||
|
||||
/// @notice The chain id of the corresponding layer 2 chain.
|
||||
uint256 public layer2ChainId;
|
||||
|
||||
/// @notice The address of L1ScrollMessenger.
|
||||
address public messenger;
|
||||
|
||||
/// @notice The address of operator.
|
||||
address public operator;
|
||||
|
||||
/// @dev The index of the first queue element not yet executed.
|
||||
/// The operator should update this variable when a new block is committed.
|
||||
uint256 private nextQueueIndex;
|
||||
|
||||
/// @dev The list of appended message hash.
|
||||
bytes32[] private messageQueue;
|
||||
// @todo change to ring buffer to save gas usage.
|
||||
|
||||
/// @notice The latest finalized batch id.
|
||||
bytes32 public lastFinalizedBatchID;
|
||||
@@ -73,21 +70,33 @@ contract ZKRollup is OwnableUpgradeable, IZKRollup {
|
||||
/// @notice Mapping from batch index to finalized batch id.
|
||||
mapping(uint256 => bytes32) public finalizedBatches;
|
||||
|
||||
/// @notice The address of EnforcedTransactionQueue.
|
||||
address public enforcedTransactionQueue;
|
||||
|
||||
/**********************
|
||||
* Function Modifiers *
|
||||
**********************/
|
||||
|
||||
modifier OnlyOperator() {
|
||||
// @todo In decentralized mode, this should only be callable by a list of validators.
|
||||
require(msg.sender == operator, "caller not operator");
|
||||
_;
|
||||
}
|
||||
|
||||
/**************************************** Constructor ****************************************/
|
||||
/***************
|
||||
* Constructor *
|
||||
***************/
|
||||
|
||||
function initialize(uint256 _chainId) public initializer {
|
||||
function initialize(uint256 _chainId, address _enforcedTransactionQueue) public initializer {
|
||||
OwnableUpgradeable.__Ownable_init();
|
||||
|
||||
layer2ChainId = _chainId;
|
||||
enforcedTransactionQueue = _enforcedTransactionQueue;
|
||||
}
|
||||
|
||||
/**************************************** View Functions ****************************************/
|
||||
/*************************
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @inheritdoc IZKRollup
|
||||
function isBlockFinalized(bytes32 _blockHash) external view returns (bool) {
|
||||
@@ -107,21 +116,6 @@ contract ZKRollup is OwnableUpgradeable, IZKRollup {
|
||||
return _blockHeight <= _maxHeight;
|
||||
}
|
||||
|
||||
/// @inheritdoc IZKRollup
|
||||
function getMessageHashByIndex(uint256 _index) external view returns (bytes32) {
|
||||
return messageQueue[_index];
|
||||
}
|
||||
|
||||
/// @inheritdoc IZKRollup
|
||||
function getNextQueueIndex() external view returns (uint256) {
|
||||
return nextQueueIndex;
|
||||
}
|
||||
|
||||
/// @notice Return the total number of appended messages.
|
||||
function getQeueuLength() external view returns (uint256) {
|
||||
return messageQueue.length;
|
||||
}
|
||||
|
||||
/// @inheritdoc IZKRollup
|
||||
function layer2GasLimit(uint256) public view virtual returns (uint256) {
|
||||
// hardcode for now
|
||||
@@ -129,47 +123,16 @@ contract ZKRollup is OwnableUpgradeable, IZKRollup {
|
||||
}
|
||||
|
||||
/// @inheritdoc IZKRollup
|
||||
function verifyMessageStateProof(uint256 _batchIndex, uint256 _blockHeight) external view returns (bool) {
|
||||
bytes32 _batchId = finalizedBatches[_batchIndex];
|
||||
// check if batch is verified
|
||||
if (_batchId == bytes32(0)) return false;
|
||||
|
||||
uint256 _maxBlockHeightInBatch = blocks[batches[_batchId].batchHash].blockHeight;
|
||||
// check block height is in batch range.
|
||||
if (_maxBlockHeightInBatch == 0) return _blockHeight == 0;
|
||||
else {
|
||||
uint256 _minBlockHeightInBatch = blocks[batches[_batchId].parentHash].blockHeight + 1;
|
||||
return _minBlockHeightInBatch <= _blockHeight && _blockHeight <= _maxBlockHeightInBatch;
|
||||
}
|
||||
function getL2MessageRoot(bytes32 _blockHash) external view returns (bytes32) {
|
||||
return blocks[_blockHash].messageRoot;
|
||||
}
|
||||
|
||||
/**************************************** Mutated Functions ****************************************/
|
||||
|
||||
/// @inheritdoc IZKRollup
|
||||
function appendMessage(
|
||||
address _sender,
|
||||
address _target,
|
||||
uint256 _value,
|
||||
uint256 _fee,
|
||||
uint256 _deadline,
|
||||
bytes memory _message,
|
||||
uint256 _gasLimit
|
||||
) external override returns (uint256) {
|
||||
// currently only the messenger may call
|
||||
require(msg.sender == messenger, "caller not messenger");
|
||||
uint256 _nonce = messageQueue.length;
|
||||
|
||||
// @todo may change it later
|
||||
bytes32 _messageHash = keccak256(
|
||||
abi.encodePacked(_sender, _target, _value, _fee, _deadline, _nonce, _message, _gasLimit)
|
||||
);
|
||||
messageQueue.push(_messageHash);
|
||||
|
||||
return _nonce;
|
||||
}
|
||||
/****************************
|
||||
* Public Mutated Functions *
|
||||
****************************/
|
||||
|
||||
/// @notice Import layer 2 genesis block
|
||||
function importGenesisBlock(Layer2BlockHeader memory _genesis) external onlyOwner {
|
||||
function importGenesisBlock(Layer2BlockHeader memory _genesis) external {
|
||||
require(lastFinalizedBatchID == bytes32(0), "Genesis block imported");
|
||||
require(_genesis.blockHash != bytes32(0), "Block hash is zero");
|
||||
require(_genesis.blockHeight == 0, "Block is not genesis");
|
||||
@@ -231,6 +194,7 @@ contract ZKRollup is OwnableUpgradeable, IZKRollup {
|
||||
Layer2BlockHeader memory _block = _batch.blocks[i];
|
||||
Layer2BlockStored storage _blockStored = blocks[_block.blockHash];
|
||||
_blockStored.parentHash = _block.parentHash;
|
||||
// @todo check the status of EnforcedTransactionQueue
|
||||
_blockStored.transactionRoot = _computeTransactionRoot(_block.txs);
|
||||
_blockStored.blockHeight = _block.blockHeight;
|
||||
_blockStored.batchIndex = _batch.batchIndex;
|
||||
@@ -298,7 +262,9 @@ contract ZKRollup is OwnableUpgradeable, IZKRollup {
|
||||
emit FinalizeBatch(_batchId, _batch.batchHash, _batchIndex, _batch.parentHash);
|
||||
}
|
||||
|
||||
/**************************************** Restricted Functions ****************************************/
|
||||
/************************
|
||||
* Restricted Functions *
|
||||
************************/
|
||||
|
||||
/// @notice Update the address of operator.
|
||||
/// @dev This function can only be called by the contract owner.
|
||||
@@ -312,19 +278,9 @@ contract ZKRollup is OwnableUpgradeable, IZKRollup {
|
||||
emit UpdateOperator(_oldOperator, _newOperator);
|
||||
}
|
||||
|
||||
/// @notice Update the address of messenger.
|
||||
/// @dev This function can only be called by the contract owner.
|
||||
/// @param _newMessenger The new messenger address to update.
|
||||
function updateMessenger(address _newMessenger) external onlyOwner {
|
||||
address _oldMessenger = messenger;
|
||||
require(_oldMessenger != _newMessenger, "change to same messenger");
|
||||
|
||||
messenger = _newMessenger;
|
||||
|
||||
emit UpdateMesssenger(_oldMessenger, _newMessenger);
|
||||
}
|
||||
|
||||
/**************************************** Internal Functions ****************************************/
|
||||
/**********************
|
||||
* Internal Functions *
|
||||
**********************/
|
||||
|
||||
function _verifyBlockHash(Layer2BlockHeader memory) internal pure returns (bool) {
|
||||
// @todo finish logic after more discussions
|
||||
@@ -353,7 +309,6 @@ contract ZKRollup is OwnableUpgradeable, IZKRollup {
|
||||
// @todo use rlp
|
||||
_hashes[i] = keccak256(
|
||||
abi.encode(
|
||||
_txn[i].caller,
|
||||
_txn[i].nonce,
|
||||
_txn[i].target,
|
||||
_txn[i].gas,
|
||||
|
||||
@@ -5,6 +5,15 @@ pragma solidity ^0.8.0;
|
||||
import { IScrollMessenger } from "../libraries/IScrollMessenger.sol";
|
||||
|
||||
interface IL2ScrollMessenger is IScrollMessenger {
|
||||
/***********
|
||||
* Structs *
|
||||
***********/
|
||||
|
||||
struct L1MessageProof {
|
||||
bytes32 blockHash;
|
||||
bytes stateRootProof;
|
||||
}
|
||||
|
||||
/**************************************** Mutate Functions ****************************************/
|
||||
|
||||
/// @notice execute L1 => L2 message
|
||||
@@ -16,13 +25,15 @@ interface IL2ScrollMessenger is IScrollMessenger {
|
||||
/// @param _deadline The deadline of the message.
|
||||
/// @param _nonce The nonce of the message to avoid replay attack.
|
||||
/// @param _message The content of the message.
|
||||
function relayMessage(
|
||||
/// @param _proof The message proof.
|
||||
function relayMessageWithProof(
|
||||
address _from,
|
||||
address _to,
|
||||
uint256 _value,
|
||||
uint256 _fee,
|
||||
uint256 _deadline,
|
||||
uint256 _nonce,
|
||||
bytes memory _message
|
||||
bytes memory _message,
|
||||
L1MessageProof calldata _proof
|
||||
) external;
|
||||
}
|
||||
|
||||
@@ -3,8 +3,10 @@
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import { IL2ScrollMessenger, IScrollMessenger } from "./IL2ScrollMessenger.sol";
|
||||
import { L2ToL1MessagePasser } from "./predeploys/L2ToL1MessagePasser.sol";
|
||||
import { L2MessageQueue } from "./predeploys/L2MessageQueue.sol";
|
||||
import { IL1BlockContainer } from "./predeploys/IL1BlockContainer.sol";
|
||||
import { OwnableBase } from "../libraries/common/OwnableBase.sol";
|
||||
import { Version } from "../libraries/common/Version.sol";
|
||||
import { IGasOracle } from "../libraries/oracle/IGasOracle.sol";
|
||||
import { ScrollConstants } from "../libraries/ScrollConstants.sol";
|
||||
import { ScrollMessengerBase } from "../libraries/ScrollMessengerBase.sol";
|
||||
@@ -18,7 +20,7 @@ import { ScrollMessengerBase } from "../libraries/ScrollMessengerBase.sol";
|
||||
///
|
||||
/// @dev It should be a predeployed contract in layer 2 and should hold an infinite amount
/// of Ether (specifically, `uint256(-1)`), which can be initialized in the genesis block.
|
||||
contract L2ScrollMessenger is ScrollMessengerBase, OwnableBase, IL2ScrollMessenger {
|
||||
contract L2ScrollMessenger is Version, ScrollMessengerBase, OwnableBase, IL2ScrollMessenger {
|
||||
/**************************************** Variables ****************************************/
|
||||
|
||||
/// @notice Mapping from relay id to relay status.
|
||||
@@ -27,20 +29,26 @@ contract L2ScrollMessenger is ScrollMessengerBase, OwnableBase, IL2ScrollMesseng
|
||||
/// @notice Mapping from message hash to execution status.
|
||||
mapping(bytes32 => bool) public isMessageExecuted;
|
||||
|
||||
/// @notice Message nonce, used to avoid replay attacks.
|
||||
uint256 public messageNonce;
|
||||
|
||||
/// @notice Contract to store the sent message.
|
||||
L2ToL1MessagePasser public messagePasser;
|
||||
L2MessageQueue public messageQueue;
|
||||
|
||||
/// @notice The contract contains the list of L1 blocks.
|
||||
IL1BlockContainer public blockContainer;
|
||||
|
||||
constructor(address _owner) {
|
||||
ScrollMessengerBase._initialize();
|
||||
owner = _owner;
|
||||
_transferOwnership(_owner);
|
||||
|
||||
// initialize to a nonzero value
|
||||
xDomainMessageSender = ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER;
|
||||
|
||||
messagePasser = new L2ToL1MessagePasser(address(this));
|
||||
messageQueue = new L2MessageQueue(address(this));
|
||||
}
|
||||
|
||||
function setBlockContainer(address _blockContainer) external {
|
||||
require(address(blockContainer) == address(0), "already set");
|
||||
|
||||
blockContainer = IL1BlockContainer(_blockContainer);
|
||||
}
|
||||
|
||||
/**************************************** Mutated Functions ****************************************/
|
||||
@@ -60,7 +68,7 @@ contract L2ScrollMessenger is ScrollMessengerBase, OwnableBase, IL2ScrollMesseng
|
||||
uint256 _minFee = gasOracle == address(0) ? 0 : IGasOracle(gasOracle).estimateMessageFee(msg.sender, _to, _message);
|
||||
require(_fee >= _minFee, "fee too small");
|
||||
|
||||
uint256 _nonce = messageNonce;
|
||||
uint256 _nonce = messageQueue.nextMessageIndex();
|
||||
uint256 _value;
|
||||
unchecked {
|
||||
_value = msg.value - _fee;
|
||||
@@ -68,43 +76,49 @@ contract L2ScrollMessenger is ScrollMessengerBase, OwnableBase, IL2ScrollMesseng
|
||||
|
||||
bytes32 _msghash = keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message));
|
||||
|
||||
messagePasser.passMessageToL1(_msghash);
|
||||
messageQueue.appendMessage(_msghash);
|
||||
|
||||
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
|
||||
|
||||
unchecked {
|
||||
messageNonce = _nonce + 1;
|
||||
}
|
||||
}
|
||||
|
||||
/// @inheritdoc IL2ScrollMessenger
|
||||
function relayMessage(
|
||||
function relayMessageWithProof(
|
||||
address _from,
|
||||
address _to,
|
||||
uint256 _value,
|
||||
uint256 _fee,
|
||||
uint256 _deadline,
|
||||
uint256 _nonce,
|
||||
bytes memory _message
|
||||
bytes memory _message,
|
||||
L1MessageProof calldata _proof
|
||||
) external override {
|
||||
// anti-reentrancy guard
|
||||
require(xDomainMessageSender == ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER, "already in execution");
|
||||
|
||||
// @todo only privileged accounts can call
|
||||
require(xDomainMessageSender == ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER, "Already in execution");
|
||||
|
||||
// solhint-disable-next-line not-rely-on-time
|
||||
require(_deadline >= block.timestamp, "Message expired");
|
||||
// @note disabled for now since we may encounter various situations in the testnet.
|
||||
// require(_deadline >= block.timestamp, "Message expired");
|
||||
|
||||
bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
|
||||
|
||||
require(!isMessageExecuted[_msghash], "Message successfully executed");
|
||||
|
||||
{
|
||||
// @note use blockContainer = address(0) to skip verification in hardhat tests
|
||||
IL1BlockContainer _blockContainer = blockContainer;
|
||||
require(
|
||||
address(_blockContainer) == address(0) ||
|
||||
_blockContainer.verifyMessageInclusionStatus(_proof.blockHash, _msghash, _proof.stateRootProof),
|
||||
"Invalid message proof"
|
||||
);
|
||||
}
|
||||
|
||||
// @todo check `_to` address to avoid attack.
|
||||
|
||||
// @todo take fee and distribute to relayer later.
|
||||
|
||||
// @note This usually will never happen, just in case.
|
||||
require(_from != xDomainMessageSender, "invalid message sender");
|
||||
require(_from != xDomainMessageSender, "Invalid message sender");
|
||||
|
||||
xDomainMessageSender = _from;
|
||||
// solhint-disable-next-line avoid-low-level-calls
|
||||
@@ -135,6 +149,9 @@ contract L2ScrollMessenger is ScrollMessengerBase, OwnableBase, IL2ScrollMesseng
|
||||
bytes memory,
|
||||
uint256
|
||||
) external virtual override {
|
||||
// @todo
|
||||
// 1. use blockContainer.verifyMessageExecutionStatus to check whether the message is executed.
|
||||
// 2. use blockContainer.getBlockTimestamp to check the expiration.
|
||||
revert("not supported");
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ import { IERC20Upgradeable } from "@openzeppelin/contracts-upgradeable/token/ERC
|
||||
import { IL2ERC20Gateway, L2ERC20Gateway } from "./L2ERC20Gateway.sol";
|
||||
import { IL2ScrollMessenger } from "../IL2ScrollMessenger.sol";
|
||||
import { IL1ERC20Gateway } from "../../L1/gateways/IL1ERC20Gateway.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
import { IScrollStandardERC20 } from "../../libraries/token/IScrollStandardERC20.sol";
|
||||
|
||||
@@ -16,7 +17,7 @@ import { IScrollStandardERC20 } from "../../libraries/token/IScrollStandardERC20
|
||||
/// finalize deposit the tokens from layer 1.
|
||||
/// @dev The withdrawn tokens will be burned directly. On finalizing deposit, the corresponding
|
||||
/// tokens will be minted and transferred to the recipient.
|
||||
contract L2CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L2ERC20Gateway {
|
||||
contract L2CustomERC20Gateway is Version, OwnableUpgradeable, ScrollGatewayBase, L2ERC20Gateway {
|
||||
/**************************************** Events ****************************************/
|
||||
|
||||
/// @notice Emitted when token mapping for ERC20 token is updated.
|
||||
|
||||
@@ -9,6 +9,7 @@ import { ERC1155HolderUpgradeable } from "@openzeppelin/contracts-upgradeable/to
|
||||
import { IL2ERC1155Gateway } from "./IL2ERC1155Gateway.sol";
|
||||
import { IL2ScrollMessenger } from "../IL2ScrollMessenger.sol";
|
||||
import { IL1ERC1155Gateway } from "../../L1/gateways/IL1ERC1155Gateway.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
import { IScrollERC1155 } from "../../libraries/token/IScrollERC1155.sol";
|
||||
|
||||
@@ -20,7 +21,7 @@ import { IScrollERC1155 } from "../../libraries/token/IScrollERC1155.sol";
|
||||
///
|
||||
/// This will be changed if we have more specific scenarios.
|
||||
// @todo Current implementation doesn't support calling from `L2GatewayRouter`.
|
||||
contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, ScrollGatewayBase, IL2ERC1155Gateway {
|
||||
contract L2ERC1155Gateway is Version, OwnableUpgradeable, ERC1155HolderUpgradeable, ScrollGatewayBase, IL2ERC1155Gateway {
|
||||
/**************************************** Events ****************************************/
|
||||
|
||||
/// @notice Emitted when token mapping for ERC1155 token is updated.
|
||||
|
||||
@@ -3,10 +3,11 @@
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import { IL2ERC20Gateway } from "./IL2ERC20Gateway.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
|
||||
// solhint-disable no-empty-blocks
|
||||
|
||||
abstract contract L2ERC20Gateway is IL2ERC20Gateway {
|
||||
abstract contract L2ERC20Gateway is Version, IL2ERC20Gateway {
|
||||
/// @inheritdoc IL2ERC20Gateway
|
||||
function withdrawERC20(
|
||||
address _token,
|
||||
|
||||
@@ -9,6 +9,7 @@ import { ERC721HolderUpgradeable } from "@openzeppelin/contracts-upgradeable/tok
|
||||
import { IL2ERC721Gateway } from "./IL2ERC721Gateway.sol";
|
||||
import { IL2ScrollMessenger } from "../IL2ScrollMessenger.sol";
|
||||
import { IL1ERC721Gateway } from "../../L1/gateways/IL1ERC721Gateway.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
import { IScrollERC721 } from "../../libraries/token/IScrollERC721.sol";
|
||||
|
||||
@@ -20,7 +21,7 @@ import { IScrollERC721 } from "../../libraries/token/IScrollERC721.sol";
|
||||
///
|
||||
/// This will be changed if we have more specific scenarios.
|
||||
// @todo Current implementation doesn't support calling from `L2GatewayRouter`.
|
||||
contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollGatewayBase, IL2ERC721Gateway {
|
||||
contract L2ERC721Gateway is Version, OwnableUpgradeable, ERC721HolderUpgradeable, ScrollGatewayBase, IL2ERC721Gateway {
|
||||
/**************************************** Events ****************************************/
|
||||
|
||||
/// @notice Emitted when token mapping for ERC721 token is updated.
|
||||
|
||||
@@ -8,6 +8,7 @@ import { IL2GatewayRouter } from "./IL2GatewayRouter.sol";
|
||||
import { IL2ERC20Gateway } from "./IL2ERC20Gateway.sol";
|
||||
import { IL2ScrollMessenger } from "../IL2ScrollMessenger.sol";
|
||||
import { IL1GatewayRouter } from "../../L1/gateways/IL1GatewayRouter.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { IScrollGateway } from "../../libraries/gateway/IScrollGateway.sol";
|
||||
import { ScrollGatewayBase } from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
import { IScrollStandardERC20 } from "../../libraries/token/IScrollStandardERC20.sol";
|
||||
@@ -17,7 +18,7 @@ import { IScrollStandardERC20 } from "../../libraries/token/IScrollStandardERC20
|
||||
/// All deposited tokens are routed to corresponding gateways.
|
||||
/// @dev One can also use this contract to query L1/L2 token address mapping.
|
||||
/// In the future, ERC-721 and ERC-1155 tokens will be added to the router too.
|
||||
contract L2GatewayRouter is OwnableUpgradeable, ScrollGatewayBase, IL2GatewayRouter {
|
||||
contract L2GatewayRouter is Version, OwnableUpgradeable, ScrollGatewayBase, IL2GatewayRouter {
|
||||
/**************************************** Events ****************************************/
|
||||
|
||||
event SetDefaultERC20Gateway(address indexed _defaultERC20Gateway);
|
||||
|
||||
@@ -10,6 +10,7 @@ import { Address } from "@openzeppelin/contracts/utils/Address.sol";
|
||||
import { IL2ERC20Gateway, L2ERC20Gateway } from "./L2ERC20Gateway.sol";
|
||||
import { IL2ScrollMessenger } from "../IL2ScrollMessenger.sol";
|
||||
import { IL1ERC20Gateway } from "../../L1/gateways/IL1ERC20Gateway.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { IScrollStandardERC20 } from "../../libraries/token/IScrollStandardERC20.sol";
|
||||
import { ScrollStandardERC20 } from "../../libraries/token/ScrollStandardERC20.sol";
|
||||
import { IScrollStandardERC20Factory } from "../../libraries/token/IScrollStandardERC20Factory.sol";
|
||||
@@ -21,7 +22,7 @@ import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/Scrol
|
||||
/// @dev The withdrawn ERC20 tokens will be burned directly. On finalizing deposit, the corresponding
|
||||
/// token will be minted and transferred to the recipient. Any ERC20 that requires non-standard functionality
|
||||
/// should use a separate gateway.
|
||||
contract L2StandardERC20Gateway is Initializable, ScrollGatewayBase, L2ERC20Gateway {
|
||||
contract L2StandardERC20Gateway is Version, Initializable, ScrollGatewayBase, L2ERC20Gateway {
|
||||
using SafeERC20 for IERC20;
|
||||
using Address for address;
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import { IL2ERC20Gateway, L2ERC20Gateway } from "./L2ERC20Gateway.sol";
|
||||
import { IL2ScrollMessenger } from "../IL2ScrollMessenger.sol";
|
||||
import { IWETH } from "../../interfaces/IWETH.sol";
|
||||
import { IL1ERC20Gateway } from "../../L1/gateways/IL1ERC20Gateway.sol";
|
||||
import { Version } from "../../libraries/common/Version.sol";
|
||||
import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/ScrollGatewayBase.sol";
|
||||
|
||||
/// @title L2WETHGateway
|
||||
@@ -19,7 +20,7 @@ import { ScrollGatewayBase, IScrollGateway } from "../../libraries/gateway/Scrol
|
||||
/// then the Ether will be sent to the `L2ScrollMessenger` contract.
|
||||
/// On finalizing deposit, the Ether will be transferred from `L2ScrollMessenger`, then
/// wrapped as WETH and finally transferred to the recipient.
|
||||
contract L2WETHGateway is Initializable, ScrollGatewayBase, L2ERC20Gateway {
|
||||
contract L2WETHGateway is Version, Initializable, ScrollGatewayBase, L2ERC20Gateway {
|
||||
using SafeERC20 for IERC20;
|
||||
|
||||
/**************************************** Variables ****************************************/
|
||||
|
||||
79
contracts/src/L2/predeploys/IL1BlockContainer.sol
Normal file
@@ -0,0 +1,79 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
interface IL1BlockContainer {
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when a block is imported.
|
||||
/// @param blockHash The hash of the imported block.
|
||||
/// @param blockHeight The height of the imported block.
|
||||
/// @param blockTimestamp The timestamp of the imported block.
|
||||
/// @param baseFee The base fee of the imported block.
|
||||
/// @param stateRoot The state root of the imported block.
|
||||
event ImportBlock(bytes32 indexed blockHash, uint256 blockHeight, uint256 blockTimestamp, uint256 baseFee, bytes32 stateRoot);
|
||||
|
||||
/*************************
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @notice Return the latest imported block hash
|
||||
function latestBlockHash() external view returns (bytes32);
|
||||
|
||||
/// @notice Return the latest imported L1 base fee
|
||||
function latestBaseFee() external view returns (uint256);
|
||||
|
||||
/// @notice Return the latest imported block number
|
||||
function latestBlockNumber() external view returns (uint256);
|
||||
|
||||
/// @notice Return the latest imported block timestamp
|
||||
function latestBlockTimestamp() external view returns (uint256);
|
||||
|
||||
/// @notice Check whether the message is included in the corresponding L1 block.
|
||||
/// @param blockHash The hash of the block in which the message should be included.
|
||||
/// @param msgHash The hash of the message to check.
|
||||
/// @param proof The encoded storage proof from eth_getProof.
|
||||
/// @return included Return true if the message is included in L1, otherwise return false.
|
||||
function verifyMessageInclusionStatus(
|
||||
bytes32 blockHash,
|
||||
bytes32 msgHash,
|
||||
bytes calldata proof
|
||||
) external view returns (bool included);
|
||||
|
||||
/// @notice Check whether the message is executed in the corresponding L1 block.
|
||||
/// @param blockHash The hash of the block in which the message should be executed.
|
||||
/// @param msgHash The hash of the message to check.
|
||||
/// @param proof The encoded storage proof from eth_getProof.
|
||||
/// @return executed Return true if the message is executed in L1, otherwise return false.
|
||||
function verifyMessageExecutionStatus(
|
||||
bytes32 blockHash,
|
||||
bytes32 msgHash,
|
||||
bytes calldata proof
|
||||
) external view returns (bool executed);
|
||||
|
||||
/// @notice Return the state root of given block.
|
||||
/// @param blockHash The block hash to query.
|
||||
/// @return stateRoot The state root of the block.
|
||||
function getStateRoot(bytes32 blockHash) external view returns (bytes32 stateRoot);
|
||||
|
||||
/// @notice Return the block timestamp of given block.
|
||||
/// @param blockHash The block hash to query.
|
||||
/// @return timestamp The corresponding block timestamp.
|
||||
function getBlockTimestamp(bytes32 blockHash) external view returns (uint256 timestamp);
|
||||
|
||||
/****************************
|
||||
* Public Mutated Functions *
|
||||
****************************/
|
||||
|
||||
/// @notice Import L1 block header to this contract.
|
||||
/// @param blockHash The hash of block.
|
||||
/// @param blockHeaderRLP The RLP encoding of L1 block.
|
||||
/// @param signature The ETH 2.0 signatures for the block header.
|
||||
function importBlockHeader(
|
||||
bytes32 blockHash,
|
||||
bytes calldata blockHeaderRLP,
|
||||
bytes calldata signature
|
||||
) external;
|
||||
}
|
||||
50
contracts/src/L2/predeploys/IL2GasPriceOracle.sol
Normal file
@@ -0,0 +1,50 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
interface IL2GasPriceOracle {
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when current fee overhead is updated.
|
||||
/// @param overhead The current fee overhead updated.
|
||||
event OverheadUpdated(uint256 overhead);
|
||||
|
||||
/// @notice Emitted when current fee scalar is updated.
|
||||
/// @param scalar The current fee scalar updated.
|
||||
event ScalarUpdated(uint256 scalar);
|
||||
|
||||
/*************************
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @notice Return the current L2 base fee.
|
||||
function baseFee() external view returns (uint256);
|
||||
|
||||
/// @notice Return the current L2 gas price (base fee).
|
||||
function gasPrice() external view returns (uint256);
|
||||
|
||||
/// @notice Return the current l1 fee overhead.
|
||||
function overhead() external view returns (uint256);
|
||||
|
||||
/// @notice Return the current l1 fee scalar.
|
||||
function scalar() external view returns (uint256);
|
||||
|
||||
/// @notice Return the latest known l1 base fee.
|
||||
function l1BaseFee() external view returns (uint256);
|
||||
|
||||
/// @notice Computes the L1 portion of the fee based on the size of the rlp encoded input
|
||||
/// transaction, the current L1 base fee, and the various dynamic parameters.
|
||||
/// @param data Unsigned fully RLP-encoded transaction to get the L1 fee for.
|
||||
/// @return L1 fee that should be paid for the tx
|
||||
function getL1Fee(bytes memory data) external view returns (uint256);
|
||||
|
||||
/// @notice Computes the amount of L1 gas used for a transaction. Adds the overhead which
|
||||
/// represents the per-transaction gas overhead of posting the transaction and state
|
||||
/// roots to L1. Adds 68 bytes of padding to account for the fact that the input does
|
||||
/// not have a signature.
|
||||
/// @param data Unsigned fully RLP-encoded transaction to get the L1 gas for.
|
||||
/// @return Amount of L1 gas used to publish the transaction.
|
||||
function getL1GasUsed(bytes memory data) external view returns (uint256);
|
||||
}
|
||||
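The NatSpec above describes `getL1GasUsed`/`getL1Fee` in words only. A hedged sketch of the conventional rollup-style computation those comments suggest: price calldata at 4 gas per zero byte and 16 per non-zero byte, add `overhead()` plus 68 bytes of padding for the missing signature, then scale by the latest L1 base fee and `scalar()`. The byte costs, the fixed-point `PRECISION`, and the rounding below are assumptions for illustration, not values taken from this repository.

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

/// Illustrative only; not the repository's implementation.
library L1FeeSketch {
    // assumed fixed-point precision for `scalar`
    uint256 internal constant PRECISION = 1e9;

    function l1GasUsed(bytes memory _data, uint256 _overhead) internal pure returns (uint256 _total) {
        for (uint256 i = 0; i < _data.length; i++) {
            _total += _data[i] == bytes1(0) ? 4 : 16;
        }
        // per-transaction overhead plus 68 padding bytes for the absent signature
        _total += _overhead + 68 * 16;
    }

    function l1Fee(
        bytes memory _data,
        uint256 _overhead,
        uint256 _l1BaseFee,
        uint256 _scalar
    ) internal pure returns (uint256) {
        return (l1GasUsed(_data, _overhead) * _l1BaseFee * _scalar) / PRECISION;
    }
}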
367
contracts/src/L2/predeploys/L1BlockContainer.sol
Normal file
@@ -0,0 +1,367 @@
|
||||
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol";

import { IL1BlockContainer } from "./IL1BlockContainer.sol";

import { OwnableBase } from "../../libraries/common/OwnableBase.sol";
import { IWhitelist } from "../../libraries/common/IWhitelist.sol";
import { Version } from "../../libraries/common/Version.sol";
import { PatriciaMerkleTrieVerifier } from "../../libraries/verifier/PatriciaMerkleTrieVerifier.sol";

/// @title L1BlockContainer
/// @notice This contract will maintain the list of blocks proposed in L1.
contract L1BlockContainer is Version, Initializable, OwnableBase, IL1BlockContainer {
  /**********
   * Events *
   **********/

  /// @notice Emitted when owner updates whitelist contract.
  /// @param _oldWhitelist The address of old whitelist contract.
  /// @param _newWhitelist The address of new whitelist contract.
  event UpdateWhitelist(address _oldWhitelist, address _newWhitelist);

  /*************
   * Constants *
   *************/

  /// @notice The address of L1MessageQueue contract.
  address public immutable messageQueue;

  /// @notice The address of L1ScrollMessenger contract.
  address public immutable messenger;

  /***********
   * Structs *
   ***********/

  /// @dev Compiler will pack this into single `uint256`.
  struct BlockMetadata {
    // The block height.
    uint64 height;
    // The block timestamp.
    uint64 timestamp;
    // The base fee in the block.
    uint128 baseFee;
  }

  /*************
   * Variables *
   *************/

  /// @notice The address of whitelist contract.
  IWhitelist public whitelist;

  // @todo change to ring buffer to save gas usage.

  /// @inheritdoc IL1BlockContainer
  bytes32 public override latestBlockHash;

  /// @notice Mapping from block hash to corresponding state root.
  mapping(bytes32 => bytes32) public stateRoot;

  /// @notice Mapping from block hash to corresponding block metadata,
  /// including timestamp and height.
  mapping(bytes32 => BlockMetadata) public metadata;

  /***************
   * Constructor *
   ***************/

  constructor(address _messageQueue, address _messenger) {
    messageQueue = _messageQueue;
    messenger = _messenger;
  }

  function initialize(
    address _owner,
    bytes32 _startBlockHash,
    uint64 _startBlockHeight,
    uint64 _startBlockTimestamp,
    uint128 _startBlockBaseFee,
    bytes32 _startStateRoot
  ) public initializer {
    _transferOwnership(_owner);

    latestBlockHash = _startBlockHash;
    stateRoot[_startBlockHash] = _startStateRoot;
    metadata[_startBlockHash] = BlockMetadata(_startBlockHeight, _startBlockTimestamp, _startBlockBaseFee);

    emit ImportBlock(_startBlockHash, _startBlockHeight, _startBlockTimestamp, _startBlockBaseFee, _startStateRoot);
  }

  /*************************
   * Public View Functions *
   *************************/

  /// @inheritdoc IL1BlockContainer
  function latestBaseFee() external override view returns (uint256) {
    return metadata[latestBlockHash].baseFee;
  }

  /// @inheritdoc IL1BlockContainer
  function latestBlockNumber() external override view returns (uint256) {
    return metadata[latestBlockHash].height;
  }

  /// @inheritdoc IL1BlockContainer
  function latestBlockTimestamp() external override view returns (uint256) {
    return metadata[latestBlockHash].timestamp;
  }

  /// @inheritdoc IL1BlockContainer
  function verifyMessageInclusionStatus(
    bytes32 _blockHash,
    bytes32 _msgHash,
    bytes calldata _proof
  ) external view returns (bool) {
    bytes32 _expectedStateRoot = stateRoot[_blockHash];
    require(_expectedStateRoot != bytes32(0), "Block not imported");

    bytes32 _storageKey;
    // `mapping(bytes32 => bool) public isMessageSent` is the 2-nd slot of contract `L1MessageQueue`.
    assembly {
      mstore(0x00, _msgHash)
      mstore(0x20, 1)
      _storageKey := keccak256(0x00, 0x40)
    }

    (bytes32 _computedStateRoot, bytes32 _storageValue) = PatriciaMerkleTrieVerifier.verifyPatriciaProof(
      messageQueue,
      _storageKey,
      _proof
    );
    require(_computedStateRoot == _expectedStateRoot, "State root mismatch");

    return uint256(_storageValue) == 1;
  }

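For reference, the storage key built by the assembly block above can be reproduced off-chain. The following Go sketch only assumes the standard Solidity mapping-slot rule `keccak256(key ‖ slot)` with `isMessageSent` in slot 1; the helper name is illustrative and not part of this change:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// isMessageSentSlot mirrors the assembly above: the storage slot holding
// `isMessageSent[msgHash]` is keccak256(msgHash ++ uint256(1)), because the
// mapping itself occupies slot 1 of L1MessageQueue.
func isMessageSentSlot(msgHash common.Hash) common.Hash {
	slot := common.LeftPadBytes([]byte{1}, 32)
	return common.BytesToHash(crypto.Keccak256(msgHash.Bytes(), slot))
}

func main() {
	fmt.Println(isMessageSentSlot(common.HexToHash("0x01")).Hex())
}
```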
  /// @inheritdoc IL1BlockContainer
  function verifyMessageExecutionStatus(
    bytes32 _blockHash,
    bytes32 _msgHash,
    bytes calldata _proof
  ) external view returns (bool) {
    bytes32 _expectedStateRoot = stateRoot[_blockHash];
    require(_expectedStateRoot != bytes32(0), "Block not imported");

    bytes32 _storageKey;
    // `mapping(bytes32 => bool) public isMessageExecuted` is the 107-th slot of contract `L1ScrollMessenger`.
    assembly {
      mstore(0x00, _msgHash)
      mstore(0x20, 106)
      _storageKey := keccak256(0x00, 0x40)
    }

    (bytes32 _computedStateRoot, bytes32 _storageValue) = PatriciaMerkleTrieVerifier.verifyPatriciaProof(
      messenger,
      _storageKey,
      _proof
    );
    require(_computedStateRoot == _expectedStateRoot, "State root mismatch");

    return uint256(_storageValue) == 1;
  }

  /// @inheritdoc IL1BlockContainer
  function getStateRoot(bytes32 _blockHash) external view returns (bytes32) {
    return stateRoot[_blockHash];
  }

  /// @inheritdoc IL1BlockContainer
  function getBlockTimestamp(bytes32 _blockHash) external view returns (uint256) {
    return metadata[_blockHash].timestamp;
  }

  /****************************
   * Public Mutated Functions *
   ****************************/

  /// @inheritdoc IL1BlockContainer
  function importBlockHeader(
    bytes32 _blockHash,
    bytes calldata _blockHeaderRLP,
    bytes calldata
  ) external {
    // @todo remove this when ETH 2.0 signature verification is ready.
    {
      IWhitelist _whitelist = whitelist;
      require(address(_whitelist) == address(0) || _whitelist.isSenderAllowed(msg.sender), "Not whitelist sender");
    }

    // The encoding order in block header is
    // 1. ParentHash: 32 bytes
    // 2. UncleHash: 32 bytes
    // 3. Coinbase: 20 bytes
    // 4. StateRoot: 32 bytes
    // 5. TransactionsRoot: 32 bytes
    // 6. ReceiptsRoot: 32 bytes
    // 7. LogsBloom: 256 bytes
    // 8. Difficulty: uint
    // 9. BlockHeight: uint
    // 10. GasLimit: uint64
    // 11. GasUsed: uint64
    // 12. BlockTimestamp: uint64
    // 13. ExtraData: several bytes
    // 14. MixHash: 32 bytes
    // 15. BlockNonce: 8 bytes
    // 16. BaseFee: uint // optional
    bytes32 _parentHash;
    bytes32 _stateRoot;
    uint64 _height;
    uint64 _timestamp;
    uint128 _baseFee;

    assembly {
      // reverts with error `msg`.
      // make sure the length of error string <= 32
      function revertWith(msg) {
        // keccak("Error(string)")
        mstore(0x00, shl(224, 0x08c379a0))
        mstore(0x04, 0x20) // str.offset
        mstore(0x44, msg)
        let msgLen
        for {} msg {} {
          msg := shl(8, msg)
          msgLen := add(msgLen, 1)
        }
        mstore(0x24, msgLen) // str.length
        revert(0x00, 0x64)
      }
      // reverts with `msg` when condition is not matched.
      // make sure the length of error string <= 32
      function require(cond, msg) {
        if iszero(cond) {
          revertWith(msg)
        }
      }
      // returns the calldata offset of the value and the length in bytes
      // for the RLP encoded data item at `ptr`. used in `decodeFlat`
      function decodeValue(ptr) -> dataLen, valueOffset {
        let b0 := byte(0, calldataload(ptr))

        // 0x00 - 0x7f, single byte
        if lt(b0, 0x80) {
          // for a single byte whose value is in the [0x00, 0x7f] range,
          // that byte is its own RLP encoding.
          dataLen := 1
          valueOffset := ptr
          leave
        }

        // 0x80 - 0xb7, short string/bytes, length <= 55
        if lt(b0, 0xb8) {
          // the RLP encoding consists of a single byte with value 0x80
          // plus the length of the string followed by the string.
          dataLen := sub(b0, 0x80)
          valueOffset := add(ptr, 1)
          leave
        }

        // 0xb8 - 0xbf, long string/bytes, length > 55
        if lt(b0, 0xc0) {
          // the RLP encoding consists of a single byte with value 0xb7
          // plus the length in bytes of the length of the string in binary form,
          // followed by the length of the string, followed by the string.
          let lengthBytes := sub(b0, 0xb7)
          if gt(lengthBytes, 4) {
            invalid()
          }

          // load the extended length
          valueOffset := add(ptr, 1)
          let extendedLen := calldataload(valueOffset)
          let bits := sub(256, mul(lengthBytes, 8))
          extendedLen := shr(bits, extendedLen)

          dataLen := extendedLen
          valueOffset := add(valueOffset, lengthBytes)
          leave
        }

        revertWith("Not value")
      }

      let ptr := _blockHeaderRLP.offset
      let headerPayloadLength
      {
        let b0 := byte(0, calldataload(ptr))
        // the input should be a long list
        if lt(b0, 0xf8) {
          invalid()
        }
        let lengthBytes := sub(b0, 0xf7)
        if gt(lengthBytes, 32) {
          invalid()
        }
        // load the extended length
        ptr := add(ptr, 1)
        headerPayloadLength := calldataload(ptr)
        let bits := sub(256, mul(lengthBytes, 8))
        // compute payload length: extended length + length bytes + 1
        headerPayloadLength := shr(bits, headerPayloadLength)
        headerPayloadLength := add(headerPayloadLength, lengthBytes)
        headerPayloadLength := add(headerPayloadLength, 1)
        ptr := add(ptr, lengthBytes)
      }

      let memPtr := mload(0x40)
      calldatacopy(memPtr, _blockHeaderRLP.offset, headerPayloadLength)
      let _computedBlockHash := keccak256(memPtr, headerPayloadLength)
      require(eq(_blockHash, _computedBlockHash), "Block hash mismatch")

      // load 16 values
      for { let i := 0 } lt(i, 16) { i := add(i, 1) } {
        let len, offset := decodeValue(ptr)
        // the values we care about must have at most 32 bytes
        if lt(len, 33) {
          let bits := mul( sub(32, len), 8)
          let value := calldataload(offset)
          value := shr(bits, value)
          mstore(memPtr, value)
        }
        memPtr := add(memPtr, 0x20)
        ptr := add(len, offset)
      }
      require(eq(ptr, add(_blockHeaderRLP.offset, _blockHeaderRLP.length)), "Header RLP length mismatch")

      memPtr := mload(0x40)
      // load parent hash, 1-st entry
      _parentHash := mload(memPtr)
      // load state root, 4-th entry
      _stateRoot := mload(add(memPtr, 0x60))
      // load block height, 9-th entry
      _height := mload(add(memPtr, 0x100))
      // load block timestamp, 12-th entry
      _timestamp := mload(add(memPtr, 0x160))
      // load base fee, 16-th entry
      _baseFee := mload(add(memPtr, 0x1e0))
    }
    require(stateRoot[_parentHash] != bytes32(0), "Parent not imported");
    BlockMetadata memory _parentMetadata = metadata[_parentHash];
    require(_parentMetadata.height + 1 == _height, "Block height mismatch");
    require(_parentMetadata.timestamp <= _timestamp, "Parent block has larger timestamp");

    latestBlockHash = _blockHash;
    stateRoot[_blockHash] = _stateRoot;
    metadata[_blockHash] = BlockMetadata(_height, _timestamp, _baseFee);

    emit ImportBlock(_blockHash, _height, _timestamp, _baseFee, _stateRoot);
  }

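Off-chain, the same header fields can be recovered with go-ethereum's RLP decoder before calling `importBlockHeader`. This is a hedged sketch, not code from this change; it only relies on the standard `types.Header` layout that the field list above enumerates:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// printHeaderFields decodes an RLP-encoded L1 header and prints the fields
// that importBlockHeader extracts in assembly.
func printHeaderFields(headerRLP []byte) error {
	header := new(types.Header)
	if err := rlp.DecodeBytes(headerRLP, header); err != nil {
		return err
	}
	// header.Hash() re-hashes the RLP, matching the contract's keccak256 check.
	fmt.Println("hash:      ", header.Hash().Hex())
	fmt.Println("parent:    ", header.ParentHash.Hex())
	fmt.Println("state root:", header.Root.Hex())
	fmt.Println("height:    ", header.Number)
	fmt.Println("timestamp: ", header.Time)
	fmt.Println("base fee:  ", header.BaseFee) // nil for pre-EIP-1559 headers
	return nil
}

func main() {
	if err := printHeaderFields([]byte{0xc0}); err != nil {
		fmt.Println("decode error:", err)
	}
}
```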
  /************************
   * Restricted Functions *
   ************************/

  /// @notice Update whitelist contract.
  /// @dev This function can only be called by contract owner.
  /// @param _newWhitelist The address of new whitelist contract.
  function updateWhitelist(IWhitelist _newWhitelist) external onlyOwner {
    address _oldWhitelist = address(whitelist);

    whitelist = _newWhitelist;
    emit UpdateWhitelist(_oldWhitelist, address(_newWhitelist));
  }
}
118  contracts/src/L2/predeploys/L2GasPriceOracle.sol  Normal file
@@ -0,0 +1,118 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import { OwnableBase } from "../../libraries/common/OwnableBase.sol";
import { Version } from "../../libraries/common/Version.sol";

import "./IL1BlockContainer.sol";
import "./IL2GasPriceOracle.sol";

contract L2GasPriceOracle is Version, OwnableBase, IL2GasPriceOracle {
  /*************
   * Constants *
   *************/

  /// @dev The precision used in the scalar.
  uint256 private constant PRECISION = 1e9;

  /// @dev The maximum possible l1 fee overhead.
  /// Computed based on current l1 block gas limit.
  uint256 private constant MAX_OVERHEAD = 30000000 / 16;

  /// @dev The maximum possible l1 fee scale.
  /// x1000 should be enough.
  uint256 private constant MAX_SCALE = 1000 * PRECISION;

  /// @notice The address of L1BlockContainer contract.
  address public immutable blockContainer;

  /*************
   * Variables *
   *************/

  /// @inheritdoc IL2GasPriceOracle
  uint256 public override overhead;

  /// @inheritdoc IL2GasPriceOracle
  uint256 public override scalar;

  /***************
   * Constructor *
   ***************/

  constructor(address _owner, address _blockContainer) {
    _transferOwnership(_owner);

    blockContainer = _blockContainer;
  }

  /*************************
   * Public View Functions *
   *************************/

  /// @inheritdoc IL2GasPriceOracle
  function baseFee() external view override returns (uint256) {
    return block.basefee;
  }

  /// @inheritdoc IL2GasPriceOracle
  function gasPrice() external view override returns (uint256) {
    return block.basefee;
  }

  /// @notice Return the latest known l1 base fee.
  function l1BaseFee() public view override returns (uint256) {
    return IL1BlockContainer(blockContainer).latestBaseFee();
  }

  /// @inheritdoc IL2GasPriceOracle
  function getL1Fee(bytes memory _data) external view override returns (uint256) {
    unchecked {
      uint256 _l1GasUsed = getL1GasUsed(_data);
      uint256 _l1Fee = _l1GasUsed * l1BaseFee();
      return (_l1Fee * scalar) / PRECISION;
    }
  }

  /// @inheritdoc IL2GasPriceOracle
  /// @dev See the comments in `OVM_GasPriceOracle` for more details
  /// https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts/contracts/L2/predeploys/OVM_GasPriceOracle.sol
  function getL1GasUsed(bytes memory _data) public view override returns (uint256) {
    uint256 _total = 0;
    uint256 _length = _data.length;
    unchecked {
      for (uint256 i = 0; i < _length; i++) {
        if (_data[i] == 0) {
          _total += 4;
        } else {
          _total += 16;
        }
      }
      uint256 _unsigned = _total + overhead;
      return _unsigned + (68 * 16);
    }
  }

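The fee formula above is small enough to mirror off-chain. A minimal Go sketch, with constants matching `PRECISION` and the 68-byte signature padding (function names are illustrative only):

```go
package main

import (
	"fmt"
	"math/big"
)

const (
	precision      = 1_000_000_000 // PRECISION = 1e9 in the contract
	txPaddingGas   = 68 * 16       // padding for the missing signature bytes
	zeroByteGas    = 4
	nonZeroByteGas = 16
)

// l1GasUsed mirrors getL1GasUsed: per-byte calldata cost plus the configured
// overhead plus the fixed signature padding.
func l1GasUsed(data []byte, overhead uint64) uint64 {
	total := uint64(0)
	for _, b := range data {
		if b == 0 {
			total += zeroByteGas
		} else {
			total += nonZeroByteGas
		}
	}
	return total + overhead + txPaddingGas
}

// l1Fee mirrors getL1Fee: gasUsed * l1BaseFee * scalar / PRECISION.
func l1Fee(data []byte, overhead uint64, l1BaseFee, scalar *big.Int) *big.Int {
	fee := new(big.Int).SetUint64(l1GasUsed(data, overhead))
	fee.Mul(fee, l1BaseFee)
	fee.Mul(fee, scalar)
	return fee.Div(fee, big.NewInt(precision))
}

func main() {
	data := []byte{0x00, 0x01, 0x02}
	fmt.Println(l1GasUsed(data, 2100)) // 4 + 16 + 16 + 2100 + 1088 = 3224
	fmt.Println(l1Fee(data, 2100, big.NewInt(10_000_000_000), big.NewInt(precision)))
}
```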
  /************************
   * Restricted Functions *
   ************************/

  /// @notice Allows the owner to modify the overhead.
  /// @param _overhead New overhead
  function setOverhead(uint256 _overhead) external onlyOwner {
    require(_overhead <= MAX_OVERHEAD, "exceed maximum overhead");

    overhead = _overhead;
    emit OverheadUpdated(_overhead);
  }

  /// Allows the owner to modify the scalar.
  /// @param _scalar New scalar
  function setScalar(uint256 _scalar) external onlyOwner {
    require(_scalar <= MAX_SCALE, "exceed maximum scale");

    scalar = _scalar;
    emit ScalarUpdated(_scalar);
  }
}
48  contracts/src/L2/predeploys/L2MessageQueue.sol  Normal file
@@ -0,0 +1,48 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import { AppendOnlyMerkleTree } from "../../libraries/common/AppendOnlyMerkleTree.sol";
import { Version } from "../../libraries/common/Version.sol";

/// @title L2MessageQueue
/// @notice The original idea is from Optimism, see [OVM_L2ToL1MessagePasser](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts/contracts/L2/predeploys/OVM_L2ToL1MessagePasser.sol).
/// The L2 to L1 Message Passer is a utility contract which facilitates an L1 proof of the existence
/// of a message on L2. The L1 Cross Domain Messenger performs this proof in its
/// _verifyStorageProof function, which verifies the existence of the transaction hash in this
/// contract's `sentMessages` mapping.
contract L2MessageQueue is Version, AppendOnlyMerkleTree {
  /// @notice Emitted when a new message is added to the merkle tree.
  /// @param index The index of the corresponding message.
  /// @param messageHash The hash of the corresponding message.
  event AppendMessage(uint256 index, bytes32 messageHash);

  /// @notice The address of L2ScrollMessenger contract.
  address public immutable messenger;

  /// @notice Mapping from message hash to sent messages.
  mapping(bytes32 => bool) public sentMessages;

  constructor(address _messenger) {
    messenger = _messenger;

    _initializeMerkleTree();
  }

  /// @notice record the message to merkle tree and compute the new root.
  /// @param _messageHash The hash of the new added message.
  function appendMessage(bytes32 _messageHash) external returns (bytes32) {
    require(msg.sender == messenger, "only messenger");

    require(!sentMessages[_messageHash], "duplicated message");

    sentMessages[_messageHash] = true;

    (uint256 _currentNonce, bytes32 _currentRoot) = _appendMessageHash(_messageHash);

    // We can use the event to compute the merkle tree locally.
    emit AppendMessage(_currentNonce, _messageHash);

    return _currentRoot;
  }
}
@@ -1,28 +0,0 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

/// @title L2ToL1MessagePasser
/// @notice The original idea is from Optimism, see [OVM_L2ToL1MessagePasser](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts/contracts/L2/predeploys/OVM_L2ToL1MessagePasser.sol).
/// The L2 to L1 Message Passer is a utility contract which facilitate an L1 proof of the
/// of a message on L2. The L1 Cross Domain Messenger performs this proof in its
/// _verifyStorageProof function, which verifies the existence of the transaction hash in this
/// contract's `sentMessages` mapping.
contract L2ToL1MessagePasser {
  address public immutable messenger;

  /// @notice Mapping from message hash to sent messages.
  mapping(bytes32 => bool) public sentMessages;

  constructor(address _messenger) {
    messenger = _messenger;
  }

  function passMessageToL1(bytes32 _messageHash) public {
    require(msg.sender == messenger, "only messenger");

    require(!sentMessages[_messageHash], "duplicated message");

    sentMessages[_messageHash] = true;
  }
}
71  contracts/src/libraries/common/AppendOnlyMerkleTree.sol  Normal file
@@ -0,0 +1,71 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

abstract contract AppendOnlyMerkleTree {
  /// @dev The maximum height of the withdraw merkle tree.
  uint256 private constant MAX_TREE_HEIGHT = 40;

  /// @notice The merkle root of the current merkle tree.
  /// @dev This is actually equal to `branches[n]`.
  bytes32 public messageRoot;

  /// @notice The next unused message index.
  uint256 public nextMessageIndex;

  /// @notice The list of zero hash in each height.
  bytes32[MAX_TREE_HEIGHT] private zeroHashes;

  /// @notice The list of minimum merkle proofs needed to compute next root.
  /// @dev Only first `n` elements are used, where `n` is the minimum value such that `2^{n-1} >= currentMaxNonce + 1`.
  /// It means we only use `currentMaxNonce + 1` leaf nodes to construct the merkle tree.
  bytes32[MAX_TREE_HEIGHT] public branches;

  function _initializeMerkleTree() internal {
    // Compute hashes in empty sparse Merkle tree
    for (uint256 height = 0; height + 1 < MAX_TREE_HEIGHT; height++) {
      zeroHashes[height + 1] = _efficientHash(zeroHashes[height], zeroHashes[height]);
    }
  }

  function _appendMessageHash(bytes32 _messageHash) internal returns (uint256, bytes32) {
    uint256 _currentMessageIndex = nextMessageIndex;
    bytes32 _hash = _messageHash;
    uint256 _height = 0;
    // @todo it can be optimized, since we only need the newly added branch.
    while (_currentMessageIndex != 0) {
      if (_currentMessageIndex % 2 == 0) {
        // it may be used in next round.
        branches[_height] = _hash;
        // it's a left child, the right child must be null
        _hash = _efficientHash(_hash, zeroHashes[_height]);
      } else {
        // it's a right child, use previously computed hash
        _hash = _efficientHash(branches[_height], _hash);
      }
      unchecked {
        _height += 1;
      }
      _currentMessageIndex >>= 1;
    }

    branches[_height] = _hash;
    messageRoot = _hash;

    _currentMessageIndex = nextMessageIndex;
    unchecked {
      nextMessageIndex = _currentMessageIndex + 1;
    }

    return (_currentMessageIndex, _hash);
  }

  function _efficientHash(bytes32 a, bytes32 b) private pure returns (bytes32 value) {
    // solhint-disable-next-line no-inline-assembly
    assembly {
      mstore(0x00, a)
      mstore(0x20, b)
      value := keccak256(0x00, 0x40)
    }
  }
}
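Because `AppendMessage` events expose every `(index, messageHash)` pair, the same append-only tree can be rebuilt locally. A Go sketch of the `_appendMessageHash` walk (type and function names are illustrative, not from this change):

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

const maxTreeHeight = 40

// AppendOnlyMerkleTree mirrors the contract: `branches` keeps the minimal
// frontier needed for the next append, `zeroHashes[h]` is the root of an
// empty subtree of height h.
type AppendOnlyMerkleTree struct {
	zeroHashes [maxTreeHeight]common.Hash
	branches   [maxTreeHeight]common.Hash
	nextIndex  uint64
	root       common.Hash
}

func NewAppendOnlyMerkleTree() *AppendOnlyMerkleTree {
	t := new(AppendOnlyMerkleTree)
	for h := 0; h+1 < maxTreeHeight; h++ {
		t.zeroHashes[h+1] = hashPair(t.zeroHashes[h], t.zeroHashes[h])
	}
	return t
}

// Append follows _appendMessageHash: walk up the tree, pairing with either
// the stored branch (right child) or the zero hash (left child).
func (t *AppendOnlyMerkleTree) Append(leaf common.Hash) (uint64, common.Hash) {
	index := t.nextIndex
	hash := leaf
	height := 0
	for i := index; i != 0; i >>= 1 {
		if i%2 == 0 {
			t.branches[height] = hash
			hash = hashPair(hash, t.zeroHashes[height])
		} else {
			hash = hashPair(t.branches[height], hash)
		}
		height++
	}
	t.branches[height] = hash
	t.root = hash
	t.nextIndex = index + 1
	return index, hash
}

func hashPair(a, b common.Hash) common.Hash {
	return common.BytesToHash(crypto.Keccak256(a.Bytes(), b.Bytes()))
}

func main() {
	t := NewAppendOnlyMerkleTree()
	t.Append(common.BigToHash(common.Big1))
	_, root := t.Append(common.BytesToHash([]byte{2}))
	// should reproduce the root asserted in L2MessageQueue.t.sol after two appends
	fmt.Println(root.Hex())
}
```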
8  contracts/src/libraries/common/Version.sol  Normal file
@@ -0,0 +1,8 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

abstract contract Version {
  /// @notice the current contract version.
  string public constant version = "0.0.1";
}
487  contracts/src/libraries/verifier/PatriciaMerkleTrieVerifier.sol  Normal file
@@ -0,0 +1,487 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

library PatriciaMerkleTrieVerifier {
  /// @notice Internal function to validate a proof from eth_getProof.
  /// @param account The address of the contract.
  /// @param storageKey The storage slot to verify.
  /// @param proof The rlp encoding result of eth_getProof.
  /// @return stateRoot The computed state root. Must be checked by the caller.
  /// @return storageValue The value of `storageKey`.
  ///
  /// @dev The code is based on
  /// 1. https://eips.ethereum.org/EIPS/eip-1186
  /// 2. https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp/
  /// 3. https://github.com/ethereum/go-ethereum/blob/master/trie/proof.go#L114
  /// 4. https://github.com/privacy-scaling-explorations/zkevm-chain/blob/master/contracts/templates/PatriciaValidator.sol
  ///
  /// The encoding order of `proof` is
  /// ```text
  /// | 1 byte | ... | 1 byte | ... |
  /// | account proof length | account proof | storage proof length | storage proof |
  /// ```
  function verifyPatriciaProof(
    address account,
    bytes32 storageKey,
    bytes calldata proof
  ) internal pure returns (bytes32 stateRoot, bytes32 storageValue) {
    assembly {
      // hashes 32 bytes of `v`
      function keccak_32(v) -> r {
        mstore(0x00, v)
        r := keccak256(0x00, 0x20)
      }
      // hashes the last 20 bytes of `v`
      function keccak_20(v) -> r {
        mstore(0x00, v)
        r := keccak256(0x0c, 0x14)
      }
      // reverts with error `msg`.
      // make sure the length of error string <= 32
      function revertWith(msg) {
        // keccak("Error(string)")
        mstore(0x00, shl(224, 0x08c379a0))
        mstore(0x04, 0x20) // str.offset
        mstore(0x44, msg)
        let msgLen
        for {} msg {} {
          msg := shl(8, msg)
          msgLen := add(msgLen, 1)
        }
        mstore(0x24, msgLen) // str.length
        revert(0x00, 0x64)
      }
      // reverts with `msg` when condition is not matched.
      // make sure the length of error string <= 32
      function require(cond, msg) {
        if iszero(cond) {
          revertWith(msg)
        }
      }

      // special function for decoding the storage value
      // because of the prefix truncation if value > 31 bytes
      // see `loadValue`
      function decodeItem(word, len) -> ret {
        // default
        ret := word

        // RLP single byte
        if lt(word, 0x80) {
          leave
        }

        // truncated
        if gt(len, 32) {
          leave
        }

        // value is >= 0x80 and <= 32 bytes.
        // `len` should be at least 2 (prefix byte + value)
        // otherwise the RLP is malformed.
        let bits := mul(len, 8)
        // sub 8 bits - the prefix
        bits := sub(bits, 8)
        let mask := shl(bits, 0xff)
        // invert the mask
        mask := not(mask)
        // should hold the value - prefix byte
        ret := and(ret, mask)
      }

      // returns the `len` of the whole RLP list at `ptr`
      // and the offset for the first value inside the list.
      function decodeListLength(ptr) -> len, startOffset {
        let b0 := byte(0, calldataload(ptr))
        // In most cases, it is a long list. So we reorder the branch to reduce branch prediction miss.

        // 0xf8 - 0xff, long list, length > 55
        if gt(b0, 0xf7) {
          // the RLP encoding consists of a single byte with value 0xf7
          // plus the length in bytes of the length of the payload in binary form,
          // followed by the length of the payload, followed by the concatenation
          // of the RLP encodings of the items.
          // the extended length is ignored
          let lengthBytes := sub(b0, 0xf7)
          if gt(lengthBytes, 32) {
            invalid()
          }

          // load the extended length
          startOffset := add(ptr, 1)
          let extendedLen := calldataload(startOffset)
          let bits := sub(256, mul(lengthBytes, 8))
          extendedLen := shr(bits, extendedLen)

          len := add(extendedLen, lengthBytes)
          len := add(len, 1)
          startOffset := add(startOffset, lengthBytes)
          leave
        }
        // 0xc0 - 0xf7, short list, length <= 55
        if gt(b0, 0xbf) {
          // the RLP encoding consists of a single byte with value 0xc0
          // plus the length of the list followed by the concatenation of
          // the RLP encodings of the items.
          len := sub(b0, 0xbf)
          startOffset := add(ptr, 1)
          leave
        }
        revertWith("Not list")
      }

      // returns the kind, calldata offset of the value and the length in bytes
      // for the RLP encoded data item at `ptr`. used in `decodeFlat`
      // kind = 0 means string/bytes, kind = 1 means list.
      function decodeValue(ptr) -> kind, dataLen, valueOffset {
        let b0 := byte(0, calldataload(ptr))

        // 0x00 - 0x7f, single byte
        if lt(b0, 0x80) {
          // for a single byte whose value is in the [0x00, 0x7f] range,
          // that byte is its own RLP encoding.
          dataLen := 1
          valueOffset := ptr
          leave
        }

        // 0x80 - 0xb7, short string/bytes, length <= 55
        if lt(b0, 0xb8) {
          // the RLP encoding consists of a single byte with value 0x80
          // plus the length of the string followed by the string.
          dataLen := sub(b0, 0x80)
          valueOffset := add(ptr, 1)
          leave
        }

        // 0xb8 - 0xbf, long string/bytes, length > 55
        if lt(b0, 0xc0) {
          // the RLP encoding consists of a single byte with value 0xb7
          // plus the length in bytes of the length of the string in binary form,
          // followed by the length of the string, followed by the string.
          let lengthBytes := sub(b0, 0xb7)
          if gt(lengthBytes, 4) {
            invalid()
          }

          // load the extended length
          valueOffset := add(ptr, 1)
          let extendedLen := calldataload(valueOffset)
          let bits := sub(256, mul(lengthBytes, 8))
          extendedLen := shr(bits, extendedLen)

          dataLen := extendedLen
          valueOffset := add(valueOffset, lengthBytes)
          leave
        }

        kind := 1
        // 0xc0 - 0xf7, short list, length <= 55
        if lt(b0, 0xf8) {
          // intentionally ignored
          // dataLen := sub(firstByte, 0xc0)
          valueOffset := add(ptr, 1)
          leave
        }

        // 0xf8 - 0xff, long list, length > 55
        {
          // the extended length is ignored
          dataLen := sub(b0, 0xf7)
          valueOffset := add(ptr, 1)
          leave
        }
      }

      // decodes all RLP encoded data and stores their DATA items
      // [length - 128 bits | calldata offset - 128 bits] in a continuous memory region.
      // Expects that the RLP starts with a list that defines the length
      // of the whole RLP region.
      function decodeFlat(_ptr) -> ptr, memStart, nItems, hash {
        ptr := _ptr

        // load free memory ptr
        // doesn't update the ptr and leaves the memory region dirty
        memStart := mload(0x40)

        let payloadLen, startOffset := decodeListLength(ptr)
        // reuse memStart region and hash
        calldatacopy(memStart, ptr, payloadLen)
        hash := keccak256(memStart, payloadLen)

        let memPtr := memStart
        let ptrStop := add(ptr, payloadLen)
        ptr := startOffset

        // decode until the end of the list
        for {} lt(ptr, ptrStop) {} {
          let kind, len, valuePtr := decodeValue(ptr)
          ptr := add(len, valuePtr)

          if iszero(kind) {
            // store the length of the data and the calldata offset
            // low -------> high
            // | 128 bits | 128 bits |
            // | calldata offset | value length |
            mstore(memPtr, or(shl(128, len), valuePtr))
            memPtr := add(memPtr, 0x20)
          }
        }

        if iszero(eq(ptr, ptrStop)) {
          invalid()
        }

        nItems := div( sub(memPtr, memStart), 32 )
      }

      // prefix gets truncated to 256 bits
      // `depth` is untrusted and can lead to bogus
      // shifts/masks. In that case, the remaining verification
      // steps must fail or lead to an invalid stateRoot hash
      // if the proof data is 'spoofed but valid'
      function derivePath(key, depth) -> path {
        path := key

        let bits := mul(depth, 4)
        {
          let mask := not(0)
          mask := shr(bits, mask)
          path := and(path, mask)
        }

        // even prefix
        let prefix := 0x20
        if mod(depth, 2) {
          // odd
          prefix := 0x3
        }

        // the prefix may be shifted outside bounds
        // this is intended, see `loadValue`
        bits := sub(256, bits)
        prefix := shl(bits, prefix)
        path := or(prefix, path)
      }

      // loads and aligns a value from calldata
      // given the `len|offset` stored at `memPtr`
      function loadValue(memPtr, idx) -> value {
        let tmp := mload(add(memPtr, mul(32, idx)))
        // assuming 0xffffff is sufficient for storing calldata offset
        let offset := and(tmp, 0xffffff)
        let len := shr(128, tmp)

        if gt(len, 31) {
          // special case - truncating the value is intended.
          // this matches the behavior in `derivePath` that truncates to 256 bits.
          offset := add(offset, sub(len, 32))
          value := calldataload(offset)
          leave
        }

        // everything else is
        // < 32 bytes - align the value
        let bits := mul( sub(32, len), 8)
        value := calldataload(offset)
        value := shr(bits, value)
      }

      // loads and aligns a value from calldata
      // given the `len|offset` stored at `memPtr`
      // Same as `loadValue` except it returns also the size
      // of the value.
      function loadValueLen(memPtr, idx) -> value, len {
        let tmp := mload(add(memPtr, mul(32, idx)))
        // assuming 0xffffff is sufficient for storing calldata offset
        let offset := and(tmp, 0xffffff)
        len := shr(128, tmp)

        if gt(len, 31) {
          // special case - truncating the value is intended.
          // this matches the behavior in `derivePath` that truncates to 256 bits.
          offset := add(offset, sub(len, 32))
          value := calldataload(offset)
          leave
        }

        // everything else is
        // < 32 bytes - align the value
        let bits := mul( sub(32, len), 8)
        value := calldataload(offset)
        value := shr(bits, value)
      }

      function loadPair(memPtr, idx) -> offset, len {
        let tmp := mload(add(memPtr, mul(32, idx)))
        // assuming 0xffffff is sufficient for storing calldata offset
        offset := and(tmp, 0xffffff)
        len := shr(128, tmp)
      }

      // decodes RLP at `_ptr`.
      // reverts if the number of DATA items doesn't match `nValues`.
      // returns the RLP data items at pos `v0`, `v1`
      // and the size of `v1out`
      function hashCompareSelect(_ptr, nValues, v0, v1) -> ptr, hash, v0out, v1out, v1outlen {
        ptr := _ptr

        let memStart, nItems
        ptr, memStart, nItems, hash := decodeFlat(ptr)

        if iszero( eq(nItems, nValues) ) {
          revertWith('Node items mismatch')
        }

        v0out, v1outlen := loadValueLen(memStart, v0)
        v1out, v1outlen := loadValueLen(memStart, v1)
      }

      // traverses the tree from the root to the node before the leaf.
      // based on https://github.com/ethereum/go-ethereum/blob/master/trie/proof.go#L114
      function walkTree(key, _ptr) -> ptr, rootHash, expectedHash, path {
        ptr := _ptr

        // the first byte is the number of nodes
        let nodes := byte(0, calldataload(ptr))
        ptr := add(ptr, 1)

        // keeps track of ascend/descend - however you may look at a tree
        let depth

        // treat the leaf node with different logic
        for { let i := 1 } lt(i, nodes) { i := add(i, 1) } {
          let memStart, nItems, hash
          ptr, memStart, nItems, hash := decodeFlat(ptr)

          // first item is considered the root node.
          // Otherwise verifies that the hash of the current node
          // is the same as the previously chosen one.
          switch i
          case 1 {
            rootHash := hash
          } default {
            require(eq(hash, expectedHash), 'Hash mismatch')
          }

          switch nItems
          case 2 {
            // extension node
            // load the second item.
            // this is the hash of the next node.
            let value, len := loadValueLen(memStart, 1)
            expectedHash := value

            // get the byte length of the first item
            // Note: the value itself is not validated
            // and it is instead assumed that any invalid
            // value is invalidated by comparing the root hash.
            let prefixLen := shr(128, mload(memStart))
            depth := add(depth, prefixLen)
          }
          case 17 {
            let bits := sub(252, mul(depth, 4))
            let nibble := and(shr(bits, key), 0xf)

            // load the value at pos `nibble`
            let value, len := loadValueLen(memStart, nibble)

            expectedHash := value
            depth := add(depth, 1)
          }
          default {
            // everything else is unexpected
            revertWith('Invalid node')
          }
        }

        // lastly, derive the path of the chosen one (TM)
        path := derivePath(key, depth)
      }

      // shared variable names
      let storageHash
      let encodedPath
      let path
      let hash
      let vlen
      // starting point
      let ptr := proof.offset

      {
        // account proof
        // Note: this doesn't work if there are no intermediate nodes before the leaf.
        // This is not possible in practice because of the fact that there must be at least
        // 2 accounts in the tree to make a transaction to an existing contract possible.
        // Thus, 2 leaves.
        let prevHash
        let key := keccak_20(account)
        // `stateRoot` is a return value and must be checked by the caller
        ptr, stateRoot, prevHash, path := walkTree(key, ptr)

        let memStart, nItems
        ptr, memStart, nItems, hash := decodeFlat(ptr)

        // the hash of the leaf must match the previous hash from the node
        require(eq(hash, prevHash), 'Account leaf hash mismatch')

        // 2 items
        // - encoded path
        // - account leaf RLP (4 items)
        require(eq(nItems, 2), "Account leaf node mismatch")

        encodedPath := loadValue(memStart, 0)
        // the calculated path must match the encoded path in the leaf
        require(eq(path, encodedPath), 'Account encoded path mismatch')

        // Load the position, length of the second element (RLP encoded)
        let leafPtr, leafLen := loadPair(memStart, 1)
        leafPtr, memStart, nItems, hash := decodeFlat(leafPtr)

        // the account leaf should contain 4 values,
        // we want:
        // - storageHash @ 2
        require(eq(nItems, 4), "Account leaf items mismatch")
        storageHash := loadValue(memStart, 2)
      }

      {
        // storage proof
        let rootHash
        let key := keccak_32(storageKey)
        ptr, rootHash, hash, path := walkTree(key, ptr)

        // leaf should contain 2 values
        // - encoded path @ 0
        // - storageValue @ 1
        ptr, hash, encodedPath, storageValue, vlen := hashCompareSelect(ptr, 2, 0, 1)
        // the calculated path must match the encoded path in the leaf
        require(eq(path, encodedPath), 'Storage encoded path mismatch')

        switch rootHash
        case 0 {
          // in the case that the leaf is the only element, then
          // the hash of the leaf must match the value from the account leaf
          require(eq(hash, storageHash), 'Storage root mismatch')
        }
        default {
          // otherwise the root hash of the storage tree
          // must match the value from the account leaf
          require(eq(rootHash, storageHash), 'Storage root mismatch')
        }

        // storageValue is a return value
        storageValue := decodeItem(storageValue, vlen)
      }

      // the one and only boundary check
      // in case an attacker crafted a malicious payload
      // and succeeds in the prior verification steps
      // then this should catch any bogus accesses
      if iszero( eq(ptr, add(proof.offset, proof.length)) ) {
        revertWith('Proof length mismatch')
      }
    }
  }
}
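As a rough guide to producing the `proof` calldata, here is a hedged Go sketch. The byte layout (a one-byte node count, then the concatenated RLP nodes, for the account proof and then the storage proof) is inferred from the natspec comment and from `walkTree` reading "the first byte is the number of nodes"; the `packProof` helper and the use of the hex-encoded node lists returned by `eth_getProof` are illustrative assumptions, not code from this change:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// packProof assembles the calldata layout documented on verifyPatriciaProof:
// one byte with the number of account-proof nodes, those RLP nodes, then one
// byte with the number of storage-proof nodes and those nodes.
// accountProof / storageProof are the hex-encoded node lists from eth_getProof.
func packProof(accountProof, storageProof []string) ([]byte, error) {
	var buf []byte
	for _, nodes := range [][]string{accountProof, storageProof} {
		if len(nodes) > 255 {
			return nil, fmt.Errorf("too many proof nodes: %d", len(nodes))
		}
		buf = append(buf, byte(len(nodes)))
		for _, node := range nodes {
			raw, err := hex.DecodeString(strings.TrimPrefix(node, "0x"))
			if err != nil {
				return nil, err
			}
			buf = append(buf, raw...)
		}
	}
	return buf, nil
}

func main() {
	proof, _ := packProof([]string{"0xdead"}, []string{"0xbeef"})
	fmt.Printf("0x%x\n", proof)
}
```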
@@ -3,7 +3,32 @@
pragma solidity ^0.8.0;

library ZkTrieVerifier {
  function verifyMerkleProof(bytes memory) internal pure returns (bool) {
    return true;
  function verifyMerkleProof(
    bytes32 _root,
    bytes32 _hash,
    uint256 _nonce,
    bytes32[] memory _proofs
  ) internal pure returns (bool) {
    // _root = 0 means we don't want to verify.
    if (_root == 0) return true;

    for (uint256 i = 0; i < _proofs.length; i++) {
      if (_nonce % 2 == 0) {
        _hash = _efficientHash(_hash, _proofs[i]);
      } else {
        _hash = _efficientHash(_proofs[i], _hash);
      }
      _nonce /= 2;
    }
    return _hash == _root;
  }

  function _efficientHash(bytes32 a, bytes32 b) private pure returns (bytes32 value) {
    // solhint-disable-next-line no-inline-assembly
    assembly {
      mstore(0x00, a)
      mstore(0x20, b)
      value := keccak256(0x00, 0x40)
    }
  }
}

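The same nonce-indexed Merkle check can be run off-chain against the `AppendMessage` events. A minimal Go sketch mirroring the new `verifyMerkleProof` (function name illustrative):

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// verifyMerkleProof mirrors ZkTrieVerifier.verifyMerkleProof: the message nonce
// decides, level by level, whether the running hash is the left or right child.
func verifyMerkleProof(root, hash common.Hash, nonce uint64, proofs []common.Hash) bool {
	// root == 0 disables verification, as in the contract.
	if root == (common.Hash{}) {
		return true
	}
	for _, p := range proofs {
		if nonce%2 == 0 {
			hash = common.BytesToHash(crypto.Keccak256(hash.Bytes(), p.Bytes()))
		} else {
			hash = common.BytesToHash(crypto.Keccak256(p.Bytes(), hash.Bytes()))
		}
		nonce /= 2
	}
	return hash == root
}

func main() {
	leaf := common.BytesToHash([]byte{1})
	sibling := common.BytesToHash([]byte{2})
	root := common.BytesToHash(crypto.Keccak256(leaf.Bytes(), sibling.Bytes()))
	fmt.Println(verifyMerkleProof(root, leaf, 0, []common.Hash{sibling})) // true
}
```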
54  contracts/src/mocks/MockL1ScrollMessenger.sol  Normal file
@@ -0,0 +1,54 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import "../L1/L1ScrollMessenger.sol";
import "../L1/rollup/ZKRollup.sol";

contract MockL1ScrollMessenger is L1ScrollMessenger {
  /// @inheritdoc IL1ScrollMessenger
  /// @dev Mock function to skip verification logic.
  function relayMessageWithProof(
    address _from,
    address _to,
    uint256 _value,
    uint256 _fee,
    uint256 _deadline,
    uint256 _nonce,
    bytes memory _message,
    L2MessageProof memory _proof
  ) external override whenNotPaused onlyWhitelistedSender(msg.sender) {
    require(xDomainMessageSender == ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER, "already in execution");

    // solhint-disable-next-line not-rely-on-time
    // @note disable for now since we cannot generate proof in time.
    // require(_deadline >= block.timestamp, "Message expired");

    bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));

    require(!isMessageExecuted[_msghash], "Message successfully executed");

    (, , , , bytes32 _messageRoot) = ZKRollup(rollup).blocks(_proof.blockHash);
    require(ZkTrieVerifier.verifyMerkleProof(_messageRoot, _msghash, _nonce, _proof.messageRootProof), "invalid proof");

    // @note This usually will never happen, just in case.
    require(_from != xDomainMessageSender, "invalid message sender");

    xDomainMessageSender = _from;
    // solhint-disable-next-line avoid-low-level-calls
    (bool success, ) = _to.call{ value: _value }(_message);
    // reset value to refund gas.
    xDomainMessageSender = ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER;

    if (success) {
      isMessageExecuted[_msghash] = true;
      emit RelayedMessage(_msghash);
    } else {
      emit FailedRelayedMessage(_msghash);
    }

    bytes32 _relayId = keccak256(abi.encodePacked(_msghash, msg.sender, block.number));

    isMessageRelayed[_relayId] = true;
  }
}
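The cross-domain message hash used above is plain `abi.encodePacked` hashing, which a relayer can reproduce off-chain. A Go sketch (helper name illustrative): addresses pack to 20 bytes, uint256 arguments to 32-byte big-endian words, and the message bytes are appended raw.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// messageHash mirrors keccak256(abi.encodePacked(_from, _to, _value, _fee,
// _deadline, _nonce, _message)) in the messenger contracts.
func messageHash(from, to common.Address, value, fee, deadline, nonce *big.Int, message []byte) common.Hash {
	packed := make([]byte, 0, 20+20+4*32+len(message))
	packed = append(packed, from.Bytes()...)
	packed = append(packed, to.Bytes()...)
	for _, v := range []*big.Int{value, fee, deadline, nonce} {
		packed = append(packed, common.BigToHash(v).Bytes()...)
	}
	packed = append(packed, message...)
	return common.BytesToHash(crypto.Keccak256(packed))
}

func main() {
	h := messageHash(common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil)
	fmt.Println(h.Hex())
}
```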
25  contracts/src/mocks/MockPatriciaMerkleTrieVerifier.sol  Normal file
@@ -0,0 +1,25 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import { PatriciaMerkleTrieVerifier } from "../libraries/verifier/PatriciaMerkleTrieVerifier.sol";

contract MockPatriciaMerkleTrieVerifier {
  function verifyPatriciaProof(
    address account,
    bytes32 storageKey,
    bytes calldata proof
  )
    external
    view
    returns (
      bytes32 stateRoot,
      bytes32 storageValue,
      uint256 gasUsed
    )
  {
    uint256 start = gasleft();
    (stateRoot, storageValue) = PatriciaMerkleTrieVerifier.verifyPatriciaProof(account, storageKey, proof);
    gasUsed = start - gasleft();
  }
}
@@ -34,8 +34,6 @@ contract L1GatewayRouterTest is DSTestPlus {
    messenger = new L1ScrollMessenger();
    messenger.initialize(address(rollup));

    rollup.updateMessenger(address(messenger));

    router = new L1GatewayRouter();
    router.initialize(address(0), address(1), address(messenger));

98  contracts/src/test/L2GasPriceOracle.t.sol  Normal file
@@ -0,0 +1,98 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import { DSTestPlus } from "solmate/test/utils/DSTestPlus.sol";
import { WETH } from "solmate/tokens/WETH.sol";

import { L1BlockContainer } from "../L2/predeploys/L1BlockContainer.sol";
import { L2GasPriceOracle } from "../L2/predeploys/L2GasPriceOracle.sol";

contract L2GasPriceOracleTest is DSTestPlus {
  uint256 private constant PRECISION = 1e9;
  uint256 private constant MAX_OVERHEAD = 30000000 / 16;
  uint256 private constant MAX_SCALE = 1000 * PRECISION;

  L2GasPriceOracle private oracle;
  L1BlockContainer private container;

  function setUp() public {
    container = new L1BlockContainer(address(0), address(0));
    oracle = new L2GasPriceOracle(address(this), address(container));
  }

  function testSetOverhead(uint256 _overhead) external {
    _overhead = bound(_overhead, 0, MAX_OVERHEAD);

    // call by non-owner, should revert
    hevm.startPrank(address(1));
    hevm.expectRevert("caller is not the owner");
    oracle.setOverhead(_overhead);
    hevm.stopPrank();

    // overhead is too large
    hevm.expectRevert("exceed maximum overhead");
    oracle.setOverhead(MAX_OVERHEAD + 1);

    // call by owner, should succeed
    assertEq(oracle.overhead(), 0);
    oracle.setOverhead(_overhead);
    assertEq(oracle.overhead(), _overhead);
  }

  function testSetScalar(uint256 _scalar) external {
    _scalar = bound(_scalar, 0, MAX_SCALE);

    // call by non-owner, should revert
    hevm.startPrank(address(1));
    hevm.expectRevert("caller is not the owner");
    oracle.setScalar(_scalar);
    hevm.stopPrank();

    // scale is too large
    hevm.expectRevert("exceed maximum scale");
    oracle.setScalar(MAX_SCALE + 1);

    // call by owner, should succeed
    assertEq(oracle.scalar(), 0);
    oracle.setScalar(_scalar);
    assertEq(oracle.scalar(), _scalar);
  }

  function testGetL1GasUsed(uint256 _overhead, bytes memory _data) external {
    _overhead = bound(_overhead, 0, MAX_OVERHEAD);

    oracle.setOverhead(_overhead);

    uint256 _gasUsed = _overhead + 68 * 16;
    for (uint256 i = 0; i < _data.length; i++) {
      if (_data[i] == 0) _gasUsed += 4;
      else _gasUsed += 16;
    }

    assertEq(oracle.getL1GasUsed(_data), _gasUsed);
  }

  function testGetL1Fee(
    uint256 _baseFee,
    uint256 _overhead,
    uint256 _scalar,
    bytes memory _data
  ) external {
    _overhead = bound(_overhead, 0, MAX_OVERHEAD);
    _scalar = bound(_scalar, 0, MAX_SCALE);
    _baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei

    oracle.setOverhead(_overhead);
    oracle.setScalar(_scalar);
    container.initialize(address(0), bytes32(0), 0, 0, uint128(_baseFee), bytes32(0));

    uint256 _gasUsed = _overhead + 68 * 16;
    for (uint256 i = 0; i < _data.length; i++) {
      if (_data[i] == 0) _gasUsed += 4;
      else _gasUsed += 16;
    }

    assertEq(oracle.getL1Fee(_data), (_gasUsed * _baseFee * _scalar) / PRECISION);
  }
}
62  contracts/src/test/L2MessageQueue.t.sol  Normal file
@@ -0,0 +1,62 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import { DSTestPlus } from "solmate/test/utils/DSTestPlus.sol";

import { L2MessageQueue } from "../L2/predeploys/L2MessageQueue.sol";

contract L2MessageQueueTest is DSTestPlus {
  L2MessageQueue queue;

  function setUp() public {
    queue = new L2MessageQueue(address(this));
  }

  function testConstructor() external {
    assertEq(queue.messenger(), address(this));
    assertEq(queue.nextMessageIndex(), 0);
  }

  function testPassMessageFailed() external {
    // not messenger
    hevm.startPrank(address(0));
    hevm.expectRevert("only messenger");
    queue.appendMessage(bytes32(0));
    hevm.stopPrank();

    // duplicated message
    queue.appendMessage(bytes32(0));
    hevm.expectRevert("duplicated message");
    queue.appendMessage(bytes32(0));
  }

  function testPassMessageOnceSuccess(bytes32 _message) external {
    queue.appendMessage(_message);
    assertEq(queue.nextMessageIndex(), 1);
    assertEq(queue.branches(0), _message);
    assertEq(queue.messageRoot(), _message);
  }

  function testPassMessageSuccess() external {
    queue.appendMessage(bytes32(uint256(1)));
    assertEq(queue.nextMessageIndex(), 1);
    assertEq(queue.branches(0), bytes32(uint256(1)));
    assertEq(queue.messageRoot(), bytes32(uint256(1)));

    queue.appendMessage(bytes32(uint256(2)));
    assertEq(queue.nextMessageIndex(), 2);
    assertEq(queue.branches(1), bytes32(uint256(0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0)));
    assertEq(queue.messageRoot(), bytes32(uint256(0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0)));

    queue.appendMessage(bytes32(uint256(3)));
    assertEq(queue.nextMessageIndex(), 3);
    assertEq(queue.branches(2), bytes32(uint256(0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c)));
    assertEq(queue.messageRoot(), bytes32(uint256(0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c)));

    queue.appendMessage(bytes32(uint256(4)));
    assertEq(queue.nextMessageIndex(), 4);
    assertEq(queue.branches(2), bytes32(uint256(0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36)));
    assertEq(queue.messageRoot(), bytes32(uint256(0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36)));
  }
}
@@ -18,7 +18,6 @@ contract ZKRollupTest is DSTestPlus {
    assertEq(address(this), rollup.owner());
    assertEq(rollup.layer2ChainId(), 233);
    assertEq(rollup.operator(), address(0));
    assertEq(rollup.messenger(), address(0));

    hevm.expectRevert("Initializable: contract is already initialized");
    rollup.initialize(555);
@@ -42,24 +41,6 @@ contract ZKRollupTest is DSTestPlus {
    rollup.updateOperator(_operator);
  }

  function testUpdateMessenger(address _messenger) public {
    if (_messenger == address(0)) return;

    // set by non-owner, should revert
    hevm.startPrank(address(1));
    hevm.expectRevert("Ownable: caller is not the owner");
    rollup.updateMessenger(_messenger);
    hevm.stopPrank();

    // change to random messenger
    rollup.updateMessenger(_messenger);
    assertEq(rollup.messenger(), _messenger);

    // set to same messenger, should revert
    hevm.expectRevert("change to same messenger");
    rollup.updateMessenger(_messenger);
  }

  function testImportGenesisBlock(IZKRollup.Layer2BlockHeader memory _genesis) public {
    if (_genesis.blockHash == bytes32(0)) {
      _genesis.blockHash = bytes32(uint256(1));
@@ -67,12 +48,6 @@ contract ZKRollupTest is DSTestPlus {
    _genesis.parentHash = bytes32(0);
    _genesis.blockHeight = 0;

    // set by non-owner, should revert
    hevm.startPrank(address(1));
    hevm.expectRevert("Ownable: caller is not the owner");
    rollup.importGenesisBlock(_genesis);
    hevm.stopPrank();

    // not genesis block, should revert
    _genesis.blockHeight = 1;
    hevm.expectRevert("Block is not genesis");
@@ -98,7 +73,7 @@ contract ZKRollupTest is DSTestPlus {
    assertEq(rollup.finalizedBatches(0), bytes32(0));
    rollup.importGenesisBlock(_genesis);
    {
      (bytes32 parentHash, , uint64 blockHeight, uint64 batchIndex) = rollup.blocks(_genesis.blockHash);
      (bytes32 parentHash, , uint64 blockHeight, uint64 batchIndex, ) = rollup.blocks(_genesis.blockHash);
      assertEq(_genesis.parentHash, parentHash);
      assertEq(_genesis.blockHeight, blockHeight);
      assertEq(batchIndex, 0);
@@ -239,7 +214,7 @@ contract ZKRollupTest is DSTestPlus {

    // verify block
    {
      (bytes32 parentHash, , uint64 blockHeight, uint64 batchIndex) = rollup.blocks(_header.blockHash);
      (bytes32 parentHash, , uint64 blockHeight, uint64 batchIndex, ) = rollup.blocks(_header.blockHash);
      assertEq(parentHash, _header.parentHash);
      assertEq(blockHeight, _header.blockHeight);
      assertEq(batchIndex, _batch.batchIndex);

@@ -426,10 +426,10 @@ func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success boo
	}()

	// Get block traces.
	blockInfos, err := m.orm.GetBlockInfos(map[string]interface{}{"batch_id": task.ID})
	blockInfos, err := m.orm.GetL2BlockInfos(map[string]interface{}{"batch_id": task.ID})
	if err != nil {
		log.Error(
			"could not GetBlockInfos",
			"could not GetL2BlockInfos",
			"batch_id", task.ID,
			"error", err,
		)

@@ -370,7 +370,7 @@ func testIdleRollerSelection(t *testing.T) {
	dbTx, err := l2db.Beginx()
	assert.NoError(t, err)
	for i := range ids {
		ID, err := l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
		ID, err := l2db.NewBatchInDBTx(dbTx, &orm.L2BlockInfo{Number: uint64(i)}, &orm.L2BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
		assert.NoError(t, err)
		ids[i] = ID
	}
@@ -407,7 +407,7 @@ func testGracefulRestart(t *testing.T) {
	dbTx, err := l2db.Beginx()
	assert.NoError(t, err)
	for i := range ids {
		ids[i], err = l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
		ids[i], err = l2db.NewBatchInDBTx(dbTx, &orm.L2BlockInfo{Number: uint64(i)}, &orm.L2BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
		assert.NoError(t, err)
	}
	assert.NoError(t, dbTx.Commit())

@@ -66,7 +66,7 @@ func testResetDB(t *testing.T) {
	assert.NoError(t, ResetDB(pgDB.DB))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, 5, int(cur))
	assert.Equal(t, 7, int(cur))
}

func testMigrate(t *testing.T) {

@@ -0,0 +1,28 @@
-- +goose Up
-- +goose StatementBegin

alter table block_trace
    add column message_root VARCHAR DEFAULT NULL;

alter table l1_message
    add column message_proof VARCHAR DEFAULT NULL;

alter table l1_message
    add column proof_height BIGINT DEFAULT 0;

create index l1_message_proof_height_index on l1_message (proof_height);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin

alter table block_trace drop column message_root;

drop index l1_message_proof_height_index;

alter table l1_message drop column proof_height;

alter table l1_message drop column message_proof;

-- +goose StatementEnd
27  database/migrate/migrations/00007_l1_block.sql  Normal file
@@ -0,0 +1,27 @@
-- +goose Up
-- +goose StatementBegin

create table l1_block
(
    number          BIGINT  NOT NULL,
    hash            VARCHAR NOT NULL,
    header_rlp      TEXT    NOT NULL,
    block_status    INTEGER DEFAULT 1,
    import_tx_hash  VARCHAR DEFAULT NULL
);

comment
on column l1_block.block_status is 'undefined, pending, importing, imported, failed';

create unique index l1_block_hash_uindex
    on l1_block (hash);

create unique index l1_block_number_uindex
    on l1_block (number);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l1_block;
-- +goose StatementEnd
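For orientation, the new `l1_block` table maps naturally onto an sqlx-scannable Go struct. The struct below is hypothetical (name and tags are illustrative, not the ORM types actually added in this change):

```go
package orm

import "database/sql"

// L1BlockInfo is a hypothetical sqlx mapping for the l1_block table created by
// the migration above.
type L1BlockInfo struct {
	Number       uint64         `db:"number"`
	Hash         string         `db:"hash"`
	HeaderRLP    string         `db:"header_rlp"`
	BlockStatus  int            `db:"block_status"`   // undefined, pending, importing, imported, failed
	ImportTxHash sql.NullString `db:"import_tx_hash"` // NULL until an import transaction is sent
}
```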
@@ -192,7 +192,7 @@ func (o *blockBatchOrm) ResetProvingStatusFor(before ProvingStatus) error {
	return err
}

func (o *blockBatchOrm) NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, endBlock *BlockInfo, parentHash string, totalTxNum uint64, totalL2Gas uint64) (string, error) {
func (o *blockBatchOrm) NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *L2BlockInfo, endBlock *L2BlockInfo, parentHash string, totalTxNum uint64, totalL2Gas uint64) (string, error) {
	row := dbTx.QueryRow("SELECT COALESCE(MAX(index), 0) FROM block_batch;")

	// TODO: use *big.Int for this

@@ -85,8 +85,8 @@ func (o *blockTraceOrm) GetBlockTraces(fields map[string]interface{}, args ...st
return traces, rows.Close()
}

func (o *blockTraceOrm) GetBlockInfos(fields map[string]interface{}, args ...string) ([]*BlockInfo, error) {
query := "SELECT number, hash, parent_hash, batch_id, tx_num, gas_used, block_timestamp FROM block_trace WHERE 1 = 1 "
func (o *blockTraceOrm) GetL2BlockInfos(fields map[string]interface{}, args ...string) ([]*L2BlockInfo, error) {
query := "SELECT number, hash, parent_hash, batch_id, tx_num, gas_used, block_timestamp, message_root FROM block_trace WHERE 1 = 1 "
for key := range fields {
query += fmt.Sprintf("AND %s=:%s ", key, key)
}
@@ -98,9 +98,9 @@ func (o *blockTraceOrm) GetBlockInfos(fields map[string]interface{}, args ...str
return nil, err
}

var blocks []*BlockInfo
var blocks []*L2BlockInfo
for rows.Next() {
block := &BlockInfo{}
block := &L2BlockInfo{}
if err = rows.StructScan(block); err != nil {
break
}
@@ -113,8 +113,8 @@ func (o *blockTraceOrm) GetBlockInfos(fields map[string]interface{}, args ...str
return blocks, rows.Close()
}

func (o *blockTraceOrm) GetUnbatchedBlocks(fields map[string]interface{}, args ...string) ([]*BlockInfo, error) {
query := "SELECT number, hash, parent_hash, batch_id, tx_num, gas_used, block_timestamp FROM block_trace WHERE batch_id is NULL "
func (o *blockTraceOrm) GetUnbatchedBlocks(fields map[string]interface{}, args ...string) ([]*L2BlockInfo, error) {
query := "SELECT number, hash, parent_hash, batch_id, tx_num, gas_used, block_timestamp, message_root FROM block_trace WHERE batch_id is NULL AND message_root is not NULL "
for key := range fields {
query += fmt.Sprintf("AND %s=:%s ", key, key)
}
@@ -126,9 +126,9 @@ func (o *blockTraceOrm) GetUnbatchedBlocks(fields map[string]interface{}, args .
return nil, err
}

var blocks []*BlockInfo
var blocks []*L2BlockInfo
for rows.Next() {
block := &BlockInfo{}
block := &L2BlockInfo{}
if err = rows.StructScan(block); err != nil {
break
}
@@ -209,3 +209,20 @@ func (o *blockTraceOrm) SetBatchIDForBlocksInDBTx(dbTx *sqlx.Tx, numbers []uint6

return nil
}

// http://jmoiron.github.io/sqlx/#inQueries
// https://stackoverflow.com/questions/56568799/how-to-update-multiple-rows-using-sqlx
func (o *blockTraceOrm) SetMessageRootForBlocksInDBTx(dbTx *sqlx.Tx, numbers []uint64, messageRoot string) error {
query := "UPDATE block_trace SET message_root=? WHERE number IN (?)"

qry, args, err := sqlx.In(query, messageRoot, numbers)
if err != nil {
return err
}

if _, err := dbTx.Exec(dbTx.Rebind(qry), args...); err != nil {
return err
}

return nil
}
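`SetMessageRootForBlocksInDBTx` is meant to be called inside an already-open transaction. A minimal caller sketch, with a stripped-down interface so it stays self-contained; every name outside the diff is an assumption:

```go
package example

import "github.com/jmoiron/sqlx"

// messageRootSetter mirrors just the method used below; in the repo it is part
// of the BlockTraceOrm interface extended by this diff.
type messageRootSetter interface {
	SetMessageRootForBlocksInDBTx(dbTx *sqlx.Tx, numbers []uint64, messageRoot string) error
}

// stampMessageRoot opens a transaction, tags the given block numbers with a
// message root, and commits, rolling back on failure.
func stampMessageRoot(db *sqlx.DB, o messageRootSetter, numbers []uint64, messageRoot string) error {
	dbTx, err := db.Beginx()
	if err != nil {
		return err
	}
	if err := o.SetMessageRootForBlocksInDBTx(dbTx, numbers, messageRoot); err != nil {
		_ = dbTx.Rollback()
		return err
	}
	return dbTx.Commit()
}
```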
@@ -10,6 +10,36 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
)

// L1BlockStatus represents current l1 block processing status
type L1BlockStatus int

const (
// L1BlockUndefined : undefined l1 block status
L1BlockUndefined L1BlockStatus = iota

// L1BlockPending represents the l1 block status is pending
L1BlockPending

// L1BlockImporting represents the l1 block status is importing
L1BlockImporting

// L1BlockImported represents the l1 block status is imported
L1BlockImported

// L1BlockFailed represents the l1 block status is failed
L1BlockFailed
)

// L1BlockInfo is structure of stored l1 block
type L1BlockInfo struct {
Number uint64 `json:"number" db:"number"`
Hash string `json:"hash" db:"hash"`
HeaderRLP string `json:"header_rlp" db:"header_rlp"`
BlockStatus uint64 `json:"block_status" db:"block_status"`

ImportTxHash sql.NullString `json:"import_tx_hash" db:"import_tx_hash"`
}
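`header_rlp` is stored as TEXT. Assuming it holds the 0x-prefixed hex of the RLP-encoded header (the encoding is not spelled out in this diff), recovering the header could look like this:

```go
package example

import (
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// decodeStoredHeader rebuilds a header from an l1_block row. The 0x-hex
// encoding of header_rlp is an assumption; only the column itself comes from
// the migration above.
func decodeStoredHeader(headerRLP string) (*types.Header, error) {
	raw, err := hexutil.Decode(headerRLP)
	if err != nil {
		return nil, err
	}
	header := new(types.Header)
	if err := rlp.DecodeBytes(raw, header); err != nil {
		return nil, err
	}
	return header, nil
}
```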
// MsgStatus represents current layer1 transaction processing status
type MsgStatus int

@@ -35,18 +65,20 @@ const (

// L1Message is structure of stored layer1 bridge message
type L1Message struct {
Nonce uint64 `json:"nonce" db:"nonce"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Value string `json:"value" db:"value"`
Fee string `json:"fee" db:"fee"`
GasLimit uint64 `json:"gas_limit" db:"gas_limit"`
Deadline uint64 `json:"deadline" db:"deadline"`
Target string `json:"target" db:"target"`
Calldata string `json:"calldata" db:"calldata"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Status MsgStatus `json:"status" db:"status"`
Nonce uint64 `json:"nonce" db:"nonce"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Value string `json:"value" db:"value"`
Fee string `json:"fee" db:"fee"`
GasLimit uint64 `json:"gas_limit" db:"gas_limit"`
Deadline uint64 `json:"deadline" db:"deadline"`
Target string `json:"target" db:"target"`
Calldata string `json:"calldata" db:"calldata"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
ProofHeight uint64 `json:"proof_height" db:"proof_height"`
MessageProof string `json:"message_proof" db:"message_proof"`
Status MsgStatus `json:"status" db:"status"`
}
// L2Message is structure of stored layer2 bridge message
@@ -63,10 +95,11 @@ type L2Message struct {
Calldata string `json:"calldata" db:"calldata"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Status MsgStatus `json:"status" db:"status"`
Proof string `json:"proof" db:"proof"`
}

// BlockInfo is structure of stored `block_trace` without `trace`
type BlockInfo struct {
// L2BlockInfo is structure of stored `block_trace` without `trace`
type L2BlockInfo struct {
Number uint64 `json:"number" db:"number"`
Hash string `json:"hash" db:"hash"`
ParentHash string `json:"parent_hash" db:"parent_hash"`
@@ -74,6 +107,7 @@ type BlockInfo struct {
TxNum uint64 `json:"tx_num" db:"tx_num"`
GasUsed uint64 `json:"gas_used" db:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" db:"block_timestamp"`
MessageRoot sql.NullString `json:"message_root" db:"message_root"`
}
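`MessageRoot` is nullable because blocks are inserted before a message root is assigned (the reworked `GetUnbatchedBlocks` query above filters on it). A small illustrative helper for reading it safely:

```go
package example

import "database/sql"

// messageRootOrEmpty is an illustrative helper, not part of the diff: read the
// nullable message_root column without touching an unset value.
func messageRootOrEmpty(root sql.NullString) string {
	if !root.Valid {
		return "" // no message root assigned to this block yet
	}
	return root.String
}
```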
// RollerProveStatus is the roller prove status of a block batch (session)
@@ -115,18 +149,31 @@ type SessionInfo struct {
StartTimestamp int64 `json:"start_timestamp"`
}

// L1BlockOrm l1_block operation interface
type L1BlockOrm interface {
GetL1BlockInfos(fields map[string]interface{}, args ...string) ([]*L1BlockInfo, error)
InsertL1Blocks(ctx context.Context, blocks []*L1BlockInfo) error
DeleteHeaderRLPByBlockHash(ctx context.Context, blockHash string) error
UpdateImportTxHash(ctx context.Context, blockHash, txHash string) error
UpdateL1BlockStatus(ctx context.Context, blockHash string, status L1BlockStatus) error
UpdateL1BlockStatusAndImportTxHash(ctx context.Context, blockHash string, status L1BlockStatus, txHash string) error
GetLatestL1BlockHeight() (uint64, error)
GetLatestImportedL1Block() (*L1BlockInfo, error)
}
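One way a header watcher might feed this interface is to RLP-encode a fetched L1 header and insert it as a pending row. The watcher wiring and the `scroll-tech/database/orm` import path are assumptions; only the interface methods and struct fields come from this diff:

```go
package example

import (
	"context"

	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/rlp"

	"scroll-tech/database/orm" // import path assumed
)

// recordL1Header turns a fetched L1 header into a pending l1_block row.
func recordL1Header(ctx context.Context, store orm.L1BlockOrm, header *types.Header) error {
	raw, err := rlp.EncodeToBytes(header)
	if err != nil {
		return err
	}
	block := &orm.L1BlockInfo{
		Number:    header.Number.Uint64(),
		Hash:      header.Hash().String(),
		HeaderRLP: hexutil.Encode(raw),
		// BlockStatus is left at the table default (pending).
	}
	return store.InsertL1Blocks(ctx, []*orm.L1BlockInfo{block})
}
```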
// BlockTraceOrm block_trace operation interface
type BlockTraceOrm interface {
Exist(number uint64) (bool, error)
GetBlockTracesLatestHeight() (int64, error)
GetBlockTraces(fields map[string]interface{}, args ...string) ([]*types.BlockTrace, error)
GetBlockInfos(fields map[string]interface{}, args ...string) ([]*BlockInfo, error)
// GetUnbatchedBlocks add `GetUnbatchedBlocks` because `GetBlockInfos` cannot support query "batch_id is NULL"
GetUnbatchedBlocks(fields map[string]interface{}, args ...string) ([]*BlockInfo, error)
GetL2BlockInfos(fields map[string]interface{}, args ...string) ([]*L2BlockInfo, error)
// add `GetUnbatchedBlocks` because `GetL2BlockInfos` cannot support query "batch_id is NULL"
GetUnbatchedBlocks(fields map[string]interface{}, args ...string) ([]*L2BlockInfo, error)
GetHashByNumber(number uint64) (*common.Hash, error)
DeleteTracesByBatchID(batchID string) error
InsertBlockTraces(blockTraces []*types.BlockTrace) error
SetBatchIDForBlocksInDBTx(dbTx *sqlx.Tx, numbers []uint64, batchID string) error
SetMessageRootForBlocksInDBTx(dbTx *sqlx.Tx, numbers []uint64, messageRoot string) error
}
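Taken together with the `NewBatchInDBTx` signature change in the BlockBatchOrm hunk below, a batch-proposer-style caller might compose these methods roughly as follows. The ordering argument, error handling, and import path are assumptions, not code from the diff:

```go
package example

import (
	"github.com/jmoiron/sqlx"

	"scroll-tech/database/orm" // import path assumed
)

// proposeBatch groups unbatched blocks (which now must carry a message root)
// into a new batch inside one transaction.
func proposeBatch(db *sqlx.DB, traces orm.BlockTraceOrm, batches orm.BlockBatchOrm, parentHash string) error {
	blocks, err := traces.GetUnbatchedBlocks(map[string]interface{}{}, "ORDER BY number ASC")
	if err != nil || len(blocks) == 0 {
		return err
	}

	var txNum, gasUsed uint64
	numbers := make([]uint64, 0, len(blocks))
	for _, b := range blocks {
		txNum += b.TxNum
		gasUsed += b.GasUsed
		numbers = append(numbers, b.Number)
	}

	dbTx, err := db.Beginx()
	if err != nil {
		return err
	}
	batchID, err := batches.NewBatchInDBTx(dbTx, blocks[0], blocks[len(blocks)-1], parentHash, txNum, gasUsed)
	if err != nil {
		_ = dbTx.Rollback()
		return err
	}
	if err := traces.SetBatchIDForBlocksInDBTx(dbTx, numbers, batchID); err != nil {
		_ = dbTx.Rollback()
		return err
	}
	return dbTx.Commit()
}
```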
// SessionInfoOrm sessions info operation interface
@@ -143,7 +190,7 @@ type BlockBatchOrm interface {
UpdateProofByID(ctx context.Context, id string, proof, instanceCommitments []byte, proofTimeSec uint64) error
UpdateProvingStatus(id string, status ProvingStatus) error
ResetProvingStatusFor(before ProvingStatus) error
NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, endBlock *BlockInfo, parentHash string, totalTxNum uint64, gasUsed uint64) (string, error)
NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *L2BlockInfo, endBlock *L2BlockInfo, parentHash string, totalTxNum uint64, gasUsed uint64) (string, error)
BatchRecordExist(id string) (bool, error)
GetPendingBatches(limit uint64) ([]string, error)
GetCommittedBatches(limit uint64) ([]string, error)
@@ -164,9 +211,11 @@ type BlockBatchOrm interface {
type L1MessageOrm interface {
GetL1MessageByNonce(nonce uint64) (*L1Message, error)
GetL1MessageByMsgHash(msgHash string) (*L1Message, error)
GetL1MessagesByStatusUpToProofHeight(status MsgStatus, height uint64, limit uint64) ([]*L1Message, error)
GetL1MessagesByStatus(status MsgStatus, limit uint64) ([]*L1Message, error)
GetL1ProcessedNonce() (int64, error)
SaveL1Messages(ctx context.Context, messages []*L1Message) error
SaveL1MessagesInDbTx(ctx context.Context, dbTx *sqlx.Tx, messages []*L1Message) error
UpdateLayer2Hash(ctx context.Context, msgHash string, layer2Hash string) error
UpdateLayer1Status(ctx context.Context, msgHash string, status MsgStatus) error
UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status MsgStatus, layer2Hash string) error
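The new `GetL1MessagesByStatusUpToProofHeight` presumably lets a relayer pick only messages whose stored proof height does not exceed some already-covered height. A sketch of such a loop; `orm.MsgPending`, the sender callback, and the import path are all assumptions:

```go
package example

import (
	"scroll-tech/database/orm" // import path assumed
)

// relayReadyL1Messages fetches pending L1 messages whose proof height is
// already covered (an interpretation of the method name) and hands them to a
// sender callback. orm.MsgPending is assumed to be one of the MsgStatus
// constants not shown in this hunk.
func relayReadyL1Messages(msgOrm orm.L1MessageOrm, coveredHeight uint64, send func(*orm.L1Message) error) error {
	msgs, err := msgOrm.GetL1MessagesByStatusUpToProofHeight(orm.MsgPending, coveredHeight, 100)
	if err != nil {
		return err
	}
	for _, msg := range msgs {
		if err := send(msg); err != nil {
			return err
		}
	}
	return nil
}
```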
@@ -180,10 +229,12 @@ type L2MessageOrm interface {
GetL2MessageByNonce(nonce uint64) (*L2Message, error)
GetL2MessageByMsgHash(msgHash string) (*L2Message, error)
MessageProofExist(nonce uint64) (bool, error)
GetMessageProofByNonce(nonce uint64) (string, error)
GetMessageProofByNonce(nonce uint64) ([]byte, error)
GetL2Messages(fields map[string]interface{}, args ...string) ([]*L2Message, error)
GetL2ProcessedNonce() (int64, error)
GetLayer2LatestMessageNonce() (int64, error)
SaveL2Messages(ctx context.Context, messages []*L2Message) error
SaveL2MessagesInDbTx(ctx context.Context, dbTx *sqlx.Tx, messages []*L2Message) error
UpdateLayer1Hash(ctx context.Context, msgHash string, layer1Hash string) error
UpdateLayer2Status(ctx context.Context, msgHash string, status MsgStatus) error
UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status MsgStatus, layer1Hash string) error
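`GetMessageProofByNonce` now returns raw bytes instead of a string. If the blob is a concatenation of 32-byte Merkle nodes (an assumption; the diff does not define the encoding), callers might split it like this:

```go
package example

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

// proofToHashes splits a raw proof blob into 32-byte nodes. The concatenated
// layout is an assumption; only the []byte return type comes from this diff.
func proofToHashes(proof []byte) ([]common.Hash, error) {
	if len(proof)%common.HashLength != 0 {
		return nil, fmt.Errorf("proof length %d is not a multiple of %d", len(proof), common.HashLength)
	}
	hashes := make([]common.Hash, 0, len(proof)/common.HashLength)
	for i := 0; i < len(proof); i += common.HashLength {
		hashes = append(hashes, common.BytesToHash(proof[i:i+common.HashLength]))
	}
	return hashes, nil
}
```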
Some files were not shown because too many files have changed in this diff.