Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 07:28:08 -05:00)

Compare commits: alpha-v1.2...alpha-v1.7 (5 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 6f3eddf773 | |
| | 6fcd6b1b6c | |
| | 9e2f2c3e9c | |
| | 6816a7e911 | |
| | fb7002bd6d | |
.github/workflows/bridge.yml (vendored, 2 changes)

@@ -5,6 +5,7 @@ on:
     branches:
       - main
       - staging
+      - alpha
     paths:
       - 'bridge/**'
       - '.github/workflows/bridge.yml'
@@ -12,6 +13,7 @@ on:
     branches:
       - main
       - staging
+      - alpha
     paths:
       - 'bridge/**'
       - '.github/workflows/bridge.yml'
.github/workflows/common.yml (vendored, 2 changes)

@@ -5,6 +5,7 @@ on:
     branches:
       - main
       - staging
+      - alpha
     paths:
       - 'common/**'
       - '.github/workflows/common.yml'
@@ -12,6 +13,7 @@ on:
     branches:
       - main
       - staging
+      - alpha
     paths:
       - 'common/**'
       - '.github/workflows/common.yml'
.github/workflows/contracts.yaml (vendored, 2 changes)

@@ -8,6 +8,7 @@ on:
       - prod
       - release/*
       - staging
+      - alpha
     paths:
       - 'contracts/**'
       - '.github/workflows/contracts.yaml'
@@ -18,6 +19,7 @@ on:
       - prod
       - release/*
       - staging
+      - alpha
     paths:
       - 'contracts/**'
       - '.github/workflows/contracts.yaml'
.github/workflows/coordinator.yml (vendored, 2 changes)

@@ -5,6 +5,7 @@ on:
     branches:
       - main
       - staging
+      - alpha
     paths:
       - 'coordinator/**'
       - '.github/workflows/coordinator.yml'
@@ -12,6 +13,7 @@ on:
     branches:
       - main
       - staging
+      - alpha
     paths:
       - 'coordinator/**'
       - '.github/workflows/coordinator.yml'
.github/workflows/database.yml (vendored, 2 changes)

@@ -5,6 +5,7 @@ on:
     branches:
       - main
       - staging
+      - alpha
     paths:
       - 'database/**'
       - '.github/workflows/database.yml'
@@ -12,6 +13,7 @@ on:
     branches:
       - main
       - staging
+      - alpha
     paths:
       - 'database/**'
       - '.github/workflows/database.yml'
.github/workflows/roller.yml (vendored, 2 changes)

@@ -5,6 +5,7 @@ on:
     branches:
       - main
       - staging
+      - alpha
     paths:
       - 'roller/**'
       - '.github/workflows/roller.yml'
@@ -12,6 +13,7 @@ on:
     branches:
       - main
       - staging
+      - alpha
     paths:
       - 'roller/**'
       - '.github/workflows/roller.yml'
File diff suppressed because one or more lines are too long
@@ -10,69 +10,74 @@ import (
 	bridge_abi "scroll-tech/bridge/abi"
 )
 
-func TestPackRelayMessageWithProof(t *testing.T) {
+func TestEventSignature(t *testing.T) {
 	assert := assert.New(t)
 
-	l1MessengerABI, err := bridge_abi.L1MessengerMetaData.GetAbi()
+	assert.Equal(bridge_abi.L1SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
+	assert.Equal(bridge_abi.L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
+	assert.Equal(bridge_abi.L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
+
+	assert.Equal(bridge_abi.L1CommitBatchEventSignature, common.HexToHash("2cdc615c74452778c0fb6184735e014c13aad2b62774fe0b09bd1dcc2cc14a62"))
+	assert.Equal(bridge_abi.L1FinalizeBatchEventSignature, common.HexToHash("6be443154c959a7a1645b4392b6fa97d8e8ab6e8fd853d7085e8867083737d79"))
+
+	assert.Equal(bridge_abi.L1QueueTransactionEventSignature, common.HexToHash("bdcc7517f8fe3db6506dfd910942d0bbecaf3d6a506dadea65b0d988e75b9439"))
+
+	assert.Equal(bridge_abi.L2SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
+	assert.Equal(bridge_abi.L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
+	assert.Equal(bridge_abi.L2FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
+
+	assert.Equal(bridge_abi.L2ImportBlockEventSignature, common.HexToHash("a7823f45e1ee21f9530b77959b57507ad515a14fa9fa24d262ee80e79b2b5745"))
+
+	assert.Equal(bridge_abi.L2AppendMessageEventSignature, common.HexToHash("faa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693"))
+}
+
+func TestPackRelayL2MessageWithProof(t *testing.T) {
+	assert := assert.New(t)
+	l1MessengerABI, err := bridge_abi.L1ScrollMessengerMetaData.GetAbi()
 	assert.NoError(err)
 
 	proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
 		BlockHeight: big.NewInt(0),
 		BatchIndex:  big.NewInt(0),
 		BatchHash:   common.Hash{},
 		MerkleProof: make([]byte, 0),
 	}
-	_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
+	_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
 	assert.NoError(err)
 }
 
 func TestPackCommitBatch(t *testing.T) {
 	assert := assert.New(t)
 
-	l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
+	scrollChainABI, err := bridge_abi.ScrollChainMetaData.GetAbi()
 	assert.NoError(err)
 
-	txns := make([]bridge_abi.IZKRollupLayer2Transaction, 5)
-	for i := 0; i < 5; i++ {
-		txns[i] = bridge_abi.IZKRollupLayer2Transaction{
-			Caller:   common.Address{},
-			Target:   common.Address{},
-			Nonce:    0,
-			Gas:      0,
-			GasPrice: big.NewInt(0),
-			Value:    big.NewInt(0),
-			Data:     make([]byte, 0),
-			R:        big.NewInt(0),
-			S:        big.NewInt(0),
-			V:        0,
-		}
-	}
-
-	header := bridge_abi.IZKRollupLayer2BlockHeader{
-		BlockHash:   common.Hash{},
-		ParentHash:  common.Hash{},
-		BaseFee:     big.NewInt(0),
-		StateRoot:   common.Hash{},
-		BlockHeight: 0,
-		GasUsed:     0,
-		Timestamp:   0,
-		ExtraData:   make([]byte, 0),
-		Txs:         txns,
+	header := bridge_abi.IScrollChainBlockContext{
+		BlockHash:       common.Hash{},
+		ParentHash:      common.Hash{},
+		BlockNumber:     0,
+		Timestamp:       0,
+		BaseFee:         big.NewInt(0),
+		GasLimit:        0,
+		NumTransactions: 0,
+		NumL1Messages:   0,
 	}
 
-	batch := bridge_abi.IZKRollupLayer2Batch{
-		BatchIndex: 0,
-		ParentHash: common.Hash{},
-		Blocks:     []bridge_abi.IZKRollupLayer2BlockHeader{header},
+	batch := bridge_abi.IScrollChainBatch{
+		Blocks:           []bridge_abi.IScrollChainBlockContext{header},
+		PrevStateRoot:    common.Hash{},
+		NewStateRoot:     common.Hash{},
+		WithdrawTrieRoot: common.Hash{},
+		BatchIndex:       0,
+		L2Transactions:   make([]byte, 0),
 	}
 
-	_, err = l1RollupABI.Pack("commitBatch", batch)
+	_, err = scrollChainABI.Pack("commitBatch", batch)
 	assert.NoError(err)
 }
 
 func TestPackFinalizeBatchWithProof(t *testing.T) {
 	assert := assert.New(t)
 
-	l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
+	l1RollupABI, err := bridge_abi.ScrollChainMetaData.GetAbi()
 	assert.NoError(err)
 
 	proof := make([]*big.Int, 10)
@@ -86,12 +91,43 @@ func TestPackFinalizeBatchWithProof(t *testing.T) {
 	assert.NoError(err)
 }
 
-func TestPackRelayMessage(t *testing.T) {
+func TestPackRelayL1Message(t *testing.T) {
 	assert := assert.New(t)
 
-	l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
+	l2MessengerABI, err := bridge_abi.L2ScrollMessengerMetaData.GetAbi()
 	assert.NoError(err)
 
-	_, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), make([]byte, 0))
+	_, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0))
 	assert.NoError(err)
 }
 
+func TestPackSetL1BaseFee(t *testing.T) {
+	assert := assert.New(t)
+
+	l1GasOracleABI, err := bridge_abi.L1GasPriceOracleMetaData.GetAbi()
+	assert.NoError(err)
+
+	baseFee := big.NewInt(2333)
+	_, err = l1GasOracleABI.Pack("setL1BaseFee", baseFee)
+	assert.NoError(err)
+}
+
+func TestPackSetL2BaseFee(t *testing.T) {
+	assert := assert.New(t)
+
+	l2GasOracleABI, err := bridge_abi.L2GasPriceOracleMetaData.GetAbi()
+	assert.NoError(err)
+
+	baseFee := big.NewInt(2333)
+	_, err = l2GasOracleABI.Pack("setL2BaseFee", baseFee)
+	assert.NoError(err)
+}
+
+func TestPackImportBlock(t *testing.T) {
+	assert := assert.New(t)
+
+	l1BlockContainerABI := bridge_abi.L1BlockContainerABI
+
+	_, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, make([]byte, 0), false)
+	assert.NoError(err)
+}
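For reference, the hard-coded hashes asserted in TestEventSignature are Keccak-256 digests of each event's canonical Solidity signature. A minimal, self-contained sketch of the derivation; the signature string below is illustrative, not the exact Scroll event declaration:

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// topic[0] of a log is keccak256 of the event's canonical signature.
	sig := "RelayedMessage(bytes32)" // hypothetical declaration for illustration
	fmt.Println(crypto.Keccak256Hash([]byte(sig)).Hex())
}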
@@ -3,13 +3,15 @@
   "confirmations": "0x6",
   "endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
   "l1_messenger_address": "0x0000000000000000000000000000000000000000",
-  "rollup_contract_address": "0x0000000000000000000000000000000000000000",
+  "l1_message_queue_address": "0x0000000000000000000000000000000000000000",
+  "scroll_chain_address": "0x0000000000000000000000000000000000000000",
   "start_height": 0,
   "relayer_config": {
    "messenger_contract_address": "0x0000000000000000000000000000000000000000",
+   "gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
    "sender_config": {
     "endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
-    "check_pending_time": 3,
+    "check_pending_time": 2,
     "escalate_blocks": 100,
     "confirmations": "0x1",
     "escalate_multiple_num": 11,
@@ -20,6 +22,9 @@
    },
    "message_sender_private_keys": [
     "1212121212121212121212121212121212121212121212121212121212121212"
+   ],
+   "gas_oracle_sender_private_keys": [
+    "1212121212121212121212121212121212121212121212121212121212121212"
    ]
   }
  },
@@ -27,9 +32,11 @@
   "confirmations": "0x1",
   "endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
   "l2_messenger_address": "0x0000000000000000000000000000000000000000",
+  "l2_message_queue_address": "0x0000000000000000000000000000000000000000",
   "relayer_config": {
    "rollup_contract_address": "0x0000000000000000000000000000000000000000",
    "messenger_contract_address": "0x0000000000000000000000000000000000000000",
+   "gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
    "sender_config": {
     "endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
     "check_pending_time": 10,
@@ -44,6 +51,9 @@
    "message_sender_private_keys": [
     "1212121212121212121212121212121212121212121212121212121212121212"
    ],
+   "gas_oracle_sender_private_keys": [
+    "1212121212121212121212121212121212121212121212121212121212121212"
+   ],
    "rollup_sender_private_keys": [
     "1212121212121212121212121212121212121212121212121212121212121212"
    ]
@@ -51,17 +61,18 @@
   "batch_proposer_config": {
    "proof_generation_freq": 1,
    "batch_gas_threshold": 3000000,
-   "batch_tx_num_threshold": 135,
+   "batch_tx_num_threshold": 44,
    "batch_time_sec": 300,
    "batch_blocks_limit": 100,
-   "skipped_opcodes": [
-    "CREATE2",
-    "DELEGATECALL"
-   ]
+   "commit_tx_calldata_size_limit": 200000,
+   "public_input_config": {
+    "max_tx_num": 44,
+    "padding_tx_hash": "0x0000000000000000000000000000000000000000000000000000000000000000"
+   }
   }
  },
 "db_config": {
  "driver_name": "postgres",
  "dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
 }
}
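A small usage sketch for the reshaped file, assuming the package's NewConfig helper (the same one exercised by the test below) and an illustrative path: load the sample config and read one of the newly added key lists.

package main

import (
	"fmt"
	"log"

	"scroll-tech/bridge/config"
)

func main() {
	cfg, err := config.NewConfig("config.json") // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	// With the sample file above, each key list holds exactly one entry.
	fmt.Println(len(cfg.L1Config.RelayerConfig.GasOracleSenderPrivateKeys))
}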
@@ -16,11 +16,9 @@ func TestConfig(t *testing.T) {
 	cfg, err := config.NewConfig("../config.json")
 	assert.True(t, assert.NoError(t, err), "failed to load config")
 
-	assert.True(t, len(cfg.L2Config.BatchProposerConfig.SkippedOpcodes) > 0)
-
-	assert.True(t, len(cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys) > 0)
-	assert.True(t, len(cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys) > 0)
-	assert.True(t, len(cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys) > 0)
+	assert.Equal(t, 1, len(cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys))
+	assert.Equal(t, 1, len(cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys))
+	assert.Equal(t, 1, len(cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys))
 
 	data, err := json.Marshal(cfg)
 	assert.NoError(t, err)
@@ -13,10 +13,12 @@ type L1Config struct {
 	Endpoint string `json:"endpoint"`
 	// The start height to sync event from layer 1
 	StartHeight uint64 `json:"start_height"`
-	// The messenger contract address deployed on layer 1 chain.
+	// The L1ScrollMessenger contract address deployed on layer 1 chain.
 	L1MessengerAddress common.Address `json:"l1_messenger_address"`
-	// The rollup contract address deployed on layer 1 chain.
-	RollupContractAddress common.Address `json:"rollup_contract_address"`
+	// The L1MessageQueue contract address deployed on layer 1 chain.
+	L1MessageQueueAddress common.Address `json:"l1_message_queue_address"`
+	// The ScrollChain contract address deployed on layer 1 chain.
+	ScrollChainContractAddress common.Address `json:"scroll_chain_address"`
 	// The relayer config
 	RelayerConfig *RelayerConfig `json:"relayer_config"`
 }
@@ -1,11 +1,11 @@
 package config
 
 import (
 	"encoding/json"
 
 	"github.com/scroll-tech/go-ethereum/rpc"
 
 	"github.com/scroll-tech/go-ethereum/common"
 
+	"scroll-tech/common/types"
 )
 
 // L2Config loads l2geth configuration items.
@@ -15,7 +15,9 @@ type L2Config struct {
 	// l2geth node url.
 	Endpoint string `json:"endpoint"`
 	// The messenger contract address deployed on layer 2 chain.
-	L2MessengerAddress common.Address `json:"l2_messenger_address,omitempty"`
+	L2MessengerAddress common.Address `json:"l2_messenger_address"`
+	// The L2MessageQueue contract address deployed on layer 2 chain.
+	L2MessageQueueAddress common.Address `json:"l2_message_queue_address"`
 	// The relayer config
 	RelayerConfig *RelayerConfig `json:"relayer_config"`
 	// The batch_proposer config
@@ -34,42 +36,8 @@ type BatchProposerConfig struct {
 	BatchTimeSec uint64 `json:"batch_time_sec"`
 	// Max number of blocks in a batch
 	BatchBlocksLimit uint64 `json:"batch_blocks_limit"`
-	// Skip generating proof when that opcodes appeared
-	SkippedOpcodes map[string]struct{} `json:"-"`
-}
-
-// batchProposerConfigAlias RelayerConfig alias name
-type batchProposerConfigAlias BatchProposerConfig
-
-// UnmarshalJSON unmarshal BatchProposerConfig config struct.
-func (b *BatchProposerConfig) UnmarshalJSON(input []byte) error {
-	var jsonConfig struct {
-		batchProposerConfigAlias
-		SkippedOpcodes []string `json:"skipped_opcodes,omitempty"`
-	}
-	if err := json.Unmarshal(input, &jsonConfig); err != nil {
-		return err
-	}
-
-	*b = BatchProposerConfig(jsonConfig.batchProposerConfigAlias)
-	b.SkippedOpcodes = make(map[string]struct{}, len(jsonConfig.SkippedOpcodes))
-	for _, opcode := range jsonConfig.SkippedOpcodes {
-		b.SkippedOpcodes[opcode] = struct{}{}
-	}
-	return nil
-}
-
-// MarshalJSON marshal BatchProposerConfig in order to transfer skipOpcodes.
-func (b *BatchProposerConfig) MarshalJSON() ([]byte, error) {
-	jsonConfig := struct {
-		batchProposerConfigAlias
-		SkippedOpcodes []string `json:"skipped_opcodes,omitempty"`
-	}{batchProposerConfigAlias(*b), nil}
-
-	// Load skipOpcodes.
-	for op := range b.SkippedOpcodes {
-		jsonConfig.SkippedOpcodes = append(jsonConfig.SkippedOpcodes, op)
-	}
-
-	return json.Marshal(&jsonConfig)
+	// Commit tx calldata size limit in bytes, target to cap the gas use of commit tx at 2M gas
+	CommitTxCalldataSizeLimit uint64 `json:"commit_tx_calldata_size_limit"`
+	// The public input hash config
+	PublicInputConfig *types.PublicInputHashConfig `json:"public_input_config"`
 }
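With SkippedOpcodes and its alias-based (un)marshalers gone, BatchProposerConfig round-trips through encoding/json on struct tags alone. A self-contained sketch of that idea, trimmed to two of the fields above:

package main

import (
	"encoding/json"
	"fmt"
)

type batchProposerConfig struct {
	BatchTimeSec              uint64 `json:"batch_time_sec"`
	CommitTxCalldataSizeLimit uint64 `json:"commit_tx_calldata_size_limit"`
}

func main() {
	in := []byte(`{"batch_time_sec":300,"commit_tx_calldata_size_limit":200000}`)
	var cfg batchProposerConfig
	if err := json.Unmarshal(in, &cfg); err != nil {
		panic(err)
	}
	// No custom MarshalJSON needed any more; tags carry the mapping.
	out, _ := json.Marshal(cfg)
	fmt.Println(string(out))
}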
@@ -41,11 +41,14 @@ type RelayerConfig struct {
 	RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
 	// MessengerContractAddress store the scroll messenger contract address.
 	MessengerContractAddress common.Address `json:"messenger_contract_address"`
+	// GasPriceOracleContractAddress store the scroll messenger contract address.
+	GasPriceOracleContractAddress common.Address `json:"gas_price_oracle_contract_address"`
 	// sender config
 	SenderConfig *SenderConfig `json:"sender_config"`
 	// The private key of the relayer
-	MessageSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
-	RollupSenderPrivateKeys  []*ecdsa.PrivateKey `json:"-"`
+	MessageSenderPrivateKeys   []*ecdsa.PrivateKey `json:"-"`
+	GasOracleSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
+	RollupSenderPrivateKeys    []*ecdsa.PrivateKey `json:"-"`
 }
 
 // relayerConfigAlias RelayerConfig alias name
@@ -56,15 +59,17 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
 	var jsonConfig struct {
 		relayerConfigAlias
 		// The private key of the relayer
-		MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
-		RollupSenderPrivateKeys  []string `json:"rollup_sender_private_keys,omitempty"`
+		MessageSenderPrivateKeys   []string `json:"message_sender_private_keys"`
+		GasOracleSenderPrivateKeys []string `json:"gas_oracle_sender_private_keys"`
+		RollupSenderPrivateKeys    []string `json:"rollup_sender_private_keys,omitempty"`
 	}
 	if err := json.Unmarshal(input, &jsonConfig); err != nil {
 		return err
 	}
 
-	// Get messenger private key list.
 	*r = RelayerConfig(jsonConfig.relayerConfigAlias)
+
+	// Get messenger private key list.
 	for _, privStr := range jsonConfig.MessageSenderPrivateKeys {
 		priv, err := crypto.ToECDSA(common.FromHex(privStr))
 		if err != nil {
@@ -73,6 +78,15 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
 		r.MessageSenderPrivateKeys = append(r.MessageSenderPrivateKeys, priv)
 	}
 
+	// Get gas oracle private key list.
+	for _, privStr := range jsonConfig.GasOracleSenderPrivateKeys {
+		priv, err := crypto.ToECDSA(common.FromHex(privStr))
+		if err != nil {
+			return fmt.Errorf("incorrect private_key_list format, err: %v", err)
+		}
+		r.GasOracleSenderPrivateKeys = append(r.GasOracleSenderPrivateKeys, priv)
+	}
+
 	// Get rollup private key
 	for _, privStr := range jsonConfig.RollupSenderPrivateKeys {
 		priv, err := crypto.ToECDSA(common.FromHex(privStr))
@@ -90,15 +104,21 @@ func (r *RelayerConfig) MarshalJSON() ([]byte, error) {
 	jsonConfig := struct {
 		relayerConfigAlias
 		// The private key of the relayer
-		MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
-		RollupSenderPrivateKeys  []string `json:"rollup_sender_private_keys,omitempty"`
-	}{relayerConfigAlias(*r), nil, nil}
+		MessageSenderPrivateKeys   []string `json:"message_sender_private_keys"`
+		GasOracleSenderPrivateKeys []string `json:"gas_oracle_sender_private_keys,omitempty"`
+		RollupSenderPrivateKeys    []string `json:"rollup_sender_private_keys,omitempty"`
+	}{relayerConfigAlias(*r), nil, nil, nil}
 
 	// Transfer message sender private keys to hex type.
 	for _, priv := range r.MessageSenderPrivateKeys {
 		jsonConfig.MessageSenderPrivateKeys = append(jsonConfig.MessageSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
 	}
 
+	// Transfer rollup sender private keys to hex type.
+	for _, priv := range r.GasOracleSenderPrivateKeys {
+		jsonConfig.GasOracleSenderPrivateKeys = append(jsonConfig.GasOracleSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
+	}
+
 	// Transfer rollup sender private keys to hex type.
 	for _, priv := range r.RollupSenderPrivateKeys {
 		jsonConfig.RollupSenderPrivateKeys = append(jsonConfig.RollupSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
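The per-key conversion the unmarshaler performs, in isolation: a hex string becomes an *ecdsa.PrivateKey via go-ethereum helpers, exactly as in the loops above. The key here is the all-12 placeholder from the sample config, not a real secret:

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	privStr := "1212121212121212121212121212121212121212121212121212121212121212"
	priv, err := crypto.ToECDSA(common.FromHex(privStr))
	if err != nil {
		panic(err)
	}
	// The derived address is what the relayer logs as "main address".
	fmt.Println(crypto.PubkeyToAddress(priv.PublicKey).Hex())
}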
@@ -3,7 +3,6 @@ module scroll-tech/bridge
 go 1.18
 
 require (
-	github.com/iden3/go-iden3-crypto v0.0.13
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6
 	github.com/stretchr/testify v1.8.0
@@ -23,6 +22,7 @@ require (
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/holiman/uint256 v1.2.0 // indirect
+	github.com/iden3/go-iden3-crypto v0.0.13 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
@@ -71,6 +71,7 @@ github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOC
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
 github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
@@ -31,7 +31,7 @@ func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*B
 		return nil, err
 	}
 
-	watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RollupContractAddress, orm)
+	watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.L1MessageQueueAddress, cfg.ScrollChainContractAddress, orm)
 
 	return &Backend{
 		cfg: cfg,
@@ -13,7 +13,9 @@ import (
 	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/log"
 
-	"scroll-tech/database/orm"
+	"scroll-tech/common/types"
+
 	"scroll-tech/database"
 
 	bridge_abi "scroll-tech/bridge/abi"
 	"scroll-tech/bridge/config"
@@ -27,49 +29,61 @@ import (
 // Actions are triggered by new head from layer 1 geth node.
 // @todo It's better to be triggered by watcher.
 type Layer1Relayer struct {
-	ctx    context.Context
-	sender *sender.Sender
+	ctx context.Context
 
-	db  orm.L1MessageOrm
+	db  database.OrmFactory
 	cfg *config.RelayerConfig
 
-	// channel used to communicate with transaction sender
-	confirmationCh <-chan *sender.Confirmation
+	messageSender  *sender.Sender
+	messageCh      <-chan *sender.Confirmation
 	l2MessengerABI *abi.ABI
 
+	gasOracleSender *sender.Sender
+	gasOracleCh     <-chan *sender.Confirmation
+	l1GasOracleABI  *abi.ABI
+
 	stopCh chan struct{}
 }
 
 // NewLayer1Relayer will return a new instance of Layer1RelayerClient
-func NewLayer1Relayer(ctx context.Context, db orm.L1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
-	l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
+func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
+	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
 	if err != nil {
-		log.Warn("new L2MessengerABI failed", "err", err)
+		addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
+		log.Error("new MessageSender failed", "main address", addr.String(), "err", err)
 		return nil, err
 	}
 
-	sender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
+	// @todo make sure only one sender is available
+	gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
 	if err != nil {
-		addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
-		log.Error("new sender failed", "main address", addr.String(), "err", err)
+		addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKeys[0].PublicKey)
+		log.Error("new MessageSender failed", "main address", addr.String(), "err", err)
 		return nil, err
 	}
 
 	return &Layer1Relayer{
-		ctx:            ctx,
-		sender:         sender,
-		db:             db,
-		l2MessengerABI: l2MessengerABI,
-		cfg:            cfg,
-		stopCh:         make(chan struct{}),
-		confirmationCh: sender.ConfirmChan(),
+		ctx: ctx,
+		db:  db,
+
+		messageSender:  messageSender,
+		messageCh:      messageSender.ConfirmChan(),
+		l2MessengerABI: bridge_abi.L2ScrollMessengerABI,
+
+		gasOracleSender: gasOracleSender,
+		gasOracleCh:     gasOracleSender.ConfirmChan(),
+		l1GasOracleABI:  bridge_abi.L1GasPriceOracleABI,
+
+		cfg:    cfg,
+		stopCh: make(chan struct{}),
 	}, nil
 }
 
 // ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
 func (r *Layer1Relayer) ProcessSavedEvents() {
 	// msgs are sorted by nonce in increasing order
-	msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending, 100)
+	msgs, err := r.db.GetL1MessagesByStatus(types.MsgPending, 100)
 	if err != nil {
 		log.Error("Failed to fetch unprocessed L1 messages", "err", err)
 		return
@@ -89,74 +103,132 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
 	}
 }
 
-func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
-	// @todo add support to relay multiple messages
-	from := common.HexToAddress(msg.Sender)
-	target := common.HexToAddress(msg.Target)
-	value, ok := big.NewInt(0).SetString(msg.Value, 10)
-	if !ok {
-		// @todo maybe panic?
-		log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
-		// TODO: need to skip this message by changing its status to MsgError
-	}
-	fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
-	deadline := big.NewInt(int64(msg.Deadline))
-	msgNonce := big.NewInt(int64(msg.Nonce))
+func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error {
 	calldata := common.Hex2Bytes(msg.Calldata)
-	data, err := r.l2MessengerABI.Pack("relayMessage", from, target, value, fee, deadline, msgNonce, calldata)
-	if err != nil {
-		log.Error("Failed to pack relayMessage", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
-		// TODO: need to skip this message by changing its status to MsgError
-		return err
-	}
-
-	hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
+	hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), calldata)
 	if err != nil && err.Error() == "execution reverted: Message expired" {
-		return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgExpired)
+		return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
 	}
 	if err != nil && err.Error() == "execution reverted: Message successfully executed" {
-		return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
+		return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
 	}
 	if err != nil {
 		return err
 	}
 	log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
 
-	err = r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
+	err = r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
 	if err != nil {
 		log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.msgHash", msg.MsgHash, "msg.height", msg.Height, "err", err)
 	}
 	return err
 }
 
+// ProcessGasPriceOracle imports gas price to layer2
+func (r *Layer1Relayer) ProcessGasPriceOracle() {
+	latestBlockHeight, err := r.db.GetLatestL1BlockHeight()
+	if err != nil {
+		log.Warn("Failed to fetch latest L1 block height from db", "err", err)
+		return
+	}
+
+	blocks, err := r.db.GetL1BlockInfos(map[string]interface{}{
+		"number": latestBlockHeight,
+	})
+	if err != nil {
+		log.Error("Failed to GetL1BlockInfos from db", "height", latestBlockHeight, "err", err)
+		return
+	}
+	if len(blocks) != 1 {
+		log.Error("Block not exist", "height", latestBlockHeight)
+		return
+	}
+	block := blocks[0]
+
+	if block.GasOracleStatus == types.GasOraclePending {
+		baseFee := big.NewInt(int64(block.BaseFee))
+		data, err := r.l1GasOracleABI.Pack("setL1BaseFee", baseFee)
+		if err != nil {
+			log.Error("Failed to pack setL1BaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", block.BaseFee, "err", err)
+			return
+		}
+
+		hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data)
+		if err != nil {
+			if !errors.Is(err, sender.ErrNoAvailableAccount) {
+				log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
+			}
+			return
+		}
+
+		err = r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
+		if err != nil {
+			log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
+			return
+		}
+	}
+}
+
 // Start the relayer process
 func (r *Layer1Relayer) Start() {
-	go func() {
-		// trigger by timer
-		ticker := time.NewTicker(3 * time.Second)
+	loop := func(ctx context.Context, f func()) {
+		ticker := time.NewTicker(2 * time.Second)
 		defer ticker.Stop()
 
 		for {
 			select {
-			case <-ticker.C:
-				// number, err := r.client.BlockNumber(r.ctx)
-				// log.Info("receive header", "height", number)
-				r.ProcessSavedEvents()
-			case cfm := <-r.confirmationCh:
-				if !cfm.IsSuccessful {
-					log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
-				} else {
-					// @todo handle db error
-					err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, orm.MsgConfirmed, cfm.TxHash.String())
-					if err != nil {
-						log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
-					}
-					log.Info("transaction confirmed in layer2", "confirmation", cfm)
-				}
-			case <-r.stopCh:
+			case <-ctx.Done():
 				return
+			case <-ticker.C:
+				f()
 			}
 		}
-	}()
+	}
+	go func() {
+		ctx, cancel := context.WithCancel(r.ctx)
+
+		go loop(ctx, r.ProcessSavedEvents)
+		go loop(ctx, r.ProcessGasPriceOracle)
+
+		go func(ctx context.Context) {
+			for {
+				select {
+				case <-ctx.Done():
+					return
+				case cfm := <-r.messageCh:
+					if !cfm.IsSuccessful {
+						log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
+					} else {
+						// @todo handle db error
+						err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
+						if err != nil {
+							log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
+						}
+						log.Info("transaction confirmed in layer2", "confirmation", cfm)
+					}
+				case cfm := <-r.gasOracleCh:
+					if !cfm.IsSuccessful {
+						// @discuss: maybe make it pending again?
+						err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
+						if err != nil {
+							log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
+						}
+						log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
+					} else {
+						// @todo handle db error
+						err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
+						if err != nil {
+							log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
+						}
+						log.Info("transaction confirmed in layer2", "confirmation", cfm)
+					}
+				}
+			}
+		}(ctx)
+
+		<-r.stopCh
+		cancel()
+	}()
 }
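The refactored Start collapses two timer-driven jobs into one reusable helper. The pattern in isolation, as a minimal sketch (interval and work function are placeholders):

package main

import (
	"context"
	"fmt"
	"time"
)

// loop runs f on every tick until the context is cancelled.
func loop(ctx context.Context, interval time.Duration, f func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			f()
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go loop(ctx, 2*time.Second, func() { fmt.Println("tick") })
	time.Sleep(5 * time.Second)
	cancel() // stops the goroutine, mirroring the relayer's stopCh handling
}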
@@ -8,14 +8,16 @@ import (
 	geth "github.com/scroll-tech/go-ethereum"
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
-	"github.com/scroll-tech/go-ethereum/core/types"
+	geth_types "github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/scroll-tech/go-ethereum/metrics"
 	"github.com/scroll-tech/go-ethereum/rpc"
 
+	"scroll-tech/common/types"
+
 	"scroll-tech/database"
-	"scroll-tech/database/orm"
 
 	bridge_abi "scroll-tech/bridge/abi"
 	"scroll-tech/bridge/utils"
@@ -32,9 +34,9 @@ type relayedMessage struct {
 }
 
 type rollupEvent struct {
-	batchID common.Hash
-	txHash  common.Hash
-	status  orm.RollupStatus
+	batchHash common.Hash
+	txHash    common.Hash
+	status    types.RollupStatus
 }
 
 // Watcher will listen for smart contract events from Eth L1.
@@ -44,22 +46,28 @@ type Watcher struct {
 	db database.OrmFactory
 
 	// The number of new blocks to wait for a block to be confirmed
 	confirmations rpc.BlockNumber
 
 	messengerAddress common.Address
 	messengerABI     *abi.ABI
 
-	rollupAddress common.Address
-	rollupABI     *abi.ABI
+	messageQueueAddress common.Address
+	messageQueueABI     *abi.ABI
+
+	scrollChainAddress common.Address
+	scrollChainABI     *abi.ABI
 
 	// The height of the block that the watcher has retrieved event logs
 	processedMsgHeight uint64
+	// The height of the block that the watcher has retrieved header rlp
+	processedBlockHeight uint64
 
-	stop chan bool
+	stopCh chan bool
 }
 
 // NewWatcher returns a new instance of Watcher. The instance will be not fully prepared,
 // and still needs to be finalized and ran by calling `watcher.Start`.
-func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress common.Address, rollupAddress common.Address, db database.OrmFactory) *Watcher {
+func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db database.OrmFactory) *Watcher {
 	savedHeight, err := db.GetLayer1LatestWatchedHeight()
 	if err != nil {
 		log.Warn("Failed to fetch height from db", "err", err)
@@ -69,55 +77,147 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
 		savedHeight = int64(startHeight)
 	}
 
-	stop := make(chan bool)
+	savedL1BlockHeight, err := db.GetLatestL1BlockHeight()
+	if err != nil {
+		log.Warn("Failed to fetch latest L1 block height from db", "err", err)
+		savedL1BlockHeight = 0
+	}
+	if savedL1BlockHeight < startHeight {
+		savedL1BlockHeight = startHeight
+	}
+
+	stopCh := make(chan bool)
 
 	return &Watcher{
-		ctx:                ctx,
-		client:             client,
-		db:                 db,
-		confirmations:      confirmations,
-		messengerAddress:   messengerAddress,
-		messengerABI:       bridge_abi.L1MessengerMetaABI,
-		rollupAddress:      rollupAddress,
-		rollupABI:          bridge_abi.RollupMetaABI,
-		processedMsgHeight: uint64(savedHeight),
-		stop:               stop,
+		ctx:           ctx,
+		client:        client,
+		db:            db,
+		confirmations: confirmations,
+
+		messengerAddress: messengerAddress,
+		messengerABI:     bridge_abi.L1ScrollMessengerABI,
+
+		messageQueueAddress: messageQueueAddress,
+		messageQueueABI:     bridge_abi.L1MessageQueueABI,
+
+		scrollChainAddress: scrollChainAddress,
+		scrollChainABI:     bridge_abi.ScrollChainABI,
+
+		processedMsgHeight:   uint64(savedHeight),
+		processedBlockHeight: savedL1BlockHeight,
+		stopCh:               stopCh,
 	}
 }
 
 // Start the Watcher module.
 func (w *Watcher) Start() {
 	go func() {
-		ticker := time.NewTicker(10 * time.Second)
-		defer ticker.Stop()
+		ctx, cancel := context.WithCancel(w.ctx)
 
-		for ; true; <-ticker.C {
-			select {
-			case <-w.stop:
-				return
+		go func(ctx context.Context) {
+			ticker := time.NewTicker(2 * time.Second)
+			defer ticker.Stop()
 
-			default:
-				number, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
-				if err != nil {
-					log.Error("failed to get block number", "err", err)
-					continue
-				}
+			for {
+				select {
+				case <-ctx.Done():
+					return
 
-				if err := w.FetchContractEvent(number); err != nil {
-					log.Error("Failed to fetch bridge contract", "err", err)
+				case <-ticker.C:
+					number, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
+					if err != nil {
+						log.Error("failed to get block number", "err", err)
+						continue
+					}
+
+					if err := w.FetchBlockHeader(number); err != nil {
+						log.Error("Failed to fetch L1 block header", "lastest", number, "err", err)
+					}
 				}
 			}
-		}
+		}(ctx)
+
+		go func(ctx context.Context) {
+			ticker := time.NewTicker(2 * time.Second)
+			defer ticker.Stop()
+
+			for {
+				select {
+				case <-ctx.Done():
+					return
+
+				case <-ticker.C:
+					number, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
+					if err != nil {
+						log.Error("failed to get block number", "err", err)
+						continue
+					}
+
+					if err := w.FetchContractEvent(number); err != nil {
+						log.Error("Failed to fetch bridge contract", "err", err)
+					}
+				}
+			}
+		}(ctx)
+
+		<-w.stopCh
+		cancel()
 	}()
 }
 
 // Stop the Watcher module, for a graceful shutdown.
 func (w *Watcher) Stop() {
-	w.stop <- true
+	w.stopCh <- true
 }
 
+const contractEventsBlocksFetchLimit = int64(10)
+
+// FetchBlockHeader pull latest L1 blocks and save in DB
+func (w *Watcher) FetchBlockHeader(blockHeight uint64) error {
+	fromBlock := int64(w.processedBlockHeight) + 1
+	toBlock := int64(blockHeight)
+	if toBlock < fromBlock {
+		return nil
+	}
+	if toBlock > fromBlock+contractEventsBlocksFetchLimit {
+		toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
+	}
+
+	var blocks []*types.L1BlockInfo
+	var err error
+	height := fromBlock
+	for ; height <= toBlock; height++ {
+		var block *geth_types.Header
+		block, err = w.client.HeaderByNumber(w.ctx, big.NewInt(height))
+		if err != nil {
+			log.Warn("Failed to get block", "height", height, "err", err)
+			break
+		}
+		blocks = append(blocks, &types.L1BlockInfo{
+			Number:  uint64(height),
+			Hash:    block.Hash().String(),
+			BaseFee: block.BaseFee.Uint64(),
+		})
+	}
+
+	// failed at first block, return with the error
+	if height == fromBlock {
+		return err
+	}
+	toBlock = height - 1
+
+	// insert succeed blocks
+	err = w.db.InsertL1Blocks(w.ctx, blocks)
+	if err != nil {
+		log.Warn("Failed to insert L1 block to db", "fromBlock", fromBlock, "toBlock", toBlock, "err", err)
+		return err
+	}
+
+	// update processed height
+	w.processedBlockHeight = uint64(toBlock)
+	return nil
+}
+
 // FetchContractEvent pull latest event logs from given contract address and save in DB
 func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
 	defer func() {
@@ -140,16 +240,17 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
 		ToBlock:   big.NewInt(to), // inclusive
 		Addresses: []common.Address{
 			w.messengerAddress,
-			w.rollupAddress,
+			w.scrollChainAddress,
+			w.messageQueueAddress,
 		},
 		Topics: make([][]common.Hash, 1),
 	}
 	query.Topics[0] = make([]common.Hash, 5)
-	query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
-	query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
-	query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
-	query.Topics[0][3] = common.HexToHash(bridge_abi.CommitBatchEventSignature)
-	query.Topics[0][4] = common.HexToHash(bridge_abi.FinalizedBatchEventSignature)
+	query.Topics[0][0] = bridge_abi.L1QueueTransactionEventSignature
+	query.Topics[0][1] = bridge_abi.L1RelayedMessageEventSignature
+	query.Topics[0][2] = bridge_abi.L1FailedRelayedMessageEventSignature
+	query.Topics[0][3] = bridge_abi.L1CommitBatchEventSignature
+	query.Topics[0][4] = bridge_abi.L1FinalizeBatchEventSignature
 
 	logs, err := w.client.FilterLogs(w.ctx, query)
 	if err != nil {
@@ -171,29 +272,29 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
 	log.Info("L1 events types", "SentMessageCount", len(sentMessageEvents), "RelayedMessageCount", len(relayedMessageEvents), "RollupEventCount", len(rollupEvents))
 
 	// use rollup event to update rollup results db status
-	var batchIDs []string
+	var batchHashes []string
 	for _, event := range rollupEvents {
-		batchIDs = append(batchIDs, event.batchID.String())
+		batchHashes = append(batchHashes, event.batchHash.String())
 	}
-	statuses, err := w.db.GetRollupStatusByIDList(batchIDs)
+	statuses, err := w.db.GetRollupStatusByHashList(batchHashes)
 	if err != nil {
-		log.Error("Failed to GetRollupStatusByIDList", "err", err)
+		log.Error("Failed to GetRollupStatusByHashList", "err", err)
 		return err
 	}
-	if len(statuses) != len(batchIDs) {
-		log.Error("RollupStatus.Length mismatch with BatchIDs.Length", "RollupStatus.Length", len(statuses), "BatchIDs.Length", len(batchIDs))
+	if len(statuses) != len(batchHashes) {
+		log.Error("RollupStatus.Length mismatch with batchHashes.Length", "RollupStatus.Length", len(statuses), "batchHashes.Length", len(batchHashes))
 		return nil
 	}
 
 	for index, event := range rollupEvents {
-		batchID := event.batchID.String()
+		batchHash := event.batchHash.String()
 		status := statuses[index]
 		// only update when db status is before event status
 		if event.status > status {
-			if event.status == orm.RollupFinalized {
-				err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
-			} else if event.status == orm.RollupCommitted {
-				err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
+			if event.status == types.RollupFinalized {
+				err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
+			} else if event.status == types.RollupCommitted {
+				err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
 			}
 			if err != nil {
 				log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
@@ -205,14 +306,13 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
 	// Update relayed message first to make sure we don't forget to update submitted message.
 	// Since, we always start sync from the latest unprocessed message.
 	for _, msg := range relayedMessageEvents {
+		var msgStatus types.MsgStatus
 		if msg.isSuccessful {
-			// succeed
-			err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
+			msgStatus = types.MsgConfirmed
 		} else {
-			// failed
-			err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
+			msgStatus = types.MsgFailed
 		}
-		if err != nil {
+		if err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
 			log.Error("Failed to update layer1 status and layer2 hash", "err", err)
 			return err
 		}
@@ -229,108 +329,87 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
 	return nil
 }
 
-func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
+func (w *Watcher) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L1Message, []relayedMessage, []rollupEvent, error) {
 	// Need use contract abi to parse event Log
 	// Can only be tested after we have our contracts set up
 
-	var l1Messages []*orm.L1Message
+	var l1Messages []*types.L1Message
 	var relayedMessages []relayedMessage
 	var rollupEvents []rollupEvent
 	for _, vLog := range logs {
 		switch vLog.Topics[0] {
-		case common.HexToHash(bridge_abi.SentMessageEventSignature):
-			event := struct {
-				Target       common.Address
-				Sender       common.Address
-				Value        *big.Int // uint256
-				Fee          *big.Int // uint256
-				Deadline     *big.Int // uint256
-				Message      []byte
-				MessageNonce *big.Int // uint256
-				GasLimit     *big.Int // uint256
-			}{}
-
-			err := w.messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
+		case bridge_abi.L1QueueTransactionEventSignature:
+			event := bridge_abi.L1QueueTransactionEvent{}
+			err := utils.UnpackLog(w.messageQueueABI, &event, "QueueTransaction", vLog)
 			if err != nil {
-				log.Warn("Failed to unpack layer1 SentMessage event", "err", err)
+				log.Warn("Failed to unpack layer1 QueueTransaction event", "err", err)
 				return l1Messages, relayedMessages, rollupEvents, err
 			}
-			// target is in topics[1]
-			event.Target = common.HexToAddress(vLog.Topics[1].String())
-			l1Messages = append(l1Messages, &orm.L1Message{
-				Nonce:      event.MessageNonce.Uint64(),
-				MsgHash:    utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
+
+			msgHash := common.BytesToHash(crypto.Keccak256(event.Data))
+
+			l1Messages = append(l1Messages, &types.L1Message{
+				QueueIndex: event.QueueIndex.Uint64(),
+				MsgHash:    msgHash.String(),
 				Height:     vLog.BlockNumber,
 				Sender:     event.Sender.String(),
 				Value:      event.Value.String(),
-				Fee:        event.Fee.String(),
-				GasLimit:   event.GasLimit.Uint64(),
-				Deadline:   event.Deadline.Uint64(),
 				Target:     event.Target.String(),
-				Calldata:   common.Bytes2Hex(event.Message),
+				Calldata:   common.Bytes2Hex(event.Data),
+				GasLimit:   event.GasLimit.Uint64(),
 				Layer1Hash: vLog.TxHash.Hex(),
 			})
-		case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
-			event := struct {
-				MsgHash common.Hash
-			}{}
-			// MsgHash is in topics[1]
-			event.MsgHash = common.HexToHash(vLog.Topics[1].String())
+		case bridge_abi.L1RelayedMessageEventSignature:
+			event := bridge_abi.L1RelayedMessageEvent{}
+			err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
+			if err != nil {
+				log.Warn("Failed to unpack layer1 RelayedMessage event", "err", err)
+				return l1Messages, relayedMessages, rollupEvents, err
+			}
+
 			relayedMessages = append(relayedMessages, relayedMessage{
-				msgHash:      event.MsgHash,
+				msgHash:      event.MessageHash,
 				txHash:       vLog.TxHash,
 				isSuccessful: true,
 			})
-		case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
-			event := struct {
-				MsgHash common.Hash
-			}{}
-			// MsgHash is in topics[1]
-			event.MsgHash = common.HexToHash(vLog.Topics[1].String())
+		case bridge_abi.L1FailedRelayedMessageEventSignature:
+			event := bridge_abi.L1FailedRelayedMessageEvent{}
+			err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
+			if err != nil {
+				log.Warn("Failed to unpack layer1 FailedRelayedMessage event", "err", err)
+				return l1Messages, relayedMessages, rollupEvents, err
+			}
+
 			relayedMessages = append(relayedMessages, relayedMessage{
-				msgHash:      event.MsgHash,
+				msgHash:      event.MessageHash,
 				txHash:       vLog.TxHash,
 				isSuccessful: false,
 			})
-		case common.HexToHash(bridge_abi.CommitBatchEventSignature):
-			event := struct {
-				BatchID    common.Hash
-				BatchHash  common.Hash
-				BatchIndex *big.Int
-				ParentHash common.Hash
-			}{}
-			// BatchID is in topics[1]
-			event.BatchID = common.HexToHash(vLog.Topics[1].String())
-			err := w.rollupABI.UnpackIntoInterface(&event, "CommitBatch", vLog.Data)
+		case bridge_abi.L1CommitBatchEventSignature:
+			event := bridge_abi.L1CommitBatchEvent{}
+			err := utils.UnpackLog(w.scrollChainABI, &event, "CommitBatch", vLog)
 			if err != nil {
 				log.Warn("Failed to unpack layer1 CommitBatch event", "err", err)
 				return l1Messages, relayedMessages, rollupEvents, err
 			}
+
 			rollupEvents = append(rollupEvents, rollupEvent{
-				batchID: event.BatchID,
-				txHash:  vLog.TxHash,
-				status:  orm.RollupCommitted,
+				batchHash: event.BatchHash,
+				txHash:    vLog.TxHash,
+				status:    types.RollupCommitted,
 			})
-		case common.HexToHash(bridge_abi.FinalizedBatchEventSignature):
-			event := struct {
-				BatchID    common.Hash
-				BatchHash  common.Hash
-				BatchIndex *big.Int
-				ParentHash common.Hash
-			}{}
-			// BatchID is in topics[1]
-			event.BatchID = common.HexToHash(vLog.Topics[1].String())
-			err := w.rollupABI.UnpackIntoInterface(&event, "FinalizeBatch", vLog.Data)
+		case bridge_abi.L1FinalizeBatchEventSignature:
+			event := bridge_abi.L1FinalizeBatchEvent{}
+			err := utils.UnpackLog(w.scrollChainABI, &event, "FinalizeBatch", vLog)
 			if err != nil {
 				log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err)
 				return l1Messages, relayedMessages, rollupEvents, err
 			}
+
 			rollupEvents = append(rollupEvents, rollupEvent{
-				batchID: event.BatchID,
-				txHash:  vLog.TxHash,
-				status:  orm.RollupFinalized,
+				batchHash: event.BatchHash,
+				txHash:    vLog.TxHash,
+				status:    types.RollupFinalized,
 			})
 		default:
 			log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
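For context on the query above: entries within Topics[0] are OR-ed by FilterLogs, so a single call matches any of the five event signatures across the listed contract addresses. A reduced, self-contained sketch of the query construction (addresses and topics are left as parameters):

package main

import (
	"fmt"
	"math/big"

	geth "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
)

// buildQuery mirrors the shape used by FetchContractEvent: one position-0
// topic slot whose entries are alternatives.
func buildQuery(from, to int64, addrs []common.Address, topic0 []common.Hash) geth.FilterQuery {
	q := geth.FilterQuery{
		FromBlock: big.NewInt(from),
		ToBlock:   big.NewInt(to), // inclusive
		Addresses: addrs,
		Topics:    make([][]common.Hash, 1),
	}
	q.Topics[0] = topic0 // OR semantics within the same position
	return q
}

func main() {
	q := buildQuery(100, 110, nil, nil)
	fmt.Println(q.FromBlock, q.ToBlock)
}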
@@ -23,7 +23,7 @@ func testStartWatcher(t *testing.T) {
 
 	l1Cfg := cfg.L1Config
 
-	watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
+	watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
 	watcher.Start()
 	defer watcher.Stop()
 }
@@ -14,10 +14,11 @@ import (
 // Backend manage the resources and services of L2 backend.
 // The backend should monitor events in layer 2 and relay transactions to layer 1
 type Backend struct {
-	cfg       *config.L2Config
-	l2Watcher *WatcherClient
-	relayer   *Layer2Relayer
-	orm       database.OrmFactory
+	cfg           *config.L2Config
+	watcher       *WatcherClient
+	relayer       *Layer2Relayer
+	batchProposer *BatchProposer
+	orm           database.OrmFactory
 }
 
 // New returns a new instance of Backend.
@@ -29,32 +30,37 @@ func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*B
 
 	// Note: initialize watcher before relayer to keep DB consistent.
 	// Otherwise, there will be a race condition between watcher.initializeGenesis and relayer.ProcessPendingBatches.
-	l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.BatchProposerConfig, cfg.L2MessengerAddress, orm)
+	watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.L2MessengerAddress, cfg.L2MessageQueueAddress, orm)
 
-	relayer, err := NewLayer2Relayer(ctx, orm, cfg.RelayerConfig)
+	relayer, err := NewLayer2Relayer(ctx, client, orm, cfg.RelayerConfig)
 	if err != nil {
 		return nil, err
 	}
 
+	batchProposer := NewBatchProposer(ctx, cfg.BatchProposerConfig, relayer, orm)
+
 	return &Backend{
-		cfg:       cfg,
-		l2Watcher: l2Watcher,
-		relayer:   relayer,
-		orm:       orm,
+		cfg:           cfg,
+		watcher:       watcher,
+		relayer:       relayer,
+		batchProposer: batchProposer,
+		orm:           orm,
 	}, nil
 }
 
 // Start Backend module.
 func (l2 *Backend) Start() error {
-	l2.l2Watcher.Start()
+	l2.watcher.Start()
 	l2.relayer.Start()
+	l2.batchProposer.Start()
 	return nil
 }
 
 // Stop Backend module.
 func (l2 *Backend) Stop() {
-	l2.l2Watcher.Stop()
+	l2.batchProposer.Stop()
 	l2.relayer.Stop()
+	l2.watcher.Stop()
 }
 
 // APIs collect API modules.
@@ -63,7 +69,7 @@ func (l2 *Backend) APIs() []rpc.API {
 		{
 			Namespace: "l2",
 			Version:   "1.0",
-			Service:   WatcherAPI(l2.l2Watcher),
+			Service:   WatcherAPI(l2.watcher),
 			Public:    true,
 		},
 	}
@@ -1,107 +1,30 @@
 package l2
 
 import (
 	"context"
-	"fmt"
-	"math"
-	"reflect"
-	"sync"
-	"time"
 
 	geth_types "github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/log"
 
-	"scroll-tech/database"
-	"scroll-tech/database/orm"
+	"scroll-tech/common/types"
+
+	"scroll-tech/database"
 
 	bridgeabi "scroll-tech/bridge/abi"
 	"scroll-tech/bridge/config"
 )
 
-type batchProposer struct {
-	mutex sync.Mutex
-
-	orm database.OrmFactory
-
-	batchTimeSec        uint64
-	batchGasThreshold   uint64
-	batchTxNumThreshold uint64
-	batchBlocksLimit    uint64
-
-	proofGenerationFreq uint64
-	skippedOpcodes      map[string]struct{}
-}
-
-func newBatchProposer(cfg *config.BatchProposerConfig, orm database.OrmFactory) *batchProposer {
-	return &batchProposer{
-		mutex:               sync.Mutex{},
-		orm:                 orm,
-		batchTimeSec:        cfg.BatchTimeSec,
-		batchGasThreshold:   cfg.BatchGasThreshold,
-		batchTxNumThreshold: cfg.BatchTxNumThreshold,
-		batchBlocksLimit:    cfg.BatchBlocksLimit,
-		proofGenerationFreq: cfg.ProofGenerationFreq,
-		skippedOpcodes:      cfg.SkippedOpcodes,
-	}
-}
-
-func (w *batchProposer) tryProposeBatch() {
-	w.mutex.Lock()
-	defer w.mutex.Unlock()
-
-	blocks, err := w.orm.GetUnbatchedBlocks(
-		map[string]interface{}{},
-		fmt.Sprintf("order by number ASC LIMIT %d", w.batchBlocksLimit),
-	)
-	if err != nil {
-		log.Error("failed to get unbatched blocks", "err", err)
-		return
-	}
-	if len(blocks) == 0 {
-		return
-	}
-
-	if blocks[0].GasUsed > w.batchGasThreshold {
-		log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
-		if _, err = w.createBatchForBlocks(blocks[:1]); err != nil {
-			log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
-		}
-		return
-	}
-
-	if blocks[0].TxNum > w.batchTxNumThreshold {
-		log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
-		if _, err = w.createBatchForBlocks(blocks[:1]); err != nil {
-			log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
-		}
-		return
-	}
-
-	var (
-		length         = len(blocks)
-		gasUsed, txNum uint64
-	)
-	// add blocks into batch until reach batchGasThreshold
-	for i, block := range blocks {
-		if (gasUsed+block.GasUsed > w.batchGasThreshold) || (txNum+block.TxNum > w.batchTxNumThreshold) {
-			blocks = blocks[:i]
-			break
-		}
-		gasUsed += block.GasUsed
-		txNum += block.TxNum
-	}
-
-	// if too few gas gathered, but we don't want to halt, we then check the first block in the batch:
-	// if it's not old enough we will skip proposing the batch,
-	// otherwise we will still propose a batch
-	if length == len(blocks) && blocks[0].BlockTimestamp+w.batchTimeSec > uint64(time.Now().Unix()) {
-		return
-	}
-
-	if _, err = w.createBatchForBlocks(blocks); err != nil {
-		log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
-	}
-}
-
-func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) (string, error) {
-	dbTx, err := w.orm.Beginx()
+// AddBatchInfoToDB inserts the batch information to the BlockBatch table and updates the batch_hash
+// in all blocks included in the batch.
+func AddBatchInfoToDB(db database.OrmFactory, batchData *types.BatchData) error {
+	dbTx, err := db.Beginx()
 	if err != nil {
-		return "", err
+		return err
 	}
 
 	var dbTxErr error
@@ -113,28 +36,321 @@ func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) (string, e
 		}
 	}()
 
 	var (
||||
batchID string
|
||||
startBlock = blocks[0]
|
||||
endBlock = blocks[len(blocks)-1]
|
||||
txNum, gasUsed uint64
|
||||
blockIDs = make([]uint64, len(blocks))
|
||||
)
|
||||
for i, block := range blocks {
|
||||
txNum += block.TxNum
|
||||
gasUsed += block.GasUsed
|
||||
blockIDs[i] = block.Number
|
||||
if dbTxErr = db.NewBatchInDBTx(dbTx, batchData); dbTxErr != nil {
|
||||
return dbTxErr
|
||||
}
|
||||
|
||||
batchID, dbTxErr = w.orm.NewBatchInDBTx(dbTx, startBlock, endBlock, startBlock.ParentHash, txNum, gasUsed)
|
||||
if dbTxErr != nil {
|
||||
return "", dbTxErr
|
||||
var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
|
||||
for i, block := range batchData.Batch.Blocks {
|
||||
blockIDs[i] = block.BlockNumber
|
||||
}
|
||||
|
||||
if dbTxErr = w.orm.SetBatchIDForBlocksInDBTx(dbTx, blockIDs, batchID); dbTxErr != nil {
|
||||
return "", dbTxErr
|
||||
if dbTxErr = db.SetBatchHashForL2BlocksInDBTx(dbTx, blockIDs, batchData.Hash().Hex()); dbTxErr != nil {
|
||||
return dbTxErr
|
||||
}
|
||||
|
||||
dbTxErr = dbTx.Commit()
|
||||
return batchID, dbTxErr
|
||||
return dbTxErr
|
||||
}
|
||||
|
||||
// BatchProposer sends batches commit transactions to relayer.
|
||||
type BatchProposer struct {
|
||||
mutex sync.Mutex
|
||||
|
||||
ctx context.Context
|
||||
orm database.OrmFactory
|
||||
|
||||
batchTimeSec uint64
|
||||
batchGasThreshold uint64
|
||||
batchTxNumThreshold uint64
|
||||
batchBlocksLimit uint64
|
||||
commitCalldataSizeLimit uint64
|
||||
batchDataBufferSizeLimit uint64
|
||||
|
||||
proofGenerationFreq uint64
|
||||
batchDataBuffer []*types.BatchData
|
||||
relayer *Layer2Relayer
|
||||
|
||||
piCfg *types.PublicInputHashConfig
|
||||
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
// NewBatchProposer will return a new instance of BatchProposer.
|
||||
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *Layer2Relayer, orm database.OrmFactory) *BatchProposer {
|
||||
p := &BatchProposer{
|
||||
mutex: sync.Mutex{},
|
||||
ctx: ctx,
|
||||
orm: orm,
|
||||
batchTimeSec: cfg.BatchTimeSec,
|
||||
batchGasThreshold: cfg.BatchGasThreshold,
|
||||
batchTxNumThreshold: cfg.BatchTxNumThreshold,
|
||||
batchBlocksLimit: cfg.BatchBlocksLimit,
|
||||
commitCalldataSizeLimit: cfg.CommitTxCalldataSizeLimit,
|
||||
batchDataBufferSizeLimit: 100*cfg.CommitTxCalldataSizeLimit + 1*1024*1024, // @todo: determine the value.
|
||||
proofGenerationFreq: cfg.ProofGenerationFreq,
|
||||
piCfg: cfg.PublicInputConfig,
|
||||
relayer: relayer,
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
// for graceful restart.
|
||||
p.recoverBatchDataBuffer()
|
||||
|
||||
// try to commit the leftover pending batches
|
||||
p.tryCommitBatches()
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// Start the Listening process
|
||||
func (p *BatchProposer) Start() {
|
||||
go func() {
|
||||
if reflect.ValueOf(p.orm).IsNil() {
|
||||
panic("must run BatchProposer with DB")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(p.ctx)
|
||||
|
||||
// batch proposer loop
|
||||
go func(ctx context.Context) {
|
||||
ticker := time.NewTicker(2 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
|
||||
case <-ticker.C:
|
||||
p.tryProposeBatch()
|
||||
}
|
||||
}
|
||||
}(ctx)
|
||||
|
||||
<-p.stopCh
|
||||
cancel()
|
||||
}()
|
||||
}
|
||||
|
||||
// Stop the Watcher module, for a graceful shutdown.
|
||||
func (p *BatchProposer) Stop() {
|
||||
p.stopCh <- struct{}{}
|
||||
}
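
The Start/Stop pair above follows a common Go pattern: a ticker drives the periodic work, while a parent goroutine blocks on stopCh and cancels the context to unwind the loop. A minimal, runnable sketch of just that pattern; the names runPeriodically and work are illustrative and not taken from this repository:

package main

import (
    "context"
    "fmt"
    "time"
)

// runPeriodically invokes work on every tick until the context is canceled.
func runPeriodically(ctx context.Context, interval time.Duration, work func()) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-ticker.C:
            work()
        }
    }
}

func main() {
    stopCh := make(chan struct{})
    ctx, cancel := context.WithCancel(context.Background())

    go runPeriodically(ctx, 100*time.Millisecond, func() { fmt.Println("tick") })

    go func() {
        <-stopCh // Stop() sends here
        cancel()
    }()

    time.Sleep(350 * time.Millisecond)
    stopCh <- struct{}{} // graceful shutdown
    time.Sleep(50 * time.Millisecond)
}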

func (p *BatchProposer) recoverBatchDataBuffer() {
    // batches are sorted by batch index in increasing order
    batchHashes, err := p.orm.GetPendingBatches(math.MaxInt32)
    if err != nil {
        log.Crit("Failed to fetch pending L2 batches", "err", err)
    }
    if len(batchHashes) == 0 {
        return
    }
    log.Info("Load pending batches into batchDataBuffer")

    // helper function to cache and get BlockBatch from DB
    blockBatchCache := make(map[string]*types.BlockBatch)
    getBlockBatch := func(batchHash string) (*types.BlockBatch, error) {
        if blockBatch, ok := blockBatchCache[batchHash]; ok {
            return blockBatch, nil
        }
        blockBatches, err := p.orm.GetBlockBatches(map[string]interface{}{"hash": batchHash})
        if err != nil || len(blockBatches) == 0 {
            return nil, err
        }
        blockBatchCache[batchHash] = blockBatches[0]
        return blockBatches[0], nil
    }

    // recover the in-memory batchData from DB
    for _, batchHash := range batchHashes {
        log.Info("recover batch data from pending batch", "batch_hash", batchHash)
        blockBatch, err := getBlockBatch(batchHash)
        if err != nil {
            log.Error("could not get BlockBatch", "batch_hash", batchHash, "error", err)
            continue
        }

        parentBatch, err := getBlockBatch(blockBatch.ParentHash)
        if err != nil {
            log.Error("could not get parent BlockBatch", "batch_hash", batchHash, "error", err)
            continue
        }

        blockInfos, err := p.orm.GetL2BlockInfos(
            map[string]interface{}{"batch_hash": batchHash},
            "order by number ASC",
        )

        if err != nil {
            log.Error("could not GetL2BlockInfos", "batch_hash", batchHash, "error", err)
            continue
        }
        if len(blockInfos) != int(blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1) {
            log.Error("the number of block info retrieved from DB mismatches the batch info in the DB",
                "len(blockInfos)", len(blockInfos),
                "expected", blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1)
            continue
        }

        batchData, err := p.generateBatchData(parentBatch, blockInfos)
        if err != nil {
            continue
        }
        if batchData.Hash().Hex() != batchHash {
            log.Error("the hash from recovered batch data mismatches the DB entry",
                "recovered_batch_hash", batchData.Hash().Hex(),
                "expected", batchHash)
            continue
        }

        p.batchDataBuffer = append(p.batchDataBuffer, batchData)
    }
}
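
The caching closure in recoverBatchDataBuffer above is a small pattern worth isolating: one map memoizes lookups so a batch and its parent are fetched from the backing store at most once per recovery pass. A standalone sketch under that assumption; fetch here is a stand-in for the ORM call, not the real database layer:

package main

import "fmt"

func main() {
    calls := 0
    fetch := func(hash string) (string, error) { // stand-in for the DB query
        calls++
        return "record-for-" + hash, nil
    }

    cache := make(map[string]string)
    get := func(hash string) (string, error) {
        if rec, ok := cache[hash]; ok {
            return rec, nil
        }
        rec, err := fetch(hash)
        if err != nil {
            return "", err
        }
        cache[hash] = rec
        return rec, nil
    }

    // Batches and their parents often share hashes across iterations;
    // the cache keeps the repeated lookups cheap.
    for _, h := range []string{"a", "b", "a", "a"} {
        rec, _ := get(h)
        fmt.Println(rec)
    }
    fmt.Println("backing-store calls:", calls) // 2, not 4
}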

func (p *BatchProposer) tryProposeBatch() {
    p.mutex.Lock()
    defer p.mutex.Unlock()

    if p.getBatchDataBufferSize() < p.batchDataBufferSizeLimit {
        blocks, err := p.orm.GetUnbatchedL2Blocks(
            map[string]interface{}{},
            fmt.Sprintf("order by number ASC LIMIT %d", p.batchBlocksLimit),
        )
        if err != nil {
            log.Error("failed to get unbatched blocks", "err", err)
            return
        }

        p.proposeBatch(blocks)
    }

    p.tryCommitBatches()
}

func (p *BatchProposer) tryCommitBatches() {
    // estimate the calldata length to determine whether to commit the pending batches
    index := 0
    commit := false
    calldataByteLen := uint64(0)
    for ; index < len(p.batchDataBuffer); index++ {
        calldataByteLen += bridgeabi.GetBatchCalldataLength(&p.batchDataBuffer[index].Batch)
        if calldataByteLen > p.commitCalldataSizeLimit {
            commit = true
            if index == 0 {
                log.Warn(
                    "The calldata size of one batch is larger than the threshold",
                    "batch_hash", p.batchDataBuffer[0].Hash().Hex(),
                    "calldata_size", calldataByteLen,
                )
                index = 1
            }
            break
        }
    }
    if !commit {
        return
    }

    // Send commit tx for batchDataBuffer[0:index]
    log.Info("Commit batches", "start_index", p.batchDataBuffer[0].Batch.BatchIndex,
        "end_index", p.batchDataBuffer[index-1].Batch.BatchIndex)
    err := p.relayer.SendCommitTx(p.batchDataBuffer[:index])
    if err != nil {
        // leave the retry to the next ticker
        log.Error("SendCommitTx failed", "error", err)
    } else {
        // pop the processed batches from the buffer
        p.batchDataBuffer = p.batchDataBuffer[index:]
    }
}
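
The commit decision above can be read as a pure function over the buffer: accumulate estimated calldata sizes and commit the prefix once the limit is crossed, with a single oversized batch still committed alone. A compact sketch of exactly that rule; commitPrefix is a hypothetical name and the sizes slice stands in for bridgeabi.GetBatchCalldataLength over the buffer:

package main

import "fmt"

// commitPrefix returns how many buffered batches to commit now (0 = wait).
func commitPrefix(sizes []uint64, limit uint64) int {
    var total uint64
    for i, s := range sizes {
        total += s
        if total > limit {
            if i == 0 {
                // one batch alone exceeds the limit: commit it by itself
                return 1
            }
            // commit the prefix that still fits under the limit
            return i
        }
    }
    // buffer does not yet fill a full commit tx: keep buffering
    return 0
}

func main() {
    fmt.Println(commitPrefix([]uint64{40, 40, 40}, 100)) // 2: first two fit
    fmt.Println(commitPrefix([]uint64{150}, 100))        // 1: oversized alone
    fmt.Println(commitPrefix([]uint64{30, 30}, 100))     // 0: keep buffering
}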

func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
    if len(blocks) == 0 {
        return
    }

    if blocks[0].GasUsed > p.batchGasThreshold {
        log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
        if err := p.createBatchForBlocks(blocks[:1]); err != nil {
            log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
        }
        return
    }

    if blocks[0].TxNum > p.batchTxNumThreshold {
        log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
        if err := p.createBatchForBlocks(blocks[:1]); err != nil {
            log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
        }
        return
    }

    var (
        length = len(blocks)
        gasUsed, txNum uint64
    )
    // add blocks into batch until reach batchGasThreshold
    for i, block := range blocks {
        if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) {
            blocks = blocks[:i]
            break
        }
        gasUsed += block.GasUsed
        txNum += block.TxNum
    }

    // if too few gas gathered, but we don't want to halt, we then check the first block in the batch:
    // if it's not old enough we will skip proposing the batch,
    // otherwise we will still propose a batch
    if length == len(blocks) && blocks[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) {
        return
    }

    if err := p.createBatchForBlocks(blocks); err != nil {
        log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
    }
}
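
The selection rule in proposeBatch above is: greedily take blocks while gas and tx counts stay under their thresholds, and if every pending block fits, only propose once the oldest block has waited at least batchTimeSec. A minimal sketch of that rule under simplifying assumptions (the Block type and selectBlocks are invented stand-ins for types.BlockInfo and the method; the single-oversized-block special cases are omitted):

package main

import (
    "fmt"
    "time"
)

type Block struct {
    GasUsed, TxNum, Timestamp uint64
}

func selectBlocks(blocks []Block, gasLimit, txLimit, maxAgeSec uint64) []Block {
    var gas, txs uint64
    cut := len(blocks)
    for i, b := range blocks {
        if gas+b.GasUsed > gasLimit || txs+b.TxNum > txLimit {
            cut = i
            break
        }
        gas += b.GasUsed
        txs += b.TxNum
    }
    // Thresholds never tripped: only batch if the first block is old enough.
    if cut == len(blocks) && blocks[0].Timestamp+maxAgeSec > uint64(time.Now().Unix()) {
        return nil
    }
    return blocks[:cut]
}

func main() {
    now := uint64(time.Now().Unix())
    blocks := []Block{
        {GasUsed: 2_000_000, TxNum: 50, Timestamp: now - 10},
        {GasUsed: 2_000_000, TxNum: 50, Timestamp: now},
    }
    fmt.Println(len(selectBlocks(blocks, 3_000_000, 135, 5))) // 1: gas threshold trips
}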

func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error {
    lastBatch, err := p.orm.GetLatestBatch()
    if err != nil {
        // We should not receive sql.ErrNoRows error. The DB should have the batch entry that contains the genesis block.
        return err
    }

    batchData, err := p.generateBatchData(lastBatch, blocks)
    if err != nil {
        log.Error("createBatchData failed", "error", err)
        return err
    }

    if err := AddBatchInfoToDB(p.orm, batchData); err != nil {
        log.Error("addBatchInfoToDB failed", "BatchHash", batchData.Hash(), "error", err)
        return err
    }

    p.batchDataBuffer = append(p.batchDataBuffer, batchData)
    return nil
}

func (p *BatchProposer) generateBatchData(parentBatch *types.BlockBatch, blocks []*types.BlockInfo) (*types.BatchData, error) {
    var traces []*geth_types.BlockTrace
    for _, block := range blocks {
        trs, err := p.orm.GetL2BlockTraces(map[string]interface{}{"hash": block.Hash})
        if err != nil || len(trs) != 1 {
            log.Error("Failed to GetBlockTraces", "hash", block.Hash, "err", err)
            return nil, err
        }
        traces = append(traces, trs[0])
    }
    return types.NewBatchData(parentBatch, traces, p.piCfg), nil
}

func (p *BatchProposer) getBatchDataBufferSize() (size uint64) {
    for _, batchData := range p.batchDataBuffer {
        size += bridgeabi.GetBatchCalldataLength(&batchData.Batch)
    }
    return
}

@@ -1,13 +1,12 @@
package l2

import (
    "encoding/json"
    "context"
    "fmt"
    "math/big"
    "os"
    "math"
    "testing"

    "github.com/scroll-tech/go-ethereum/core/types"
    geth_types "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/stretchr/testify/assert"

    "scroll-tech/database"
@@ -15,48 +14,90 @@ import (

    "scroll-tech/bridge/config"

    "scroll-tech/common/utils"
    "scroll-tech/common/types"
)

func testBatchProposer(t *testing.T) {
func testBatchProposerProposeBatch(t *testing.T) {
    // Create db handler and reset db.
    db, err := database.NewOrmFactory(cfg.DBConfig)
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
    defer db.Close()

    trace2 := &types.BlockTrace{}
    trace3 := &types.BlockTrace{}

    data, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
    assert.NoError(t, err)
    err = json.Unmarshal(data, trace2)
    assert.NoError(t, err)

    data, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
    assert.NoError(t, err)
    err = json.Unmarshal(data, trace3)
    assert.NoError(t, err)
    // Insert traces into db.
    assert.NoError(t, db.InsertBlockTraces([]*types.BlockTrace{trace2, trace3}))
    assert.NoError(t, db.InsertL2BlockTraces([]*geth_types.BlockTrace{blockTrace1}))

    id := utils.ComputeBatchID(trace3.Header.Hash(), trace2.Header.ParentHash, big.NewInt(0))
    l2cfg := cfg.L2Config
    wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, db)
    wc.Start()
    defer wc.Stop()

    proposer := newBatchProposer(&config.BatchProposerConfig{
    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
    assert.NoError(t, err)

    proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
        ProofGenerationFreq: 1,
        BatchGasThreshold:   3000000,
        BatchTxNumThreshold: 135,
        BatchTimeSec:        1,
        BatchBlocksLimit:    100,
    }, db)
    }, relayer, db)
    proposer.tryProposeBatch()

    infos, err := db.GetUnbatchedBlocks(map[string]interface{}{},
    infos, err := db.GetUnbatchedL2Blocks(map[string]interface{}{},
        fmt.Sprintf("order by number ASC LIMIT %d", 100))
    assert.NoError(t, err)
    assert.Equal(t, true, len(infos) == 0)
    assert.Equal(t, 0, len(infos))

    exist, err := db.BatchRecordExist(id)
    exist, err := db.BatchRecordExist(batchData1.Hash().Hex())
    assert.NoError(t, err)
    assert.Equal(t, true, exist)
}

func testBatchProposerGracefulRestart(t *testing.T) {
    // Create db handler and reset db.
    db, err := database.NewOrmFactory(cfg.DBConfig)
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
    defer db.Close()

    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
    assert.NoError(t, err)

    // Insert traces into db.
    assert.NoError(t, db.InsertL2BlockTraces([]*geth_types.BlockTrace{blockTrace2}))

    // Insert block batch into db.
    dbTx, err := db.Beginx()
    assert.NoError(t, err)
    assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
    assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData2))
    assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
        batchData1.Batch.Blocks[0].BlockNumber}, batchData1.Hash().Hex()))
    assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
        batchData2.Batch.Blocks[0].BlockNumber}, batchData2.Hash().Hex()))
    assert.NoError(t, dbTx.Commit())

    assert.NoError(t, db.UpdateRollupStatus(context.Background(), batchData1.Hash().Hex(), types.RollupFinalized))

    batchHashes, err := db.GetPendingBatches(math.MaxInt32)
    assert.NoError(t, err)
    assert.Equal(t, 1, len(batchHashes))
    assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])
    // test p.recoverBatchDataBuffer().
    _ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
        ProofGenerationFreq: 1,
        BatchGasThreshold:   3000000,
        BatchTxNumThreshold: 135,
        BatchTimeSec:        1,
        BatchBlocksLimit:    100,
    }, relayer, db)

    batchHashes, err = db.GetPendingBatches(math.MaxInt32)
    assert.NoError(t, err)
    assert.Equal(t, 0, len(batchHashes))

    exist, err := db.BatchRecordExist(batchData2.Hash().Hex())
    assert.NoError(t, err)
    assert.Equal(t, true, exist)
}

@@ -1,12 +1,17 @@
package l2

import (
    "encoding/json"
    "fmt"
    "os"
    "testing"

    geth_types "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/stretchr/testify/assert"

    "scroll-tech/common/docker"
    "scroll-tech/common/types"

    "scroll-tech/bridge/config"
)
@@ -22,6 +27,14 @@ var (

    // l2geth client
    l2Cli *ethclient.Client

    // block trace
    blockTrace1 *geth_types.BlockTrace
    blockTrace2 *geth_types.BlockTrace

    // batch data
    batchData1 *types.BatchData
    batchData2 *types.BatchData
)

func setupEnv(t *testing.T) (err error) {
@@ -47,6 +60,40 @@ func setupEnv(t *testing.T) (err error) {
    l2Cli, err = ethclient.Dial(cfg.L2Config.Endpoint)
    assert.NoError(t, err)

    templateBlockTrace1, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
    if err != nil {
        return err
    }
    // unmarshal blockTrace
    blockTrace1 = &geth_types.BlockTrace{}
    if err = json.Unmarshal(templateBlockTrace1, blockTrace1); err != nil {
        return err
    }

    parentBatch1 := &types.BlockBatch{
        Index: 1,
        Hash:  "0x0000000000000000000000000000000000000000",
    }
    batchData1 = types.NewBatchData(parentBatch1, []*geth_types.BlockTrace{blockTrace1}, nil)

    templateBlockTrace2, err := os.ReadFile("../../common/testdata/blockTrace_03.json")
    if err != nil {
        return err
    }
    // unmarshal blockTrace
    blockTrace2 = &geth_types.BlockTrace{}
    if err = json.Unmarshal(templateBlockTrace2, blockTrace2); err != nil {
        return err
    }
    parentBatch2 := &types.BlockBatch{
        Index: batchData1.Batch.BatchIndex,
        Hash:  batchData1.Hash().Hex(),
    }
    batchData2 = types.NewBatchData(parentBatch2, []*geth_types.BlockTrace{blockTrace2}, nil)

    fmt.Printf("batchhash1 = %x\n", batchData1.Hash())
    fmt.Printf("batchhash2 = %x\n", batchData2.Hash())

    return err
}

@@ -75,11 +122,12 @@ func TestFunction(t *testing.T) {
    // Run l2 relayer test cases.
    t.Run("TestCreateNewRelayer", testCreateNewRelayer)
    t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
    t.Run("testL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
    t.Run("testL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
    t.Run("testL2RelayerSkipBatches", testL2RelayerSkipBatches)
    t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
    t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)

    t.Run("testBatchProposer", testBatchProposer)
    // Run batch proposer test cases.
    t.Run("TestBatchProposerProposeBatch", testBatchProposerProposeBatch)
    t.Run("TestBatchProposerGracefulRestart", testBatchProposerGracefulRestart)

    t.Cleanup(func() {
        free(t)

@@ -13,12 +13,15 @@ import (
    "github.com/scroll-tech/go-ethereum/accounts/abi"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"
    "golang.org/x/sync/errgroup"
    "modernc.org/mathutil"

    "scroll-tech/common/types"

    "scroll-tech/database"
    "scroll-tech/database/orm"

    bridge_abi "scroll-tech/bridge/abi"
    "scroll-tech/bridge/config"
@@ -35,6 +38,8 @@ import (
type Layer2Relayer struct {
    ctx context.Context

    l2Client *ethclient.Client

    db  database.OrmFactory
    cfg *config.RelayerConfig

@@ -46,23 +51,27 @@ type Layer2Relayer struct {
    rollupCh    <-chan *sender.Confirmation
    l1RollupABI *abi.ABI

    gasOracleSender *sender.Sender
    gasOracleCh     <-chan *sender.Confirmation
    l2GasOracleABI  *abi.ABI

    // A list of processing message.
    // key(string): confirmation ID, value(string): layer2 hash.
    processingMessage sync.Map

    // A list of processing batch commitment.
    // key(string): confirmation ID, value(string): batch id.
    processingCommitment sync.Map
    // A list of processing batches commitment.
    // key(string): confirmation ID, value([]string): batch hashes.
    processingBatchesCommitment sync.Map

    // A list of processing batch finalization.
    // key(string): confirmation ID, value(string): batch id.
    // key(string): confirmation ID, value(string): batch hash.
    processingFinalization sync.Map

    stopCh chan struct{}
}

// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
    // @todo use different sender for relayer, block commit and proof finalize
    messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
    if err != nil {
@@ -76,20 +85,35 @@ func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
        return nil, err
    }

    gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
    if err != nil {
        log.Error("Failed to create gas oracle sender", "err", err)
        return nil, err
    }

    return &Layer2Relayer{
        ctx:                    ctx,
        db:                     db,
        messageSender:          messageSender,
        messageCh:              messageSender.ConfirmChan(),
        l1MessengerABI:         bridge_abi.L1MessengerMetaABI,
        rollupSender:           rollupSender,
        rollupCh:               rollupSender.ConfirmChan(),
        l1RollupABI:            bridge_abi.RollupMetaABI,
        cfg:                    cfg,
        processingMessage:      sync.Map{},
        processingCommitment:   sync.Map{},
        processingFinalization: sync.Map{},
        stopCh:                 make(chan struct{}),
        ctx: ctx,
        db:  db,

        l2Client: l2Client,

        messageSender:  messageSender,
        messageCh:      messageSender.ConfirmChan(),
        l1MessengerABI: bridge_abi.L1ScrollMessengerABI,

        rollupSender: rollupSender,
        rollupCh:     rollupSender.ConfirmChan(),
        l1RollupABI:  bridge_abi.ScrollChainABI,

        gasOracleSender: gasOracleSender,
        gasOracleCh:     gasOracleSender.ConfirmChan(),
        l2GasOracleABI:  bridge_abi.L2GasPriceOracleABI,

        cfg:                         cfg,
        processingMessage:           sync.Map{},
        processingBatchesCommitment: sync.Map{},
        processingFinalization:      sync.Map{},
        stopCh:                      make(chan struct{}),
    }, nil
}

@@ -105,7 +129,7 @@ func (r *Layer2Relayer) ProcessSavedEvents() {

    // msgs are sorted by nonce in increasing order
    msgs, err := r.db.GetL2Messages(
        map[string]interface{}{"status": orm.MsgPending},
        map[string]interface{}{"status": types.MsgPending},
        fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
        fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
    )
@@ -125,7 +149,7 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
    for _, msg := range msgs[:size] {
        msg := msg
        g.Go(func() error {
            return r.processSavedEvent(msg, batch.Index)
            return r.processSavedEvent(msg)
        })
    }
    if err := g.Wait(); err != nil {
@@ -137,13 +161,24 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
    }
}

func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
    // @todo fetch merkle proof from l2geth
    log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)

    // Get the block info that contains the message
    blockInfos, err := r.db.GetL2BlockInfos(map[string]interface{}{"number": msg.Height})
    if err != nil {
        log.Error("Failed to GetL2BlockInfos from DB", "number", msg.Height)
    }
    blockInfo := blockInfos[0]
    if !blockInfo.BatchHash.Valid {
        log.Error("Block has not been batched yet", "number", blockInfo.Number, "msg.nonce", msg.Nonce)
        return nil
    }

    // TODO: rebuild the withdraw trie to generate the merkle proof
    proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
        BlockHeight: big.NewInt(int64(msg.Height)),
        BatchIndex:  big.NewInt(0).SetUint64(index),
        BatchHash:   common.HexToHash(blockInfo.BatchHash.String),
        MerkleProof: make([]byte, 0),
    }
    from := common.HexToAddress(msg.Sender)
@@ -154,11 +189,9 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) erro
        log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
        // TODO: need to skip this message by changing its status to MsgError
    }
    fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
    deadline := big.NewInt(int64(msg.Deadline))
    msgNonce := big.NewInt(int64(msg.Nonce))
    calldata := common.Hex2Bytes(msg.Calldata)
    data, err := r.l1MessengerABI.Pack("relayMessageWithProof", from, target, value, fee, deadline, msgNonce, calldata, proof)
    data, err := r.l1MessengerABI.Pack("relayMessageWithProof", from, target, value, msgNonce, calldata, proof)
    if err != nil {
        log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
        // TODO: need to skip this message by changing its status to MsgError
@@ -167,10 +200,10 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) erro

    hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
    if err != nil && err.Error() == "execution reverted: Message expired" {
        return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgExpired)
        return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired)
    }
    if err != nil && err.Error() == "execution reverted: Message successfully executed" {
        return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
        return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
    }
    if err != nil {
        if !errors.Is(err, sender.ErrNoAvailableAccount) {
@@ -182,7 +215,7 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) erro

    // save status in db
    // @todo handle db error
    err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
    err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
    if err != nil {
        log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
        return err
@@ -191,97 +224,93 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) erro
    return nil
}

// ProcessPendingBatches submit batch data to layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBatches() {
    // batches are sorted by batch index in increasing order
    batchesInDB, err := r.db.GetPendingBatches(1)
// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
    batch, err := r.db.GetLatestBatch()
    if err != nil {
        log.Error("Failed to fetch pending L2 batches", "err", err)
        return
    }
    if len(batchesInDB) == 0 {
        return
    }
    id := batchesInDB[0]
    // @todo add support to relay multiple batches

    batches, err := r.db.GetBlockBatches(map[string]interface{}{"id": id})
    if err != nil || len(batches) == 0 {
        log.Error("Failed to GetBlockBatches", "batch_id", id, "err", err)
        return
    }
    batch := batches[0]

    traces, err := r.db.GetBlockTraces(map[string]interface{}{"batch_id": id}, "ORDER BY number ASC")
    if err != nil || len(traces) == 0 {
        log.Error("Failed to GetBlockTraces", "batch_id", id, "err", err)
        log.Error("Failed to GetLatestBatch", "err", err)
        return
    }

    layer2Batch := &bridge_abi.IZKRollupLayer2Batch{
        BatchIndex: batch.Index,
        ParentHash: common.HexToHash(batch.ParentHash),
        Blocks:     make([]bridge_abi.IZKRollupLayer2BlockHeader, len(traces)),
    }

    parentHash := common.HexToHash(batch.ParentHash)
    for i, trace := range traces {
        layer2Batch.Blocks[i] = bridge_abi.IZKRollupLayer2BlockHeader{
            BlockHash:   trace.Header.Hash(),
            ParentHash:  parentHash,
            BaseFee:     trace.Header.BaseFee,
            StateRoot:   trace.StorageTrace.RootAfter,
            BlockHeight: trace.Header.Number.Uint64(),
            GasUsed:     0,
            Timestamp:   trace.Header.Time,
            ExtraData:   make([]byte, 0),
            Txs:         make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.Transactions)),
        }
        for j, tx := range trace.Transactions {
            layer2Batch.Blocks[i].Txs[j] = bridge_abi.IZKRollupLayer2Transaction{
                Caller:   tx.From,
                Nonce:    tx.Nonce,
                Gas:      tx.Gas,
                GasPrice: tx.GasPrice.ToInt(),
                Value:    tx.Value.ToInt(),
                Data:     common.Hex2Bytes(tx.Data),
                R:        tx.R.ToInt(),
                S:        tx.S.ToInt(),
                V:        tx.V.ToInt().Uint64(),
            }
            if tx.To != nil {
                layer2Batch.Blocks[i].Txs[j].Target = *tx.To
            }
            layer2Batch.Blocks[i].GasUsed += trace.ExecutionResults[j].Gas
    if batch.OracleStatus == types.GasOraclePending {
        suggestGasPrice, err := r.l2Client.SuggestGasPrice(r.ctx)
        if err != nil {
            log.Error("Failed to fetch SuggestGasPrice from l2geth", "err", err)
            return
        }

        // for next iteration
        parentHash = layer2Batch.Blocks[i].BlockHash
        data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
        if err != nil {
            log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "block.BaseFee", suggestGasPrice.Uint64(), "err", err)
            return
        }

        hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data)
        if err != nil {
            if !errors.Is(err, sender.ErrNoAvailableAccount) {
                log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
            }
            return
        }

        err = r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
        if err != nil {
            log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
            return
        }
    }
}
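
The new ProcessGasPriceOracle flow above is: read the suggested gas price from an L2 node, ABI-encode a setL2BaseFee call, and submit it. A hedged, self-contained sketch of the same flow using upstream go-ethereum APIs; the one-function ABI JSON and the endpoint URL are illustrative assumptions, the real ABI lives in scroll-tech/bridge/abi:

package main

import (
    "context"
    "fmt"
    "log"
    "strings"

    "github.com/scroll-tech/go-ethereum/accounts/abi"
    "github.com/scroll-tech/go-ethereum/ethclient"
)

// Illustrative single-function ABI for the gas price oracle contract.
const oracleABIJSON = `[{"inputs":[{"internalType":"uint256","name":"_l2BaseFee","type":"uint256"}],"name":"setL2BaseFee","outputs":[],"stateMutability":"nonpayable","type":"function"}]`

func main() {
    client, err := ethclient.Dial("http://localhost:8545") // hypothetical l2geth endpoint
    if err != nil {
        log.Fatal(err)
    }
    gasPrice, err := client.SuggestGasPrice(context.Background())
    if err != nil {
        log.Fatal(err)
    }

    oracleABI, err := abi.JSON(strings.NewReader(oracleABIJSON))
    if err != nil {
        log.Fatal(err)
    }
    calldata, err := oracleABI.Pack("setL2BaseFee", gasPrice)
    if err != nil {
        log.Fatal(err)
    }
    // A sender component would now submit calldata to the oracle contract.
    fmt.Printf("would send %d bytes of calldata with base fee %s\n", len(calldata), gasPrice)
}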

// SendCommitTx sends commitBatches tx to L1.
func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error {
    if len(batchData) == 0 {
        log.Error("SendCommitTx receives empty batch")
        return nil
    }

    data, err := r.l1RollupABI.Pack("commitBatch", layer2Batch)
    // pack calldata
    commitBatches := make([]bridge_abi.IScrollChainBatch, len(batchData))
    for i, batch := range batchData {
        commitBatches[i] = batch.Batch
    }
    calldata, err := r.l1RollupABI.Pack("commitBatches", commitBatches)
    if err != nil {
        log.Error("Failed to pack commitBatch", "id", id, "index", batch.Index, "err", err)
        return
        log.Error("Failed to pack commitBatches",
            "error", err,
            "start_batch_index", commitBatches[0].BatchIndex,
            "end_batch_index", commitBatches[len(commitBatches)-1].BatchIndex)
        return err
    }

    txID := id + "-commit"
    // add suffix `-commit` to avoid duplication with finalize tx in unit tests
    hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
    // generate a unique txID and send transaction
    var bytes []byte
    for _, batch := range batchData {
        bytes = append(bytes, batch.Hash().Bytes()...)
    }
    txID := crypto.Keccak256Hash(bytes).String()
    txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata)
    if err != nil {
        if !errors.Is(err, sender.ErrNoAvailableAccount) {
            log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
            log.Error("Failed to send commitBatches tx to layer1 ", "err", err)
        }
        return
        return err
    }
    log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
    log.Info("Sent the commitBatches tx to layer1",
        "tx_hash", txHash.Hex(),
        "start_batch_index", commitBatches[0].BatchIndex,
        "end_batch_index", commitBatches[len(commitBatches)-1].BatchIndex)

    // record and sync with db, @todo handle db error
    err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
    if err != nil {
        log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
    batchHashes := make([]string, len(batchData))
    for i, batch := range batchData {
        batchHashes[i] = batch.Hash().Hex()
        err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHashes[i], txHash.String(), types.RollupCommitting)
        if err != nil {
            log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batchHashes[i], "index", batch.Batch.BatchIndex, "err", err)
        }
    }
    r.processingCommitment.Store(txID, id)
    r.processingBatchesCommitment.Store(txID, batchHashes)
    return nil
}
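
One commit tx now covers several batches, so SendCommitTx derives its txID from the Keccak hash of the concatenated batch hashes, which stays stable across restarts and is unique per batch set. A small sketch of that scheme; the hash values in main are made up for the example:

package main

import (
    "fmt"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
)

// commitTxID derives a deterministic transaction ID from a set of batch hashes.
func commitTxID(batchHashes []common.Hash) string {
    var buf []byte
    for _, h := range batchHashes {
        buf = append(buf, h.Bytes()...)
    }
    return crypto.Keccak256Hash(buf).String()
}

func main() {
    batches := []common.Hash{
        common.HexToHash("0x01"),
        common.HexToHash("0x02"),
    }
    fmt.Println(commitTxID(batches)) // same input set always yields the same ID
}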

// ProcessCommittedBatches submit proof to layer 1 rollup contract
@@ -303,91 +332,91 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
    if len(batches) == 0 {
        return
    }
    id := batches[0]
    hash := batches[0]
    // @todo add support to relay multiple batches

    status, err := r.db.GetProvingStatusByID(id)
    status, err := r.db.GetProvingStatusByHash(hash)
    if err != nil {
        log.Error("GetProvingStatusByID failed", "id", id, "err", err)
        log.Error("GetProvingStatusByHash failed", "hash", hash, "err", err)
        return
    }

    switch status {
    case orm.ProvingTaskUnassigned, orm.ProvingTaskAssigned:
    case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
        // The proof for this block is not ready yet.
        return

    case orm.ProvingTaskProved:
    case types.ProvingTaskProved:
        // It's an intermediate state. The roller manager received the proof but has not verified
        // the proof yet. We don't roll up the proof until it's verified.
        return

    case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
    case types.ProvingTaskFailed, types.ProvingTaskSkipped:
        // note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake

        if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
            log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
        if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
            log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
        }

    case orm.ProvingTaskVerified:
        log.Info("Start to roll up zk proof", "id", id)
    case types.ProvingTaskVerified:
        log.Info("Start to roll up zk proof", "hash", hash)
        success := false

        defer func() {
            // TODO: need to revisit this and have a more fine-grained error handling
            if !success {
                log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "id", id)
                if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
                    log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
                log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash)
                if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
                    log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
                }
            }
        }()

        proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByID(id)
        proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByHash(hash)
        if err != nil {
            log.Warn("fetch get proof by id failed", "id", id, "err", err)
            log.Warn("fetch get proof by hash failed", "hash", hash, "err", err)
            return
        }
        if proofBuffer == nil || instanceBuffer == nil {
            log.Warn("proof or instance not ready", "id", id)
            log.Warn("proof or instance not ready", "hash", hash)
            return
        }
        if len(proofBuffer)%32 != 0 {
            log.Error("proof buffer has wrong length", "id", id, "length", len(proofBuffer))
            log.Error("proof buffer has wrong length", "hash", hash, "length", len(proofBuffer))
            return
        }
        if len(instanceBuffer)%32 != 0 {
            log.Warn("instance buffer has wrong length", "id", id, "length", len(instanceBuffer))
            log.Warn("instance buffer has wrong length", "hash", hash, "length", len(instanceBuffer))
            return
        }

        proof := utils.BufferToUint256Le(proofBuffer)
        instance := utils.BufferToUint256Le(instanceBuffer)
        data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(id), proof, instance)
        data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, instance)
        if err != nil {
            log.Error("Pack finalizeBatchWithProof failed", "err", err)
            return
        }

        txID := id + "-finalize"
        txID := hash + "-finalize"
        // add suffix `-finalize` to avoid duplication with commit tx in unit tests
        txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
        hash := &txHash
        finalizeTxHash := &txHash
        if err != nil {
            if !errors.Is(err, sender.ErrNoAvailableAccount) {
                log.Error("finalizeBatchWithProof in layer1 failed", "id", id, "err", err)
                log.Error("finalizeBatchWithProof in layer1 failed", "hash", hash, "err", err)
            }
            return
        }
        log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
        log.Info("finalizeBatchWithProof in layer1", "batch_hash", hash, "tx_hash", hash)

        // record and sync with db, @todo handle db error
        err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
        err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
        if err != nil {
            log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
            log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", hash, "err", err)
        }
        success = true
        r.processingFinalization.Store(txID, id)
        r.processingFinalization.Store(txID, hash)

    default:
        log.Error("encounter unreachable case in ProcessCommittedBatches",
@@ -416,8 +445,8 @@ func (r *Layer2Relayer) Start() {
    ctx, cancel := context.WithCancel(r.ctx)

    go loop(ctx, r.ProcessSavedEvents)
    go loop(ctx, r.ProcessPendingBatches)
    go loop(ctx, r.ProcessCommittedBatches)
    go loop(ctx, r.ProcessGasPriceOracle)

    go func(ctx context.Context) {
        for {
@@ -428,6 +457,22 @@ func (r *Layer2Relayer) Start() {
                r.handleConfirmation(confirmation)
            case confirmation := <-r.rollupCh:
                r.handleConfirmation(confirmation)
            case cfm := <-r.gasOracleCh:
                if !cfm.IsSuccessful {
                    // @discuss: maybe make it pending again?
                    err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
                    if err != nil {
                        log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
                    }
                    log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
                } else {
                    // @todo handle db error
                    err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
                    if err != nil {
                        log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
                    }
                    log.Info("transaction confirmed in layer1", "confirmation", cfm)
                }
            }
        }
    }(ctx)
@@ -453,31 +498,33 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
    if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
        transactionType = "MessageRelay"
        // @todo handle db error
        err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), orm.MsgConfirmed, confirmation.TxHash.String())
        err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), types.MsgConfirmed, confirmation.TxHash.String())
        if err != nil {
            log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
        }
        r.processingMessage.Delete(confirmation.ID)
    }

    // check whether it is block commitment transaction
    if batchID, ok := r.processingCommitment.Load(confirmation.ID); ok {
        transactionType = "BatchCommitment"
        // @todo handle db error
        err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupCommitted)
        if err != nil {
            log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
    // check whether it is CommitBatches transaction
    if batchBatches, ok := r.processingBatchesCommitment.Load(confirmation.ID); ok {
        transactionType = "BatchesCommitment"
        for _, batchHash := range batchBatches.([]string) {
            // @todo handle db error
            err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), types.RollupCommitted)
            if err != nil {
                log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_hash", batchHash, "err", err)
            }
        }
        r.processingCommitment.Delete(confirmation.ID)
        r.processingBatchesCommitment.Delete(confirmation.ID)
    }

    // check whether it is proof finalization transaction
    if batchID, ok := r.processingFinalization.Load(confirmation.ID); ok {
    if batchHash, ok := r.processingFinalization.Load(confirmation.ID); ok {
        transactionType = "ProofFinalization"
        // @todo handle db error
        err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupFinalized)
        err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), types.RollupFinalized)
        if err != nil {
            log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
            log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", batchHash.(string), "err", err)
        }
        r.processingFinalization.Delete(confirmation.ID)
    }
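
The bookkeeping pattern in handleConfirmation above pairs with the send paths: Store the pending work keyed by txID when a tx goes out, then Load and Delete when its confirmation arrives. A minimal sketch of that pattern; the txID and batch hash values are illustrative, and the stored value is a []string as in processingBatchesCommitment:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var processing sync.Map

    // On send: remember which batch hashes this txID covers.
    txID := "0xabc" // illustrative
    processing.Store(txID, []string{"batch-1", "batch-2"})

    // On confirmation: mark each batch committed, then drop the entry.
    if v, ok := processing.Load(txID); ok {
        for _, batchHash := range v.([]string) {
            fmt.Println("mark committed:", batchHash)
        }
        processing.Delete(txID)
    }
}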
|
||||
|
||||
@@ -5,27 +5,26 @@ import (
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
geth_types "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/migrate"
|
||||
"scroll-tech/database/orm"
|
||||
)
|
||||
|
||||
var (
|
||||
templateL2Message = []*orm.L2Message{
|
||||
templateL2Message = []*types.L2Message{
|
||||
{
|
||||
Nonce: 1,
|
||||
Height: 1,
|
||||
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
|
||||
Value: "100",
|
||||
Fee: "100",
|
||||
GasLimit: 11529940,
|
||||
Deadline: uint64(time.Now().Unix()),
|
||||
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
|
||||
Calldata: "testdata",
|
||||
Layer2Hash: "hash0",
|
||||
@@ -40,7 +39,7 @@ func testCreateNewRelayer(t *testing.T) {
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
relayer, err := NewLayer2Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
defer relayer.Stop()
|
||||
|
||||
@@ -55,107 +54,43 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
|
||||
defer db.Close()
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
defer relayer.Stop()
|
||||
|
||||
err = db.SaveL2Messages(context.Background(), templateL2Message)
|
||||
assert.NoError(t, err)
|
||||
|
||||
traces := []*types.BlockTrace{
|
||||
traces := []*geth_types.BlockTrace{
|
||||
{
|
||||
Header: &types.Header{
|
||||
Header: &geth_types.Header{
|
||||
Number: big.NewInt(int64(templateL2Message[0].Height)),
|
||||
},
|
||||
},
|
||||
{
|
||||
Header: &types.Header{
|
||||
Header: &geth_types.Header{
|
||||
Number: big.NewInt(int64(templateL2Message[0].Height + 1)),
|
||||
},
|
||||
},
|
||||
}
|
||||
err = db.InsertBlockTraces(traces)
|
||||
err = db.InsertL2BlockTraces(traces)
|
||||
assert.NoError(t, err)
|
||||
|
||||
dbTx, err := db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
batchID, err := db.NewBatchInDBTx(dbTx,
|
||||
&orm.BlockInfo{Number: templateL2Message[0].Height},
|
||||
&orm.BlockInfo{Number: templateL2Message[0].Height + 1},
|
||||
"0f", 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
|
||||
assert.NoError(t, err)
|
||||
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
|
||||
templateL2Message[0].Height,
|
||||
templateL2Message[0].Height + 1}, batchID)
|
||||
assert.NoError(t, err)
|
||||
err = dbTx.Commit()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
|
||||
batchHash := batchData1.Hash().Hex()
|
||||
assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{1}, batchHash))
|
||||
assert.NoError(t, dbTx.Commit())
|
||||
|
||||
err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupFinalized)
|
||||
err = db.UpdateRollupStatus(context.Background(), batchHash, types.RollupFinalized)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessSavedEvents()
|
||||
|
||||
msg, err := db.GetL2MessageByNonce(templateL2Message[0].Nonce)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.MsgSubmitted, msg.Status)
|
||||
}
|
||||
|
||||
func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
defer relayer.Stop()
|
||||
|
||||
// this blockresult has number of 0x4, need to change it to match the testcase
|
||||
// In this testcase scenario, db will store two blocks with height 0x4 and 0x3
|
||||
var traces []*types.BlockTrace
|
||||
|
||||
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
|
||||
assert.NoError(t, err)
|
||||
blockTrace := &types.BlockTrace{}
|
||||
err = json.Unmarshal(templateBlockTrace, blockTrace)
|
||||
assert.NoError(t, err)
|
||||
traces = append(traces, blockTrace)
|
||||
templateBlockTrace, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
|
||||
assert.NoError(t, err)
|
||||
blockTrace = &types.BlockTrace{}
|
||||
err = json.Unmarshal(templateBlockTrace, blockTrace)
|
||||
assert.NoError(t, err)
|
||||
traces = append(traces, blockTrace)
|
||||
|
||||
err = db.InsertBlockTraces(traces)
|
||||
assert.NoError(t, err)
|
||||
|
||||
dbTx, err := db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
batchID, err := db.NewBatchInDBTx(dbTx,
|
||||
&orm.BlockInfo{Number: traces[0].Header.Number.Uint64()},
|
||||
&orm.BlockInfo{Number: traces[1].Header.Number.Uint64()},
|
||||
"ff", 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
|
||||
assert.NoError(t, err)
|
||||
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
|
||||
traces[0].Header.Number.Uint64(),
|
||||
traces[1].Header.Number.Uint64()}, batchID)
|
||||
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)

// err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupPending)
// assert.NoError(t, err)

relayer.ProcessPendingBatches()

// Check if Rollup Result is changed successfully
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
assert.Equal(t, types.MsgSubmitted, msg.Status)
}

func testL2RelayerProcessCommittedBatches(t *testing.T) {
@@ -166,32 +101,32 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
defer db.Close()

l2Cfg := cfg.L2Config
- relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
+ relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()

dbTx, err := db.Beginx()
assert.NoError(t, err)
- batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
- assert.NoError(t, err)
+ assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
+ batchHash := batchData1.Hash().Hex()
err = dbTx.Commit()
assert.NoError(t, err)

- err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupCommitted)
+ err = db.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
assert.NoError(t, err)

tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
- err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
+ err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
- err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
+ err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err)

relayer.ProcessCommittedBatches()

- status, err := db.GetRollupStatus(batchID)
+ status, err := db.GetRollupStatus(batchHash)
assert.NoError(t, err)
- assert.Equal(t, orm.RollupFinalizing, status)
+ assert.Equal(t, types.RollupFinalizing, status)
}

func testL2RelayerSkipBatches(t *testing.T) {
@@ -202,46 +137,47 @@ func testL2RelayerSkipBatches(t *testing.T) {
defer db.Close()

l2Cfg := cfg.L2Config
- relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
+ relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()

- createBatch := func(rollupStatus orm.RollupStatus, provingStatus orm.ProvingStatus) string {
+ createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string {
dbTx, err := db.Beginx()
assert.NoError(t, err)
- batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
- assert.NoError(t, err)
+ batchData := genBatchData(t, index)
+ assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
+ batchHash := batchData.Hash().Hex()
err = dbTx.Commit()
assert.NoError(t, err)

- err = db.UpdateRollupStatus(context.Background(), batchID, rollupStatus)
+ err = db.UpdateRollupStatus(context.Background(), batchHash, rollupStatus)
assert.NoError(t, err)

tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
- err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
+ err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
- err = db.UpdateProvingStatus(batchID, provingStatus)
+ err = db.UpdateProvingStatus(batchHash, provingStatus)
assert.NoError(t, err)

- return batchID
+ return batchHash
}

skipped := []string{
- createBatch(orm.RollupCommitted, orm.ProvingTaskSkipped),
- createBatch(orm.RollupCommitted, orm.ProvingTaskFailed),
+ createBatch(types.RollupCommitted, types.ProvingTaskSkipped, 1),
+ createBatch(types.RollupCommitted, types.ProvingTaskFailed, 2),
}

notSkipped := []string{
- createBatch(orm.RollupPending, orm.ProvingTaskSkipped),
- createBatch(orm.RollupCommitting, orm.ProvingTaskSkipped),
- createBatch(orm.RollupFinalizing, orm.ProvingTaskSkipped),
- createBatch(orm.RollupFinalized, orm.ProvingTaskSkipped),
- createBatch(orm.RollupPending, orm.ProvingTaskFailed),
- createBatch(orm.RollupCommitting, orm.ProvingTaskFailed),
- createBatch(orm.RollupFinalizing, orm.ProvingTaskFailed),
- createBatch(orm.RollupFinalized, orm.ProvingTaskFailed),
- createBatch(orm.RollupCommitted, orm.ProvingTaskVerified),
+ createBatch(types.RollupPending, types.ProvingTaskSkipped, 3),
+ createBatch(types.RollupCommitting, types.ProvingTaskSkipped, 4),
+ createBatch(types.RollupFinalizing, types.ProvingTaskSkipped, 5),
+ createBatch(types.RollupFinalized, types.ProvingTaskSkipped, 6),
+ createBatch(types.RollupPending, types.ProvingTaskFailed, 7),
+ createBatch(types.RollupCommitting, types.ProvingTaskFailed, 8),
+ createBatch(types.RollupFinalizing, types.ProvingTaskFailed, 9),
+ createBatch(types.RollupFinalized, types.ProvingTaskFailed, 10),
+ createBatch(types.RollupCommitted, types.ProvingTaskVerified, 11),
}

relayer.ProcessCommittedBatches()
@@ -249,12 +185,27 @@ func testL2RelayerSkipBatches(t *testing.T) {
for _, id := range skipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
- assert.Equal(t, orm.RollupFinalizationSkipped, status)
+ assert.Equal(t, types.RollupFinalizationSkipped, status)
}

for _, id := range notSkipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
- assert.NotEqual(t, orm.RollupFinalizationSkipped, status)
+ assert.NotEqual(t, types.RollupFinalizationSkipped, status)
}
}

func genBatchData(t *testing.T, index uint64) *types.BatchData {
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
// unmarshal blockTrace
blockTrace := &geth_types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
assert.NoError(t, err)
blockTrace.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16))
parentBatch := &types.BlockBatch{
Index: index,
Hash: "0x0000000000000000000000000000000000000000",
}
return types.NewBatchData(parentBatch, []*geth_types.BlockTrace{blockTrace}, nil)
}
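
The skip test above pins down a simple decision rule: a batch moves to RollupFinalizationSkipped only when it is already committed and its proving task ended as skipped or failed. A compact restatement of that expectation (a sketch against the scroll-tech/common/types constants used in the test; the function name is illustrative, not part of the relayer):

package l2

import "scroll-tech/common/types"

// shouldSkipFinalization restates what testL2RelayerSkipBatches expects:
// only committed batches whose proof was skipped or failed are marked
// RollupFinalizationSkipped; every other combination keeps its status.
func shouldSkipFinalization(rollup types.RollupStatus, proving types.ProvingStatus) bool {
	if rollup != types.RollupCommitted {
		return false
	}
	return proving == types.ProvingTaskSkipped || proving == types.ProvingTaskFailed
}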

@@ -2,6 +2,7 @@ package l2

import (
"context"
+ "errors"
"fmt"
"math/big"
"reflect"
@@ -10,7 +11,7 @@ import (
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
- "github.com/scroll-tech/go-ethereum/core/types"
+ geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log"
@@ -20,10 +21,9 @@ import (
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"

- "scroll-tech/database"
- "scroll-tech/database/orm"
+ "scroll-tech/common/types"

"scroll-tech/bridge/config"
+ "scroll-tech/database"
)

// Metrics
@@ -46,21 +46,23 @@ type WatcherClient struct {

orm database.OrmFactory

confirmations rpc.BlockNumber

messengerAddress common.Address
messengerABI *abi.ABI

+ messageQueueAddress common.Address
+ messageQueueABI *abi.ABI

// The height of the block up to which the watcher has retrieved event logs
processedMsgHeight uint64

stopped uint64
stopCh chan struct{}

- batchProposer *batchProposer
}

// NewL2WatcherClient takes an l2geth client and generates a WatcherClient instance
- func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
+ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, orm database.OrmFactory) *WatcherClient {
savedHeight, err := orm.GetLayer2LatestWatchedHeight()
if err != nil {
log.Warn("fetch height from db failed", "err", err)
@@ -73,11 +75,15 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
orm: orm,
processedMsgHeight: uint64(savedHeight),
confirmations: confirmations,
- messengerAddress: messengerAddress,
- messengerABI: bridge_abi.L2MessengerMetaABI,
- stopCh: make(chan struct{}),
- stopped: 0,
- batchProposer: newBatchProposer(bpCfg, orm),

+ messengerAddress: messengerAddress,
+ messengerABI: bridge_abi.L2ScrollMessengerABI,

+ messageQueueAddress: messageQueueAddress,
+ messageQueueABI: bridge_abi.L2MessageQueueABI,

+ stopCh: make(chan struct{}),
+ stopped: 0,
}

// Initialize genesis before we do anything else
@@ -101,47 +107,31 @@ func (w *WatcherClient) initializeGenesis() error {
return fmt.Errorf("failed to retrieve L2 genesis header: %v", err)
}

// EIP1559 is disabled, so the RPC won't return baseFeePerGas. However, l2geth
// still uses BaseFee when calculating the block hash. If we keep it as <nil>
// here, the genesis hash will not match.
genesis.BaseFee = big.NewInt(0)

log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())

- trace := &types.BlockTrace{
+ blockTrace := &geth_types.BlockTrace{
Coinbase: nil,
Header: genesis,
- Transactions: []*types.TransactionData{},
+ Transactions: []*geth_types.TransactionData{},
StorageTrace: nil,
- ExecutionResults: []*types.ExecutionResult{},
+ ExecutionResults: []*geth_types.ExecutionResult{},
MPTWitness: nil,
}

- if err := w.orm.InsertBlockTraces([]*types.BlockTrace{trace}); err != nil {
- return fmt.Errorf("failed to insert block traces: %v", err)
- }
+ batchData := types.NewGenesisBatchData(blockTrace)

- blocks, err := w.orm.GetUnbatchedBlocks(map[string]interface{}{})
- if err != nil {
+ if err = AddBatchInfoToDB(w.orm, batchData); err != nil {
+ log.Error("failed to add batch info to DB", "BatchHash", batchData.Hash(), "error", err)
return err
}

- if len(blocks) != 1 {
- return fmt.Errorf("unexpected number of unbatched blocks in db, expected: 1, actual: %v", len(blocks))
- }
+ batchHash := batchData.Hash().Hex()

- batchID, err := w.batchProposer.createBatchForBlocks(blocks)
- if err != nil {
- return fmt.Errorf("failed to create batch: %v", err)
- }

- err = w.orm.UpdateProvingStatus(batchID, orm.ProvingTaskProved)
- if err != nil {
+ if err = w.orm.UpdateProvingStatus(batchHash, types.ProvingTaskProved); err != nil {
return fmt.Errorf("failed to update genesis batch proving status: %v", err)
}

- err = w.orm.UpdateRollupStatus(w.ctx, batchID, orm.RollupFinalized)
- if err != nil {
+ if err = w.orm.UpdateRollupStatus(w.ctx, batchHash, types.RollupFinalized); err != nil {
return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
}
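
The genesis.BaseFee fixup above is worth a second look: the header hash in go-ethereum covers the optional baseFeePerGas field, so a nil BaseFee and an explicit zero hash differently. A minimal sketch of the idea, assuming the scroll-tech fork keeps the usual rlp:"optional" treatment of the field:

package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/core/types"
)

func main() {
	h := &types.Header{Number: big.NewInt(0), Difficulty: big.NewInt(1)}
	withNil := h.Hash() // BaseFee == nil: the optional field is omitted from the RLP payload

	h.BaseFee = big.NewInt(0) // explicit zero: the field is encoded, so the hash changes
	withZero := h.Hash()

	fmt.Println(withNil != withZero) // expected: true, which is why the watcher pins it to 0
}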

@@ -161,7 +151,7 @@ func (w *WatcherClient) Start() {

// trace fetcher loop
go func(ctx context.Context) {
- ticker := time.NewTicker(3 * time.Second)
+ ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()

for {
@@ -183,7 +173,7 @@ func (w *WatcherClient) Start() {

// event fetcher loop
go func(ctx context.Context) {
- ticker := time.NewTicker(3 * time.Second)
+ ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()

for {
@@ -203,22 +193,6 @@ func (w *WatcherClient) Start() {
}
}(ctx)

- // batch proposer loop
- go func(ctx context.Context) {
- ticker := time.NewTicker(3 * time.Second)
- defer ticker.Stop()
-
- for {
- select {
- case <-ctx.Done():
- return
-
- case <-ticker.C:
- w.batchProposer.tryProposeBatch()
- }
- }
- }(ctx)

<-w.stopCh
cancel()
}()
@@ -236,9 +210,9 @@ func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockH
// Get the newest block in DB; there must be blocks in DB at that point.
// Don't use the "block_trace" table "trace" column's BlockTrace.Number,
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
- heightInDB, err := w.orm.GetBlockTracesLatestHeight()
+ heightInDB, err := w.orm.GetL2BlockTracesLatestHeight()
if err != nil {
- log.Error("failed to GetBlockTracesLatestHeight", "err", err)
+ log.Error("failed to GetL2BlockTracesLatestHeight", "err", err)
return
}

@@ -264,7 +238,7 @@ func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockH
}

func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
- var traces []*types.BlockTrace
+ var traces []*geth_types.BlockTrace

for number := from; number <= to; number++ {
log.Debug("retrieving block trace", "height", number)
@@ -278,7 +252,7 @@ func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uin

}
if len(traces) > 0 {
- if err := w.orm.InsertBlockTraces(traces); err != nil {
+ if err := w.orm.InsertL2BlockTraces(traces); err != nil {
return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
}
}
@@ -310,13 +284,15 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
+ w.messageQueueAddress,
},
Topics: make([][]common.Hash, 1),
}
- query.Topics[0] = make([]common.Hash, 3)
- query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
- query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
- query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
+ query.Topics[0] = make([]common.Hash, 4)
+ query.Topics[0][0] = bridge_abi.L2SentMessageEventSignature
+ query.Topics[0][1] = bridge_abi.L2RelayedMessageEventSignature
+ query.Topics[0][2] = bridge_abi.L2FailedRelayedMessageEventSignature
+ query.Topics[0][3] = bridge_abi.L2AppendMessageEventSignature

logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
@@ -339,14 +315,13 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
+ var msgStatus types.MsgStatus
if msg.isSuccessful {
// succeed
- err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
+ msgStatus = types.MsgConfirmed
} else {
// failed
- err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
+ msgStatus = types.MsgFailed
}
- if err != nil {
+ if err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return
}
@@ -362,68 +337,91 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
}
}

- func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, error) {
+ func (w *WatcherClient) parseBridgeEventLogs(logs []geth_types.Log) ([]*types.L2Message, []relayedMessage, error) {
// Need to use the contract ABI to parse the event logs.
// Can only be tested after we have our contracts set up.

- var l2Messages []*orm.L2Message
+ var l2Messages []*types.L2Message
var relayedMessages []relayedMessage
+ var lastAppendMsgHash common.Hash
+ var lastAppendMsgNonce uint64
for _, vLog := range logs {
switch vLog.Topics[0] {
- case common.HexToHash(bridge_abi.SentMessageEventSignature):
- event := struct {
- Target common.Address
- Sender common.Address
- Value *big.Int // uint256
- Fee *big.Int // uint256
- Deadline *big.Int // uint256
- Message []byte
- MessageNonce *big.Int // uint256
- GasLimit *big.Int // uint256
- }{}
-
- err := w.messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
+ case bridge_abi.L2SentMessageEventSignature:
+ event := bridge_abi.L2SentMessageEvent{}
+ err := utils.UnpackLog(w.messengerABI, &event, "SentMessage", vLog)
if err != nil {
log.Error("failed to unpack layer2 SentMessage event", "err", err)
return l2Messages, relayedMessages, err
}
- // target is in topics[1]
- event.Target = common.HexToAddress(vLog.Topics[1].String())
- l2Messages = append(l2Messages, &orm.L2Message{

+ computedMsgHash := utils.ComputeMessageHash(
+ event.Sender,
+ event.Target,
+ event.Value,
+ event.MessageNonce,
+ event.Message,
+ )

+ // The `AppendMessage` event is always emitted before the `SentMessage` event,
+ // so the two should always match; double-check anyway.
+ if event.MessageNonce.Uint64() != lastAppendMsgNonce {
+ errMsg := fmt.Sprintf("l2 message nonces mismatch: AppendMessage.nonce=%v, SentMessage.nonce=%v, tx_hash=%v",
+ lastAppendMsgNonce, event.MessageNonce.Uint64(), vLog.TxHash.Hex())
+ return l2Messages, relayedMessages, errors.New(errMsg)
+ }
+ if computedMsgHash != lastAppendMsgHash {
+ errMsg := fmt.Sprintf("l2 message hashes mismatch: AppendMessage.msg_hash=%v, SentMessage.msg_hash=%v, tx_hash=%v",
+ lastAppendMsgHash.Hex(), computedMsgHash.Hex(), vLog.TxHash.Hex())
+ return l2Messages, relayedMessages, errors.New(errMsg)
+ }

+ l2Messages = append(l2Messages, &types.L2Message{
Nonce: event.MessageNonce.Uint64(),
- MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
+ MsgHash: computedMsgHash.String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
- Fee: event.Fee.String(),
- GasLimit: event.GasLimit.Uint64(),
- Deadline: event.Deadline.Uint64(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
- case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
- event := struct {
- MsgHash common.Hash
- }{}
- // MsgHash is in topics[1]
- event.MsgHash = common.HexToHash(vLog.Topics[1].String())
+ case bridge_abi.L2RelayedMessageEventSignature:
+ event := bridge_abi.L2RelayedMessageEvent{}
+ err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
+ if err != nil {
+ log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err)
+ return l2Messages, relayedMessages, err
+ }

relayedMessages = append(relayedMessages, relayedMessage{
- msgHash: event.MsgHash,
+ msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: true,
})
- case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
- event := struct {
- MsgHash common.Hash
- }{}
- // MsgHash is in topics[1]
- event.MsgHash = common.HexToHash(vLog.Topics[1].String())
+ case bridge_abi.L2FailedRelayedMessageEventSignature:
+ event := bridge_abi.L2FailedRelayedMessageEvent{}
+ err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
+ if err != nil {
+ log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err)
+ return l2Messages, relayedMessages, err
+ }

relayedMessages = append(relayedMessages, relayedMessage{
- msgHash: event.MsgHash,
+ msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: false,
})
+ case bridge_abi.L2AppendMessageEventSignature:
+ event := bridge_abi.L2AppendMessageEvent{}
+ err := utils.UnpackLog(w.messageQueueABI, &event, "AppendMessage", vLog)
+ if err != nil {
+ log.Warn("Failed to unpack layer2 AppendMessage event", "err", err)
+ return l2Messages, relayedMessages, err
+ }

+ lastAppendMsgHash = event.MessageHash
+ lastAppendMsgNonce = event.Index.Uint64()
default:
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
}
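
One detail of FetchContractEvent worth spelling out: the four signatures assigned to query.Topics[0] are alternatives, since go-ethereum treats the inner slice at each topic position as an OR-list. A standalone sketch of that filter semantics (endpoint, addresses, and hashes are placeholders):

package main

import (
	"context"
	"log"
	"math/big"

	ethereum "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	query := ethereum.FilterQuery{
		FromBlock: big.NewInt(100),
		ToBlock:   big.NewInt(200), // inclusive, as in the watcher
		Addresses: []common.Address{
			common.HexToAddress("0x0000000000000000000000000000000000000001"), // placeholder messenger
			common.HexToAddress("0x0000000000000000000000000000000000000002"), // placeholder queue
		},
		// The inner slice lists alternatives for topic 0 (the event signature):
		// a log matches if its signature equals ANY of the entries.
		Topics: [][]common.Hash{{
			common.HexToHash("0x01"), // e.g. SentMessage (placeholder hash)
			common.HexToHash("0x02"), // e.g. AppendMessage (placeholder hash)
		}},
	}
	logs, err := client.FilterLogs(context.Background(), query)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("matched logs:", len(logs))
}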

@@ -10,18 +10,18 @@ import (

"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
- "github.com/scroll-tech/go-ethereum/core/types"
+ geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"

- "scroll-tech/bridge/config"
+ "scroll-tech/common/types"

"scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"

"scroll-tech/database"
"scroll-tech/database/migrate"
- "scroll-tech/database/orm"
)

func testCreateNewWatcherAndStop(t *testing.T) {
@@ -32,7 +32,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
defer l2db.Close()

l2cfg := cfg.L2Config
- rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
+ rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2db)
rc.Start()
defer rc.Stop()

@@ -62,6 +62,11 @@ func testMonitorBridgeContract(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()

+ l2cfg := cfg.L2Config
+ wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, db)
+ wc.Start()
+ defer wc.Stop()

previousHeight, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)

@@ -73,7 +78,7 @@ func testMonitorBridgeContract(t *testing.T) {
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)

- rc := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
+ rc := prepareWatcherClient(l2Cli, db, address)
rc.Start()
defer rc.Stop()

@@ -86,7 +91,7 @@ func testMonitorBridgeContract(t *testing.T) {
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
- if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
+ if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}

@@ -96,7 +101,7 @@ func testMonitorBridgeContract(t *testing.T) {
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
- if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
+ if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}

@@ -113,7 +118,7 @@ func testMonitorBridgeContract(t *testing.T) {
assert.NoError(t, err)
t.Log("Height in DB is", height)
assert.Greater(t, height, int64(previousHeight))
- msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
+ msgs, err := db.GetL2Messages(map[string]interface{}{"status": types.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
}

@@ -135,13 +140,13 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
assert.NoError(t, err)

- rc := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
+ rc := prepareWatcherClient(l2Cli, db, address)
rc.Start()
defer rc.Stop()

// Call mock_bridge instance sendMessage to trigger emit events multiple times
numTransactions := 4
- var tx *types.Transaction
+ var tx *geth_types.Transaction

for i := 0; i < numTransactions; i++ {
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
@@ -157,7 +162,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
}

receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
- if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
+ if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}

@@ -173,7 +178,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
- if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
+ if receipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}

@@ -185,14 +190,14 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
- msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
+ msgs, err := db.GetL2Messages(map[string]interface{}{"status": types.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
}

- func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
+ func prepareWatcherClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
confirmations := rpc.LatestBlockNumber
- return NewL2WatcherClient(context.Background(), l2Cli, confirmations, bpCfg, contractAddr, db)
+ return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, db)
}

func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {

@@ -2,98 +2,120 @@
pragma solidity ^0.8.0;

contract MockBridgeL1 {
+ /******************************
+ * Events from L1MessageQueue *
+ ******************************/

+ /// @notice Emitted when a new L1 => L2 transaction is appended to the queue.
+ /// @param sender The address of account who initiates the transaction.
+ /// @param target The address of account who will receive the transaction.
+ /// @param value The value passed with the transaction.
+ /// @param queueIndex The index of this transaction in the queue.
+ /// @param gasLimit Gas limit required to complete the message relay on L2.
+ /// @param data The calldata of the transaction.
+ event QueueTransaction(
+ address indexed sender,
+ address indexed target,
+ uint256 value,
+ uint256 queueIndex,
+ uint256 gasLimit,
+ bytes data
+ );

/*********************************
* Events from L1ScrollMessenger *
*********************************/

+ /// @notice Emitted when a cross domain message is sent.
+ /// @param sender The address of the sender who initiates the message.
+ /// @param target The address of target contract to call.
+ /// @param value The amount of value passed to the target contract.
+ /// @param messageNonce The nonce of the message.
+ /// @param gasLimit The optional gas limit passed to L1 or L2.
+ /// @param message The calldata passed to the target contract.
event SentMessage(
+ address indexed sender,
address indexed target,
- address sender,
uint256 value,
- uint256 fee,
- uint256 deadline,
- bytes message,
uint256 messageNonce,
- uint256 gasLimit
+ uint256 gasLimit,
+ bytes message
);

- event MessageDropped(bytes32 indexed msgHash);
+ /// @notice Emitted when a cross domain message is relayed successfully.
+ /// @param messageHash The hash of the message.
+ event RelayedMessage(bytes32 indexed messageHash);

- event RelayedMessage(bytes32 indexed msgHash);
+ /// @dev The maximum number of transactions in one batch.
+ uint256 public immutable maxNumTxInBatch;

- event FailedRelayedMessage(bytes32 indexed msgHash);
+ /// @dev The hash used for padding public inputs.
+ bytes32 public immutable paddingTxHash;

- /************************
- * Events from ZKRollup *
- ************************/
+ /***************************
+ * Events from ScrollChain *
+ ***************************/

/// @notice Emitted when a new batch is committed.
- /// @param _batchHash The hash of the batch
- /// @param _batchIndex The index of the batch
- /// @param _parentHash The hash of the parent batch
- event CommitBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
+ /// @param batchHash The hash of the batch
+ event CommitBatch(bytes32 indexed batchHash);

/// @notice Emitted when a batch is reverted.
- /// @param _batchId The identification of the batch.
- event RevertBatch(bytes32 indexed _batchId);
+ /// @param batchHash The identification of the batch.
+ event RevertBatch(bytes32 indexed batchHash);

/// @notice Emitted when a batch is finalized.
- /// @param _batchHash The hash of the batch
- /// @param _batchIndex The index of the batch
- /// @param _parentHash The hash of the parent batch
- event FinalizeBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
+ /// @param batchHash The hash of the batch
+ event FinalizeBatch(bytes32 indexed batchHash);

/***********
* Structs *
***********/

- struct L2MessageProof {
- uint256 batchIndex;
- uint256 blockHeight;
- bytes merkleProof;
- }

- /// @dev The transaction struct
- struct Layer2Transaction {
- address caller;
- uint64 nonce;
- address target;
- uint64 gas;
- uint256 gasPrice;
- uint256 value;
- bytes data;
- // signature
- uint256 r;
- uint256 s;
- uint64 v;
- }

- /// @dev The block header struct
- struct Layer2BlockHeader {
+ struct BlockContext {
+ // The hash of this block.
bytes32 blockHash;
+ // The parent hash of this block.
bytes32 parentHash;
- uint256 baseFee;
- bytes32 stateRoot;
- uint64 blockHeight;
- uint64 gasUsed;
+ // The height of this block.
+ uint64 blockNumber;
+ // The timestamp of this block.
uint64 timestamp;
- bytes extraData;
- Layer2Transaction[] txs;
+ // The base fee of this block.
+ // Currently it is not used, because we disable EIP-1559.
+ // We keep it to be future-proof.
+ uint256 baseFee;
+ // The gas limit of this block.
+ uint64 gasLimit;
+ // The number of transactions in this block, both L1 & L2 txs.
+ uint16 numTransactions;
+ // The number of l1 messages in this block.
+ uint16 numL1Messages;
}

- /// @dev The batch struct; the batch hash is always the last block hash of `blocks`.
- struct Layer2Batch {
+ struct Batch {
+ // The list of blocks in this batch
+ BlockContext[] blocks; // MAX_NUM_BLOCKS = 100, about 5 min
+ // The state root of the previous batch.
+ // The first batch will use 0x0 for prevStateRoot
+ bytes32 prevStateRoot;
+ // The state root of the last block in this batch.
+ bytes32 newStateRoot;
+ // The withdraw trie root of the last block in this batch.
+ bytes32 withdrawTrieRoot;
+ // The index of the batch.
uint64 batchIndex;
- // The hash of the last block in the parent batch
- bytes32 parentHash;
- Layer2BlockHeader[] blocks;
+ // The parent batch hash.
+ bytes32 parentBatchHash;
+ // Concatenated raw data of RLP encoded L2 txs
+ bytes l2Transactions;
}

- struct Layer2BatchStored {
+ struct L2MessageProof {
+ // The hash of the batch the message belongs to.
+ bytes32 batchHash;
- bytes32 parentHash;
- uint64 batchIndex;
- bool verified;
+ // Concatenation of the merkle proof for the withdraw merkle trie.
+ bytes merkleProof;
}

/*************
@@ -103,27 +125,39 @@ contract MockBridgeL1 {
/// @notice Message nonce, used to avoid relay attack.
uint256 public messageNonce;

- /// @notice Mapping from batch id to batch struct.
- mapping(bytes32 => Layer2BatchStored) public batches;
+ /***************
+ * Constructor *
+ ***************/

+ constructor() {
+ maxNumTxInBatch = 44;
+ paddingTxHash = 0x0000000000000000000000000000000000000000000000000000000000000000;
+ }

+ /***********************************
+ * Functions from L2GasPriceOracle *
+ ***********************************/

+ function setL2BaseFee(uint256) external {
+ }

/************************************
* Functions from L1ScrollMessenger *
************************************/

function sendMessage(
- address _to,
- uint256 _fee,
- bytes memory _message,
- uint256 _gasLimit
+ address target,
+ uint256 value,
+ bytes calldata message,
+ uint256 gasLimit
) external payable {
- // solhint-disable-next-line not-rely-on-time
- uint256 _deadline = block.timestamp + 1 days;
- uint256 _value;
- unchecked {
- _value = msg.value - _fee;
+ bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, target, value, messageNonce, message);
+ {
+ address _sender = applyL1ToL2Alias(address(this));
+ emit QueueTransaction(_sender, target, 0, messageNonce, gasLimit, _xDomainCalldata);
}
- uint256 _nonce = messageNonce;
- emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);

+ emit SentMessage(msg.sender, target, value, messageNonce, gasLimit, message);
messageNonce += 1;
}

@@ -131,57 +165,216 @@ contract MockBridgeL1 {
address _from,
address _to,
uint256 _value,
- uint256 _fee,
- uint256 _deadline,
uint256 _nonce,
bytes memory _message,
L2MessageProof memory
) external {
- bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
- emit RelayedMessage(_msghash);
+ bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _nonce, _message);
+ bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);
+ emit RelayedMessage(_xDomainCalldataHash);
}

- /***************************
- * Functions from ZKRollup *
- ***************************/
+ /******************************
+ * Functions from ScrollChain *
+ ******************************/

- function commitBatch(Layer2Batch memory _batch) external {
- bytes32 _batchHash = _batch.blocks[_batch.blocks.length - 1].blockHash;
- bytes32 _batchId = _computeBatchId(_batchHash, _batch.parentHash, _batch.batchIndex);

- Layer2BatchStored storage _batchStored = batches[_batchId];
- _batchStored.batchHash = _batchHash;
- _batchStored.parentHash = _batch.parentHash;
- _batchStored.batchIndex = _batch.batchIndex;

- emit CommitBatch(_batchId, _batchHash, _batch.batchIndex, _batch.parentHash);
+ function commitBatch(Batch memory _batch) external {
+ _commitBatch(_batch);
}

- function revertBatch(bytes32 _batchId) external {
- emit RevertBatch(_batchId);
+ function commitBatches(Batch[] memory _batches) external {
+ for (uint256 i = 0; i < _batches.length; i++) {
+ _commitBatch(_batches[i]);
+ }
}

+ function revertBatch(bytes32 _batchHash) external {
+ emit RevertBatch(_batchHash);
}

function finalizeBatchWithProof(
- bytes32 _batchId,
bytes32 _batchHash,
uint256[] memory,
uint256[] memory
) external {
- Layer2BatchStored storage _batch = batches[_batchId];
- uint256 _batchIndex = _batch.batchIndex;

- emit FinalizeBatch(_batchId, _batch.batchHash, _batchIndex, _batch.parentHash);
+ emit FinalizeBatch(_batchHash);
}

- /// @dev Internal function to compute a unique batch id for mapping.
- /// @param _batchHash The hash of the batch.
- /// @param _parentHash The hash of the parent batch.
- /// @param _batchIndex The index of the batch.
- /// @return Return the computed batch id.
- function _computeBatchId(
- bytes32 _batchHash,
- bytes32 _parentHash,
- uint256 _batchIndex
- ) internal pure returns (bytes32) {
- return keccak256(abi.encode(_batchHash, _parentHash, _batchIndex));
+ /**********************
+ * Internal Functions *
+ **********************/

+ function _commitBatch(Batch memory _batch) internal {
+ bytes32 _batchHash = _computePublicInputHash(_batch);
+ emit CommitBatch(_batchHash);
}
}

/// @dev Internal function to generate the correct cross domain calldata for a message.
/// @param _sender Message sender address.
/// @param _target Target contract address.
/// @param _value The amount of ETH passed to the target.
/// @param _messageNonce Nonce for the provided message.
/// @param _message Message to send to the target.
/// @return ABI encoded cross domain calldata.
function _encodeXDomainCalldata(
address _sender,
address _target,
uint256 _value,
uint256 _messageNonce,
bytes memory _message
) internal pure returns (bytes memory) {
return
abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
_sender,
_target,
_value,
_messageNonce,
_message
);
}

function applyL1ToL2Alias(address l1Address) internal pure returns (address l2Address) {
uint160 offset = uint160(0x1111000000000000000000000000000000001111);
unchecked {
l2Address = address(uint160(l1Address) + offset);
}
}
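
applyL1ToL2Alias is the standard L1-to-L2 address aliasing: add a fixed offset and let the 160-bit arithmetic wrap. The same computation on the Go side could look like this (a sketch; the offset constant is copied from the contract above):

package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
)

var aliasOffset = common.HexToAddress("0x1111000000000000000000000000000000001111")

// applyL1ToL2Alias adds the offset modulo 2^160, matching the contract's
// unchecked uint160 addition.
func applyL1ToL2Alias(l1 common.Address) common.Address {
	sum := new(big.Int).Add(l1.Big(), aliasOffset.Big())
	return common.BigToAddress(sum) // keeps the low 20 bytes, i.e. wraps mod 2^160
}

func main() {
	l1 := common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff")
	fmt.Println(applyL1ToL2Alias(l1).Hex()) // wraps around, as in the contract
}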

/// @dev Internal function to compute the public input hash.
/// @param batch The batch to compute.
function _computePublicInputHash(Batch memory batch)
internal
view
returns (
bytes32
)
{
uint256 publicInputsPtr;
// 1. append prevStateRoot, newStateRoot and withdrawTrieRoot to public inputs
{
bytes32 prevStateRoot = batch.prevStateRoot;
bytes32 newStateRoot = batch.newStateRoot;
bytes32 withdrawTrieRoot = batch.withdrawTrieRoot;
// number of bytes in public inputs: 32 * 3 + 124 * blocks + 32 * MAX_NUM_TXS
uint256 publicInputsSize = 32 * 3 + batch.blocks.length * 124 + 32 * maxNumTxInBatch;
assembly {
publicInputsPtr := mload(0x40)
mstore(0x40, add(publicInputsPtr, publicInputsSize))
mstore(publicInputsPtr, prevStateRoot)
publicInputsPtr := add(publicInputsPtr, 0x20)
mstore(publicInputsPtr, newStateRoot)
publicInputsPtr := add(publicInputsPtr, 0x20)
mstore(publicInputsPtr, withdrawTrieRoot)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}

uint64 numTransactionsInBatch;
BlockContext memory _block;
// 2. append block information to public inputs.
for (uint256 i = 0; i < batch.blocks.length; i++) {
// validate blocks; we won't check the first block against the previous batch.
{
BlockContext memory _currentBlock = batch.blocks[i];
if (i > 0) {
require(_block.blockHash == _currentBlock.parentHash, "Parent hash mismatch");
require(_block.blockNumber + 1 == _currentBlock.blockNumber, "Block number mismatch");
}
_block = _currentBlock;
}

// append blockHash and parentHash to public inputs
{
bytes32 blockHash = _block.blockHash;
bytes32 parentHash = _block.parentHash;
assembly {
mstore(publicInputsPtr, blockHash)
publicInputsPtr := add(publicInputsPtr, 0x20)
mstore(publicInputsPtr, parentHash)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
// append blockNumber and blockTimestamp to public inputs
{
uint256 blockNumber = _block.blockNumber;
uint256 blockTimestamp = _block.timestamp;
assembly {
mstore(publicInputsPtr, shl(192, blockNumber))
publicInputsPtr := add(publicInputsPtr, 0x8)
mstore(publicInputsPtr, shl(192, blockTimestamp))
publicInputsPtr := add(publicInputsPtr, 0x8)
}
}
// append baseFee to public inputs
{
uint256 baseFee = _block.baseFee;
assembly {
mstore(publicInputsPtr, baseFee)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
uint64 numTransactionsInBlock = _block.numTransactions;
// append gasLimit, numTransactions and numL1Messages to public inputs
{
uint256 gasLimit = _block.gasLimit;
uint256 numL1MessagesInBlock = _block.numL1Messages;
assembly {
mstore(publicInputsPtr, shl(192, gasLimit))
publicInputsPtr := add(publicInputsPtr, 0x8)
mstore(publicInputsPtr, shl(240, numTransactionsInBlock))
publicInputsPtr := add(publicInputsPtr, 0x2)
mstore(publicInputsPtr, shl(240, numL1MessagesInBlock))
publicInputsPtr := add(publicInputsPtr, 0x2)
}
}
numTransactionsInBatch += numTransactionsInBlock;
}
require(numTransactionsInBatch <= maxNumTxInBatch, "Too many transactions in batch");

// 3. append transaction hashes to public inputs.
uint256 _l2TxnPtr;
{
bytes memory l2Transactions = batch.l2Transactions;
assembly {
_l2TxnPtr := add(l2Transactions, 0x20)
}
}
for (uint256 i = 0; i < batch.blocks.length; i++) {
uint256 numL1MessagesInBlock = batch.blocks[i].numL1Messages;
require(numL1MessagesInBlock == 0);
uint256 numTransactionsInBlock = batch.blocks[i].numTransactions;
for (uint256 j = numL1MessagesInBlock; j < numTransactionsInBlock; ++j) {
bytes32 hash;
assembly {
let txPayloadLength := shr(224, mload(_l2TxnPtr))
_l2TxnPtr := add(_l2TxnPtr, 4)
_l2TxnPtr := add(_l2TxnPtr, txPayloadLength)
hash := keccak256(sub(_l2TxnPtr, txPayloadLength), txPayloadLength)
mstore(publicInputsPtr, hash)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
}

// 4. append padding transactions to public inputs.
bytes32 txHashPadding = paddingTxHash;
for (uint256 i = numTransactionsInBatch; i < maxNumTxInBatch; i++) {
assembly {
mstore(publicInputsPtr, txHashPadding)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}

// 5. compute the public input hash
bytes32 publicInputHash;
{
uint256 publicInputsSize = 32 * 3 + batch.blocks.length * 124 + 32 * maxNumTxInBatch;
assembly {
publicInputHash := keccak256(sub(publicInputsPtr, publicInputsSize), publicInputsSize)
}
}

return publicInputHash;
}
}
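
The size formula in _computePublicInputHash, 32 * 3 + 124 * blocks + 32 * maxNumTxInBatch, follows directly from the layout assembled above: each block contributes 32 (blockHash) + 32 (parentHash) + 8 (blockNumber) + 8 (timestamp) + 32 (baseFee) + 8 (gasLimit) + 2 (numTransactions) + 2 (numL1Messages) = 124 bytes. The same arithmetic in Go (names are illustrative):

package main

import "fmt"

// publicInputsSize mirrors the byte layout of _computePublicInputHash:
// three 32-byte roots, one 124-byte record per block, and one 32-byte tx
// hash slot per allowed transaction (padded up to maxNumTxInBatch).
func publicInputsSize(numBlocks, maxNumTxInBatch int) int {
	const roots = 32 * 3                              // prevStateRoot + newStateRoot + withdrawTrieRoot
	const perBlock = 32 + 32 + 8 + 8 + 32 + 8 + 2 + 2 // = 124 bytes, per the breakdown above
	return roots + numBlocks*perBlock + 32*maxNumTxInBatch
}

func main() {
	// e.g. a 100-block batch with the mock's maxNumTxInBatch of 44:
	fmt.Println(publicInputsSize(100, 44)) // 96 + 12400 + 1408 = 13904
}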

@@ -2,26 +2,56 @@
pragma solidity ^0.8.0;

contract MockBridgeL2 {
+ /******************************
+ * Events from L2MessageQueue *
+ ******************************/

+ /// @notice Emitted when a new message is added to the merkle tree.
+ /// @param index The index of the corresponding message.
+ /// @param messageHash The hash of the corresponding message.
+ event AppendMessage(uint256 index, bytes32 messageHash);

+ /********************************
+ * Events from L1BlockContainer *
+ ********************************/

+ /// @notice Emitted when a block is imported.
+ /// @param blockHash The hash of the imported block.
+ /// @param blockHeight The height of the imported block.
+ /// @param blockTimestamp The timestamp of the imported block.
+ /// @param baseFee The base fee of the imported block.
+ /// @param stateRoot The state root of the imported block.
+ event ImportBlock(
+ bytes32 indexed blockHash,
+ uint256 blockHeight,
+ uint256 blockTimestamp,
+ uint256 baseFee,
+ bytes32 stateRoot
+ );

/*********************************
* Events from L2ScrollMessenger *
*********************************/

/// @notice Emitted when a cross domain message is sent.
/// @param sender The address of the sender who initiates the message.
/// @param target The address of target contract to call.
/// @param value The amount of value passed to the target contract.
/// @param messageNonce The nonce of the message.
/// @param gasLimit The optional gas limit passed to L1 or L2.
/// @param message The calldata passed to the target contract.
event SentMessage(
+ address indexed sender,
address indexed target,
- address sender,
uint256 value,
- uint256 fee,
- uint256 deadline,
- bytes message,
uint256 messageNonce,
- uint256 gasLimit
+ uint256 gasLimit,
+ bytes message
);

- event MessageDropped(bytes32 indexed msgHash);

- event RelayedMessage(bytes32 indexed msgHash);

- event FailedRelayedMessage(bytes32 indexed msgHash);
+ /// @notice Emitted when a cross domain message is relayed successfully.
+ /// @param messageHash The hash of the message.
+ event RelayedMessage(bytes32 indexed messageHash);

/*************
* Variables *
@@ -30,38 +60,70 @@ contract MockBridgeL2 {
/// @notice Message nonce, used to avoid relay attack.
uint256 public messageNonce;

+ /***********************************
+ * Functions from L1GasPriceOracle *
+ ***********************************/

+ function setL1BaseFee(uint256) external {
+ }

/************************************
* Functions from L2ScrollMessenger *
************************************/

function sendMessage(
address _to,
- uint256 _fee,
+ uint256 _value,
bytes memory _message,
uint256 _gasLimit
) external payable {
- // solhint-disable-next-line not-rely-on-time
- uint256 _deadline = block.timestamp + 1 days;
- uint256 _nonce = messageNonce;
- uint256 _value;
- unchecked {
- _value = msg.value - _fee;
- }
- bytes32 _msghash = keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message));
- emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
- messageNonce = _nonce + 1;
+ bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, _to, _value, messageNonce, _message);
+ bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);

+ emit AppendMessage(messageNonce, _xDomainCalldataHash);
+ emit SentMessage(msg.sender, _to, _value, messageNonce, _gasLimit, _message);

+ messageNonce += 1;
}

- function relayMessageWithProof(
+ function relayMessage(
address _from,
address _to,
uint256 _value,
- uint256 _fee,
- uint256 _deadline,
uint256 _nonce,
- bytes memory _message
+ bytes calldata _message
) external {
- bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
- emit RelayedMessage(_msghash);
+ bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _nonce, _message);
+ bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);
+ emit RelayedMessage(_xDomainCalldataHash);
}

/**********************
* Internal Functions *
**********************/

/// @dev Internal function to generate the correct cross domain calldata for a message.
/// @param _sender Message sender address.
/// @param _target Target contract address.
/// @param _value The amount of ETH passed to the target.
/// @param _messageNonce Nonce for the provided message.
/// @param _message Message to send to the target.
/// @return ABI encoded cross domain calldata.
function _encodeXDomainCalldata(
address _sender,
address _target,
uint256 _value,
uint256 _messageNonce,
bytes memory _message
) internal pure returns (bytes memory) {
return
abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
_sender,
_target,
_value,
_messageNonce,
_message
);
}
}
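
Both mocks derive the message hash by keccak-hashing the ABI-encoded relayMessage calldata; presumably utils.ComputeMessageHash on the Go side computes the same value, which is what lets the watcher cross-check AppendMessage against SentMessage. A sketch of that equivalence (the trimmed ABI string and helper name are assumptions, not the repository's actual code):

package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

const relayMessageABI = `[{"name":"relayMessage","type":"function","inputs":[{"name":"sender","type":"address"},{"name":"target","type":"address"},{"name":"value","type":"uint256"},{"name":"nonce","type":"uint256"},{"name":"message","type":"bytes"}]}]`

// computeMessageHash keccak-hashes the relayMessage calldata, mirroring
// keccak256(_encodeXDomainCalldata(...)) in the mock contracts.
func computeMessageHash(sender, target common.Address, value, nonce *big.Int, message []byte) (common.Hash, error) {
	parsed, err := abi.JSON(strings.NewReader(relayMessageABI))
	if err != nil {
		return common.Hash{}, err
	}
	calldata, err := parsed.Pack("relayMessage", sender, target, value, nonce, message)
	if err != nil {
		return common.Hash{}, err
	}
	return crypto.Keccak256Hash(calldata), nil
}

func main() {
	h, err := computeMessageHash(
		common.HexToAddress("0x01"), common.HexToAddress("0x02"),
		big.NewInt(0), big.NewInt(1), []byte{0xca, 0xfe},
	)
	fmt.Println(h, err)
}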

@@ -1,5 +1,8 @@
#!/bin/bash

+ # generates go bindings from contracts, to paste into abi/bridge_abi.go
+ # compile artifacts in the /contracts folder with `forge build` first

# Only run if it is run from the repository root.
if [[ ! -d "cmd" ]]
then
@@ -14,16 +17,20 @@ else
mkdir -p contracts
fi

- abi_name=("IL1GatewayRouter" "IL2GatewayRouter" "IL1ScrollMessenger" "IL2ScrollMessenger" "ZKRollup")
- pkg_name=("l1_gateway" "l2_gateway" "l1_messenger" "l2_messenger" "rollup")
- gen_name=("L1GatewayRouter" "L2GatewayRouter" "L1ScrollMessenger" "L2ScrollMessenger" "ZKRollup")
+ abi_name=("IL1GatewayRouter" "IL2GatewayRouter" "IL1ScrollMessenger" "IL2ScrollMessenger" "IScrollChain" "L1MessageQueue")
+ pkg_name=("l1_gateway" "l2_gateway" "l1_messenger" "l2_messenger" "scrollchain" "l1_message_queue")
+ gen_name=("L1GatewayRouter" "L2GatewayRouter" "L1ScrollMessenger" "L2ScrollMessenger" "IScrollChain" "L1MessageQueue")

for i in "${!abi_name[@]}"; do
- abi="bridge/abi/${abi_name[$i]}.json"
- pkg="${pkg_name[$i]}"
+ mkdir -p tmp
+ abi="tmp/${abi_name[$i]}.json"
+ cat ../contracts/artifacts/src/${abi_name[$i]}.sol/${abi_name[$i]}.json | jq '.abi' > $abi
+ pkg="${pkg_name[$i]}_abi"
out="contracts/${pkg}/${gen_name[$i]}.go"
echo "generating ${out} from ${abi}"
mkdir -p contracts/$pkg
abigen --abi=$abi --pkg=$pkg --out=$out
awk '{sub("github.com/ethereum","github.com/scroll-tech")}1' $out > temp && mv temp $out
done

+ rm -rf tmp

@@ -41,16 +41,6 @@ var (
ErrNoAvailableAccount = errors.New("sender has no available account to send transaction")
)

- // DefaultSenderConfig The default config
- var DefaultSenderConfig = config.SenderConfig{
- Endpoint: "",
- EscalateBlocks: 3,
- EscalateMultipleNum: 11,
- EscalateMultipleDen: 10,
- MaxGasPrice: 1000_000_000_000, // this is 1000 gwei
- TxType: AccessListTxType,
- }

// Confirmation struct used to indicate transaction confirmation details
type Confirmation struct {
ID string
@@ -97,9 +87,6 @@ type Sender struct {
// NewSender returns a new instance of transaction sender
// txConfirmationCh is used to notify confirmed transaction
func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.PrivateKey) (*Sender, error) {
- if config == nil {
- config = &DefaultSenderConfig
- }
client, err := ethclient.Dial(config.Endpoint)
if err != nil {
return nil, err

@@ -45,8 +45,8 @@ var (
l1MessengerAddress common.Address

// l1 rollup contract
- l1RollupInstance *mock_bridge.MockBridgeL1
- l1RollupAddress common.Address
+ scrollChainInstance *mock_bridge.MockBridgeL1
+ scrollChainAddress common.Address

// l2 messenger contract
l2MessengerInstance *mock_bridge.MockBridgeL2
@@ -61,6 +61,8 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err)
rollupPrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121214"))
assert.NoError(t, err)
+ gasOraclePrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121215"))
+ assert.NoError(t, err)

// Load config.
cfg, err = config.NewConfig("../config.json")
@@ -68,9 +70,11 @@ func setupEnv(t *testing.T) {
cfg.L1Config.Confirmations = rpc.LatestBlockNumber
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
+ cfg.L1Config.RelayerConfig.GasOracleSenderPrivateKeys = []*ecdsa.PrivateKey{gasOraclePrivateKey}
cfg.L2Config.Confirmations = rpc.LatestBlockNumber
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
+ cfg.L2Config.RelayerConfig.GasOracleSenderPrivateKeys = []*ecdsa.PrivateKey{gasOraclePrivateKey}

// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
@@ -99,8 +103,10 @@ func setupEnv(t *testing.T) {
// send some balance to message and rollup sender
transferEther(t, l1Auth, l1Client, messagePrivateKey)
transferEther(t, l1Auth, l1Client, rollupPrivateKey)
+ transferEther(t, l1Auth, l1Client, gasOraclePrivateKey)
transferEther(t, l2Auth, l2Client, messagePrivateKey)
transferEther(t, l2Auth, l2Client, rollupPrivateKey)
+ transferEther(t, l2Auth, l2Client, gasOraclePrivateKey)
}

func transferEther(t *testing.T, auth *bind.TransactOpts, client *ethclient.Client, privateKey *ecdsa.PrivateKey) {
@@ -160,10 +166,10 @@ func prepareContracts(t *testing.T) {
l1MessengerAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)

- // L1 rollup contract
- _, tx, l1RollupInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
+ // L1 ScrollChain contract
+ _, tx, scrollChainInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
assert.NoError(t, err)
- l1RollupAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
+ scrollChainAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)

// L2 messenger contract
@@ -173,12 +179,16 @@ func prepareContracts(t *testing.T) {
assert.NoError(t, err)

cfg.L1Config.L1MessengerAddress = l1MessengerAddress
- cfg.L1Config.RollupContractAddress = l1RollupAddress
+ cfg.L1Config.L1MessageQueueAddress = l1MessengerAddress
+ cfg.L1Config.ScrollChainContractAddress = scrollChainAddress
cfg.L1Config.RelayerConfig.MessengerContractAddress = l2MessengerAddress
+ cfg.L1Config.RelayerConfig.GasPriceOracleContractAddress = l1MessengerAddress

cfg.L2Config.L2MessengerAddress = l2MessengerAddress
+ cfg.L2Config.L2MessageQueueAddress = l2MessengerAddress
cfg.L2Config.RelayerConfig.MessengerContractAddress = l1MessengerAddress
- cfg.L2Config.RelayerConfig.RollupContractAddress = l1RollupAddress
+ cfg.L2Config.RelayerConfig.RollupContractAddress = scrollChainAddress
+ cfg.L2Config.RelayerConfig.GasPriceOracleContractAddress = l2MessengerAddress
}

func prepareAuth(t *testing.T, client *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
@@ -197,8 +207,15 @@ func TestFunction(t *testing.T) {
// l1 rollup and watch rollup events
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)

// l1 message
t.Run("TestRelayL1MessageSucceed", testRelayL1MessageSucceed)

// l2 message
- t.Run("testRelayL2MessageSucceed", testRelayL2MessageSucceed)
+ t.Run("TestRelayL2MessageSucceed", testRelayL2MessageSucceed)

+ // l1/l2 gas oracle
+ t.Run("TestImportL1GasPrice", testImportL1GasPrice)
+ t.Run("TestImportL2GasPrice", testImportL2GasPrice)

t.Cleanup(func() {
free(t)

129 bridge/tests/gas_oracle_test.go Normal file
@@ -0,0 +1,129 @@
package tests

import (
"context"
"math/big"
"testing"

"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"

"scroll-tech/common/types"

"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"

"scroll-tech/database"
"scroll-tech/database/migrate"
)

func testImportL1GasPrice(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()

prepareContracts(t)

l1Cfg := cfg.L1Config

// Create L1Relayer
l1Relayer, err := l1.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
defer l1Relayer.Stop()

// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := l1.NewWatcher(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)

// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())
assert.Greater(t, number, startHeight-1)
assert.NoError(t, err)
err = l1Watcher.FetchBlockHeader(number)
assert.NoError(t, err)

// check db status
latestBlockHeight, err := db.GetLatestL1BlockHeight()
assert.NoError(t, err)
assert.Equal(t, number, latestBlockHeight)
blocks, err := db.GetL1BlockInfos(map[string]interface{}{
"number": latestBlockHeight,
})
assert.NoError(t, err)
assert.Equal(t, len(blocks), 1)
assert.Equal(t, blocks[0].GasOracleStatus, types.GasOraclePending)
assert.Equal(t, blocks[0].OracleTxHash.Valid, false)

// relay gas price
l1Relayer.ProcessGasPriceOracle()
blocks, err = db.GetL1BlockInfos(map[string]interface{}{
"number": latestBlockHeight,
})
assert.NoError(t, err)
assert.Equal(t, len(blocks), 1)
assert.Equal(t, blocks[0].GasOracleStatus, types.GasOracleImporting)
assert.Equal(t, blocks[0].OracleTxHash.Valid, true)
}

func testImportL2GasPrice(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()

prepareContracts(t)

l2Cfg := cfg.L2Config

// Create L2Relayer
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()

// add fake blocks
traces := []*geth_types.BlockTrace{
{
Header: &geth_types.Header{
Number: big.NewInt(1),
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
StorageTrace: &geth_types.StorageTrace{},
},
}
err = db.InsertL2BlockTraces(traces)
assert.NoError(t, err)

parentBatch := &types.BlockBatch{
Index: 0,
Hash: "0x0000000000000000000000000000000000000000",
}
batchData := types.NewBatchData(parentBatch, []*geth_types.BlockTrace{
traces[0],
}, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
|
||||
|
||||
// add fake batch
|
||||
dbTx, err := db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
|
||||
assert.NoError(t, dbTx.Commit())
|
||||
|
||||
// check db status
|
||||
batch, err := db.GetLatestBatch()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, batch.OracleStatus, types.GasOraclePending)
|
||||
assert.Equal(t, batch.OracleTxHash.Valid, false)
|
||||
|
||||
// relay gas price
|
||||
l2Relayer.ProcessGasPriceOracle()
|
||||
batch, err = db.GetLatestBatch()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, batch.OracleStatus, types.GasOracleImporting)
|
||||
assert.Equal(t, batch.OracleTxHash.Valid, true)
|
||||
}
|
||||
86 bridge/tests/l1_message_relay_test.go Normal file
@@ -0,0 +1,86 @@
package tests

import (
    "context"
    "math/big"
    "testing"

    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    geth_types "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/rpc"
    "github.com/stretchr/testify/assert"

    "scroll-tech/common/types"

    "scroll-tech/bridge/l1"
    "scroll-tech/bridge/l2"

    "scroll-tech/database"
    "scroll-tech/database/migrate"
)

func testRelayL1MessageSucceed(t *testing.T) {
    // Create db handler and reset db.
    db, err := database.NewOrmFactory(cfg.DBConfig)
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
    defer db.Close()

    prepareContracts(t)

    l1Cfg := cfg.L1Config
    l2Cfg := cfg.L2Config

    // Create L1Relayer
    l1Relayer, err := l1.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
    assert.NoError(t, err)
    defer l1Relayer.Stop()

    // Create L1Watcher
    confirmations := rpc.LatestBlockNumber
    l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)

    // Create L2Watcher
    l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, db)

    // send message through l1 messenger contract
    nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})
    assert.NoError(t, err)
    sendTx, err := l1MessengerInstance.SendMessage(l1Auth, l2Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
    assert.NoError(t, err)
    sendReceipt, err := bind.WaitMined(context.Background(), l1Client, sendTx)
    assert.NoError(t, err)
    if sendReceipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
        t.Fatalf("Call failed")
    }

    // l1 watch process events
    l1Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())

    // check db status
    msg, err := db.GetL1MessageByQueueIndex(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, msg.Status, types.MsgPending)
    assert.Equal(t, msg.Target, l2Auth.From.String())

    // process l1 messages
    l1Relayer.ProcessSavedEvents()
    msg, err = db.GetL1MessageByQueueIndex(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, msg.Status, types.MsgSubmitted)
    relayTxHash, err := db.GetRelayL1MessageTxHash(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, true, relayTxHash.Valid)
    relayTx, _, err := l2Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
    assert.NoError(t, err)
    relayTxReceipt, err := bind.WaitMined(context.Background(), l2Client, relayTx)
    assert.NoError(t, err)
    assert.Equal(t, len(relayTxReceipt.Logs), 1)

    // fetch message relayed events
    l2Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
    msg, err = db.GetL1MessageByQueueIndex(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, msg.Status, types.MsgConfirmed)
}
@@ -7,16 +7,17 @@ import (

    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    geth_types "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/rpc"
    "github.com/stretchr/testify/assert"

    "scroll-tech/database"
    "scroll-tech/database/migrate"
    "scroll-tech/database/orm"
    "scroll-tech/common/types"

    "scroll-tech/bridge/l1"
    "scroll-tech/bridge/l2"

    "scroll-tech/database"
    "scroll-tech/database/migrate"
)

func testRelayL2MessageSucceed(t *testing.T) {
@@ -28,19 +29,19 @@ func testRelayL2MessageSucceed(t *testing.T) {

    prepareContracts(t)

    // Create L2Relayer
    l2Cfg := cfg.L2Config
    l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)
    defer l2Relayer.Stop()

    // Create L2Watcher
    confirmations := rpc.LatestBlockNumber
    l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)
    l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, db)

    // Create L2Relayer
    l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)

    // Create L1Watcher
    l1Cfg := cfg.L1Config
    l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
    l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)

    // send message through l2 messenger contract
    nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
@@ -49,7 +50,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
    assert.NoError(t, err)
    sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
    assert.NoError(t, err)
    if sendReceipt.Status != types.ReceiptStatusSuccessful || err != nil {
    if sendReceipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
        t.Fatalf("Call failed")
    }

@@ -59,62 +60,60 @@ func testRelayL2MessageSucceed(t *testing.T) {
    // check db status
    msg, err := db.GetL2MessageByNonce(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, msg.Status, orm.MsgPending)
    assert.Equal(t, msg.Status, types.MsgPending)
    assert.Equal(t, msg.Sender, l2Auth.From.String())
    assert.Equal(t, msg.Target, l1Auth.From.String())

    // add fake blocks
    traces := []*types.BlockTrace{
    traces := []*geth_types.BlockTrace{
        {
            Header: &types.Header{
            Header: &geth_types.Header{
                Number:     sendReceipt.BlockNumber,
                ParentHash: common.Hash{},
                Difficulty: big.NewInt(0),
                BaseFee:    big.NewInt(0),
            },
            StorageTrace: &types.StorageTrace{},
            StorageTrace: &geth_types.StorageTrace{},
        },
    }
    err = db.InsertBlockTraces(traces)
    err = db.InsertL2BlockTraces(traces)
    assert.NoError(t, err)

    parentBatch := &types.BlockBatch{
        Index: 0,
        Hash:  "0x0000000000000000000000000000000000000000",
    }
    batchData := types.NewBatchData(parentBatch, []*geth_types.BlockTrace{
        traces[0],
    }, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
    batchHash := batchData.Hash().String()

    // add fake batch
    dbTx, err := db.Beginx()
    assert.NoError(t, err)
    batchID, err := db.NewBatchInDBTx(dbTx,
        &orm.BlockInfo{
            Number:     traces[0].Header.Number.Uint64(),
            Hash:       traces[0].Header.Hash().String(),
            ParentHash: traces[0].Header.ParentHash.String(),
        },
        &orm.BlockInfo{
            Number:     traces[0].Header.Number.Uint64(),
            Hash:       traces[0].Header.Hash().String(),
            ParentHash: traces[0].Header.ParentHash.String(),
        },
        traces[0].Header.ParentHash.String(), 1, 194676)
    assert.NoError(t, err)
    err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
        traces[0].Header.Number.Uint64(),
        traces[0].Header.Number.Uint64()}, batchID)
    assert.NoError(t, err)
    err = dbTx.Commit()
    assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
    var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
    for i, block := range batchData.Batch.Blocks {
        blockIDs[i] = block.BlockNumber
    }
    err = db.SetBatchHashForL2BlocksInDBTx(dbTx, blockIDs, batchHash)
    assert.NoError(t, err)
    assert.NoError(t, dbTx.Commit())

    // add dummy proof
    tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
    err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
    assert.NoError(t, err)
    err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
    err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
    assert.NoError(t, err)

    // process pending batch and check status
    l2Relayer.ProcessPendingBatches()
    status, err := db.GetRollupStatus(batchID)
    l2Relayer.SendCommitTx([]*types.BatchData{batchData})
    status, err := db.GetRollupStatus(batchHash)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupCommitting, status)
    commitTxHash, err := db.GetCommitTxHash(batchID)
    assert.Equal(t, types.RollupCommitting, status)
    commitTxHash, err := db.GetCommitTxHash(batchHash)
    assert.NoError(t, err)
    assert.Equal(t, true, commitTxHash.Valid)
    commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
@@ -126,16 +125,16 @@ func testRelayL2MessageSucceed(t *testing.T) {
    // fetch CommitBatch rollup events
    err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
    assert.NoError(t, err)
    status, err = db.GetRollupStatus(batchID)
    status, err = db.GetRollupStatus(batchHash)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupCommitted, status)
    assert.Equal(t, types.RollupCommitted, status)

    // process committed batch and check status
    l2Relayer.ProcessCommittedBatches()
    status, err = db.GetRollupStatus(batchID)
    status, err = db.GetRollupStatus(batchHash)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupFinalizing, status)
    finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
    assert.Equal(t, types.RollupFinalizing, status)
    finalizeTxHash, err := db.GetFinalizeTxHash(batchHash)
    assert.NoError(t, err)
    assert.Equal(t, true, finalizeTxHash.Valid)
    finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
@@ -147,15 +146,15 @@ func testRelayL2MessageSucceed(t *testing.T) {
    // fetch FinalizeBatch events
    err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
    assert.NoError(t, err)
    status, err = db.GetRollupStatus(batchID)
    status, err = db.GetRollupStatus(batchHash)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupFinalized, status)
    assert.Equal(t, types.RollupFinalized, status)

    // process l2 messages
    l2Relayer.ProcessSavedEvents()
    msg, err = db.GetL2MessageByNonce(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, msg.Status, orm.MsgSubmitted)
    assert.Equal(t, msg.Status, types.MsgSubmitted)
    relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, true, relayTxHash.Valid)
@@ -170,5 +169,5 @@ func testRelayL2MessageSucceed(t *testing.T) {
    assert.NoError(t, err)
    msg, err = db.GetL2MessageByNonce(nonce.Uint64())
    assert.NoError(t, err)
    assert.Equal(t, msg.Status, orm.MsgConfirmed)
    assert.Equal(t, msg.Status, types.MsgConfirmed)
}

@@ -3,18 +3,20 @@ package tests
import (
    "context"
    "math/big"
    "scroll-tech/database"
    "scroll-tech/database/migrate"
    "scroll-tech/database/orm"
    "testing"

    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    geth_types "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/stretchr/testify/assert"

    "scroll-tech/common/types"

    "scroll-tech/bridge/l1"
    "scroll-tech/bridge/l2"

    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/stretchr/testify/assert"
    "scroll-tech/database"
    "scroll-tech/database/migrate"
)

func testCommitBatchAndFinalizeBatch(t *testing.T) {
@@ -28,63 +30,63 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {

    // Create L2Relayer
    l2Cfg := cfg.L2Config
    l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
    l2Relayer, err := l2.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)
    defer l2Relayer.Stop()

    // Create L1Watcher
    l1Cfg := cfg.L1Config
    l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
    l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)

    // add some blocks to db
    var traces []*types.BlockTrace
    var traces []*geth_types.BlockTrace
    var parentHash common.Hash
    for i := 1; i <= 10; i++ {
        header := types.Header{
        header := geth_types.Header{
            Number:     big.NewInt(int64(i)),
            ParentHash: parentHash,
            Difficulty: big.NewInt(0),
            BaseFee:    big.NewInt(0),
        }
        traces = append(traces, &types.BlockTrace{
        traces = append(traces, &geth_types.BlockTrace{
            Header:       &header,
            StorageTrace: &types.StorageTrace{},
            StorageTrace: &geth_types.StorageTrace{},
        })
        parentHash = header.Hash()
    }
    err = db.InsertBlockTraces(traces)
    err = db.InsertL2BlockTraces(traces)
    assert.NoError(t, err)

    parentBatch := &types.BlockBatch{
        Index: 0,
        Hash:  "0x0000000000000000000000000000000000000000",
    }
    batchData := types.NewBatchData(parentBatch, []*geth_types.BlockTrace{
        traces[0],
        traces[1],
    }, cfg.L2Config.BatchProposerConfig.PublicInputConfig)

    batchHash := batchData.Hash().String()

    // add one batch to db
    dbTx, err := db.Beginx()
    assert.NoError(t, err)
    batchID, err := db.NewBatchInDBTx(dbTx,
        &orm.BlockInfo{
            Number:     traces[0].Header.Number.Uint64(),
            Hash:       traces[0].Header.Hash().String(),
            ParentHash: traces[0].Header.ParentHash.String(),
        },
        &orm.BlockInfo{
            Number:     traces[1].Header.Number.Uint64(),
            Hash:       traces[1].Header.Hash().String(),
            ParentHash: traces[1].Header.ParentHash.String(),
        },
        traces[0].Header.ParentHash.String(), 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
    assert.NoError(t, err)
    err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
        traces[0].Header.Number.Uint64(),
        traces[1].Header.Number.Uint64()}, batchID)
    assert.NoError(t, err)
    err = dbTx.Commit()
    assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
    var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
    for i, block := range batchData.Batch.Blocks {
        blockIDs[i] = block.BlockNumber
    }
    err = db.SetBatchHashForL2BlocksInDBTx(dbTx, blockIDs, batchHash)
    assert.NoError(t, err)
    assert.NoError(t, dbTx.Commit())

    // process pending batch and check status
    l2Relayer.ProcessPendingBatches()
    l2Relayer.SendCommitTx([]*types.BatchData{batchData})

    status, err := db.GetRollupStatus(batchID)
    status, err := db.GetRollupStatus(batchHash)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupCommitting, status)
    commitTxHash, err := db.GetCommitTxHash(batchID)
    assert.Equal(t, types.RollupCommitting, status)
    commitTxHash, err := db.GetCommitTxHash(batchHash)
    assert.NoError(t, err)
    assert.Equal(t, true, commitTxHash.Valid)
    commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
@@ -96,25 +98,25 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
    // fetch rollup events
    err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
    assert.NoError(t, err)
    status, err = db.GetRollupStatus(batchID)
    status, err = db.GetRollupStatus(batchHash)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupCommitted, status)
    assert.Equal(t, types.RollupCommitted, status)

    // add dummy proof
    tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
    err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
    assert.NoError(t, err)
    err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
    err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
    assert.NoError(t, err)

    // process committed batch and check status
    l2Relayer.ProcessCommittedBatches()

    status, err = db.GetRollupStatus(batchID)
    status, err = db.GetRollupStatus(batchHash)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupFinalizing, status)
    finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
    assert.Equal(t, types.RollupFinalizing, status)
    finalizeTxHash, err := db.GetFinalizeTxHash(batchHash)
    assert.NoError(t, err)
    assert.Equal(t, true, finalizeTxHash.Valid)
    finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
@@ -126,7 +128,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
    // fetch rollup events
    err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
    assert.NoError(t, err)
    status, err = db.GetRollupStatus(batchID)
    status, err = db.GetRollupStatus(batchHash)
    assert.NoError(t, err)
    assert.Equal(t, orm.RollupFinalized, status)
    assert.Equal(t, types.RollupFinalized, status)
}

@@ -1,21 +1,20 @@
package utils

import (
    "bytes"
    "fmt"
    "math/big"

    "github.com/iden3/go-iden3-crypto/keccak256"
    "github.com/scroll-tech/go-ethereum/accounts/abi"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/math"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/crypto"

    bridgeabi "scroll-tech/bridge/abi"
)

// Keccak2 compute the keccack256 of two concatenations of bytes32
func Keccak2(a common.Hash, b common.Hash) common.Hash {
    return common.BytesToHash(keccak256.Hash(append(a.Bytes()[:], b.Bytes()[:]...)))
}

func encodePacked(input ...[]byte) []byte {
    return bytes.Join(input, nil)
    return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
}

// ComputeMessageHash compute the message hash
@@ -23,21 +22,11 @@ func ComputeMessageHash(
    sender common.Address,
    target common.Address,
    value *big.Int,
    fee *big.Int,
    deadline *big.Int,
    message []byte,
    messageNonce *big.Int,
    message []byte,
) common.Hash {
    packed := encodePacked(
        sender.Bytes(),
        target.Bytes(),
        math.U256Bytes(value),
        math.U256Bytes(fee),
        math.U256Bytes(deadline),
        math.U256Bytes(messageNonce),
        message,
    )
    return common.BytesToHash(keccak256.Hash(packed))
    data, _ := bridgeabi.L2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
    return common.BytesToHash(crypto.Keccak256(data))
}
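
Note on the hunk above: the message hash is now the keccak256 of the ABI-encoded `relayMessage` calldata rather than a hand-rolled packed encoding, so `fee` and `deadline` drop out of the preimage. A minimal usage sketch, not part of the diff, reusing the vector from the updated TestComputeMessageHash further below:

    hash := utils.ComputeMessageHash(
        common.HexToAddress("0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63"), // sender
        common.HexToAddress("0x4592D8f8D7B001e72Cb26A73e4Fa1806a51aC79d"), // target
        big.NewInt(0),                // value
        big.NewInt(1),                // messageNonce
        []byte("testbridgecontract"), // message
    )
    // hash.String() == "0xda253c04595a49017bb54b1b46088c69752b5ad2f0c47971ac76b8b25abec202"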

// BufferToUint256Be convert bytes array to uint256 array assuming big-endian
@@ -67,3 +56,43 @@ func BufferToUint256Le(buffer []byte) []*big.Int {
    }
    return buffer256
}

// UnpackLog unpacks a retrieved log into the provided output structure.
// @todo: add unit test.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
    if log.Topics[0] != c.Events[event].ID {
        return fmt.Errorf("event signature mismatch")
    }
    if len(log.Data) > 0 {
        if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
            return err
        }
    }
    var indexed abi.Arguments
    for _, arg := range c.Events[event].Inputs {
        if arg.Indexed {
            indexed = append(indexed, arg)
        }
    }
    return abi.ParseTopics(out, indexed, log.Topics[1:])
}

// UnpackLogIntoMap unpacks a retrieved log into the provided map.
// @todo: add unit test.
func UnpackLogIntoMap(c *abi.ABI, out map[string]interface{}, event string, log types.Log) error {
    if log.Topics[0] != c.Events[event].ID {
        return fmt.Errorf("event signature mismatch")
    }
    if len(log.Data) > 0 {
        if err := c.UnpackIntoMap(out, event, log.Data); err != nil {
            return err
        }
    }
    var indexed abi.Arguments
    for _, arg := range c.Events[event].Inputs {
        if arg.Indexed {
            indexed = append(indexed, arg)
        }
    }
    return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:])
}
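
A hedged usage sketch for UnpackLog, not part of the diff: the event name and output struct here are hypothetical, and the struct's fields must mirror the argument names of the event in whatever ABI you pass in (indexed fields are decoded from the topics, the rest from the log data).

    // relayedMessageEvent is a hypothetical output struct for an event such as
    // RelayedMessage(bytes32 indexed messageHash); adjust to the real ABI.
    type relayedMessageEvent struct {
        MessageHash common.Hash
    }

    func decodeRelayedMessage(messengerABI *abi.ABI, log types.Log) (*relayedMessageEvent, error) {
        out := new(relayedMessageEvent)
        if err := UnpackLog(messengerABI, out, "RelayedMessage", log); err != nil {
            return nil, err
        }
        return out, nil
    }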
@@ -29,13 +29,11 @@ func TestKeccak2(t *testing.T) {

func TestComputeMessageHash(t *testing.T) {
    hash := utils.ComputeMessageHash(
        common.HexToAddress("0xd7227113b92e537aeda220d5a2f201b836e5879d"),
        common.HexToAddress("0x47c02b023b6787ef4e503df42bbb1a94f451a1c0"),
        big.NewInt(5000000000000000),
        common.HexToAddress("0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63"),
        common.HexToAddress("0x4592D8f8D7B001e72Cb26A73e4Fa1806a51aC79d"),
        big.NewInt(0),
        big.NewInt(1674204924),
        common.Hex2Bytes("8eaac8a30000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000000000000000000000000000000011c37937e0800000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000"),
        big.NewInt(30706),
        big.NewInt(1),
        []byte("testbridgecontract"),
    )
    assert.Equal(t, hash.String(), "0x920e59f62ca89a0f481d44961c55d299dd20c575693692d61fdf3ca579d8edf3")
    assert.Equal(t, "0xda253c04595a49017bb54b1b46088c69752b5ad2f0c47971ac76b8b25abec202", hash.String())
}

@@ -12,6 +12,7 @@ require (
github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
gotest.tools v2.2.0+incompatible
)

require (
@@ -30,11 +31,12 @@ require (
github.com/ethereum/go-ethereum v1.11.1 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
@@ -148,8 +148,9 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
@@ -200,8 +201,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -284,7 +285,6 @@ github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM52
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -731,6 +731,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
1 common/testdata/blockTrace_02.json vendored
@@ -1,4 +1,5 @@
{
"withdrawTrieRoot": "0x0000000000000000000000000000000000000000",
"coinbase": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,

1 common/testdata/blockTrace_03.json vendored
@@ -1,4 +1,5 @@
{
"withdrawTrieRoot": "0x0000000000000000000000000000000000000000",
"coinbase": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 3,

229 common/types/batch.go Normal file
@@ -0,0 +1,229 @@
package types

import (
    "bufio"
    "bytes"
    "encoding/binary"
    "math/big"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/hexutil"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/crypto"

    abi "scroll-tech/bridge/abi"
)

// PublicInputHashConfig is the configuration of how to compute the public input hash.
type PublicInputHashConfig struct {
    MaxTxNum      int         `json:"max_tx_num"`
    PaddingTxHash common.Hash `json:"padding_tx_hash"`
}

const defaultMaxTxNum = 44

var defaultPaddingTxHash = [32]byte{}

// BatchData contains info of batch to be committed.
type BatchData struct {
    Batch        abi.IScrollChainBatch
    TxHashes     []common.Hash
    TotalTxNum   uint64
    TotalL1TxNum uint64
    TotalL2Gas   uint64

    // cache for the BatchHash
    hash *common.Hash
    // The config to compute the public input hash, or the block hash.
    // If it is nil, the hash calculation will use `defaultMaxTxNum` and `defaultPaddingTxHash`.
    piCfg *PublicInputHashConfig
}

// Hash calculates the hash of this batch.
func (b *BatchData) Hash() *common.Hash {
    if b.hash != nil {
        return b.hash
    }

    buf := make([]byte, 8)
    hasher := crypto.NewKeccakState()

    // 1. hash PrevStateRoot, NewStateRoot, WithdrawTrieRoot
    // @todo: panic on error here.
    _, _ = hasher.Write(b.Batch.PrevStateRoot[:])
    _, _ = hasher.Write(b.Batch.NewStateRoot[:])
    _, _ = hasher.Write(b.Batch.WithdrawTrieRoot[:])

    // 2. hash all block contexts
    for _, block := range b.Batch.Blocks {
        // write BlockHash & ParentHash
        _, _ = hasher.Write(block.BlockHash[:])
        _, _ = hasher.Write(block.ParentHash[:])
        // write BlockNumber
        binary.BigEndian.PutUint64(buf, block.BlockNumber)
        _, _ = hasher.Write(buf)
        // write Timestamp
        binary.BigEndian.PutUint64(buf, block.Timestamp)
        _, _ = hasher.Write(buf)
        // write BaseFee
        var baseFee [32]byte
        if block.BaseFee != nil {
            baseFee = newByte32FromBytes(block.BaseFee.Bytes())
        }
        _, _ = hasher.Write(baseFee[:])
        // write GasLimit
        binary.BigEndian.PutUint64(buf, block.GasLimit)
        _, _ = hasher.Write(buf)
        // write NumTransactions
        binary.BigEndian.PutUint16(buf[:2], block.NumTransactions)
        _, _ = hasher.Write(buf[:2])
        // write NumL1Messages
        binary.BigEndian.PutUint16(buf[:2], block.NumL1Messages)
        _, _ = hasher.Write(buf[:2])
    }

    // 3. add all tx hashes
    for _, txHash := range b.TxHashes {
        _, _ = hasher.Write(txHash[:])
    }

    // 4. append empty tx hash up to MaxTxNum
    maxTxNum := defaultMaxTxNum
    paddingTxHash := common.Hash(defaultPaddingTxHash)
    if b.piCfg != nil {
        maxTxNum = b.piCfg.MaxTxNum
        paddingTxHash = b.piCfg.PaddingTxHash
    }
    for i := len(b.TxHashes); i < maxTxNum; i++ {
        _, _ = hasher.Write(paddingTxHash[:])
    }

    b.hash = new(common.Hash)
    _, _ = hasher.Read(b.hash[:])

    return b.hash
}
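
For orientation, the preimage hashed above has a fixed layout: three 32-byte roots, a 124-byte context per block, and a 32-byte slot per tx hash up to MaxTxNum. A small sketch, not part of the diff, that mirrors the field widths written above:

    // hashPreimageSize mirrors the writes in Hash(): 3 roots, then per-block
    // contexts (32+32+8+8+32+8+2+2 = 124 bytes), then maxTxNum tx-hash slots.
    func hashPreimageSize(numBlocks, maxTxNum int) int {
        const rootsLen = 3 * 32
        const blockContextLen = 124
        return rootsLen + numBlocks*blockContextLen + maxTxNum*32
    }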

// NewBatchData creates a BatchData given the parent batch information and the traces of the blocks
// included in this batch
func NewBatchData(parentBatch *BlockBatch, blockTraces []*types.BlockTrace, piCfg *PublicInputHashConfig) *BatchData {
    batchData := new(BatchData)
    batch := &batchData.Batch

    // set BatchIndex, ParentBatchHash
    batch.BatchIndex = parentBatch.Index + 1
    batch.ParentBatchHash = common.HexToHash(parentBatch.Hash)
    batch.Blocks = make([]abi.IScrollChainBlockContext, len(blockTraces))

    var batchTxDataBuf bytes.Buffer
    batchTxDataWriter := bufio.NewWriter(&batchTxDataBuf)

    for i, trace := range blockTraces {
        batchData.TotalTxNum += uint64(len(trace.Transactions))
        batchData.TotalL2Gas += trace.Header.GasUsed

        // set baseFee to 0 when it's nil in the block header
        baseFee := trace.Header.BaseFee
        if baseFee == nil {
            baseFee = big.NewInt(0)
        }

        batch.Blocks[i] = abi.IScrollChainBlockContext{
            BlockHash:       trace.Header.Hash(),
            ParentHash:      trace.Header.ParentHash,
            BlockNumber:     trace.Header.Number.Uint64(),
            Timestamp:       trace.Header.Time,
            BaseFee:         baseFee,
            GasLimit:        trace.Header.GasLimit,
            NumTransactions: uint16(len(trace.Transactions)),
            NumL1Messages:   0, // TODO: currently use 0, will re-enable after we use l2geth to include L1 messages
        }

        // fill in RLP-encoded transactions
        for _, txData := range trace.Transactions {
            data, _ := hexutil.Decode(txData.Data)
            // right now we only support legacy tx
            tx := types.NewTx(&types.LegacyTx{
                Nonce:    txData.Nonce,
                To:       txData.To,
                Value:    txData.Value.ToInt(),
                Gas:      txData.Gas,
                GasPrice: txData.GasPrice.ToInt(),
                Data:     data,
                V:        txData.V.ToInt(),
                R:        txData.R.ToInt(),
                S:        txData.S.ToInt(),
            })
            rlpTxData, _ := tx.MarshalBinary()
            var txLen [4]byte
            binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
            _, _ = batchTxDataWriter.Write(txLen[:])
            _, _ = batchTxDataWriter.Write(rlpTxData)
            batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
        }

        // set PrevStateRoot from the first block
        if i == 0 {
            batch.PrevStateRoot = trace.StorageTrace.RootBefore
        }

        // set NewStateRoot & WithdrawTrieRoot from the last block
        if i == len(blockTraces)-1 {
            batch.NewStateRoot = trace.Header.Root
            batch.WithdrawTrieRoot = trace.WithdrawTrieRoot
        }
    }

    if err := batchTxDataWriter.Flush(); err != nil {
        panic("Buffered I/O flush failed")
    }

    batch.L2Transactions = batchTxDataBuf.Bytes()
    batchData.piCfg = piCfg

    return batchData
}

// NewGenesisBatchData generates the batch that contains the genesis block.
func NewGenesisBatchData(genesisBlockTrace *types.BlockTrace) *BatchData {
    header := genesisBlockTrace.Header
    if header.Number.Uint64() != 0 {
        panic("invalid genesis block trace: block number is not 0")
    }

    batchData := new(BatchData)
    batch := &batchData.Batch

    // fill in batch information
    batch.BatchIndex = 0
    batch.Blocks = make([]abi.IScrollChainBlockContext, 1)
    batch.NewStateRoot = header.Root
    // PrevStateRoot, WithdrawTrieRoot, ParentBatchHash should all be 0
    // L2Transactions should be empty

    // fill in block context
    batch.Blocks[0] = abi.IScrollChainBlockContext{
        BlockHash:       header.Hash(),
        ParentHash:      header.ParentHash,
        BlockNumber:     header.Number.Uint64(),
        Timestamp:       header.Time,
        BaseFee:         header.BaseFee,
        GasLimit:        header.GasLimit,
        NumTransactions: 0,
        NumL1Messages:   0,
    }

    return batchData
}

// newByte32FromBytes converts the bytes in big-endian encoding to 32 bytes in big-endian encoding
func newByte32FromBytes(b []byte) [32]byte {
    var byte32 [32]byte

    if len(b) > 32 {
        b = b[len(b)-32:]
    }

    copy(byte32[32-len(b):], b)
    return byte32
}
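
Illustrative expectations for newByte32FromBytes, a sketch rather than part of the diff: short inputs are left-padded so the big-endian value is preserved, and inputs longer than 32 bytes keep only the low-order 32 bytes.

    func exampleNewByte32FromBytes() {
        short := newByte32FromBytes([]byte{0x01})    // short[31] == 0x01, all other bytes zero
        long := newByte32FromBytes(make([]byte, 33)) // first (most significant) byte dropped
        _, _ = short, long
    }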
99 common/types/batch_test.go Normal file
@@ -0,0 +1,99 @@
package types

import (
    "math/big"
    "testing"

    "gotest.tools/assert"

    "github.com/scroll-tech/go-ethereum/common"
    geth_types "github.com/scroll-tech/go-ethereum/core/types"

    abi "scroll-tech/bridge/abi"
)

func TestBatchHash(t *testing.T) {
    txBytes := common.Hex2Bytes("02f8710582fd14808506e38dccc9825208944d496ccc28058b1d74b7a19541663e21154f9c848801561db11e24a43380c080a0d890606d7a35b2ab0f9b866d62c092d5b163f3e6a55537ae1485aac08c3f8ff7a023997be2d32f53e146b160fff0ba81e81dbb4491c865ab174d15c5b3d28c41ae")
    tx := new(geth_types.Transaction)
    if err := tx.UnmarshalBinary(txBytes); err != nil {
        t.Fatalf("invalid tx hex string: %s", err)
    }

    batchData := new(BatchData)
    batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
    batchData.piCfg = &PublicInputHashConfig{
        MaxTxNum:      4,
        PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
    }

    batch := &batchData.Batch
    batch.PrevStateRoot = common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000cafe")

    block := abi.IScrollChainBlockContext{
        BlockNumber:     51966,
        Timestamp:       123456789,
        BaseFee:         new(big.Int).SetUint64(0),
        GasLimit:        10000000000000000,
        NumTransactions: 1,
        NumL1Messages:   0,
    }
    batch.Blocks = append(batch.Blocks, block)

    hash := batchData.Hash()
    assert.Equal(t, *hash, common.HexToHash("0xa9f2ca3175794f91226a410ba1e60fff07a405c957562675c4149b77e659d805"))

    // use a different tx hash
    txBytes = common.Hex2Bytes("f8628001830f424094000000000000000000000000000000000000bbbb8080820a97a064e07cd8f939e2117724bdcbadc80dda421381cbc2a1f4e0d093d9cc5c5cf68ea03e264227f80852d88743cd9e43998f2746b619180366a87e4531debf9c3fa5dc")
    tx = new(geth_types.Transaction)
    if err := tx.UnmarshalBinary(txBytes); err != nil {
        t.Fatalf("invalid tx hex string: %s", err)
    }
    batchData.TxHashes[0] = tx.Hash()

    batchData.hash = nil // clear the cache
    assert.Equal(t, *batchData.Hash(), common.HexToHash("0x398cb22bbfa1665c1b342b813267538a4c933d7f92d8bd9184aba0dd1122987b"))
}

func TestNewGenesisBatch(t *testing.T) {
    genesisBlock := &geth_types.Header{
        UncleHash:   common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
        Root:        common.HexToHash("0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5"),
        TxHash:      common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
        ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
        Difficulty:  big.NewInt(1),
        Number:      big.NewInt(0),
        GasLimit:    940000000,
        GasUsed:     0,
        Time:        1639724192,
        Extra:       common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000214f8d488aa9ebf83e30bad45fb8f9c8ee2509f5511caff794753d07e9dfb218cfc233bb62d2c57022783094e1a7edb6f069f8424bb68496a0926b130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
        BaseFee:     big.NewInt(1000000000),
    }
    assert.Equal(
        t,
        genesisBlock.Hash().Hex(),
        "0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
        "wrong genesis block header",
    )

    blockTrace := &geth_types.BlockTrace{
        Coinbase:         nil,
        Header:           genesisBlock,
        Transactions:     []*geth_types.TransactionData{},
        StorageTrace:     nil,
        ExecutionResults: []*geth_types.ExecutionResult{},
        MPTWitness:       nil,
    }

    batchData := NewGenesisBatchData(blockTrace)
    t.Log(batchData.Batch.Blocks[0])
    batchData.piCfg = &PublicInputHashConfig{
        MaxTxNum:      25,
        PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
    }
    assert.Equal(
        t,
        batchData.Hash().Hex(),
        "0x65cf210e30f75cf8fd198df124255f73bc08d6324759e828a784fa938e7ac43d",
        "wrong genesis batch hash",
    )
}
250 common/types/db.go Normal file
@@ -0,0 +1,250 @@
// Package types defines the table schema data structure used in the database tables
package types

import (
    "database/sql"
    "fmt"
    "time"
)

// L1BlockStatus represents current l1 block processing status
type L1BlockStatus int

// GasOracleStatus represents current gas oracle processing status
type GasOracleStatus int

const (
    // L1BlockUndefined : undefined l1 block status
    L1BlockUndefined L1BlockStatus = iota

    // L1BlockPending represents the l1 block status is pending
    L1BlockPending

    // L1BlockImporting represents the l1 block status is importing
    L1BlockImporting

    // L1BlockImported represents the l1 block status is imported
    L1BlockImported

    // L1BlockFailed represents the l1 block status is failed
    L1BlockFailed
)

const (
    // GasOracleUndefined : undefined gas oracle status
    GasOracleUndefined GasOracleStatus = iota

    // GasOraclePending represents the gas oracle status is pending
    GasOraclePending

    // GasOracleImporting represents the gas oracle status is importing
    GasOracleImporting

    // GasOracleImported represents the gas oracle status is imported
    GasOracleImported

    // GasOracleFailed represents the gas oracle status is failed
    GasOracleFailed
)

// L1BlockInfo is structure of stored l1 block
type L1BlockInfo struct {
    Number    uint64 `json:"number" db:"number"`
    Hash      string `json:"hash" db:"hash"`
    HeaderRLP string `json:"header_rlp" db:"header_rlp"`
    BaseFee   uint64 `json:"base_fee" db:"base_fee"`

    BlockStatus     L1BlockStatus   `json:"block_status" db:"block_status"`
    GasOracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"`

    ImportTxHash sql.NullString `json:"import_tx_hash" db:"import_tx_hash"`
    OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"`
}

// MsgStatus represents current layer1 transaction processing status
type MsgStatus int

const (
    // MsgUndefined : undefined msg status
    MsgUndefined MsgStatus = iota

    // MsgPending represents the from_layer message status is pending
    MsgPending

    // MsgSubmitted represents the from_layer message status is submitted
    MsgSubmitted

    // MsgConfirmed represents the from_layer message status is confirmed
    MsgConfirmed

    // MsgFailed represents the from_layer message status is failed
    MsgFailed

    // MsgExpired represents the from_layer message status is expired
    MsgExpired
)

// L1Message is structure of stored layer1 bridge message
type L1Message struct {
    QueueIndex uint64    `json:"queue_index" db:"queue_index"`
    MsgHash    string    `json:"msg_hash" db:"msg_hash"`
    Height     uint64    `json:"height" db:"height"`
    Sender     string    `json:"sender" db:"sender"`
    Value      string    `json:"value" db:"value"`
    Target     string    `json:"target" db:"target"`
    Calldata   string    `json:"calldata" db:"calldata"`
    GasLimit   uint64    `json:"gas_limit" db:"gas_limit"`
    Layer1Hash string    `json:"layer1_hash" db:"layer1_hash"`
    Status     MsgStatus `json:"status" db:"status"`
}

// L2Message is structure of stored layer2 bridge message
type L2Message struct {
    Nonce      uint64    `json:"nonce" db:"nonce"`
    MsgHash    string    `json:"msg_hash" db:"msg_hash"`
    Height     uint64    `json:"height" db:"height"`
    Sender     string    `json:"sender" db:"sender"`
    Value      string    `json:"value" db:"value"`
    Target     string    `json:"target" db:"target"`
    Calldata   string    `json:"calldata" db:"calldata"`
    Layer2Hash string    `json:"layer2_hash" db:"layer2_hash"`
    Status     MsgStatus `json:"status" db:"status"`
}

// BlockInfo is structure of stored `block_trace` without `trace`
type BlockInfo struct {
    Number         uint64         `json:"number" db:"number"`
    Hash           string         `json:"hash" db:"hash"`
    ParentHash     string         `json:"parent_hash" db:"parent_hash"`
    BatchHash      sql.NullString `json:"batch_hash" db:"batch_hash"`
    TxNum          uint64         `json:"tx_num" db:"tx_num"`
    GasUsed        uint64         `json:"gas_used" db:"gas_used"`
    BlockTimestamp uint64         `json:"block_timestamp" db:"block_timestamp"`
}

// RollerProveStatus is the roller prove status of a block batch (session)
type RollerProveStatus int32

const (
    // RollerAssigned indicates roller assigned but has not submitted proof
    RollerAssigned RollerProveStatus = iota
    // RollerProofValid indicates roller has submitted valid proof
    RollerProofValid
    // RollerProofInvalid indicates roller has submitted invalid proof
    RollerProofInvalid
)

func (s RollerProveStatus) String() string {
    switch s {
    case RollerAssigned:
        return "RollerAssigned"
    case RollerProofValid:
        return "RollerProofValid"
    case RollerProofInvalid:
        return "RollerProofInvalid"
    default:
        return fmt.Sprintf("Bad Value: %d", int32(s))
    }
}

// RollerStatus is the roller name and roller prove status
type RollerStatus struct {
    PublicKey string            `json:"public_key"`
    Name      string            `json:"name"`
    Status    RollerProveStatus `json:"status"`
}

// SessionInfo is assigned rollers info of a block batch (session)
type SessionInfo struct {
    ID             string                   `json:"id"`
    Rollers        map[string]*RollerStatus `json:"rollers"`
    StartTimestamp int64                    `json:"start_timestamp"`
}

// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
type ProvingStatus int

const (
    // ProvingStatusUndefined : undefined proving_task status
    ProvingStatusUndefined ProvingStatus = iota
    // ProvingTaskUnassigned : proving_task is not assigned to be proved
    ProvingTaskUnassigned
    // ProvingTaskSkipped : proving_task is skipped for proof generation
    ProvingTaskSkipped
    // ProvingTaskAssigned : proving_task is assigned to be proved
    ProvingTaskAssigned
    // ProvingTaskProved : proof has been returned by prover
    ProvingTaskProved
    // ProvingTaskVerified : proof is valid
    ProvingTaskVerified
    // ProvingTaskFailed : fail to generate proof
    ProvingTaskFailed
)

func (ps ProvingStatus) String() string {
    switch ps {
    case ProvingTaskUnassigned:
        return "unassigned"
    case ProvingTaskSkipped:
        return "skipped"
    case ProvingTaskAssigned:
        return "assigned"
    case ProvingTaskProved:
        return "proved"
    case ProvingTaskVerified:
        return "verified"
    case ProvingTaskFailed:
        return "failed"
    default:
        return "undefined"
    }
}

// RollupStatus block_batch rollup_status (pending, committing, committed, finalizing, finalized)
type RollupStatus int

const (
    // RollupUndefined : undefined rollup status
    RollupUndefined RollupStatus = iota
    // RollupPending : batch is pending to rollup to layer1
    RollupPending
    // RollupCommitting : rollup transaction is submitted to layer1
    RollupCommitting
    // RollupCommitted : rollup transaction is confirmed to layer1
    RollupCommitted
    // RollupFinalizing : finalize transaction is submitted to layer1
    RollupFinalizing
    // RollupFinalized : finalize transaction is confirmed to layer1
    RollupFinalized
    // RollupFinalizationSkipped : batch finalization is skipped
    RollupFinalizationSkipped
)

// BlockBatch is structure of stored block_batch
type BlockBatch struct {
    Hash                string          `json:"hash" db:"hash"`
    Index               uint64          `json:"index" db:"index"`
    ParentHash          string          `json:"parent_hash" db:"parent_hash"`
    StartBlockNumber    uint64          `json:"start_block_number" db:"start_block_number"`
    StartBlockHash      string          `json:"start_block_hash" db:"start_block_hash"`
    EndBlockNumber      uint64          `json:"end_block_number" db:"end_block_number"`
    EndBlockHash        string          `json:"end_block_hash" db:"end_block_hash"`
    StateRoot           string          `json:"state_root" db:"state_root"`
    TotalTxNum          uint64          `json:"total_tx_num" db:"total_tx_num"`
    TotalL1TxNum        uint64          `json:"total_l1_tx_num" db:"total_l1_tx_num"`
    TotalL2Gas          uint64          `json:"total_l2_gas" db:"total_l2_gas"`
    ProvingStatus       ProvingStatus   `json:"proving_status" db:"proving_status"`
    Proof               []byte          `json:"proof" db:"proof"`
    InstanceCommitments []byte          `json:"instance_commitments" db:"instance_commitments"`
    ProofTimeSec        uint64          `json:"proof_time_sec" db:"proof_time_sec"`
    RollupStatus        RollupStatus    `json:"rollup_status" db:"rollup_status"`
    OracleStatus        GasOracleStatus `json:"oracle_status" db:"oracle_status"`
    CommitTxHash        sql.NullString  `json:"commit_tx_hash" db:"commit_tx_hash"`
    FinalizeTxHash      sql.NullString  `json:"finalize_tx_hash" db:"finalize_tx_hash"`
    OracleTxHash        sql.NullString  `json:"oracle_tx_hash" db:"oracle_tx_hash"`
    CreatedAt           *time.Time      `json:"created_at" db:"created_at"`
    ProverAssignedAt    *time.Time      `json:"prover_assigned_at" db:"prover_assigned_at"`
    ProvedAt            *time.Time      `json:"proved_at" db:"proved_at"`
    CommittedAt         *time.Time      `json:"committed_at" db:"committed_at"`
    FinalizedAt         *time.Time      `json:"finalized_at" db:"finalized_at"`
}
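
The integration tests earlier in this diff walk a batch through these statuses in order; a sketch, not part of the diff, of the progression they assert:

    // Lifecycle asserted by testCommitBatchAndFinalizeBatch above; the ordering
    // is a convention of the tests, not enforced by the RollupStatus type.
    var rollupLifecycle = []RollupStatus{
        RollupPending,    // batch inserted via NewBatchInDBTx
        RollupCommitting, // SendCommitTx submitted the commit transaction
        RollupCommitted,  // CommitBatch event fetched by the L1 watcher
        RollupFinalizing, // ProcessCommittedBatches submitted the finalize transaction
        RollupFinalized,  // FinalizeBatch event fetched by the L1 watcher
    }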
@@ -1,19 +0,0 @@
package utils

import (
    "math/big"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
)

// ComputeBatchID compute a unique hash for a batch using "endBlockHash" & "endBlockHash in last batch"
// & "batch height", following the logic in `_computeBatchId` in contracts/src/L1/rollup/ZKRollup.sol
func ComputeBatchID(endBlockHash common.Hash, lastEndBlockHash common.Hash, index *big.Int) string {
    indexBytes := make([]byte, 32)
    return crypto.Keccak256Hash(
        endBlockHash.Bytes(),
        lastEndBlockHash.Bytes(),
        index.FillBytes(indexBytes),
    ).String()
}
@@ -1,40 +0,0 @@
package utils_test

import (
	"math/big"
	"testing"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/math"
	"github.com/stretchr/testify/assert"

	"scroll-tech/common/utils"
)

func TestComputeBatchID(t *testing.T) {
	// expected generated using contract:
	// ```
	// // SPDX-License-Identifier: MIT

	// pragma solidity ^0.6.6;

	// contract AAA {
	//     uint256 private constant MAX = ~uint256(0);

	//     function _computeBatchId() public pure returns (bytes32) {
	//         return keccak256(abi.encode(bytes32(0), bytes32(0), MAX));
	//     }
	// }
	// ```

	expected := "0xafe1e714d2cd3ed5b0fa0a04ee95cd564b955ab8661c5665588758b48b66e263"
	actual := utils.ComputeBatchID(common.Hash{}, common.Hash{}, math.MaxBig256)
	assert.Equal(t, expected, actual)

	expected = "0xe05698242b035c0e4d1d58e8ab89507ac7a1403b17fd6a7ea87621a32674ec88"
	actual = utils.ComputeBatchID(
		common.HexToHash("0xfaef7761204f43c4ab2528a65fcc7ec2108709e5ebb646bdce9ce3c8862d3f25"),
		common.HexToHash("0xe3abef08cce4b8a0dcc6b7e4dd11f32863007a86f46c1d136682b5d77bdf0f7a"),
		big.NewInt(77233900))
	assert.Equal(t, expected, actual)
}
@@ -5,7 +5,7 @@ import (
	"runtime/debug"
)

var tag = "alpha-v1.2"
var tag = "alpha-v1.7"

var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
@@ -36,24 +36,25 @@ describe("ScrollChain", async () => {
    await chain.importGenesisBatch({
      blocks: [
        {
          blockHash: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
          blockHash: "0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
          parentHash: constants.HashZero,
          blockNumber: 0,
          timestamp: 0,
          baseFee: 0,
          gasLimit: 0,
          timestamp: 1639724192,
          baseFee: 1000000000,
          gasLimit: 940000000,
          numTransactions: 0,
          numL1Messages: 0,
        },
      ],
      prevStateRoot: constants.HashZero,
      newStateRoot: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
      withdrawTrieRoot: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
      newStateRoot: "0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5",
      withdrawTrieRoot: constants.HashZero,
      batchIndex: 0,
      parentBatchHash: constants.HashZero,
      l2Transactions: [],
    });
    const parentBatchHash = await chain.lastFinalizedBatchHash();
    console.log("genesis batch hash:", parentBatchHash);

    for (let numTx = 1; numTx <= 25; ++numTx) {
      for (let txLength = 100; txLength <= 1000; txLength += 100) {
@@ -72,7 +73,7 @@ describe("ScrollChain", async () => {
          blocks: [
            {
              blockHash: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
              parentHash: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
              parentHash: "0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
              blockNumber: 1,
              timestamp: numTx * 100000 + txLength,
              baseFee: 0,
@@ -81,7 +82,7 @@ describe("ScrollChain", async () => {
              numL1Messages: 0,
            },
          ],
          prevStateRoot: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
          prevStateRoot: "0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5",
          newStateRoot: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
          withdrawTrieRoot: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
          batchIndex: 1,
@@ -26,10 +26,10 @@ contract DeployL1BridgeContracts is Script {

    uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");

    uint256 MAX_TX_IN_ONE_BATCH = vm.envOr("MAX_TX_IN_ONE_BATCH", uint256(25));
    uint256 MAX_TX_IN_ONE_BATCH = vm.envOr("MAX_TX_IN_ONE_BATCH", uint256(44));

    bytes32 PADDING_TX_HASH =
        vm.envOr("PADDING_TX_HASH", bytes32(0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6));
        vm.envOr("PADDING_TX_HASH", bytes32(0x0000000000000000000000000000000000000000000000000000000000000000));

    address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
    address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
@@ -147,7 +147,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
        require(_genesisBlock.blockNumber == 0, "Block is not genesis");
        require(_genesisBlock.parentHash == bytes32(0), "Parent hash not empty");

        bytes32 _batchHash = _commitBatch(_genesisBatch, true);
        bytes32 _batchHash = _commitBatch(_genesisBatch);

        lastFinalizedBatchHash = _batchHash;
        finalizedBatches[0] = _batchHash;
@@ -158,13 +158,13 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {

    /// @inheritdoc IScrollChain
    function commitBatch(Batch memory _batch) public override OnlySequencer {
        _commitBatch(_batch, false);
        _commitBatch(_batch);
    }

    /// @inheritdoc IScrollChain
    function commitBatches(Batch[] memory _batches) public override OnlySequencer {
        for (uint256 i = 0; i < _batches.length; i++) {
            _commitBatch(_batches[i], false);
            _commitBatch(_batches[i]);
        }
    }

@@ -232,7 +232,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {

    /// @dev Internal function to commit a batch.
    /// @param _batch The batch to commit.
    function _commitBatch(Batch memory _batch, bool _isGenesis) internal returns (bytes32) {
    function _commitBatch(Batch memory _batch) internal returns (bytes32) {
        // check whether the batch is empty
        require(_batch.blocks.length > 0, "Batch is empty");
@@ -241,7 +241,8 @@ contract L2ERC1155GatewayTest is DSTestPlus, ERC1155TokenReceiver {
        uint256 tokenId,
        uint256 amount
    ) public {
        if (to == address(0) || to.code.length > 0) to = address(1);
        hevm.assume(to != address(0));
        hevm.assume(to.code.length == 0);

        gateway.updateTokenMapping(address(token), address(token));
        tokenId = bound(tokenId, 0, TOKEN_COUNT - 1);
@@ -220,8 +220,9 @@ contract L2ERC721GatewayTest is DSTestPlus {
        address to,
        uint256 tokenId
    ) public {
        if (to == address(0)) to = address(1);
        if (to == address(mockRecipient)) to = address(1);
        hevm.assume(to != address(0));
        hevm.assume(to.code.length == 0);

        tokenId = bound(tokenId, NOT_OWNED_TOKEN_ID + 1, type(uint256).max);

        // finalize deposit
@@ -178,12 +178,8 @@ contract L2ETHGatewayTest is L2GatewayTestBase {
        uint256 amount,
        bytes memory dataToCall
    ) public {
        uint256 size;
        assembly {
            size := extcodesize(recipient)
        }
        hevm.assume(size == 0);
        hevm.assume(recipient != address(0));
        hevm.assume(recipient.code.length == 0);

        amount = bound(amount, 1, address(this).balance / 2);
@@ -4,7 +4,7 @@ import (
	"fmt"
	"time"

	"scroll-tech/database/orm"
	"scroll-tech/common/types"
)

// RollerDebugAPI is the roller API interface used to get debug messages.
@@ -63,7 +63,7 @@ func (m *Manager) ListRollers() ([]*RollerInfo, error) {
	return res, nil
}

func newSessionInfo(sess *session, status orm.ProvingStatus, errMsg string, finished bool) *SessionInfo {
func newSessionInfo(sess *session, status types.ProvingStatus, errMsg string, finished bool) *SessionInfo {
	now := time.Now()
	var nameList []string
	for pk := range sess.info.Rollers {
@@ -90,7 +90,7 @@ func (m *Manager) GetSessionInfo(sessionID string) (*SessionInfo, error) {
		return info, nil
	}
	if s, ok := m.sessions[sessionID]; ok {
		return newSessionInfo(s, orm.ProvingTaskAssigned, "", false), nil
		return newSessionInfo(s, types.ProvingTaskAssigned, "", false), nil
	}
	return nil, fmt.Errorf("no such session, sessionID: %s", sessionID)
}
@@ -70,6 +70,7 @@ github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46f
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
@@ -12,15 +12,15 @@ import (
	cmap "github.com/orcaman/concurrent-map"
	"github.com/patrickmn/go-cache"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
	geth_types "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/rpc"

	"scroll-tech/common/message"
	"scroll-tech/common/types"

	"scroll-tech/database"
	"scroll-tech/database/orm"

	"scroll-tech/coordinator/config"
	"scroll-tech/coordinator/verifier"
@@ -33,12 +33,12 @@ const (
type rollerProofStatus struct {
	id     string
	pk     string
	status orm.RollerProveStatus
	status types.RollerProveStatus
}

// Contains all the information on an ongoing proof generation session.
type session struct {
	info *orm.SessionInfo
	info *types.SessionInfo
	// finish channel is used to pass the public key of the rollers who finished the proving process.
	finishChan chan rollerProofStatus
}
@@ -136,8 +136,8 @@ func (m *Manager) isRunning() bool {
// Loop keeps the manager running.
func (m *Manager) Loop() {
	var (
		tick  = time.NewTicker(time.Second * 3)
		tasks []*orm.BlockBatch
		tick  = time.NewTicker(time.Second * 2)
		tasks []*types.BlockBatch
	)
	defer tick.Stop()

@@ -148,7 +148,7 @@ func (m *Manager) Loop() {
			var err error
			// TODO: add cache
			if tasks, err = m.orm.GetBlockBatches(
				map[string]interface{}{"proving_status": orm.ProvingTaskUnassigned},
				map[string]interface{}{"proving_status": types.ProvingTaskUnassigned},
				fmt.Sprintf(
					"ORDER BY index %s LIMIT %d;",
					m.cfg.OrderSession,
@@ -183,9 +183,9 @@ func (m *Manager) restorePrevSessions() {

	m.mu.Lock()
	defer m.mu.Unlock()
	if ids, err := m.orm.GetAssignedBatchIDs(); err != nil {
		log.Error("failed to get assigned batch ids from db", "error", err)
	} else if prevSessions, err := m.orm.GetSessionInfosByIDs(ids); err != nil {
	if hashes, err := m.orm.GetAssignedBatchHashes(); err != nil {
		log.Error("failed to get assigned batch hashes from db", "error", err)
	} else if prevSessions, err := m.orm.GetSessionInfosByHashes(hashes); err != nil {
		log.Error("failed to recover roller session info from db", "error", err)
	} else {
		for _, v := range prevSessions {
@@ -233,7 +233,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
	if !ok {
		return fmt.Errorf("roller %s (%s) is not eligible to partake in proof session %v", roller.Name, roller.PublicKey, msg.ID)
	}
	if roller.Status == orm.RollerProofValid {
	if roller.Status == types.RollerProofValid {
		// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
		// TODO: Defend against invalid proof resubmissions by one of the following two methods:
		// (i) slash the roller for each submission of invalid proof
@@ -257,14 +257,14 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
		// TODO: maybe we should use db tx for the whole process?
		// Roll back current proof's status.
		if dbErr != nil {
			if err := m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskUnassigned); err != nil {
			if err := m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskUnassigned); err != nil {
				log.Error("fail to reset task status as Unassigned", "msg.ID", msg.ID)
			}
		}
		// set proof status
		status := orm.RollerProofInvalid
		status := types.RollerProofInvalid
		if success && dbErr == nil {
			status = orm.RollerProofValid
			status = types.RollerProofValid
		}
		// notify the session that the roller finished the proving process
		sess.finishChan <- rollerProofStatus{msg.ID, pk, status}
@@ -282,17 +282,17 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
	}

	// store proof content
	if dbErr = m.orm.UpdateProofByID(m.ctx, msg.ID, msg.Proof.Proof, msg.Proof.FinalPair, proofTimeSec); dbErr != nil {
	if dbErr = m.orm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof.Proof, msg.Proof.FinalPair, proofTimeSec); dbErr != nil {
		log.Error("failed to store proof into db", "error", dbErr)
		return dbErr
	}
	if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskProved); dbErr != nil {
	if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskProved); dbErr != nil {
		log.Error("failed to update task status as proved", "error", dbErr)
		return dbErr
	}

	var err error
	tasks, err := m.orm.GetBlockBatches(map[string]interface{}{"id": msg.ID})
	tasks, err := m.orm.GetBlockBatches(map[string]interface{}{"hash": msg.ID})
	if len(tasks) == 0 {
		if err != nil {
			log.Error("failed to get tasks", "error", err)
@@ -311,11 +311,11 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
	}

	if success {
		if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskVerified); dbErr != nil {
		if dbErr = m.orm.UpdateProvingStatus(msg.ID, types.ProvingTaskVerified); dbErr != nil {
			log.Error(
				"failed to update proving_status",
				"msg.ID", msg.ID,
				"status", orm.ProvingTaskVerified,
				"status", types.ProvingTaskVerified,
				"error", dbErr)
		}
		return dbErr
@@ -342,7 +342,7 @@ func (m *Manager) CollectProofs(sess *session) {
			// First, round up the keys that actually sent in a valid proof.
			var participatingRollers []string
			for pk, roller := range sess.info.Rollers {
				if roller.Status == orm.RollerProofValid {
				if roller.Status == types.RollerProofValid {
					participatingRollers = append(participatingRollers, pk)
				}
			}
@@ -356,7 +356,7 @@ func (m *Manager) CollectProofs(sess *session) {
				// Note that this is only a workaround for testnet here.
				// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
				// so as to re-distribute the task in the future
				if err := m.orm.UpdateProvingStatus(sess.info.ID, orm.ProvingTaskFailed); err != nil {
				if err := m.orm.UpdateProvingStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
					log.Error("fail to reset task_status as Unassigned", "id", sess.info.ID, "err", err)
				}
				return
@@ -372,7 +372,7 @@ func (m *Manager) CollectProofs(sess *session) {
			m.mu.Lock()
			sess.info.Rollers[ret.pk].Status = ret.status
			if m.isSessionFailed(sess.info) {
				if err := m.orm.UpdateProvingStatus(ret.id, orm.ProvingTaskFailed); err != nil {
				if err := m.orm.UpdateProvingStatus(ret.id, types.ProvingTaskFailed); err != nil {
					log.Error("failed to update proving_status as failed", "msg.ID", ret.id, "error", err)
				}
			}
@@ -384,9 +384,9 @@ func (m *Manager) CollectProofs(sess *session) {
	}
}

func (m *Manager) isSessionFailed(info *orm.SessionInfo) bool {
func (m *Manager) isSessionFailed(info *types.SessionInfo) bool {
	for _, roller := range info.Rollers {
		if roller.Status != orm.RollerProofInvalid {
		if roller.Status != types.RollerProofInvalid {
			return false
		}
	}
@@ -410,32 +410,32 @@ func (m *Manager) APIs() []rpc.API {
}

// StartProofGenerationSession starts a proof generation session
func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success bool) {
func (m *Manager) StartProofGenerationSession(task *types.BlockBatch) (success bool) {
	if m.GetNumberOfIdleRollers() == 0 {
		log.Warn("no idle roller when starting proof generation session", "id", task.ID)
		log.Warn("no idle roller when starting proof generation session", "id", task.Hash)
		return false
	}

	log.Info("start proof generation session", "id", task.ID)
	log.Info("start proof generation session", "id", task.Hash)
	defer func() {
		if !success {
			if err := m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskUnassigned); err != nil {
				log.Error("fail to reset task_status as Unassigned", "id", task.ID, "err", err)
			if err := m.orm.UpdateProvingStatus(task.Hash, types.ProvingTaskUnassigned); err != nil {
				log.Error("fail to reset task_status as Unassigned", "id", task.Hash, "err", err)
			}
		}
	}()

	// Get block traces.
	blockInfos, err := m.orm.GetBlockInfos(map[string]interface{}{"batch_id": task.ID})
	blockInfos, err := m.orm.GetL2BlockInfos(map[string]interface{}{"batch_hash": task.Hash})
	if err != nil {
		log.Error(
			"could not GetBlockInfos",
			"batch_id", task.ID,
			"batch_hash", task.Hash,
			"error", err,
		)
		return false
	}
	traces := make([]*types.BlockTrace, len(blockInfos))
	traces := make([]*geth_types.BlockTrace, len(blockInfos))
	for i, blockInfo := range blockInfos {
		traces[i], err = m.Client.GetBlockTraceByHash(m.ctx, common.HexToHash(blockInfo.Hash))
		if err != nil {
@@ -450,37 +450,37 @@ func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success boo
	}

	// Dispatch task to rollers.
	rollers := make(map[string]*orm.RollerStatus)
	rollers := make(map[string]*types.RollerStatus)
	for i := 0; i < int(m.cfg.RollersPerSession); i++ {
		roller := m.selectRoller()
		if roller == nil {
			log.Info("selectRoller returns nil")
			break
		}
		log.Info("roller is picked", "session id", task.ID, "name", roller.Name, "public key", roller.PublicKey)
		log.Info("roller is picked", "session id", task.Hash, "name", roller.Name, "public key", roller.PublicKey)
		// send trace to roller
		if !roller.sendTask(task.ID, traces) {
			log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", task.ID)
		if !roller.sendTask(task.Hash, traces) {
			log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", task.Hash)
			continue
		}
		rollers[roller.PublicKey] = &orm.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: orm.RollerAssigned}
		rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
	}
	// No roller assigned.
	if len(rollers) == 0 {
		log.Error("no roller assigned", "id", task.ID, "number of idle rollers", m.GetNumberOfIdleRollers())
		log.Error("no roller assigned", "id", task.Hash, "number of idle rollers", m.GetNumberOfIdleRollers())
		return false
	}

	// Update session proving status as assigned.
	if err = m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned); err != nil {
		log.Error("failed to update task status", "id", task.ID, "err", err)
	if err = m.orm.UpdateProvingStatus(task.Hash, types.ProvingTaskAssigned); err != nil {
		log.Error("failed to update task status", "id", task.Hash, "err", err)
		return false
	}

	// Create a proof generation session.
	sess := &session{
		info: &orm.SessionInfo{
			ID:             task.ID,
		info: &types.SessionInfo{
			ID:             task.Hash,
			Rollers:        rollers,
			StartTimestamp: time.Now().Unix(),
		},
@@ -502,7 +502,7 @@ func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success boo
	}

	m.mu.Lock()
	m.sessions[task.ID] = sess
	m.sessions[task.Hash] = sess
	m.mu.Unlock()
	go m.CollectProofs(sess)

@@ -517,7 +517,7 @@ func (m *Manager) IsRollerIdle(hexPk string) bool {
	// timeout. So a busy roller could be marked as idle in a finished session.
	for _, sess := range m.sessions {
		for pk, roller := range sess.info.Rollers {
			if pk == hexPk && roller.Status == orm.RollerAssigned {
			if pk == hexPk && roller.Status == types.RollerAssigned {
				return false
			}
		}
@@ -527,7 +527,7 @@ func (m *Manager) IsRollerIdle(hexPk string) bool {
}

func (m *Manager) addFailedSession(sess *session, errMsg string) {
	m.failedSessionInfos[sess.info.ID] = newSessionInfo(sess, orm.ProvingTaskFailed, errMsg, true)
	m.failedSessionInfos[sess.info.ID] = newSessionInfo(sess, types.ProvingTaskFailed, errMsg, true)
}

// VerifyToken verifies pubkey for token and expiration time
@@ -5,9 +5,11 @@ import (
	"context"
	"crypto/ecdsa"
	"crypto/rand"
	"encoding/json"
	"fmt"
	"math/big"
	"net/http"
	"os"
	"strconv"
	"strings"
	"sync"
@@ -15,20 +17,20 @@ import (
	"time"

	"github.com/scroll-tech/go-ethereum"

	geth_types "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/stretchr/testify/assert"
	"golang.org/x/sync/errgroup"

	"scroll-tech/database"
	"scroll-tech/database/migrate"
	"scroll-tech/database/orm"

	"scroll-tech/coordinator"
	client2 "scroll-tech/coordinator/client"

	"scroll-tech/common/docker"
	"scroll-tech/common/message"
	"scroll-tech/common/types"
	"scroll-tech/common/utils"

	bridge_config "scroll-tech/bridge/config"
@@ -39,6 +41,8 @@ import (
var (
	cfg   *bridge_config.Config
	dbImg docker.ImgInstance

	batchData *types.BatchData
)

func randomURL() string {
@@ -55,6 +59,22 @@ func setEnv(t *testing.T) (err error) {
	dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
	cfg.DBConfig.DSN = dbImg.Endpoint()

	templateBlockTrace, err := os.ReadFile("../common/testdata/blockTrace_02.json")
	if err != nil {
		return err
	}
	// unmarshal blockTrace
	blockTrace := &geth_types.BlockTrace{}
	if err = json.Unmarshal(templateBlockTrace, blockTrace); err != nil {
		return err
	}

	parentBatch := &types.BlockBatch{
		Index: 1,
		Hash:  "0x0000000000000000000000000000000000000000",
	}
	batchData = types.NewBatchData(parentBatch, []*geth_types.BlockTrace{blockTrace}, nil)

	return
}

@@ -218,6 +238,7 @@ func testSeveralConnections(t *testing.T) {
		}
	}
}

func testValidProof(t *testing.T) {
	// Create db handler and reset db.
	l2db, err := database.NewOrmFactory(cfg.DBConfig)
@@ -248,13 +269,12 @@ func testValidProof(t *testing.T) {
	}()
	assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())

	var ids = make([]string, 1)
	var hashes = make([]string, 1)
	dbTx, err := l2db.Beginx()
	assert.NoError(t, err)
	for i := range ids {
		ID, err := l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
		assert.NoError(t, err)
		ids[i] = ID
	for i := range hashes {
		assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
		hashes[i] = batchData.Hash().Hex()
	}
	assert.NoError(t, dbTx.Commit())

@@ -263,13 +283,13 @@ func testValidProof(t *testing.T) {
		tick     = time.Tick(500 * time.Millisecond)
		tickStop = time.Tick(10 * time.Second)
	)
	for len(ids) > 0 {
	for len(hashes) > 0 {
		select {
		case <-tick:
			status, err := l2db.GetProvingStatusByID(ids[0])
			status, err := l2db.GetProvingStatusByHash(hashes[0])
			assert.NoError(t, err)
			if status == orm.ProvingTaskVerified {
				ids = ids[1:]
			if status == types.ProvingTaskVerified {
				hashes = hashes[1:]
			}
		case <-tickStop:
			t.Error("failed to check proof status")
@@ -307,13 +327,12 @@ func testInvalidProof(t *testing.T) {
	}()
	assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())

	var ids = make([]string, 1)
	var hashes = make([]string, 1)
	dbTx, err := l2db.Beginx()
	assert.NoError(t, err)
	for i := range ids {
		ID, err := l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
		assert.NoError(t, err)
		ids[i] = ID
	for i := range hashes {
		assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
		hashes[i] = batchData.Hash().Hex()
	}
	assert.NoError(t, dbTx.Commit())

@@ -322,13 +341,13 @@ func testInvalidProof(t *testing.T) {
		tick     = time.Tick(500 * time.Millisecond)
		tickStop = time.Tick(10 * time.Second)
	)
	for len(ids) > 0 {
	for len(hashes) > 0 {
		select {
		case <-tick:
			status, err := l2db.GetProvingStatusByID(ids[0])
			status, err := l2db.GetProvingStatusByHash(hashes[0])
			assert.NoError(t, err)
			if status == orm.ProvingTaskFailed {
				ids = ids[1:]
			if status == types.ProvingTaskFailed {
				hashes = hashes[1:]
			}
		case <-tickStop:
			t.Error("failed to check proof status")
@@ -367,13 +386,12 @@ func testIdleRollerSelection(t *testing.T) {

	assert.Equal(t, len(rollers), rollerManager.GetNumberOfIdleRollers())

	var ids = make([]string, 2)
	var hashes = make([]string, 1)
	dbTx, err := l2db.Beginx()
	assert.NoError(t, err)
	for i := range ids {
		ID, err := l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
		assert.NoError(t, err)
		ids[i] = ID
	for i := range hashes {
		assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
		hashes[i] = batchData.Hash().Hex()
	}
	assert.NoError(t, dbTx.Commit())

@@ -382,13 +400,13 @@ func testIdleRollerSelection(t *testing.T) {
		tick     = time.Tick(500 * time.Millisecond)
		tickStop = time.Tick(10 * time.Second)
	)
	for len(ids) > 0 {
	for len(hashes) > 0 {
		select {
		case <-tick:
			status, err := l2db.GetProvingStatusByID(ids[0])
			status, err := l2db.GetProvingStatusByHash(hashes[0])
			assert.NoError(t, err)
			if status == orm.ProvingTaskVerified {
				ids = ids[1:]
			if status == types.ProvingTaskVerified {
				hashes = hashes[1:]
			}
		case <-tickStop:
			t.Error("failed to check proof status")
@@ -404,12 +422,12 @@ func testGracefulRestart(t *testing.T) {
	assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
	defer l2db.Close()

	var ids = make([]string, 1)
	var hashes = make([]string, 1)
	dbTx, err := l2db.Beginx()
	assert.NoError(t, err)
	for i := range ids {
		ids[i], err = l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
		assert.NoError(t, err)
	for i := range hashes {
		assert.NoError(t, l2db.NewBatchInDBTx(dbTx, batchData))
		hashes[i] = batchData.Hash().Hex()
	}
	assert.NoError(t, dbTx.Commit())

@@ -438,15 +456,15 @@ func testGracefulRestart(t *testing.T) {
		newRollerManager.Stop()
	}()

	for i := range ids {
		info, err := newRollerManager.GetSessionInfo(ids[i])
		assert.Equal(t, orm.ProvingTaskAssigned.String(), info.Status)
	for i := range hashes {
		info, err := newRollerManager.GetSessionInfo(hashes[i])
		assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
		assert.NoError(t, err)

		// at this point, the roller hasn't submitted yet
		status, err := l2db.GetProvingStatusByID(ids[i])
		status, err := l2db.GetProvingStatusByHash(hashes[i])
		assert.NoError(t, err)
		assert.Equal(t, orm.ProvingTaskAssigned, status)
		assert.Equal(t, types.ProvingTaskAssigned, status)
	}

	// will overwrite the roller client for `SubmitProof`
@@ -458,15 +476,15 @@ func testGracefulRestart(t *testing.T) {
		tick     = time.Tick(500 * time.Millisecond)
		tickStop = time.Tick(15 * time.Second)
	)
	for len(ids) > 0 {
	for len(hashes) > 0 {
		select {
		case <-tick:
			// this proves that the roller submits to the new coordinator,
			// because the roller client for `submitProof` has been overwritten
			status, err := l2db.GetProvingStatusByID(ids[0])
			status, err := l2db.GetProvingStatusByHash(hashes[0])
			assert.NoError(t, err)
			if status == orm.ProvingTaskVerified {
				ids = ids[1:]
			if status == types.ProvingTaskVerified {
				hashes = hashes[1:]
			}

		case <-tickStop:
@@ -7,12 +7,11 @@ import (
	"time"

	cmap "github.com/orcaman/concurrent-map"
	"github.com/scroll-tech/go-ethereum/core/types"
	geth_types "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/common/message"

	"scroll-tech/database/orm"
	"scroll-tech/common/types"
)

// rollerNode records roller status and sends tasks to the connected roller.
@@ -33,7 +32,7 @@ type rollerNode struct {
	registerTime time.Time
}

func (r *rollerNode) sendTask(id string, traces []*types.BlockTrace) bool {
func (r *rollerNode) sendTask(id string, traces []*geth_types.BlockTrace) bool {
	select {
	case r.taskChan <- &message.TaskMsg{
		ID: id,
@@ -53,7 +52,7 @@ func (m *Manager) reloadRollerAssignedTasks(pubkey string) *cmap.ConcurrentMap {
	taskIDs := cmap.New()
	for id, sess := range m.sessions {
		for pk, roller := range sess.info.Rollers {
			if pk == pubkey && roller.Status == orm.RollerAssigned {
			if pk == pubkey && roller.Status == types.RollerAssigned {
				taskIDs.Set(id, struct{}{})
			}
		}
@@ -70,6 +70,7 @@ github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46f
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
@@ -66,7 +66,8 @@ func testResetDB(t *testing.T) {
	assert.NoError(t, ResetDB(pgDB.DB))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, 5, int(cur))
	// total number of tables.
	assert.Equal(t, 6, int(cur))
}

func testMigrate(t *testing.T) {
@@ -9,7 +9,7 @@ create table block_trace
    hash            VARCHAR NOT NULL,
    parent_hash     VARCHAR NOT NULL,
    trace           JSON    NOT NULL,
    batch_id        VARCHAR DEFAULT NULL,
    batch_hash      VARCHAR DEFAULT NULL,
    tx_num          INTEGER NOT NULL,
    gas_used        BIGINT  NOT NULL,
    block_timestamp NUMERIC NOT NULL
@@ -27,8 +27,8 @@ create unique index block_trace_parent_uindex
create unique index block_trace_parent_hash_uindex
    on block_trace (hash, parent_hash);

create index block_trace_batch_id_index
    on block_trace (batch_id);
create index block_trace_batch_hash_index
    on block_trace (batch_hash);

-- +goose StatementEnd
@@ -2,15 +2,13 @@
-- +goose StatementBegin
create table l1_message
(
    nonce       BIGINT  NOT NULL,
    queue_index BIGINT  NOT NULL,
    msg_hash    VARCHAR NOT NULL,
    height      BIGINT  NOT NULL,
    gas_limit   BIGINT  NOT NULL,
    sender      VARCHAR NOT NULL,
    target      VARCHAR NOT NULL,
    value       VARCHAR NOT NULL,
    fee         VARCHAR NOT NULL,
    gas_limit   BIGINT  NOT NULL,
    deadline    BIGINT  NOT NULL,
    calldata    TEXT    NOT NULL,
    layer1_hash VARCHAR NOT NULL,
    layer2_hash VARCHAR DEFAULT NULL,
@@ -26,7 +24,7 @@ create unique index l1_message_hash_uindex
    on l1_message (msg_hash);

create unique index l1_message_nonce_uindex
    on l1_message (nonce);
    on l1_message (queue_index);

create index l1_message_height_index
    on l1_message (height);
@@ -8,9 +8,6 @@ create table l2_message
    sender      VARCHAR NOT NULL,
    target      VARCHAR NOT NULL,
    value       VARCHAR NOT NULL,
    fee         VARCHAR NOT NULL,
    gas_limit   BIGINT  NOT NULL,
    deadline    BIGINT  NOT NULL,
    calldata    TEXT    NOT NULL,
    layer2_hash VARCHAR NOT NULL,
    layer1_hash VARCHAR DEFAULT NULL,
@@ -3,14 +3,16 @@

create table block_batch
(
    id                 VARCHAR NOT NULL,
    hash               VARCHAR NOT NULL,
    index              BIGINT  NOT NULL,
    start_block_number BIGINT  NOT NULL,
    start_block_hash   VARCHAR NOT NULL,
    end_block_number   BIGINT  NOT NULL,
    end_block_hash     VARCHAR NOT NULL,
    parent_hash        VARCHAR NOT NULL,
    state_root         VARCHAR NOT NULL,
    total_tx_num       BIGINT  NOT NULL,
    total_l1_tx_num    BIGINT  NOT NULL,
    total_l2_gas       BIGINT  NOT NULL,
    proving_status     INTEGER DEFAULT 1,
    proof              BYTEA   DEFAULT NULL,
@@ -19,6 +21,8 @@ create table block_batch
    rollup_status      INTEGER DEFAULT 1,
    commit_tx_hash     VARCHAR DEFAULT NULL,
    finalize_tx_hash   VARCHAR DEFAULT NULL,
    oracle_status      INTEGER DEFAULT 1,
    oracle_tx_hash     VARCHAR DEFAULT NULL,
    created_at         TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    prover_assigned_at TIMESTAMP(0) DEFAULT NULL,
    proved_at          TIMESTAMP(0) DEFAULT NULL,
@@ -30,9 +34,11 @@ comment
    on column block_batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';
comment
    on column block_batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped';
comment
    on column block_batch.oracle_status is 'undefined, pending, importing, imported, failed';

create unique index block_batch_id_uindex
    on block_batch (id);
create unique index block_batch_hash_uindex
    on block_batch (hash);
create unique index block_batch_index_uindex
    on block_batch (index);
@@ -3,12 +3,12 @@

create table session_info
(
    id           VARCHAR NOT NULL,
    hash         VARCHAR NOT NULL,
    rollers_info BYTEA   NOT NULL
);

create unique index session_info_id_uindex
    on session_info (id);
create unique index session_info_hash_uindex
    on session_info (hash);

-- +goose StatementEnd
33
database/migrate/migrations/00006_l1_block.sql
Normal file
33
database/migrate/migrations/00006_l1_block.sql
Normal file
@@ -0,0 +1,33 @@
-- +goose Up
-- +goose StatementBegin

create table l1_block
(
    number         BIGINT  NOT NULL,
    hash           VARCHAR NOT NULL,
    header_rlp     TEXT    NOT NULL,
    base_fee       BIGINT  NOT NULL,
    block_status   INTEGER DEFAULT 1,
    import_tx_hash VARCHAR DEFAULT NULL,
    oracle_status  INTEGER DEFAULT 1,
    oracle_tx_hash VARCHAR DEFAULT NULL
);

comment
    on column l1_block.block_status is 'undefined, pending, importing, imported, failed';

comment
    on column l1_block.oracle_status is 'undefined, pending, importing, imported, failed';

create unique index l1_block_hash_uindex
    on l1_block (hash);

create unique index l1_block_number_uindex
    on l1_block (number);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l1_block;
-- +goose StatementEnd
@@ -5,101 +5,15 @@ import (
	"database/sql"
	"errors"
	"fmt"
	"math/big"
	"strings"
	"time"

	"github.com/jmoiron/sqlx"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/common/utils"
	"scroll-tech/common/types"
)

// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
type ProvingStatus int

const (
	// ProvingStatusUndefined : undefined proving_task status
	ProvingStatusUndefined ProvingStatus = iota
	// ProvingTaskUnassigned : proving_task is not assigned to be proved
	ProvingTaskUnassigned
	// ProvingTaskSkipped : proving_task is skipped for proof generation
	ProvingTaskSkipped
	// ProvingTaskAssigned : proving_task is assigned to be proved
	ProvingTaskAssigned
	// ProvingTaskProved : proof has been returned by prover
	ProvingTaskProved
	// ProvingTaskVerified : proof is valid
	ProvingTaskVerified
	// ProvingTaskFailed : fail to generate proof
	ProvingTaskFailed
)

func (ps ProvingStatus) String() string {
	switch ps {
	case ProvingTaskUnassigned:
		return "unassigned"
	case ProvingTaskSkipped:
		return "skipped"
	case ProvingTaskAssigned:
		return "assigned"
	case ProvingTaskProved:
		return "proved"
	case ProvingTaskVerified:
		return "verified"
	case ProvingTaskFailed:
		return "failed"
	default:
		return "undefined"
	}
}

// RollupStatus block_batch rollup_status (pending, committing, committed, finalizing, finalized)
type RollupStatus int

const (
	// RollupUndefined : undefined rollup status
	RollupUndefined RollupStatus = iota
	// RollupPending : batch is pending to rollup to layer1
	RollupPending
	// RollupCommitting : rollup transaction is submitted to layer1
	RollupCommitting
	// RollupCommitted : rollup transaction is confirmed to layer1
	RollupCommitted
	// RollupFinalizing : finalize transaction is submitted to layer1
	RollupFinalizing
	// RollupFinalized : finalize transaction is confirmed to layer1
	RollupFinalized
	// RollupFinalizationSkipped : batch finalization is skipped
	RollupFinalizationSkipped
)

// BlockBatch is structure of stored block_batch
type BlockBatch struct {
	ID                  string         `json:"id" db:"id"`
	Index               uint64         `json:"index" db:"index"`
	ParentHash          string         `json:"parent_hash" db:"parent_hash"`
	StartBlockNumber    uint64         `json:"start_block_number" db:"start_block_number"`
	StartBlockHash      string         `json:"start_block_hash" db:"start_block_hash"`
	EndBlockNumber      uint64         `json:"end_block_number" db:"end_block_number"`
	EndBlockHash        string         `json:"end_block_hash" db:"end_block_hash"`
	TotalTxNum          uint64         `json:"total_tx_num" db:"total_tx_num"`
	TotalL2Gas          uint64         `json:"total_l2_gas" db:"total_l2_gas"`
	ProvingStatus       ProvingStatus  `json:"proving_status" db:"proving_status"`
	Proof               []byte         `json:"proof" db:"proof"`
	InstanceCommitments []byte         `json:"instance_commitments" db:"instance_commitments"`
	ProofTimeSec        uint64         `json:"proof_time_sec" db:"proof_time_sec"`
	RollupStatus        RollupStatus   `json:"rollup_status" db:"rollup_status"`
	CommitTxHash        sql.NullString `json:"commit_tx_hash" db:"commit_tx_hash"`
	FinalizeTxHash      sql.NullString `json:"finalize_tx_hash" db:"finalize_tx_hash"`
	CreatedAt           *time.Time     `json:"created_at" db:"created_at"`
	ProverAssignedAt    *time.Time     `json:"prover_assigned_at" db:"prover_assigned_at"`
	ProvedAt            *time.Time     `json:"proved_at" db:"proved_at"`
	CommittedAt         *time.Time     `json:"committed_at" db:"committed_at"`
	FinalizedAt         *time.Time     `json:"finalized_at" db:"finalized_at"`
}

type blockBatchOrm struct {
	db *sqlx.DB
}
@@ -111,7 +25,7 @@ func NewBlockBatchOrm(db *sqlx.DB) BlockBatchOrm {
	return &blockBatchOrm{db: db}
}

func (o *blockBatchOrm) GetBlockBatches(fields map[string]interface{}, args ...string) ([]*BlockBatch, error) {
func (o *blockBatchOrm) GetBlockBatches(fields map[string]interface{}, args ...string) ([]*types.BlockBatch, error) {
	query := "SELECT * FROM block_batch WHERE 1 = 1 "
	for key := range fields {
		query += fmt.Sprintf("AND %s=:%s ", key, key)
@@ -124,9 +38,9 @@ func (o *blockBatchOrm) GetBlockBatches(fields map[string]interface{}, args ...s
		return nil, err
	}

	var batches []*BlockBatch
	var batches []*types.BlockBatch
	for rows.Next() {
		batch := &BlockBatch{}
		batch := &types.BlockBatch{}
		if err = rows.StructScan(batch); err != nil {
			break
		}
@@ -139,19 +53,19 @@ func (o *blockBatchOrm) GetBlockBatches(fields map[string]interface{}, args ...s
	return batches, rows.Close()
}

func (o *blockBatchOrm) GetProvingStatusByID(id string) (ProvingStatus, error) {
	row := o.db.QueryRow(`SELECT proving_status FROM block_batch WHERE id = $1`, id)
	var status ProvingStatus
func (o *blockBatchOrm) GetProvingStatusByHash(hash string) (types.ProvingStatus, error) {
	row := o.db.QueryRow(`SELECT proving_status FROM block_batch WHERE hash = $1`, hash)
	var status types.ProvingStatus
	if err := row.Scan(&status); err != nil {
		return ProvingStatusUndefined, err
		return types.ProvingStatusUndefined, err
	}
	return status, nil
}

func (o *blockBatchOrm) GetVerifiedProofAndInstanceByID(id string) ([]byte, []byte, error) {
func (o *blockBatchOrm) GetVerifiedProofAndInstanceByHash(hash string) ([]byte, []byte, error) {
	var proof []byte
	var instance []byte
	row := o.db.QueryRow(`SELECT proof, instance_commitments FROM block_batch WHERE id = $1 and proving_status = $2`, id, ProvingTaskVerified)
	row := o.db.QueryRow(`SELECT proof, instance_commitments FROM block_batch WHERE hash = $1 and proving_status = $2`, hash, types.ProvingTaskVerified)

	if err := row.Scan(&proof, &instance); err != nil {
		return nil, nil, err
@@ -159,74 +73,68 @@ func (o *blockBatchOrm) GetVerifiedProofAndInstanceByID(id string) ([]byte, []by
	return proof, instance, nil
}

func (o *blockBatchOrm) UpdateProofByID(ctx context.Context, id string, proof, instanceCommitments []byte, proofTimeSec uint64) error {
func (o *blockBatchOrm) UpdateProofByHash(ctx context.Context, hash string, proof, instanceCommitments []byte, proofTimeSec uint64) error {
	db := o.db
	if _, err := db.ExecContext(ctx,
		db.Rebind(`UPDATE block_batch set proof = ?, instance_commitments = ?, proof_time_sec = ? where id = ?;`),
		proof, instanceCommitments, proofTimeSec, id,
		db.Rebind(`UPDATE block_batch set proof = ?, instance_commitments = ?, proof_time_sec = ? where hash = ?;`),
		proof, instanceCommitments, proofTimeSec, hash,
	); err != nil {
		log.Error("failed to update proof", "err", err)
	}
	return nil
}

func (o *blockBatchOrm) UpdateProvingStatus(id string, status ProvingStatus) error {
func (o *blockBatchOrm) UpdateProvingStatus(hash string, status types.ProvingStatus) error {
	switch status {
	case ProvingTaskAssigned:
		_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ?, prover_assigned_at = ? where id = ?;"), status, time.Now(), id)
	case types.ProvingTaskAssigned:
		_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ?, prover_assigned_at = ? where hash = ?;"), status, time.Now(), hash)
		return err
	case ProvingTaskUnassigned:
		_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ?, prover_assigned_at = null where id = ?;"), status, id)
	case types.ProvingTaskUnassigned:
		_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ?, prover_assigned_at = null where hash = ?;"), status, hash)
		return err
	case ProvingTaskProved, ProvingTaskVerified:
		_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ?, proved_at = ? where id = ?;"), status, time.Now(), id)
	case types.ProvingTaskProved, types.ProvingTaskVerified:
		_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ?, proved_at = ? where hash = ?;"), status, time.Now(), hash)
		return err
	default:
		_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ? where id = ?;"), status, id)
		_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ? where hash = ?;"), status, hash)
		return err
	}
}

func (o *blockBatchOrm) ResetProvingStatusFor(before ProvingStatus) error {
	_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ? where proving_status = ?;"), ProvingTaskUnassigned, before)
func (o *blockBatchOrm) ResetProvingStatusFor(before types.ProvingStatus) error {
	_, err := o.db.Exec(o.db.Rebind("update block_batch set proving_status = ? where proving_status = ?;"), types.ProvingTaskUnassigned, before)
	return err
}

func (o *blockBatchOrm) NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, endBlock *BlockInfo, parentHash string, totalTxNum uint64, totalL2Gas uint64) (string, error) {
	row := dbTx.QueryRow("SELECT COALESCE(MAX(index), -1) FROM block_batch;")

	// TODO: use *big.Int for this
	var index int64
	if err := row.Scan(&index); err != nil {
		return "", err
	}

	index++
	id := utils.ComputeBatchID(common.HexToHash(endBlock.Hash), common.HexToHash(parentHash), big.NewInt(index))
	if _, err := dbTx.NamedExec(`INSERT INTO public.block_batch (id, index, parent_hash, start_block_number, start_block_hash, end_block_number, end_block_hash, total_tx_num, total_l2_gas) VALUES (:id, :index, :parent_hash, :start_block_number, :start_block_hash, :end_block_number, :end_block_hash, :total_tx_num, :total_l2_gas)`,
// func (o *blockBatchOrm) NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, endBlock *BlockInfo, parentHash string, totalTxNum uint64, totalL2Gas uint64) (string, error) {
func (o *blockBatchOrm) NewBatchInDBTx(dbTx *sqlx.Tx, batchData *types.BatchData) error {
	numBlocks := len(batchData.Batch.Blocks)
	if _, err := dbTx.NamedExec(`INSERT INTO public.block_batch (hash, index, parent_hash, start_block_number, start_block_hash, end_block_number, end_block_hash, total_tx_num, total_l2_gas, state_root, total_l1_tx_num) VALUES (:hash, :index, :parent_hash, :start_block_number, :start_block_hash, :end_block_number, :end_block_hash, :total_tx_num, :total_l2_gas, :state_root, :total_l1_tx_num)`,
		map[string]interface{}{
			"id":                 id,
			"index":              index,
			"parent_hash":        parentHash,
			"start_block_number": startBlock.Number,
			"start_block_hash":   startBlock.Hash,
			"end_block_number":   endBlock.Number,
			"end_block_hash":     endBlock.Hash,
			"total_tx_num":       totalTxNum,
			"total_l2_gas":       totalL2Gas,
			"hash":               batchData.Hash().Hex(),
			"index":              batchData.Batch.BatchIndex,
			"parent_hash":        batchData.Batch.ParentBatchHash.Hex(),
			"start_block_number": batchData.Batch.Blocks[0].BlockNumber,
			"start_block_hash":   batchData.Batch.Blocks[0].BlockHash.Hex(),
			"end_block_number":   batchData.Batch.Blocks[numBlocks-1].BlockNumber,
			"end_block_hash":     batchData.Batch.Blocks[numBlocks-1].BlockHash.Hex(),
			"total_tx_num":       batchData.TotalTxNum,
			"total_l1_tx_num":    batchData.TotalL1TxNum,
			"total_l2_gas":       batchData.TotalL2Gas,
			"state_root":         batchData.Batch.NewStateRoot.Hex(),
			"created_at":         time.Now(),
			// "proving_status": ProvingTaskUnassigned, // actually no need, because we have default value in DB schema
			// "rollup_status": RollupPending, // actually no need, because we have default value in DB schema
		}); err != nil {
		return "", err
		return err
	}

	return id, nil
	return nil
}
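
For context, the new NewBatchInDBTx call pattern, mirroring the coordinator tests above (a minimal sketch; l2db is an assumed OrmFactory handle and batchData a prepared *types.BatchData; the batch hash is now derived from BatchData rather than returned by the insert):

// Sketch: insert a batch inside a db transaction and keep its hash.
dbTx, err := l2db.Beginx()
if err != nil {
	return err
}
if err = l2db.NewBatchInDBTx(dbTx, batchData); err != nil {
	_ = dbTx.Rollback() // best-effort rollback in this sketch
	return err
}
batchHash := batchData.Hash().Hex() // the batch is addressed by hash from here on
if err = dbTx.Commit(); err != nil {
	return err
}
_ = batchHash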
|
||||
|
||||
func (o *blockBatchOrm) BatchRecordExist(id string) (bool, error) {
|
||||
func (o *blockBatchOrm) BatchRecordExist(hash string) (bool, error) {
|
||||
var res int
|
||||
err := o.db.QueryRow(o.db.Rebind(`SELECT 1 FROM block_batch where id = ? limit 1;`), id).Scan(&res)
|
||||
err := o.db.QueryRow(o.db.Rebind(`SELECT 1 FROM block_batch where hash = ? limit 1;`), hash).Scan(&res)
|
||||
if err != nil {
|
||||
if err != sql.ErrNoRows {
|
||||
return false, err
|
||||
@@ -237,31 +145,49 @@ func (o *blockBatchOrm) BatchRecordExist(id string) (bool, error) {
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) GetPendingBatches(limit uint64) ([]string, error) {
|
||||
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, RollupPending, limit)
|
||||
rows, err := o.db.Queryx(`SELECT hash FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, types.RollupPending, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ids []string
|
||||
var hashes []string
|
||||
for rows.Next() {
|
||||
var id string
|
||||
if err = rows.Scan(&id); err != nil {
|
||||
var hash string
|
||||
if err = rows.Scan(&hash); err != nil {
|
||||
break
|
||||
}
|
||||
ids = append(ids, id)
|
||||
hashes = append(hashes, hash)
|
||||
}
|
||||
if len(ids) == 0 || errors.Is(err, sql.ErrNoRows) {
|
||||
if len(hashes) == 0 || errors.Is(err, sql.ErrNoRows) {
|
||||
// log.Warn("no pending batches in db", "err", err)
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ids, rows.Close()
|
||||
return hashes, rows.Close()
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) GetLatestFinalizedBatch() (*BlockBatch, error) {
|
||||
row := o.db.QueryRowx(`select * from block_batch where index = (select max(index) from block_batch where rollup_status = $1);`, RollupFinalized)
|
||||
batch := &BlockBatch{}
|
||||
func (o *blockBatchOrm) GetLatestBatch() (*types.BlockBatch, error) {
|
||||
row := o.db.QueryRowx(`select * from block_batch where index = (select max(index) from block_batch);`)
|
||||
batch := &types.BlockBatch{}
|
||||
if err := row.StructScan(batch); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return batch, nil
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) GetLatestCommittedBatch() (*types.BlockBatch, error) {
|
||||
row := o.db.QueryRowx(`select * from block_batch where index = (select max(index) from block_batch where rollup_status = $1);`, types.RollupCommitted)
|
||||
batch := &types.BlockBatch{}
|
||||
if err := row.StructScan(batch); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return batch, nil
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) GetLatestFinalizedBatch() (*types.BlockBatch, error) {
|
||||
row := o.db.QueryRowx(`select * from block_batch where index = (select max(index) from block_batch where rollup_status = $1);`, types.RollupFinalized)
|
||||
batch := &types.BlockBatch{}
|
||||
if err := row.StructScan(batch); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -269,142 +195,142 @@ func (o *blockBatchOrm) GetLatestFinalizedBatch() (*BlockBatch, error) {
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) GetCommittedBatches(limit uint64) ([]string, error) {
|
||||
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, RollupCommitted, limit)
|
||||
rows, err := o.db.Queryx(`SELECT hash FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, types.RollupCommitted, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ids []string
|
||||
var hashes []string
|
||||
for rows.Next() {
|
||||
var id string
|
||||
if err = rows.Scan(&id); err != nil {
|
||||
var hash string
|
||||
if err = rows.Scan(&hash); err != nil {
|
||||
break
|
||||
}
|
||||
ids = append(ids, id)
|
||||
hashes = append(hashes, hash)
|
||||
}
|
||||
if len(ids) == 0 || errors.Is(err, sql.ErrNoRows) {
|
||||
if len(hashes) == 0 || errors.Is(err, sql.ErrNoRows) {
|
||||
// log.Warn("no committed batches in db", "err", err)
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ids, rows.Close()
|
||||
return hashes, rows.Close()
|
||||
}

-func (o *blockBatchOrm) GetRollupStatus(id string) (RollupStatus, error) {
-	row := o.db.QueryRow(`SELECT rollup_status FROM block_batch WHERE id = $1`, id)
-	var status RollupStatus
+func (o *blockBatchOrm) GetRollupStatus(hash string) (types.RollupStatus, error) {
+	row := o.db.QueryRow(`SELECT rollup_status FROM block_batch WHERE hash = $1`, hash)
+	var status types.RollupStatus
	if err := row.Scan(&status); err != nil {
-		return RollupUndefined, err
+		return types.RollupUndefined, err
	}
	return status, nil
}

-func (o *blockBatchOrm) GetRollupStatusByIDList(ids []string) ([]RollupStatus, error) {
-	if len(ids) == 0 {
-		return make([]RollupStatus, 0), nil
+func (o *blockBatchOrm) GetRollupStatusByHashList(hashes []string) ([]types.RollupStatus, error) {
+	if len(hashes) == 0 {
+		return make([]types.RollupStatus, 0), nil
	}

-	query, args, err := sqlx.In("SELECT id, rollup_status FROM block_batch WHERE id IN (?);", ids)
+	query, args, err := sqlx.In("SELECT hash, rollup_status FROM block_batch WHERE hash IN (?);", hashes)
	if err != nil {
-		return make([]RollupStatus, 0), err
+		return make([]types.RollupStatus, 0), err
	}
	// sqlx.In returns queries with the `?` bindvar, we can rebind it for our backend
	query = o.db.Rebind(query)

	rows, err := o.db.Query(query, args...)

-	statusMap := make(map[string]RollupStatus)
+	statusMap := make(map[string]types.RollupStatus)
	for rows.Next() {
-		var id string
-		var status RollupStatus
-		if err = rows.Scan(&id, &status); err != nil {
+		var hash string
+		var status types.RollupStatus
+		if err = rows.Scan(&hash, &status); err != nil {
			break
		}
-		statusMap[id] = status
+		statusMap[hash] = status
	}
-	var statuses []RollupStatus
+	var statuses []types.RollupStatus
	if err != nil {
		return statuses, err
	}

-	for _, id := range ids {
-		statuses = append(statuses, statusMap[id])
+	for _, hash := range hashes {
+		statuses = append(statuses, statusMap[hash])
	}

	return statuses, nil
}
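
The `sqlx.In` + `Rebind` pair above is the standard two-step for slice parameters: `sqlx.In` expands `IN (?)` into one placeholder per element, and `Rebind` translates the `?` bindvars into the driver's style (`$1, $2, ...` on Postgres). A minimal sketch of the same flow, separate from the diff; table and column names mirror the snippet, the status type is simplified to `int`, and, unlike the original, it checks the `Query` error before iterating:

// Sketch of the sqlx.In + Rebind flow used by GetRollupStatusByHashList.
package example

import (
	"github.com/jmoiron/sqlx"
)

func rollupStatusesByHash(db *sqlx.DB, hashes []string) (map[string]int, error) {
	// Expand the slice into one `?` placeholder per element.
	query, args, err := sqlx.In("SELECT hash, rollup_status FROM block_batch WHERE hash IN (?);", hashes)
	if err != nil {
		return nil, err
	}
	// Rewrite `?` into the driver's bindvar style before executing.
	query = db.Rebind(query)

	rows, err := db.Query(query, args...)
	if err != nil { // checked before iterating; a nil rows value would panic
		return nil, err
	}
	defer rows.Close()

	statuses := make(map[string]int, len(hashes))
	for rows.Next() {
		var hash string
		var status int
		if err = rows.Scan(&hash, &status); err != nil {
			return nil, err
		}
		statuses[hash] = status
	}
	return statuses, rows.Err()
}

Keying the intermediate map by hash is what lets the function return statuses in the caller's order even though the database returns rows in arbitrary order.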

-func (o *blockBatchOrm) GetCommitTxHash(id string) (sql.NullString, error) {
-	row := o.db.QueryRow(`SELECT commit_tx_hash FROM block_batch WHERE id = $1`, id)
-	var hash sql.NullString
-	if err := row.Scan(&hash); err != nil {
+func (o *blockBatchOrm) GetCommitTxHash(hash string) (sql.NullString, error) {
+	row := o.db.QueryRow(`SELECT commit_tx_hash FROM block_batch WHERE hash = $1`, hash)
+	var commitTXHash sql.NullString
+	if err := row.Scan(&commitTXHash); err != nil {
		return sql.NullString{}, err
	}
-	return hash, nil
+	return commitTXHash, nil
}

-func (o *blockBatchOrm) GetFinalizeTxHash(id string) (sql.NullString, error) {
-	row := o.db.QueryRow(`SELECT finalize_tx_hash FROM block_batch WHERE id = $1`, id)
-	var hash sql.NullString
-	if err := row.Scan(&hash); err != nil {
+func (o *blockBatchOrm) GetFinalizeTxHash(hash string) (sql.NullString, error) {
+	row := o.db.QueryRow(`SELECT finalize_tx_hash FROM block_batch WHERE hash = $1`, hash)
+	var finalizeTxHash sql.NullString
+	if err := row.Scan(&finalizeTxHash); err != nil {
		return sql.NullString{}, err
	}
-	return hash, nil
+	return finalizeTxHash, nil
}

-func (o *blockBatchOrm) UpdateRollupStatus(ctx context.Context, id string, status RollupStatus) error {
+func (o *blockBatchOrm) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error {
	switch status {
-	case RollupCommitted:
-		_, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ?, committed_at = ? where id = ?;"), status, time.Now(), id)
+	case types.RollupCommitted:
+		_, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ?, committed_at = ? where hash = ?;"), status, time.Now(), hash)
		return err
-	case RollupFinalized:
-		_, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ?, finalized_at = ? where id = ?;"), status, time.Now(), id)
+	case types.RollupFinalized:
+		_, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ?, finalized_at = ? where hash = ?;"), status, time.Now(), hash)
		return err
	default:
-		_, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ? where id = ?;"), status, id)
+		_, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ? where hash = ?;"), status, hash)
		return err
	}
}

-func (o *blockBatchOrm) UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commitTxHash string, status RollupStatus) error {
+func (o *blockBatchOrm) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error {
	switch status {
-	case RollupCommitted:
-		_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ?, committed_at = ? where id = ?;"), commitTxHash, status, time.Now(), id)
+	case types.RollupCommitted:
+		_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ?, committed_at = ? where hash = ?;"), commitTxHash, status, time.Now(), hash)
		return err
	default:
-		_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ? where id = ?;"), commitTxHash, status, id)
+		_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ? where hash = ?;"), commitTxHash, status, hash)
		return err
	}
}

-func (o *blockBatchOrm) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalizeTxHash string, status RollupStatus) error {
+func (o *blockBatchOrm) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error {
	switch status {
-	case RollupFinalized:
-		_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ?, finalized_at = ? where id = ?;"), finalizeTxHash, status, time.Now(), id)
+	case types.RollupFinalized:
+		_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ?, finalized_at = ? where hash = ?;"), finalizeTxHash, status, time.Now(), hash)
		return err
	default:
-		_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ? where id = ?;"), finalizeTxHash, status, id)
+		_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ? where hash = ?;"), finalizeTxHash, status, hash)
		return err
	}
}

-func (o *blockBatchOrm) GetAssignedBatchIDs() ([]string, error) {
-	rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE proving_status IN ($1, $2)`, ProvingTaskAssigned, ProvingTaskProved)
+func (o *blockBatchOrm) GetAssignedBatchHashes() ([]string, error) {
+	rows, err := o.db.Queryx(`SELECT hash FROM block_batch WHERE proving_status IN ($1, $2)`, types.ProvingTaskAssigned, types.ProvingTaskProved)
	if err != nil {
		return nil, err
	}

-	var ids []string
+	var hashes []string
	for rows.Next() {
-		var id string
-		if err = rows.Scan(&id); err != nil {
+		var hash string
+		if err = rows.Scan(&hash); err != nil {
			break
		}
-		ids = append(ids, id)
+		hashes = append(hashes, hash)
	}

-	return ids, rows.Close()
+	return hashes, rows.Close()
}

func (o *blockBatchOrm) GetBatchCount() (int64, error) {
@@ -417,7 +343,7 @@ func (o *blockBatchOrm) GetBatchCount() (int64, error) {
}

func (o *blockBatchOrm) UpdateSkippedBatches() (int64, error) {
-	res, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ? where (proving_status = ? or proving_status = ?) and rollup_status = ?;"), RollupFinalizationSkipped, ProvingTaskSkipped, ProvingTaskFailed, RollupCommitted)
+	res, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ? where (proving_status = ? or proving_status = ?) and rollup_status = ?;"), types.RollupFinalizationSkipped, types.ProvingTaskSkipped, types.ProvingTaskFailed, types.RollupCommitted)
	if err != nil {
		return 0, err
	}
@@ -429,3 +355,27 @@ func (o *blockBatchOrm) UpdateSkippedBatches() (int64, error) {

	return count, nil
}

+func (o *blockBatchOrm) UpdateL2OracleTxHash(ctx context.Context, hash, txHash string) error {
+	if _, err := o.db.ExecContext(ctx, o.db.Rebind("update block_batch set oracle_tx_hash = ? where hash = ?;"), txHash, hash); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (o *blockBatchOrm) UpdateL2GasOracleStatus(ctx context.Context, hash string, status types.GasOracleStatus) error {
+	if _, err := o.db.ExecContext(ctx, o.db.Rebind("update block_batch set oracle_status = ? where hash = ?;"), status, hash); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (o *blockBatchOrm) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
+	if _, err := o.db.ExecContext(ctx, o.db.Rebind("update block_batch set oracle_status = ?, oracle_tx_hash = ? where hash = ?;"), status, txHash, hash); err != nil {
+		return err
+	}
+
+	return nil
+}

@@ -9,9 +9,10 @@ import (
	"github.com/jmoiron/sqlx"
	"github.com/scroll-tech/go-ethereum/common"
-	"github.com/scroll-tech/go-ethereum/core/types"
+	geth_types "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/log"

+	"scroll-tech/common/types"
	"scroll-tech/common/utils"
)

@@ -26,7 +27,7 @@ func NewBlockTraceOrm(db *sqlx.DB) BlockTraceOrm {
	return &blockTraceOrm{db: db}
}

-func (o *blockTraceOrm) Exist(number uint64) (bool, error) {
+func (o *blockTraceOrm) IsL2BlockExists(number uint64) (bool, error) {
	var res int
	err := o.db.QueryRow(o.db.Rebind(`SELECT 1 from block_trace where number = ? limit 1;`), number).Scan(&res)
	if err != nil {
@@ -38,7 +39,7 @@ func (o *blockTraceOrm) Exist(number uint64) (bool, error) {
	return true, nil
}

-func (o *blockTraceOrm) GetBlockTracesLatestHeight() (int64, error) {
+func (o *blockTraceOrm) GetL2BlockTracesLatestHeight() (int64, error) {
	row := o.db.QueryRow("SELECT COALESCE(MAX(number), -1) FROM block_trace;")

	var height int64
@@ -48,7 +49,7 @@ func (o *blockTraceOrm) GetBlockTracesLatestHeight() (int64, error) {
	return height, nil
}

-func (o *blockTraceOrm) GetBlockTraces(fields map[string]interface{}, args ...string) ([]*types.BlockTrace, error) {
+func (o *blockTraceOrm) GetL2BlockTraces(fields map[string]interface{}, args ...string) ([]*geth_types.BlockTrace, error) {
	type Result struct {
		Trace string
	}
@@ -65,13 +66,13 @@ func (o *blockTraceOrm) GetBlockTraces(fields map[string]interface{}, args ...st
		return nil, err
	}

-	var traces []*types.BlockTrace
+	var traces []*geth_types.BlockTrace
	for rows.Next() {
		result := &Result{}
		if err = rows.StructScan(result); err != nil {
			break
		}
-		trace := types.BlockTrace{}
+		trace := geth_types.BlockTrace{}
		err = json.Unmarshal([]byte(result.Trace), &trace)
		if err != nil {
			break
@@ -85,8 +86,8 @@ func (o *blockTraceOrm) GetBlockTraces(fields map[string]interface{}, args ...st
	return traces, rows.Close()
}

-func (o *blockTraceOrm) GetBlockInfos(fields map[string]interface{}, args ...string) ([]*BlockInfo, error) {
-	query := "SELECT number, hash, parent_hash, batch_id, tx_num, gas_used, block_timestamp FROM block_trace WHERE 1 = 1 "
+func (o *blockTraceOrm) GetL2BlockInfos(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error) {
+	query := "SELECT number, hash, parent_hash, batch_hash, tx_num, gas_used, block_timestamp FROM block_trace WHERE 1 = 1 "
	for key := range fields {
		query += fmt.Sprintf("AND %s=:%s ", key, key)
	}
@@ -98,9 +99,9 @@ func (o *blockTraceOrm) GetBlockInfos(fields map[string]interface{}, args ...str
		return nil, err
	}

-	var blocks []*BlockInfo
+	var blocks []*types.BlockInfo
	for rows.Next() {
-		block := &BlockInfo{}
+		block := &types.BlockInfo{}
		if err = rows.StructScan(block); err != nil {
			break
		}
@@ -113,8 +114,8 @@ func (o *blockTraceOrm) GetBlockInfos(fields map[string]interface{}, args ...str
	return blocks, rows.Close()
}

-func (o *blockTraceOrm) GetUnbatchedBlocks(fields map[string]interface{}, args ...string) ([]*BlockInfo, error) {
-	query := "SELECT number, hash, parent_hash, batch_id, tx_num, gas_used, block_timestamp FROM block_trace WHERE batch_id is NULL "
+func (o *blockTraceOrm) GetUnbatchedL2Blocks(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error) {
+	query := "SELECT number, hash, parent_hash, batch_hash, tx_num, gas_used, block_timestamp FROM block_trace WHERE batch_hash is NULL "
	for key := range fields {
		query += fmt.Sprintf("AND %s=:%s ", key, key)
	}
@@ -126,9 +127,9 @@ func (o *blockTraceOrm) GetUnbatchedBlocks(fields map[string]interface{}, args .
		return nil, err
	}

-	var blocks []*BlockInfo
+	var blocks []*types.BlockInfo
	for rows.Next() {
-		block := &BlockInfo{}
+		block := &types.BlockInfo{}
		if err = rows.StructScan(block); err != nil {
			break
		}
@@ -141,7 +142,7 @@ func (o *blockTraceOrm) GetUnbatchedBlocks(fields map[string]interface{}, args .
	return blocks, rows.Close()
}

-func (o *blockTraceOrm) GetHashByNumber(number uint64) (*common.Hash, error) {
+func (o *blockTraceOrm) GetL2BlockHashByNumber(number uint64) (*common.Hash, error) {
	row := o.db.QueryRow(`SELECT hash FROM block_trace WHERE number = $1`, number)
	var hashStr string
	if err := row.Scan(&hashStr); err != nil {
@@ -151,7 +152,7 @@ func (o *blockTraceOrm) GetHashByNumber(number uint64) (*common.Hash, error) {
	return &hash, nil
}

-func (o *blockTraceOrm) InsertBlockTraces(blockTraces []*types.BlockTrace) error {
+func (o *blockTraceOrm) InsertL2BlockTraces(blockTraces []*geth_types.BlockTrace) error {
	traceMaps := make([]map[string]interface{}, len(blockTraces))
	for i, trace := range blockTraces {
		number, hash, txNum, mtime := trace.Header.Number.Int64(),
@@ -186,8 +187,8 @@ func (o *blockTraceOrm) InsertBlockTraces(blockTraces []*types.BlockTrace) error
	return err
}

-func (o *blockTraceOrm) DeleteTracesByBatchID(batchID string) error {
-	if _, err := o.db.Exec(o.db.Rebind("update block_trace set trace = ? where batch_id = ?;"), "{}", batchID); err != nil {
+func (o *blockTraceOrm) DeleteTracesByBatchHash(batchHash string) error {
+	if _, err := o.db.Exec(o.db.Rebind("update block_trace set trace = ? where batch_hash = ?;"), "{}", batchHash); err != nil {
		return err
	}
	return nil
@@ -195,10 +196,10 @@ func (o *blockTraceOrm) DeleteTracesByBatchID(batchID string) error {

// http://jmoiron.github.io/sqlx/#inQueries
// https://stackoverflow.com/questions/56568799/how-to-update-multiple-rows-using-sqlx
-func (o *blockTraceOrm) SetBatchIDForBlocksInDBTx(dbTx *sqlx.Tx, numbers []uint64, batchID string) error {
-	query := "UPDATE block_trace SET batch_id=? WHERE number IN (?)"
+func (o *blockTraceOrm) SetBatchHashForL2BlocksInDBTx(dbTx *sqlx.Tx, numbers []uint64, batchHash string) error {
+	query := "UPDATE block_trace SET batch_hash=? WHERE number IN (?)"

-	qry, args, err := sqlx.In(query, batchID, numbers)
+	qry, args, err := sqlx.In(query, batchHash, numbers)
	if err != nil {
		return err
	}
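
The two links above describe the same `sqlx.In` technique applied to bulk updates, which is exactly what `SetBatchHashForL2BlocksInDBTx` does. A hedged sketch of that shape in isolation; names follow the diff, and commit/rollback is left to the caller, as in the original:

// Sketch of a bulk UPDATE ... WHERE number IN (?) inside a sqlx transaction.
package example

import (
	"github.com/jmoiron/sqlx"
)

func setBatchHash(dbTx *sqlx.Tx, numbers []uint64, batchHash string) error {
	// One statement updates every listed block, so the batch assignment
	// stays atomic within the surrounding transaction.
	query, args, err := sqlx.In("UPDATE block_trace SET batch_hash = ? WHERE number IN (?)", batchHash, numbers)
	if err != nil {
		return err
	}
	// The Tx knows its driver, so Rebind picks the right bindvar style.
	_, err = dbTx.Exec(dbTx.Rebind(query), args...)
	return err
}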

@@ -3,191 +3,108 @@ package orm
import (
	"context"
	"database/sql"
	"fmt"

+	"scroll-tech/common/types"
+
	"github.com/jmoiron/sqlx"
	"github.com/scroll-tech/go-ethereum/common"
-	"github.com/scroll-tech/go-ethereum/core/types"
+	eth_types "github.com/scroll-tech/go-ethereum/core/types"
)

-// MsgStatus represents current layer1 transaction processing status
-type MsgStatus int
-
-const (
-	// MsgUndefined : undefined msg status
-	MsgUndefined MsgStatus = iota
-
-	// MsgPending represents the from_layer message status is pending
-	MsgPending
-
-	// MsgSubmitted represents the from_layer message status is submitted
-	MsgSubmitted
-
-	// MsgConfirmed represents the from_layer message status is confirmed
-	MsgConfirmed
-
-	// MsgFailed represents the from_layer message status is failed
-	MsgFailed
-
-	// MsgExpired represents the from_layer message status is expired
-	MsgExpired
-)

-// L1Message is structure of stored layer1 bridge message
-type L1Message struct {
-	Nonce      uint64    `json:"nonce" db:"nonce"`
-	MsgHash    string    `json:"msg_hash" db:"msg_hash"`
-	Height     uint64    `json:"height" db:"height"`
-	Sender     string    `json:"sender" db:"sender"`
-	Value      string    `json:"value" db:"value"`
-	Fee        string    `json:"fee" db:"fee"`
-	GasLimit   uint64    `json:"gas_limit" db:"gas_limit"`
-	Deadline   uint64    `json:"deadline" db:"deadline"`
-	Target     string    `json:"target" db:"target"`
-	Calldata   string    `json:"calldata" db:"calldata"`
-	Layer1Hash string    `json:"layer1_hash" db:"layer1_hash"`
-	Status     MsgStatus `json:"status" db:"status"`
-}
-
-// L2Message is structure of stored layer2 bridge message
-type L2Message struct {
-	Nonce      uint64    `json:"nonce" db:"nonce"`
-	MsgHash    string    `json:"msg_hash" db:"msg_hash"`
-	Height     uint64    `json:"height" db:"height"`
-	Sender     string    `json:"sender" db:"sender"`
-	Value      string    `json:"value" db:"value"`
-	Fee        string    `json:"fee" db:"fee"`
-	GasLimit   uint64    `json:"gas_limit" db:"gas_limit"`
-	Deadline   uint64    `json:"deadline" db:"deadline"`
-	Target     string    `json:"target" db:"target"`
-	Calldata   string    `json:"calldata" db:"calldata"`
-	Layer2Hash string    `json:"layer2_hash" db:"layer2_hash"`
-	Status     MsgStatus `json:"status" db:"status"`
-}

-// BlockInfo is structure of stored `block_trace` without `trace`
-type BlockInfo struct {
-	Number         uint64         `json:"number" db:"number"`
-	Hash           string         `json:"hash" db:"hash"`
-	ParentHash     string         `json:"parent_hash" db:"parent_hash"`
-	BatchID        sql.NullString `json:"batch_id" db:"batch_id"`
-	TxNum          uint64         `json:"tx_num" db:"tx_num"`
-	GasUsed        uint64         `json:"gas_used" db:"gas_used"`
-	BlockTimestamp uint64         `json:"block_timestamp" db:"block_timestamp"`
-}
-
-// RollerProveStatus is the roller prove status of a block batch (session)
-type RollerProveStatus int32
-
-const (
-	// RollerAssigned indicates roller assigned but has not submitted proof
-	RollerAssigned RollerProveStatus = iota
-	// RollerProofValid indicates roller has submitted valid proof
-	RollerProofValid
-	// RollerProofInvalid indicates roller has submitted invalid proof
-	RollerProofInvalid
-)
-
-func (s RollerProveStatus) String() string {
-	switch s {
-	case RollerAssigned:
-		return "RollerAssigned"
-	case RollerProofValid:
-		return "RollerProofValid"
-	case RollerProofInvalid:
-		return "RollerProofInvalid"
-	default:
-		return fmt.Sprintf("Bad Value: %d", int32(s))
-	}
-}
-
-// RollerStatus is the roller name and roller prove status
-type RollerStatus struct {
-	PublicKey string            `json:"public_key"`
-	Name      string            `json:"name"`
-	Status    RollerProveStatus `json:"status"`
-}
-
-// SessionInfo is assigned rollers info of a block batch (session)
-type SessionInfo struct {
-	ID             string                   `json:"id"`
-	Rollers        map[string]*RollerStatus `json:"rollers"`
-	StartTimestamp int64                    `json:"start_timestamp"`
+// L1BlockOrm l1_block operation interface
+type L1BlockOrm interface {
+	GetL1BlockInfos(fields map[string]interface{}, args ...string) ([]*types.L1BlockInfo, error)
+	InsertL1Blocks(ctx context.Context, blocks []*types.L1BlockInfo) error
+	DeleteHeaderRLPByBlockHash(ctx context.Context, blockHash string) error
+	UpdateImportTxHash(ctx context.Context, blockHash, txHash string) error
+	UpdateL1BlockStatus(ctx context.Context, blockHash string, status types.L1BlockStatus) error
+	UpdateL1BlockStatusAndImportTxHash(ctx context.Context, blockHash string, status types.L1BlockStatus, txHash string) error
+	UpdateL1OracleTxHash(ctx context.Context, blockHash, txHash string) error
+	UpdateL1GasOracleStatus(ctx context.Context, blockHash string, status types.GasOracleStatus) error
+	UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error
+	GetLatestL1BlockHeight() (uint64, error)
+	GetLatestImportedL1Block() (*types.L1BlockInfo, error)
+}

// BlockTraceOrm block_trace operation interface
type BlockTraceOrm interface {
-	Exist(number uint64) (bool, error)
-	GetBlockTracesLatestHeight() (int64, error)
-	GetBlockTraces(fields map[string]interface{}, args ...string) ([]*types.BlockTrace, error)
-	GetBlockInfos(fields map[string]interface{}, args ...string) ([]*BlockInfo, error)
-	// GetUnbatchedBlocks add `GetUnbatchedBlocks` because `GetBlockInfos` cannot support query "batch_id is NULL"
-	GetUnbatchedBlocks(fields map[string]interface{}, args ...string) ([]*BlockInfo, error)
-	GetHashByNumber(number uint64) (*common.Hash, error)
-	DeleteTracesByBatchID(batchID string) error
-	InsertBlockTraces(blockTraces []*types.BlockTrace) error
-	SetBatchIDForBlocksInDBTx(dbTx *sqlx.Tx, numbers []uint64, batchID string) error
+	IsL2BlockExists(number uint64) (bool, error)
+	GetL2BlockTracesLatestHeight() (int64, error)
+	GetL2BlockTraces(fields map[string]interface{}, args ...string) ([]*eth_types.BlockTrace, error)
+	GetL2BlockInfos(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error)
+	// GetUnbatchedL2Blocks exists because `GetL2BlockInfos` cannot support the query "batch_hash is NULL"
+	GetUnbatchedL2Blocks(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error)
+	GetL2BlockHashByNumber(number uint64) (*common.Hash, error)
+	DeleteTracesByBatchHash(batchHash string) error
+	InsertL2BlockTraces(blockTraces []*eth_types.BlockTrace) error
+	SetBatchHashForL2BlocksInDBTx(dbTx *sqlx.Tx, numbers []uint64, batchHash string) error
}

// SessionInfoOrm sessions info operation interface
type SessionInfoOrm interface {
-	GetSessionInfosByIDs(ids []string) ([]*SessionInfo, error)
-	SetSessionInfo(rollersInfo *SessionInfo) error
+	GetSessionInfosByHashes(hashes []string) ([]*types.SessionInfo, error)
+	SetSessionInfo(rollersInfo *types.SessionInfo) error
}

// BlockBatchOrm block_batch operation interface
type BlockBatchOrm interface {
-	GetBlockBatches(fields map[string]interface{}, args ...string) ([]*BlockBatch, error)
-	GetProvingStatusByID(id string) (ProvingStatus, error)
-	GetVerifiedProofAndInstanceByID(id string) ([]byte, []byte, error)
-	UpdateProofByID(ctx context.Context, id string, proof, instanceCommitments []byte, proofTimeSec uint64) error
-	UpdateProvingStatus(id string, status ProvingStatus) error
-	ResetProvingStatusFor(before ProvingStatus) error
-	NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, endBlock *BlockInfo, parentHash string, totalTxNum uint64, gasUsed uint64) (string, error)
-	BatchRecordExist(id string) (bool, error)
+	GetBlockBatches(fields map[string]interface{}, args ...string) ([]*types.BlockBatch, error)
+	GetProvingStatusByHash(hash string) (types.ProvingStatus, error)
+	GetVerifiedProofAndInstanceByHash(hash string) ([]byte, []byte, error)
+	UpdateProofByHash(ctx context.Context, hash string, proof, instanceCommitments []byte, proofTimeSec uint64) error
+	UpdateProvingStatus(hash string, status types.ProvingStatus) error
+	ResetProvingStatusFor(before types.ProvingStatus) error
+	NewBatchInDBTx(dbTx *sqlx.Tx, batchData *types.BatchData) error
+	BatchRecordExist(hash string) (bool, error)
	GetPendingBatches(limit uint64) ([]string, error)
	GetCommittedBatches(limit uint64) ([]string, error)
-	GetRollupStatus(id string) (RollupStatus, error)
-	GetRollupStatusByIDList(ids []string) ([]RollupStatus, error)
-	GetLatestFinalizedBatch() (*BlockBatch, error)
-	UpdateRollupStatus(ctx context.Context, id string, status RollupStatus) error
-	UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commitTxHash string, status RollupStatus) error
-	UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalizeTxHash string, status RollupStatus) error
-	GetAssignedBatchIDs() ([]string, error)
+	GetRollupStatus(hash string) (types.RollupStatus, error)
+	GetRollupStatusByHashList(hashes []string) ([]types.RollupStatus, error)
+	GetLatestBatch() (*types.BlockBatch, error)
+	GetLatestCommittedBatch() (*types.BlockBatch, error)
+	GetLatestFinalizedBatch() (*types.BlockBatch, error)
+	UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error
+	UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error
+	UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error
+	GetAssignedBatchHashes() ([]string, error)
	UpdateSkippedBatches() (int64, error)
	GetBatchCount() (int64, error)

-	GetCommitTxHash(id string) (sql.NullString, error)   // for unit tests only
-	GetFinalizeTxHash(id string) (sql.NullString, error) // for unit tests only
+	UpdateL2OracleTxHash(ctx context.Context, hash, txHash string) error
+	UpdateL2GasOracleStatus(ctx context.Context, hash string, status types.GasOracleStatus) error
+	UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error
+
+	GetCommitTxHash(hash string) (sql.NullString, error)   // for unit tests only
+	GetFinalizeTxHash(hash string) (sql.NullString, error) // for unit tests only
}
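
Read as a whole, the renamed `BlockBatchOrm` is a hash-keyed state machine: a batch is inserted via `NewBatchInDBTx`, then its `rollup_status` is advanced as the commit and finalize transactions confirm on L1. A rough usage sketch under that reading; the orchestration is illustrative, not the relayer's actual control flow:

// Illustrative only: drives one batch from committed to finalized using the
// hash-keyed methods of the BlockBatchOrm interface above.
package orm

import (
	"context"

	"scroll-tech/common/types"
)

func advanceBatch(ctx context.Context, o BlockBatchOrm, batchHash, commitTx, finalizeTx string) error {
	// Record the L1 commit transaction and mark the batch committed.
	if err := o.UpdateCommitTxHashAndRollupStatus(ctx, batchHash, commitTx, types.RollupCommitted); err != nil {
		return err
	}
	// Later, once the proof is verified on L1, mark the batch finalized.
	return o.UpdateFinalizeTxHashAndRollupStatus(ctx, batchHash, finalizeTx, types.RollupFinalized)
}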

// L1MessageOrm is layer1 message db interface
type L1MessageOrm interface {
-	GetL1MessageByNonce(nonce uint64) (*L1Message, error)
-	GetL1MessageByMsgHash(msgHash string) (*L1Message, error)
-	GetL1MessagesByStatus(status MsgStatus, limit uint64) ([]*L1Message, error)
-	GetL1ProcessedNonce() (int64, error)
-	SaveL1Messages(ctx context.Context, messages []*L1Message) error
+	GetL1MessageByQueueIndex(queueIndex uint64) (*types.L1Message, error)
+	GetL1MessageByMsgHash(msgHash string) (*types.L1Message, error)
+	GetL1MessagesByStatus(status types.MsgStatus, limit uint64) ([]*types.L1Message, error)
+	GetL1ProcessedQueueIndex() (int64, error)
+	SaveL1Messages(ctx context.Context, messages []*types.L1Message) error
	UpdateLayer2Hash(ctx context.Context, msgHash string, layer2Hash string) error
-	UpdateLayer1Status(ctx context.Context, msgHash string, status MsgStatus) error
-	UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status MsgStatus, layer2Hash string) error
+	UpdateLayer1Status(ctx context.Context, msgHash string, status types.MsgStatus) error
+	UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer2Hash string) error
	GetLayer1LatestWatchedHeight() (int64, error)

-	GetRelayL1MessageTxHash(nonce uint64) (sql.NullString, error) // for unit tests only
+	GetRelayL1MessageTxHash(queueIndex uint64) (sql.NullString, error) // for unit tests only
}

// L2MessageOrm is layer2 message db interface
type L2MessageOrm interface {
-	GetL2MessageByNonce(nonce uint64) (*L2Message, error)
-	GetL2MessageByMsgHash(msgHash string) (*L2Message, error)
+	GetL2MessageByNonce(nonce uint64) (*types.L2Message, error)
+	GetL2MessageByMsgHash(msgHash string) (*types.L2Message, error)
	MessageProofExist(nonce uint64) (bool, error)
	GetMessageProofByNonce(nonce uint64) (string, error)
-	GetL2Messages(fields map[string]interface{}, args ...string) ([]*L2Message, error)
+	GetL2Messages(fields map[string]interface{}, args ...string) ([]*types.L2Message, error)
	GetL2ProcessedNonce() (int64, error)
-	SaveL2Messages(ctx context.Context, messages []*L2Message) error
+	SaveL2Messages(ctx context.Context, messages []*types.L2Message) error
	UpdateLayer1Hash(ctx context.Context, msgHash string, layer1Hash string) error
-	UpdateLayer2Status(ctx context.Context, msgHash string, status MsgStatus) error
-	UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status MsgStatus, layer1Hash string) error
+	UpdateLayer2Status(ctx context.Context, msgHash string, status types.MsgStatus) error
+	UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer1Hash string) error
	UpdateMessageProof(ctx context.Context, nonce uint64, proof string) error
	GetLayer2LatestWatchedHeight() (int64, error)

149	database/orm/l1_block.go	Normal file
@@ -0,0 +1,149 @@
+package orm
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+	"strings"
+
+	"scroll-tech/common/types"
+
+	"github.com/jmoiron/sqlx"
+	"github.com/scroll-tech/go-ethereum/log"
+)
+
+type l1BlockOrm struct {
+	db *sqlx.DB
+}
+
+var _ L1BlockOrm = (*l1BlockOrm)(nil)
+
+// NewL1BlockOrm create an l1BlockOrm instance
+func NewL1BlockOrm(db *sqlx.DB) L1BlockOrm {
+	return &l1BlockOrm{db: db}
+}
+
+func (l *l1BlockOrm) GetL1BlockInfos(fields map[string]interface{}, args ...string) ([]*types.L1BlockInfo, error) {
+	query := "SELECT * FROM l1_block WHERE 1 = 1 "
+	for key := range fields {
+		query += fmt.Sprintf("AND %s=:%s ", key, key)
+	}
+	query = strings.Join(append([]string{query}, args...), " ")
+	query += " ORDER BY number ASC"
+
+	db := l.db
+	rows, err := db.NamedQuery(db.Rebind(query), fields)
+	if err != nil {
+		return nil, err
+	}
+
+	var blocks []*types.L1BlockInfo
+	for rows.Next() {
+		block := &types.L1BlockInfo{}
+		if err = rows.StructScan(block); err != nil {
+			break
+		}
+		blocks = append(blocks, block)
+	}
+	if err != nil && !errors.Is(err, sql.ErrNoRows) {
+		return nil, err
+	}
+
+	return blocks, rows.Close()
+}
+
+func (l *l1BlockOrm) InsertL1Blocks(ctx context.Context, blocks []*types.L1BlockInfo) error {
+	if len(blocks) == 0 {
+		return nil
+	}
+
+	blockMaps := make([]map[string]interface{}, len(blocks))
+	for i, block := range blocks {
+		blockMaps[i] = map[string]interface{}{
+			"number":     block.Number,
+			"hash":       block.Hash,
+			"header_rlp": block.HeaderRLP,
+			"base_fee":   block.BaseFee,
+		}
+	}
+	_, err := l.db.NamedExec(`INSERT INTO public.l1_block (number, hash, header_rlp, base_fee) VALUES (:number, :hash, :header_rlp, :base_fee);`, blockMaps)
+	if err != nil {
+		log.Error("failed to insert L1 Blocks", "err", err)
+	}
+	return err
+}
+
+func (l *l1BlockOrm) DeleteHeaderRLPByBlockHash(ctx context.Context, blockHash string) error {
+	if _, err := l.db.Exec(l.db.Rebind("update l1_block set header_rlp = ? where hash = ?;"), "", blockHash); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (l *l1BlockOrm) UpdateImportTxHash(ctx context.Context, blockHash, txHash string) error {
+	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update l1_block set import_tx_hash = ? where hash = ?;"), txHash, blockHash); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (l *l1BlockOrm) UpdateL1BlockStatus(ctx context.Context, blockHash string, status types.L1BlockStatus) error {
+	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update l1_block set block_status = ? where hash = ?;"), status, blockHash); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (l *l1BlockOrm) UpdateL1BlockStatusAndImportTxHash(ctx context.Context, blockHash string, status types.L1BlockStatus, txHash string) error {
+	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update l1_block set block_status = ?, import_tx_hash = ? where hash = ?;"), status, txHash, blockHash); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (l *l1BlockOrm) UpdateL1OracleTxHash(ctx context.Context, blockHash, txHash string) error {
+	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update l1_block set oracle_tx_hash = ? where hash = ?;"), txHash, blockHash); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (l *l1BlockOrm) UpdateL1GasOracleStatus(ctx context.Context, blockHash string, status types.GasOracleStatus) error {
+	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update l1_block set oracle_status = ? where hash = ?;"), status, blockHash); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (l *l1BlockOrm) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error {
+	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update l1_block set oracle_status = ?, oracle_tx_hash = ? where hash = ?;"), status, txHash, blockHash); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (l *l1BlockOrm) GetLatestL1BlockHeight() (uint64, error) {
+	row := l.db.QueryRow("SELECT COALESCE(MAX(number), 0) FROM l1_block;")
+
+	var height uint64
+	if err := row.Scan(&height); err != nil {
+		return 0, err
+	}
+	return height, nil
+}
+
+func (l *l1BlockOrm) GetLatestImportedL1Block() (*types.L1BlockInfo, error) {
+	row := l.db.QueryRowx(`SELECT * FROM l1_block WHERE block_status = $1 ORDER BY index DESC;`, types.L1BlockImported)
+	block := &types.L1BlockInfo{}
+	if err := row.StructScan(block); err != nil {
+		return nil, err
+	}
+	return block, nil
+}
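
`InsertL1Blocks` above uses the sqlx bulk-insert idiom: build one map per row and pass the whole slice to `NamedExec`, which binds the named parameters for every element. The same idiom reduced to a hypothetical two-column table:

// Sketch of NamedExec over a slice of row maps, as in InsertL1Blocks.
// The kv table is hypothetical.
package example

import (
	"github.com/jmoiron/sqlx"
)

func insertPairs(db *sqlx.DB, pairs map[string]string) error {
	rowMaps := make([]map[string]interface{}, 0, len(pairs))
	for k, v := range pairs {
		rowMaps = append(rowMaps, map[string]interface{}{"k": k, "v": v})
	}
	// NamedExec accepts a slice and executes the insert for each element.
	_, err := db.NamedExec(`INSERT INTO kv (k, v) VALUES (:k, :v);`, rowMaps)
	return err
}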

@@ -7,6 +7,8 @@ import (
	"github.com/jmoiron/sqlx"
	"github.com/scroll-tech/go-ethereum/log"
+
+	"scroll-tech/common/types"
)

type l1MessageOrm struct {
@@ -20,11 +22,11 @@ func NewL1MessageOrm(db *sqlx.DB) L1MessageOrm {
	return &l1MessageOrm{db: db}
}

-// GetL1MessageByMsgHash fetch message by nonce
-func (m *l1MessageOrm) GetL1MessageByMsgHash(msgHash string) (*L1Message, error) {
-	msg := L1Message{}
+// GetL1MessageByMsgHash fetch message by msg_hash
+func (m *l1MessageOrm) GetL1MessageByMsgHash(msgHash string) (*types.L1Message, error) {
+	msg := types.L1Message{}

-	row := m.db.QueryRowx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, status FROM l1_message WHERE msg_hash = $1`, msgHash)
+	row := m.db.QueryRowx(`SELECT queue_index, msg_hash, height, sender, target, value, gas_limit, calldata, layer1_hash, status FROM l1_message WHERE msg_hash = $1`, msgHash)

	if err := row.StructScan(&msg); err != nil {
		return nil, err
@@ -32,11 +34,11 @@ func (m *l1MessageOrm) GetL1MessageByMsgHash(msgHash string) (*L1Message, error)
	return &msg, nil
}

-// GetL1MessageByNonce fetch message by nonce
-func (m *l1MessageOrm) GetL1MessageByNonce(nonce uint64) (*L1Message, error) {
-	msg := L1Message{}
+// GetL1MessageByQueueIndex fetch message by queue_index
+func (m *l1MessageOrm) GetL1MessageByQueueIndex(queueIndex uint64) (*types.L1Message, error) {
+	msg := types.L1Message{}

-	row := m.db.QueryRowx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, status FROM l1_message WHERE nonce = $1`, nonce)
+	row := m.db.QueryRowx(`SELECT queue_index, msg_hash, height, sender, target, value, calldata, layer1_hash, status FROM l1_message WHERE queue_index = $1`, queueIndex)

	if err := row.StructScan(&msg); err != nil {
		return nil, err
@@ -45,15 +47,15 @@ func (m *l1MessageOrm) GetL1MessageByNonce(nonce uint64) (*L1Message, error) {
}

// GetL1MessagesByStatus fetch list of unprocessed messages given msg status
-func (m *l1MessageOrm) GetL1MessagesByStatus(status MsgStatus, limit uint64) ([]*L1Message, error) {
-	rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, status FROM l1_message WHERE status = $1 ORDER BY nonce ASC LIMIT $2;`, status, limit)
+func (m *l1MessageOrm) GetL1MessagesByStatus(status types.MsgStatus, limit uint64) ([]*types.L1Message, error) {
+	rows, err := m.db.Queryx(`SELECT queue_index, msg_hash, height, sender, target, value, calldata, layer1_hash, status FROM l1_message WHERE status = $1 ORDER BY queue_index ASC LIMIT $2;`, status, limit)
	if err != nil {
		return nil, err
	}

-	var msgs []*L1Message
+	var msgs []*types.L1Message
	for rows.Next() {
-		msg := &L1Message{}
+		msg := &types.L1Message{}
		if err = rows.StructScan(&msg); err != nil {
			break
		}
@@ -68,56 +70,55 @@ func (m *l1MessageOrm) GetL1MessagesByStatus(status MsgStatus, limit uint64) ([]
	return msgs, rows.Close()
}

-// GetL1ProcessedNonce fetch latest processed message nonce
-func (m *l1MessageOrm) GetL1ProcessedNonce() (int64, error) {
-	row := m.db.QueryRow(`SELECT MAX(nonce) FROM l1_message WHERE status = $1;`, MsgConfirmed)
+// GetL1ProcessedQueueIndex fetch latest processed message queue_index
+func (m *l1MessageOrm) GetL1ProcessedQueueIndex() (int64, error) {
+	row := m.db.QueryRow(`SELECT MAX(queue_index) FROM l1_message WHERE status = $1;`, types.MsgConfirmed)

-	var nonce sql.NullInt64
-	if err := row.Scan(&nonce); err != nil {
-		if err == sql.ErrNoRows || !nonce.Valid {
+	var queueIndex sql.NullInt64
+	if err := row.Scan(&queueIndex); err != nil {
+		if err == sql.ErrNoRows || !queueIndex.Valid {
			// no row means no message
-			// since nonce starts with 0, return -1 as the processed nonce
+			// since queueIndex starts with 0, return -1 as the processed queueIndex
			return -1, nil
		}
		return 0, err
	}
-	if nonce.Valid {
-		return nonce.Int64, nil
+	if queueIndex.Valid {
+		return queueIndex.Int64, nil
	}
	return -1, nil
}
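
The `-1` sentinel above deserves a note: `MAX(queue_index)` over an empty table returns a single NULL row rather than zero rows, so the scan has to go through `sql.NullInt64`, and both the `ErrNoRows` and `!Valid` branches fall back to `-1`, one before the first valid index, since indices start at 0. The guard in isolation:

// Sketch of scanning an aggregate MAX() through sql.NullInt64,
// mirroring GetL1ProcessedQueueIndex above.
package example

import (
	"database/sql"
	"errors"
)

func maxQueueIndexOrSentinel(db *sql.DB) (int64, error) {
	var max sql.NullInt64
	err := db.QueryRow(`SELECT MAX(queue_index) FROM l1_message;`).Scan(&max)
	switch {
	case errors.Is(err, sql.ErrNoRows) || (err == nil && !max.Valid):
		return -1, nil // nothing processed yet
	case err != nil:
		return 0, err
	default:
		return max.Int64, nil
	}
}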

// SaveL1Messages batch save a list of layer1 messages
-func (m *l1MessageOrm) SaveL1Messages(ctx context.Context, messages []*L1Message) error {
+func (m *l1MessageOrm) SaveL1Messages(ctx context.Context, messages []*types.L1Message) error {
	if len(messages) == 0 {
		return nil
	}

	messageMaps := make([]map[string]interface{}, len(messages))
	for i, msg := range messages {
		messageMaps[i] = map[string]interface{}{
-			"nonce":       msg.Nonce,
+			"queue_index": msg.QueueIndex,
			"msg_hash":    msg.MsgHash,
			"height":      msg.Height,
			"sender":      msg.Sender,
			"target":      msg.Target,
			"value":       msg.Value,
-			"fee":         msg.Fee,
			"gas_limit":   msg.GasLimit,
-			"deadline":    msg.Deadline,
			"calldata":    msg.Calldata,
			"layer1_hash": msg.Layer1Hash,
		}
	}
-	_, err := m.db.NamedExec(`INSERT INTO public.l1_message (nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash) VALUES (:nonce, :msg_hash, :height, :sender, :target, :value, :fee, :gas_limit, :deadline, :calldata, :layer1_hash);`, messageMaps)
+	_, err := m.db.NamedExec(`INSERT INTO public.l1_message (queue_index, msg_hash, height, sender, target, value, gas_limit, calldata, layer1_hash) VALUES (:queue_index, :msg_hash, :height, :sender, :target, :value, :gas_limit, :calldata, :layer1_hash);`, messageMaps)
	if err != nil {
-		nonces := make([]uint64, 0, len(messages))
+		queueIndices := make([]uint64, 0, len(messages))
		heights := make([]uint64, 0, len(messages))
		for _, msg := range messages {
-			nonces = append(nonces, msg.Nonce)
+			queueIndices = append(queueIndices, msg.QueueIndex)
			heights = append(heights, msg.Height)
		}
-		log.Error("failed to insert l1Messages", "nonces", nonces, "heights", heights, "err", err)
+		log.Error("failed to insert l1Messages", "queueIndices", queueIndices, "heights", heights, "err", err)
	}
	return err
}

@@ -132,7 +133,7 @@ func (m *l1MessageOrm) UpdateLayer2Hash(ctx context.Context, msgHash, layer2Hash
}

// UpdateLayer1Status updates message status, given message hash
-func (m *l1MessageOrm) UpdateLayer1Status(ctx context.Context, msgHash string, status MsgStatus) error {
+func (m *l1MessageOrm) UpdateLayer1Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
	if _, err := m.db.ExecContext(ctx, m.db.Rebind("update l1_message set status = ? where msg_hash = ?;"), status, msgHash); err != nil {
		return err
	}
@@ -141,7 +142,7 @@ func (m *l1MessageOrm) UpdateLayer1Status(ctx context.Context, msgHash string, s
}

// UpdateLayer1StatusAndLayer2Hash updates message status and layer2 transaction hash, given message hash
-func (m *l1MessageOrm) UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status MsgStatus, layer2Hash string) error {
+func (m *l1MessageOrm) UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer2Hash string) error {
	if _, err := m.db.ExecContext(ctx, m.db.Rebind("update l1_message set status = ?, layer2_hash = ? where msg_hash = ?;"), status, layer2Hash, msgHash); err != nil {
		return err
	}
@@ -168,8 +169,8 @@ func (m *l1MessageOrm) GetLayer1LatestWatchedHeight() (int64, error) {
	return -1, nil
}

-func (m *l1MessageOrm) GetRelayL1MessageTxHash(nonce uint64) (sql.NullString, error) {
-	row := m.db.QueryRow(`SELECT layer2_hash FROM l1_message WHERE nonce = $1`, nonce)
+func (m *l1MessageOrm) GetRelayL1MessageTxHash(queueIndex uint64) (sql.NullString, error) {
+	row := m.db.QueryRow(`SELECT layer2_hash FROM l1_message WHERE queue_index = $1`, queueIndex)
	var hash sql.NullString
	if err := row.Scan(&hash); err != nil {
		return sql.NullString{}, err

@@ -9,6 +9,8 @@ import (
	"github.com/jmoiron/sqlx"
	"github.com/scroll-tech/go-ethereum/log"
+
+	"scroll-tech/common/types"
)

type layer2MessageOrm struct {
@@ -23,10 +25,10 @@ func NewL2MessageOrm(db *sqlx.DB) L2MessageOrm {
}

// GetL2MessageByNonce fetch message by nonce
-func (m *layer2MessageOrm) GetL2MessageByNonce(nonce uint64) (*L2Message, error) {
-	msg := L2Message{}
+func (m *layer2MessageOrm) GetL2MessageByNonce(nonce uint64) (*types.L2Message, error) {
+	msg := types.L2Message{}

-	row := m.db.QueryRowx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash, status FROM l2_message WHERE nonce = $1`, nonce)
+	row := m.db.QueryRowx(`SELECT nonce, msg_hash, height, sender, target, value, calldata, layer2_hash, status FROM l2_message WHERE nonce = $1`, nonce)
	if err := row.StructScan(&msg); err != nil {
		return nil, err
	}
@@ -34,10 +36,10 @@ func (m *layer2MessageOrm) GetL2MessageByNonce(nonce uint64) (*L2Message, error)
}

// GetL2MessageByMsgHash fetch message by message hash
-func (m *layer2MessageOrm) GetL2MessageByMsgHash(msgHash string) (*L2Message, error) {
-	msg := L2Message{}
+func (m *layer2MessageOrm) GetL2MessageByMsgHash(msgHash string) (*types.L2Message, error) {
+	msg := types.L2Message{}

-	row := m.db.QueryRowx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash, status FROM l2_message WHERE msg_hash = $1`, msgHash)
+	row := m.db.QueryRowx(`SELECT nonce, msg_hash, height, sender, target, value, calldata, layer2_hash, status FROM l2_message WHERE msg_hash = $1`, msgHash)
	if err := row.StructScan(&msg); err != nil {
		return nil, err
	}

@@ -71,7 +73,7 @@ func (m *layer2MessageOrm) MessageProofExist(nonce uint64) (bool, error) {

// GetL2ProcessedNonce fetch latest processed message nonce
func (m *layer2MessageOrm) GetL2ProcessedNonce() (int64, error) {
-	row := m.db.QueryRow(`SELECT MAX(nonce) FROM l2_message WHERE status = $1;`, MsgConfirmed)
+	row := m.db.QueryRow(`SELECT MAX(nonce) FROM l2_message WHERE status = $1;`, types.MsgConfirmed)

	// no row means no message
	// since nonce starts with 0, return -1 as the processed nonce
@@ -89,8 +91,8 @@ func (m *layer2MessageOrm) GetL2ProcessedNonce() (int64, error) {
}

// GetL2Messages fetch list of messages given fields and args
-func (m *layer2MessageOrm) GetL2Messages(fields map[string]interface{}, args ...string) ([]*L2Message, error) {
-	query := "SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash FROM l2_message WHERE 1 = 1 "
+func (m *layer2MessageOrm) GetL2Messages(fields map[string]interface{}, args ...string) ([]*types.L2Message, error) {
+	query := "SELECT nonce, msg_hash, height, sender, target, value, calldata, layer2_hash FROM l2_message WHERE 1 = 1 "
	for key := range fields {
		query += fmt.Sprintf("AND %s=:%s ", key, key)
	}
@@ -102,9 +104,9 @@ func (m *layer2MessageOrm) GetL2Messages(fields map[string]interface{}, args ...
		return nil, err
	}

-	var msgs []*L2Message
+	var msgs []*types.L2Message
	for rows.Next() {
-		msg := &L2Message{}
+		msg := &types.L2Message{}
		if err = rows.StructScan(&msg); err != nil {
			break
		}
@@ -120,7 +122,7 @@ func (m *layer2MessageOrm) GetL2Messages(fields map[string]interface{}, args ...
}

// SaveL2Messages batch save a list of layer2 messages
-func (m *layer2MessageOrm) SaveL2Messages(ctx context.Context, messages []*L2Message) error {
+func (m *layer2MessageOrm) SaveL2Messages(ctx context.Context, messages []*types.L2Message) error {
	if len(messages) == 0 {
		return nil
	}
@@ -134,15 +136,12 @@ func (m *layer2MessageOrm) SaveL2Messages(ctx context.Context, messages []*L2Mes
			"sender":      msg.Sender,
			"target":      msg.Target,
			"value":       msg.Value,
-			"fee":         msg.Fee,
-			"gas_limit":   msg.GasLimit,
-			"deadline":    msg.Deadline,
			"calldata":    msg.Calldata,
			"layer2_hash": msg.Layer2Hash,
		}
	}

-	_, err := m.db.NamedExec(`INSERT INTO public.l2_message (nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash) VALUES (:nonce, :msg_hash, :height, :sender, :target, :value, :fee, :gas_limit, :deadline, :calldata, :layer2_hash);`, messageMaps)
+	_, err := m.db.NamedExec(`INSERT INTO public.l2_message (nonce, msg_hash, height, sender, target, value, calldata, layer2_hash) VALUES (:nonce, :msg_hash, :height, :sender, :target, :value, :calldata, :layer2_hash);`, messageMaps)
	if err != nil {
		nonces := make([]uint64, 0, len(messages))
		heights := make([]uint64, 0, len(messages))
@@ -174,7 +173,7 @@ func (m *layer2MessageOrm) UpdateMessageProof(ctx context.Context, nonce uint64,
}

// UpdateLayer2Status updates message status, given message hash
-func (m *layer2MessageOrm) UpdateLayer2Status(ctx context.Context, msgHash string, status MsgStatus) error {
+func (m *layer2MessageOrm) UpdateLayer2Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
	if _, err := m.db.ExecContext(ctx, m.db.Rebind("update l2_message set status = ? where msg_hash = ?;"), status, msgHash); err != nil {
		return err
	}
@@ -183,7 +182,7 @@ func (m *layer2MessageOrm) UpdateLayer2Status(ctx context.Context, msgHash strin
}

// UpdateLayer2StatusAndLayer1Hash updates message status and layer1 transaction hash, given message hash
-func (m *layer2MessageOrm) UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status MsgStatus, layer1Hash string) error {
+func (m *layer2MessageOrm) UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer1Hash string) error {
	if _, err := m.db.ExecContext(ctx, m.db.Rebind("update l2_message set status = ?, layer1_hash = ? where msg_hash = ?;"), status, layer1Hash, msgHash); err != nil {
		return err
	}

@@ -4,6 +4,8 @@ import (
	"encoding/json"

	"github.com/jmoiron/sqlx"
+
+	"scroll-tech/common/types"
)

type sessionInfoOrm struct {
@@ -17,11 +19,11 @@ func NewSessionInfoOrm(db *sqlx.DB) SessionInfoOrm {
	return &sessionInfoOrm{db: db}
}

-func (o *sessionInfoOrm) GetSessionInfosByIDs(ids []string) ([]*SessionInfo, error) {
-	if len(ids) == 0 {
+func (o *sessionInfoOrm) GetSessionInfosByHashes(hashes []string) ([]*types.SessionInfo, error) {
+	if len(hashes) == 0 {
		return nil, nil
	}
-	query, args, err := sqlx.In("SELECT rollers_info FROM session_info WHERE id IN (?);", ids)
+	query, args, err := sqlx.In("SELECT rollers_info FROM session_info WHERE hash IN (?);", hashes)
	if err != nil {
		return nil, err
	}
@@ -29,13 +31,13 @@ func (o *sessionInfoOrm) GetSessionInfosByIDs(ids []string) ([]*SessionInfo, err
	if err != nil {
		return nil, err
	}
-	var sessionInfos []*SessionInfo
+	var sessionInfos []*types.SessionInfo
	for rows.Next() {
		var infoBytes []byte
		if err := rows.Scan(&infoBytes); err != nil {
			return nil, err
		}
-		sessionInfo := &SessionInfo{}
+		sessionInfo := &types.SessionInfo{}
		if err := json.Unmarshal(infoBytes, sessionInfo); err != nil {
			return nil, err
		}
@@ -44,12 +46,12 @@ func (o *sessionInfoOrm) GetSessionInfosByIDs(ids []string) ([]*SessionInfo, err
	return sessionInfos, nil
}

-func (o *sessionInfoOrm) SetSessionInfo(rollersInfo *SessionInfo) error {
+func (o *sessionInfoOrm) SetSessionInfo(rollersInfo *types.SessionInfo) error {
	infoBytes, err := json.Marshal(rollersInfo)
	if err != nil {
		return err
	}
-	sqlStr := "INSERT INTO session_info (id, rollers_info) VALUES ($1, $2) ON CONFLICT (id) DO UPDATE SET rollers_info = EXCLUDED.rollers_info;"
+	sqlStr := "INSERT INTO session_info (hash, rollers_info) VALUES ($1, $2) ON CONFLICT (hash) DO UPDATE SET rollers_info = EXCLUDED.rollers_info;"
	_, err = o.db.Exec(sqlStr, rollersInfo.ID, infoBytes)
	return err
}
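
The rewritten `sqlStr` is a Postgres upsert: the first `SetSessionInfo` for a session inserts the row, and every later call for the same hash overwrites `rollers_info` in place (note that `rollersInfo.ID` still carries the batch hash; only the column was renamed). The statement in isolation:

// Sketch of the ON CONFLICT upsert used by SetSessionInfo above.
package example

import (
	"github.com/jmoiron/sqlx"
)

func upsertSessionInfo(db *sqlx.DB, hash string, rollersInfo []byte) error {
	// Insert on first write; replace rollers_info on any subsequent write
	// for the same hash.
	_, err := db.Exec(
		`INSERT INTO session_info (hash, rollers_info) VALUES ($1, $2)
		 ON CONFLICT (hash) DO UPDATE SET rollers_info = EXCLUDED.rollers_info;`,
		hash, rollersInfo,
	)
	return err
}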

@@ -11,6 +11,7 @@ import (
type OrmFactory interface {
	orm.BlockTraceOrm
	orm.BlockBatchOrm
+	orm.L1BlockOrm
	orm.L1MessageOrm
	orm.L2MessageOrm
	orm.SessionInfoOrm
@@ -22,6 +23,7 @@ type OrmFactory interface {
type ormFactory struct {
	orm.BlockTraceOrm
	orm.BlockBatchOrm
+	orm.L1BlockOrm
	orm.L1MessageOrm
	orm.L2MessageOrm
	orm.SessionInfoOrm
@@ -47,6 +49,7 @@ func NewOrmFactory(cfg *DBConfig) (OrmFactory, error) {
		BlockBatchOrm:  orm.NewBlockBatchOrm(db),
		L1MessageOrm:   orm.NewL1MessageOrm(db),
		L2MessageOrm:   orm.NewL2MessageOrm(db),
+		L1BlockOrm:     orm.NewL1BlockOrm(db),
		SessionInfoOrm: orm.NewSessionInfoOrm(db),
		DB:             db,
	}, nil

@@ -3,16 +3,22 @@ package database_test
import (
	"context"
	"encoding/json"
+	"fmt"
+	"math/big"
	"os"
	"testing"
	"time"

	_ "github.com/lib/pq"
	_ "github.com/mattn/go-sqlite3"
-	"github.com/scroll-tech/go-ethereum/core/types"
+	"github.com/scroll-tech/go-ethereum/common"
+	geth_types "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/stretchr/testify/assert"

	"scroll-tech/common/docker"
+	"scroll-tech/common/types"
+
+	abi "scroll-tech/bridge/abi"

	"scroll-tech/database"
	"scroll-tech/database/migrate"
@@ -20,44 +26,37 @@ import (
)

var (
-	templateL1Message = []*orm.L1Message{
+	templateL1Message = []*types.L1Message{
		{
-			Nonce:      1,
+			QueueIndex: 1,
			MsgHash:    "msg_hash1",
			Height:     1,
			Sender:     "0x596a746661dbed76a84556111c2872249b070e15",
			Value:      "0x19ece",
-			Fee:        "0x19ece",
			GasLimit:   11529940,
-			Deadline:   uint64(time.Now().Unix()),
			Target:     "0x2c73620b223808297ea734d946813f0dd78eb8f7",
			Calldata:   "testdata",
			Layer1Hash: "hash0",
		},
		{
-			Nonce:      2,
+			QueueIndex: 2,
			MsgHash:    "msg_hash2",
			Height:     2,
			Sender:     "0x596a746661dbed76a84556111c2872249b070e15",
			Value:      "0x19ece",
-			Fee:        "0x19ece",
			GasLimit:   11529940,
-			Deadline:   uint64(time.Now().Unix()),
			Target:     "0x2c73620b223808297ea734d946813f0dd78eb8f7",
			Calldata:   "testdata",
			Layer1Hash: "hash1",
		},
	}
-	templateL2Message = []*orm.L2Message{
+	templateL2Message = []*types.L2Message{
		{
			Nonce:      1,
			MsgHash:    "msg_hash1",
			Height:     1,
			Sender:     "0x596a746661dbed76a84556111c2872249b070e15",
			Value:      "0x19ece",
-			Fee:        "0x19ece",
			GasLimit:   11529940,
			Deadline:   uint64(time.Now().Unix()),
			Target:     "0x2c73620b223808297ea734d946813f0dd78eb8f7",
			Calldata:   "testdata",
			Layer2Hash: "hash0",
@@ -68,15 +67,14 @@ var (
			Height:     2,
			Sender:     "0x596a746661dbed76a84556111c2872249b070e15",
			Value:      "0x19ece",
-			Fee:        "0x19ece",
			GasLimit:   11529940,
			Deadline:   uint64(time.Now().Unix()),
			Target:     "0x2c73620b223808297ea734d946813f0dd78eb8f7",
			Calldata:   "testdata",
			Layer2Hash: "hash1",
		},
	}

-	blockTrace *types.BlockTrace
+	blockTrace *geth_types.BlockTrace
+	batchData1 *types.BatchData
+	batchData2 *types.BatchData

	dbConfig *database.DBConfig
	dbImg    docker.ImgInstance
@@ -106,13 +104,54 @@ func setupEnv(t *testing.T) error {
	ormBatch = orm.NewBlockBatchOrm(db)
	ormSession = orm.NewSessionInfoOrm(db)

-	templateBlockTrace, err := os.ReadFile("../common/testdata/blockTrace_03.json")
+	templateBlockTrace, err := os.ReadFile("../common/testdata/blockTrace_02.json")
	if err != nil {
		return err
	}
	// unmarshal blockTrace
-	blockTrace = &types.BlockTrace{}
-	return json.Unmarshal(templateBlockTrace, blockTrace)
+	blockTrace = &geth_types.BlockTrace{}
+	if err = json.Unmarshal(templateBlockTrace, blockTrace); err != nil {
+		return err
+	}
+
+	parentBatch := &types.BlockBatch{
+		Index: 1,
+		Hash:  "0x0000000000000000000000000000000000000000",
+	}
+	batchData1 = types.NewBatchData(parentBatch, []*geth_types.BlockTrace{blockTrace}, nil)
+
+	templateBlockTrace, err = os.ReadFile("../common/testdata/blockTrace_03.json")
+	if err != nil {
+		return err
+	}
+	// unmarshal blockTrace
+	blockTrace2 := &geth_types.BlockTrace{}
+	if err = json.Unmarshal(templateBlockTrace, blockTrace2); err != nil {
+		return err
+	}
+	parentBatch2 := &types.BlockBatch{
+		Index: batchData1.Batch.BatchIndex,
+		Hash:  batchData1.Hash().Hex(),
+	}
+	batchData2 = types.NewBatchData(parentBatch2, []*geth_types.BlockTrace{blockTrace2}, nil)
+
+	// insert a fake empty block to batchData2
+	fakeBlockContext := abi.IScrollChainBlockContext{
+		BlockHash:       common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000dead"),
+		ParentHash:      batchData2.Batch.Blocks[0].BlockHash,
+		BlockNumber:     batchData2.Batch.Blocks[0].BlockNumber + 1,
+		BaseFee:         new(big.Int).SetUint64(0),
+		Timestamp:       123456789,
+		GasLimit:        10000000000000000,
+		NumTransactions: 0,
+		NumL1Messages:   0,
+	}
+	batchData2.Batch.Blocks = append(batchData2.Batch.Blocks, fakeBlockContext)
+	batchData2.Batch.NewStateRoot = common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000cafe")
+
+	fmt.Printf("batchhash1 = %x\n", batchData1.Hash())
+	fmt.Printf("batchhash2 = %x\n", batchData2.Hash())
+	return nil
}
|
||||
|
||||
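The setupEnv hunk above chains batchData2 onto batchData1: the child batch's parent descriptor carries the previous batch's index and hash, so batch hashes form a linked chain. A minimal standalone sketch of that pattern follows; the `scroll-tech/common/types` import path and the `nextBatch` helper are our assumptions, not shown in the hunk itself.

package main

import (
	"fmt"

	geth_types "github.com/scroll-tech/go-ethereum/core/types"

	"scroll-tech/common/types" // assumed location of BatchData/BlockBatch
)

// nextBatch is a hypothetical helper: it derives the parent descriptor
// from the previous batch, mirroring what setupEnv does above.
func nextBatch(prev *types.BatchData, traces []*geth_types.BlockTrace) *types.BatchData {
	parent := &types.BlockBatch{
		Index: prev.Batch.BatchIndex, // parent index recorded in the batch header
		Hash:  prev.Hash().Hex(),     // links the new batch to its parent by hash
	}
	return types.NewBatchData(parent, traces, nil)
}

func main() {
	var trace1, trace2 *geth_types.BlockTrace // load real traces here, e.g. from JSON testdata
	genesis := &types.BlockBatch{Index: 1, Hash: "0x0000000000000000000000000000000000000000"}
	b1 := types.NewBatchData(genesis, []*geth_types.BlockTrace{trace1}, nil)
	b2 := nextBatch(b1, []*geth_types.BlockTrace{trace2})
	fmt.Printf("batchhash1 = %x\nbatchhash2 = %x\n", b1.Hash(), b2.Hash())
}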
// TestOrmFactory run several test cases.
@@ -143,27 +182,27 @@ func testOrmBlockTraces(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(factory.GetDB().DB))

res, err := ormBlock.GetBlockTraces(map[string]interface{}{})
res, err := ormBlock.GetL2BlockTraces(map[string]interface{}{})
assert.NoError(t, err)
assert.Equal(t, true, len(res) == 0)

exist, err := ormBlock.Exist(blockTrace.Header.Number.Uint64())
exist, err := ormBlock.IsL2BlockExists(blockTrace.Header.Number.Uint64())
assert.NoError(t, err)
assert.Equal(t, false, exist)

// Insert into db
err = ormBlock.InsertBlockTraces([]*types.BlockTrace{blockTrace})
err = ormBlock.InsertL2BlockTraces([]*geth_types.BlockTrace{blockTrace})
assert.NoError(t, err)

res2, err := ormBlock.GetUnbatchedBlocks(map[string]interface{}{})
res2, err := ormBlock.GetUnbatchedL2Blocks(map[string]interface{}{})
assert.NoError(t, err)
assert.Equal(t, true, len(res2) == 1)

exist, err = ormBlock.Exist(blockTrace.Header.Number.Uint64())
exist, err = ormBlock.IsL2BlockExists(blockTrace.Header.Number.Uint64())
assert.NoError(t, err)
assert.Equal(t, true, exist)

res, err = ormBlock.GetBlockTraces(map[string]interface{}{
res, err = ormBlock.GetL2BlockTraces(map[string]interface{}{
"hash": blockTrace.Header.Hash().String(),
})
assert.NoError(t, err)
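The renames above (Exist to IsL2BlockExists, InsertBlockTraces to InsertL2BlockTraces, GetUnbatchedBlocks to GetUnbatchedL2Blocks) make the L2 scope of the block-trace store explicit. A small sketch of the resulting call sequence, reusing the imports of the test file; the interface below is inferred from the calls in this hunk and the real orm interface may differ.

// l2BlockStore mirrors only the method shapes visible above; illustrative.
type l2BlockStore interface {
	IsL2BlockExists(number uint64) (bool, error)
	InsertL2BlockTraces(traces []*geth_types.BlockTrace) error
}

// insertIfMissing inserts a trace only when its block number is not yet stored.
func insertIfMissing(store l2BlockStore, trace *geth_types.BlockTrace) error {
	exist, err := store.IsL2BlockExists(trace.Header.Number.Uint64())
	if err != nil || exist {
		return err
	}
	return store.InsertL2BlockTraces([]*geth_types.BlockTrace{trace})
}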
@@ -190,16 +229,16 @@ func testOrmL1Message(t *testing.T) {
err = ormLayer1.SaveL1Messages(context.Background(), templateL1Message)
assert.NoError(t, err)

err = ormLayer1.UpdateLayer1Status(context.Background(), "msg_hash1", orm.MsgConfirmed)
err = ormLayer1.UpdateLayer1Status(context.Background(), "msg_hash1", types.MsgConfirmed)
assert.NoError(t, err)

err = ormLayer1.UpdateLayer1Status(context.Background(), "msg_hash2", orm.MsgSubmitted)
err = ormLayer1.UpdateLayer1Status(context.Background(), "msg_hash2", types.MsgSubmitted)
assert.NoError(t, err)

err = ormLayer1.UpdateLayer2Hash(context.Background(), "msg_hash2", expected)
assert.NoError(t, err)

result, err := ormLayer1.GetL1ProcessedNonce()
result, err := ormLayer1.GetL1ProcessedQueueIndex()
assert.NoError(t, err)
assert.Equal(t, int64(1), result)

@@ -209,7 +248,7 @@ func testOrmL1Message(t *testing.T) {

msg, err := ormLayer1.GetL1MessageByMsgHash("msg_hash2")
assert.NoError(t, err)
assert.Equal(t, orm.MsgSubmitted, msg.Status)
assert.Equal(t, types.MsgSubmitted, msg.Status)
}

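Here the message-status constants move from the orm package to the shared types package, while the update call keeps its (ctx, msgHash, status) shape. A hedged sketch of one lifecycle step, again reusing the test file's imports; the interface and the name types.MsgStatus are our assumptions inferred from this hunk.

// l1MessageStore captures just the one method shape used above.
type l1MessageStore interface {
	UpdateLayer1Status(ctx context.Context, msgHash string, status types.MsgStatus) error
}

// markSubmittedThenConfirmed walks a message through the two states the
// test exercises; types.MsgStatus is assumed to be the constants' type.
func markSubmittedThenConfirmed(ctx context.Context, store l1MessageStore, msgHash string) error {
	if err := store.UpdateLayer1Status(ctx, msgHash, types.MsgSubmitted); err != nil {
		return err
	}
	return store.UpdateLayer1Status(ctx, msgHash, types.MsgConfirmed)
}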
func testOrmL2Message(t *testing.T) {
@@ -224,10 +263,10 @@ func testOrmL2Message(t *testing.T) {
err = ormLayer2.SaveL2Messages(context.Background(), templateL2Message)
assert.NoError(t, err)

err = ormLayer2.UpdateLayer2Status(context.Background(), "msg_hash1", orm.MsgConfirmed)
err = ormLayer2.UpdateLayer2Status(context.Background(), "msg_hash1", types.MsgConfirmed)
assert.NoError(t, err)

err = ormLayer2.UpdateLayer2Status(context.Background(), "msg_hash2", orm.MsgSubmitted)
err = ormLayer2.UpdateLayer2Status(context.Background(), "msg_hash2", types.MsgSubmitted)
assert.NoError(t, err)

err = ormLayer2.UpdateLayer1Hash(context.Background(), "msg_hash2", expected)
@@ -243,7 +282,7 @@ func testOrmL2Message(t *testing.T) {

msg, err := ormLayer2.GetL2MessageByMsgHash("msg_hash2")
assert.NoError(t, err)
assert.Equal(t, orm.MsgSubmitted, msg.Status)
assert.Equal(t, types.MsgSubmitted, msg.Status)
assert.Equal(t, msg.MsgHash, "msg_hash2")
}

@@ -256,23 +295,18 @@ func testOrmBlockBatch(t *testing.T) {

dbTx, err := factory.Beginx()
assert.NoError(t, err)
batchID1, err := ormBatch.NewBatchInDBTx(dbTx,
&orm.BlockInfo{Number: blockTrace.Header.Number.Uint64()},
&orm.BlockInfo{Number: blockTrace.Header.Number.Uint64() + 1},
"ff", 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
err = ormBatch.NewBatchInDBTx(dbTx, batchData1)
assert.NoError(t, err)
err = ormBlock.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
blockTrace.Header.Number.Uint64(),
blockTrace.Header.Number.Uint64() + 1}, batchID1)
batchHash1 := batchData1.Hash().Hex()
err = ormBlock.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData1.Batch.Blocks[0].BlockNumber}, batchHash1)
assert.NoError(t, err)
batchID2, err := ormBatch.NewBatchInDBTx(dbTx,
&orm.BlockInfo{Number: blockTrace.Header.Number.Uint64() + 2},
&orm.BlockInfo{Number: blockTrace.Header.Number.Uint64() + 3},
"ff", 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
err = ormBatch.NewBatchInDBTx(dbTx, batchData2)
assert.NoError(t, err)
err = ormBlock.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
blockTrace.Header.Number.Uint64() + 2,
blockTrace.Header.Number.Uint64() + 3}, batchID2)
batchHash2 := batchData2.Hash().Hex()
err = ormBlock.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData2.Batch.Blocks[0].BlockNumber,
batchData2.Batch.Blocks[1].BlockNumber}, batchHash2)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
@@ -281,55 +315,55 @@
assert.NoError(t, err)
assert.Equal(t, int(2), len(batches))

batcheIDs, err := ormBatch.GetPendingBatches(10)
batcheHashes, err := ormBatch.GetPendingBatches(10)
assert.NoError(t, err)
assert.Equal(t, int(2), len(batcheIDs))
assert.Equal(t, batchID1, batcheIDs[0])
assert.Equal(t, batchID2, batcheIDs[1])
assert.Equal(t, int(2), len(batcheHashes))
assert.Equal(t, batchHash1, batcheHashes[0])
assert.Equal(t, batchHash2, batcheHashes[1])

err = ormBatch.UpdateCommitTxHashAndRollupStatus(context.Background(), batchID1, "commit_tx_1", orm.RollupCommitted)
err = ormBatch.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash1, "commit_tx_1", types.RollupCommitted)
assert.NoError(t, err)

batcheIDs, err = ormBatch.GetPendingBatches(10)
batcheHashes, err = ormBatch.GetPendingBatches(10)
assert.NoError(t, err)
assert.Equal(t, int(1), len(batcheIDs))
assert.Equal(t, batchID2, batcheIDs[0])
assert.Equal(t, int(1), len(batcheHashes))
assert.Equal(t, batchHash2, batcheHashes[0])

provingStatus, err := ormBatch.GetProvingStatusByID(batchID1)
provingStatus, err := ormBatch.GetProvingStatusByHash(batchHash1)
assert.NoError(t, err)
assert.Equal(t, orm.ProvingTaskUnassigned, provingStatus)
err = ormBatch.UpdateProofByID(context.Background(), batchID1, []byte{1}, []byte{2}, 1200)
assert.Equal(t, types.ProvingTaskUnassigned, provingStatus)
err = ormBatch.UpdateProofByHash(context.Background(), batchHash1, []byte{1}, []byte{2}, 1200)
assert.NoError(t, err)
err = ormBatch.UpdateProvingStatus(batchID1, orm.ProvingTaskVerified)
err = ormBatch.UpdateProvingStatus(batchHash1, types.ProvingTaskVerified)
assert.NoError(t, err)
provingStatus, err = ormBatch.GetProvingStatusByID(batchID1)
provingStatus, err = ormBatch.GetProvingStatusByHash(batchHash1)
assert.NoError(t, err)
assert.Equal(t, orm.ProvingTaskVerified, provingStatus)
assert.Equal(t, types.ProvingTaskVerified, provingStatus)

rollupStatus, err := ormBatch.GetRollupStatus(batchID1)
rollupStatus, err := ormBatch.GetRollupStatus(batchHash1)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, rollupStatus)
err = ormBatch.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchID1, "finalize_tx_1", orm.RollupFinalized)
assert.Equal(t, types.RollupCommitted, rollupStatus)
err = ormBatch.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash1, "finalize_tx_1", types.RollupFinalized)
assert.NoError(t, err)
rollupStatus, err = ormBatch.GetRollupStatus(batchID1)
rollupStatus, err = ormBatch.GetRollupStatus(batchHash1)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, rollupStatus)
assert.Equal(t, types.RollupFinalized, rollupStatus)
result, err := ormBatch.GetLatestFinalizedBatch()
assert.NoError(t, err)
assert.Equal(t, batchID1, result.ID)
assert.Equal(t, batchHash1, result.Hash)

status1, err := ormBatch.GetRollupStatus(batchID1)
status1, err := ormBatch.GetRollupStatus(batchHash1)
assert.NoError(t, err)
status2, err := ormBatch.GetRollupStatus(batchID2)
status2, err := ormBatch.GetRollupStatus(batchHash2)
assert.NoError(t, err)
assert.NotEqual(t, status1, status2)
statues, err := ormBatch.GetRollupStatusByIDList([]string{batchID1, batchID2, batchID1, batchID2})
statues, err := ormBatch.GetRollupStatusByHashList([]string{batchHash1, batchHash2, batchHash1, batchHash2})
assert.NoError(t, err)
assert.Equal(t, statues[0], status1)
assert.Equal(t, statues[1], status2)
assert.Equal(t, statues[2], status1)
assert.Equal(t, statues[3], status2)
statues, err = ormBatch.GetRollupStatusByIDList([]string{batchID2, batchID1, batchID2, batchID1})
statues, err = ormBatch.GetRollupStatusByHashList([]string{batchHash2, batchHash1, batchHash2, batchHash1})
assert.NoError(t, err)
assert.Equal(t, statues[0], status2)
assert.Equal(t, statues[1], status1)
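The batch hunk above replaces the separate batch ID with the batch hash as the primary key, and the rollup status walks pending, then committed, then finalized. A compact sketch of that progression, reusing the test file's imports; the method shapes are copied from the calls above, while the interface name and types.RollupStatus are our assumptions.

// blockBatchStore lists the hash-keyed methods this hunk migrates to.
type blockBatchStore interface {
	UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash, txHash string, status types.RollupStatus) error
	UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash, txHash string, status types.RollupStatus) error
	GetRollupStatus(hash string) (types.RollupStatus, error)
}

// commitAndFinalize advances one batch through the two on-chain steps,
// mirroring the order asserted in testOrmBlockBatch.
func commitAndFinalize(ctx context.Context, store blockBatchStore, batchHash, commitTx, finalizeTx string) error {
	if err := store.UpdateCommitTxHashAndRollupStatus(ctx, batchHash, commitTx, types.RollupCommitted); err != nil {
		return err
	}
	return store.UpdateFinalizeTxHashAndRollupStatus(ctx, batchHash, finalizeTx, types.RollupFinalized)
}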
@@ -345,57 +379,54 @@ func testOrmSessionInfo(t *testing.T) {
assert.NoError(t, migrate.ResetDB(factory.GetDB().DB))
dbTx, err := factory.Beginx()
assert.NoError(t, err)
batchID, err := ormBatch.NewBatchInDBTx(dbTx,
&orm.BlockInfo{Number: blockTrace.Header.Number.Uint64()},
&orm.BlockInfo{Number: blockTrace.Header.Number.Uint64() + 1},
"ff", 1, 194676)
err = ormBatch.NewBatchInDBTx(dbTx, batchData1)
batchHash := batchData1.Hash().Hex()
assert.NoError(t, err)
assert.NoError(t, ormBlock.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
blockTrace.Header.Number.Uint64(),
blockTrace.Header.Number.Uint64() + 1}, batchID))
assert.NoError(t, ormBlock.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{
batchData1.Batch.Blocks[0].BlockNumber}, batchHash))
assert.NoError(t, dbTx.Commit())
assert.NoError(t, ormBatch.UpdateProvingStatus(batchID, orm.ProvingTaskAssigned))
assert.NoError(t, ormBatch.UpdateProvingStatus(batchHash, types.ProvingTaskAssigned))

// empty
ids, err := ormBatch.GetAssignedBatchIDs()
hashes, err := ormBatch.GetAssignedBatchHashes()
assert.NoError(t, err)
assert.Equal(t, 1, len(ids))
sessionInfos, err := ormSession.GetSessionInfosByIDs(ids)
assert.Equal(t, 1, len(hashes))
sessionInfos, err := ormSession.GetSessionInfosByHashes(hashes)
assert.NoError(t, err)
assert.Equal(t, 0, len(sessionInfos))

sessionInfo := orm.SessionInfo{
ID: batchID,
Rollers: map[string]*orm.RollerStatus{
sessionInfo := types.SessionInfo{
ID: batchHash,
Rollers: map[string]*types.RollerStatus{
"0": {
PublicKey: "0",
Name: "roller-0",
Status: orm.RollerAssigned,
Status: types.RollerAssigned,
},
},
StartTimestamp: time.Now().Unix()}

// insert
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
sessionInfos, err = ormSession.GetSessionInfosByIDs(ids)
sessionInfos, err = ormSession.GetSessionInfosByHashes(hashes)
assert.NoError(t, err)
assert.Equal(t, 1, len(sessionInfos))
assert.Equal(t, sessionInfo, *sessionInfos[0])

// update
sessionInfo.Rollers["0"].Status = orm.RollerProofValid
sessionInfo.Rollers["0"].Status = types.RollerProofValid
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
sessionInfos, err = ormSession.GetSessionInfosByIDs(ids)
sessionInfos, err = ormSession.GetSessionInfosByHashes(hashes)
assert.NoError(t, err)
assert.Equal(t, 1, len(sessionInfos))
assert.Equal(t, sessionInfo, *sessionInfos[0])

// delete
assert.NoError(t, ormBatch.UpdateProvingStatus(batchID, orm.ProvingTaskVerified))
ids, err = ormBatch.GetAssignedBatchIDs()
assert.NoError(t, ormBatch.UpdateProvingStatus(batchHash, types.ProvingTaskVerified))
hashes, err = ormBatch.GetAssignedBatchHashes()
assert.NoError(t, err)
assert.Equal(t, 0, len(ids))
sessionInfos, err = ormSession.GetSessionInfosByIDs(ids)
assert.Equal(t, 0, len(hashes))
sessionInfos, err = ormSession.GetSessionInfosByHashes(hashes)
assert.NoError(t, err)
assert.Equal(t, 0, len(sessionInfos))
}

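testOrmSessionInfo above shows that the session record is now keyed by the batch hash as well. A fragment building one such record, reusing the imports of the test file; field and constant names are taken from the hunk, everything else is illustrative.

session := types.SessionInfo{
	ID: batchHash, // the batch hash now doubles as the session key
	Rollers: map[string]*types.RollerStatus{
		"0": {
			PublicKey: "0",
			Name:      "roller-0",
			Status:    types.RollerAssigned, // later flipped to types.RollerProofValid
		},
	},
	StartTimestamp: time.Now().Unix(),
}
// ormSession.SetSessionInfo(&session) persists it; calling SetSessionInfo
// again with the same ID overwrites, which appears to be how the "update"
// step in the test works.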
22
go.work.sum
@@ -27,6 +27,7 @@ github.com/ClickHouse/clickhouse-go/v2 v2.2.0 h1:dj00TDKY+xwuTJdbpspCSmTLFyWzRJe
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
@@ -55,6 +56,7 @@ github.com/aws/smithy-go v1.1.0 h1:D6CSsM3gdxaGaqXnPgOBCeL6Mophqzu7KJOu7zW78sU=
github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
@@ -83,10 +85,14 @@ github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJ
github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8=
github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk=
github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk=
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM=
github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ=
github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572 h1:+R8G1+Ftumd0DaveLgMIjrFPcAS4G8MsVXWXiyZL5BY=
@@ -136,6 +142,7 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
@@ -151,6 +158,7 @@ github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv
github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732/go.mod h1:o/XfIXWi4/GqbQirfRm5uTbXMG5NpqxkxblnbZ+QM9I=
github.com/getkin/kin-openapi v0.61.0 h1:6awGqF5nG5zkVpMsAih1QH4VgzS8phTxECUWIFo7zko=
github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c=
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
@@ -216,6 +224,7 @@ github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw=
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150 h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=
@@ -278,6 +287,7 @@ github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTjegPdCiOxVOJtRIy7/8RldvMTsyPYH10=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
@@ -294,6 +304,7 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEHAFYJcPTf0wY5q0exFNJZVWa1U=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
@@ -327,17 +338,25 @@ github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM
github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5 h1:tFwafIEMf0B7NlcxV/zJ6leBIa81D3hgGSgsE5hCkOQ=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
@@ -408,6 +427,7 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
@@ -473,6 +493,7 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -485,7 +506,6 @@ gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3M
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o=
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=

@@ -70,6 +70,7 @@ github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46f
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=

@@ -1,6 +1,5 @@
//go:build !mock_prover

//nolint:typecheck
package prover

/*

@@ -3,7 +3,7 @@ module scroll-tech/integration-test
go 1.18

require (
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6
github.com/stretchr/testify v1.8.0
)

@@ -11,13 +11,11 @@ require (
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/ethereum/go-ethereum v1.10.26 // indirect
github.com/ethereum/go-ethereum v1.11.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect

@@ -103,8 +103,7 @@ github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaB
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/ethereum/go-ethereum v1.11.1 h1:EMymmWFzpS7G9l9NvVN8G73cgdUIqDPNRf2YTSGBXlk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
@@ -128,8 +127,8 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -227,8 +226,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -282,7 +280,6 @@ github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHu
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -303,12 +300,10 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6 h1:2kXWJR+mOj09HBh5sUTb4L/OURPSXoQd1NC/10v7otM=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=