From 86175a04c37c0a99fc56f79784baef843e65b38e Mon Sep 17 00:00:00 2001 From: colin <102356659+colinlyguo@users.noreply.github.com> Date: Wed, 28 Jun 2023 10:12:16 +0800 Subject: [PATCH] feat(bridge): upgrade bridge to rollup v2 (#530) Co-authored-by: Richard Zhang Co-authored-by: georgehao Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com> Co-authored-by: HAOYUatHZ --- bridge/Makefile | 4 +- bridge/abi/bridge_abi.go | 42 +- bridge/abi/bridge_abi_test.go | 52 +- bridge/cmd/msg_relayer/app/app.go | 16 - bridge/cmd/rollup_relayer/app/app.go | 20 +- bridge/conf/config.json | 25 +- bridge/go.mod | 4 +- bridge/go.sum | 6 +- bridge/internal/config/db.go | 22 - bridge/internal/config/l2.go | 43 +- .../internal/controller/relayer/l2_relayer.go | 418 ++++++---------- .../controller/relayer/l2_relayer_test.go | 441 ++++------------- .../controller/relayer/relayer_test.go | 65 +-- .../controller/watcher/batch_proposer.go | 467 +++++------------- .../controller/watcher/batch_proposer_test.go | 236 ++------- .../controller/watcher/chunk_proposer.go | 169 +++++++ .../controller/watcher/chunk_proposer_test.go | 45 ++ .../internal/controller/watcher/l1_watcher.go | 12 +- .../controller/watcher/l1_watcher_test.go | 20 +- .../internal/controller/watcher/l2_watcher.go | 101 ++-- .../controller/watcher/l2_watcher_test.go | 6 +- .../controller/watcher/watcher_test.go | 9 +- bridge/internal/orm/batch.go | 378 ++++++++++++++ bridge/internal/orm/block_batch.go | 314 ------------ bridge/internal/orm/block_trace.go | 155 ------ bridge/internal/orm/chunk.go | 218 ++++++++ bridge/internal/orm/common.go | 38 -- bridge/internal/orm/l2_block.go | 208 ++++++++ bridge/internal/orm/migrate/migrate_test.go | 2 +- .../migrate/migrations/00001_block_trace.sql | 38 -- ...02_l1_message.sql => 00001_l1_message.sql} | 13 - ...03_l2_message.sql => 00002_l2_message.sql} | 13 - ...{00005_l1_block.sql => 00003_l1_block.sql} | 0 .../migrate/migrations/00004_block_batch.sql | 49 -- 
.../orm/migrate/migrations/00004_l2_block.sql | 32 ++ .../orm/migrate/migrations/00005_chunk.sql | 54 ++ .../orm/migrate/migrations/00006_batch.sql | 58 +++ bridge/internal/orm/orm_test.go | 281 +++++++++++ bridge/internal/types/batch.go | 236 --------- .../internal}/types/batch_header.go | 44 +- .../internal}/types/batch_header_test.go | 37 +- bridge/internal/types/batch_test.go | 90 ---- bridge/internal/types/block.go | 125 ++++- {common => bridge/internal}/types/chunk.go | 25 +- .../internal}/types/chunk_test.go | 29 +- bridge/mock_bridge/MockBridgeL1.sol | 317 +++++------- bridge/mock_bridge/MockBridgeL2.sol | 18 - bridge/tests/bridge_test.go | 10 +- bridge/tests/gas_oracle_test.go | 47 +- bridge/tests/l2_message_relay_test.go | 190 ------- bridge/tests/process_start_test.go | 58 +++ bridge/tests/rollup_test.go | 104 ++-- common/go.mod | 2 +- common/go.sum | 4 +- common/version/version.go | 2 +- .../src/L1/gateways/EnforcedTxGateway.sol | 2 +- coordinator/go.mod | 2 +- coordinator/go.sum | 4 +- coordinator/manager_test.go | 3 + database/go.mod | 2 +- database/go.sum | 4 +- go.work.sum | 132 +++-- roller/go.mod | 2 +- roller/go.sum | 4 +- tests/integration-test/go.mod | 2 +- tests/integration-test/go.sum | 58 ++- tests/integration-test/integration_test.go | 50 +- 67 files changed, 2673 insertions(+), 3004 deletions(-) create mode 100644 bridge/internal/controller/watcher/chunk_proposer.go create mode 100644 bridge/internal/controller/watcher/chunk_proposer_test.go create mode 100644 bridge/internal/orm/batch.go delete mode 100644 bridge/internal/orm/block_batch.go delete mode 100644 bridge/internal/orm/block_trace.go create mode 100644 bridge/internal/orm/chunk.go delete mode 100644 bridge/internal/orm/common.go create mode 100644 bridge/internal/orm/l2_block.go delete mode 100644 bridge/internal/orm/migrate/migrations/00001_block_trace.sql rename bridge/internal/orm/migrate/migrations/{00002_l1_message.sql => 00001_l1_message.sql} (79%) rename 
bridge/internal/orm/migrate/migrations/{00003_l2_message.sql => 00002_l2_message.sql} (79%) rename bridge/internal/orm/migrate/migrations/{00005_l1_block.sql => 00003_l1_block.sql} (100%) delete mode 100644 bridge/internal/orm/migrate/migrations/00004_block_batch.sql create mode 100644 bridge/internal/orm/migrate/migrations/00004_l2_block.sql create mode 100644 bridge/internal/orm/migrate/migrations/00005_chunk.sql create mode 100644 bridge/internal/orm/migrate/migrations/00006_batch.sql create mode 100644 bridge/internal/orm/orm_test.go delete mode 100644 bridge/internal/types/batch.go rename {common => bridge/internal}/types/batch_header.go (70%) rename {common => bridge/internal}/types/batch_header_test.go (85%) delete mode 100644 bridge/internal/types/batch_test.go rename {common => bridge/internal}/types/chunk.go (85%) rename {common => bridge/internal}/types/chunk_test.go (82%) delete mode 100644 bridge/tests/l2_message_relay_test.go create mode 100644 bridge/tests/process_start_test.go diff --git a/bridge/Makefile b/bridge/Makefile index ba47c656d..b0d4d4e19 100644 --- a/bridge/Makefile +++ b/bridge/Makefile @@ -5,8 +5,8 @@ IMAGE_VERSION=latest REPO_ROOT_DIR=./.. mock_abi: - go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go - go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go + cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./bridge/mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out ./bridge/mock_bridge/MockBridgeL1.go + cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./bridge/mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out ./bridge/mock_bridge/MockBridgeL2.go bridge_bins: ## Builds the Bridge bins. 
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/ diff --git a/bridge/abi/bridge_abi.go b/bridge/abi/bridge_abi.go index 4bbc50895..92864b459 100644 --- a/bridge/abi/bridge_abi.go +++ b/bridge/abi/bridge_abi.go @@ -36,10 +36,10 @@ var ( // L1CommitBatchEventSignature = keccak256("CommitBatch(bytes32)") L1CommitBatchEventSignature common.Hash - // L1FinalizeBatchEventSignature = keccak256("FinalizeBatch(bytes32)") + // L1FinalizeBatchEventSignature = keccak256("FinalizeBatch(bytes32,bytes32,bytes32)") L1FinalizeBatchEventSignature common.Hash - // L1QueueTransactionEventSignature = keccak256("QueueTransaction(address,address,uint256,uint256,uint256,bytes)") + // L1QueueTransactionEventSignature = keccak256("QueueTransaction(address,address,uint256,uint64,uint256,bytes)") L1QueueTransactionEventSignature common.Hash // L2SentMessageEventSignature = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)") @@ -89,47 +89,47 @@ func init() { // ScrollChainMetaData contains all meta data concerning the ScrollChain contract. 
var ScrollChainMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"CommitBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"FinalizeBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"RevertBatch\",\"type\":\"event\"},{\"inputs\":[{\"components\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"parentHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"blockNumber\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"timestamp\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint64\",\"name\":\"gasLimit\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"numTransactions\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"numL1Messages\",\"type\":\"uint16\"}],\"internalType\":\"structIScrollChain.BlockContext[]\",\"name\":\"blocks\",\"type\":\"tuple[]\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawTrieRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"batchIndex\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"parentBatchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"l2Transactions\",\"type\":\"bytes\"}],\"internalType\":\"structIScrollChain.Batch\",\"name\":\"batch\",\"type\":\"tuple\"}],\"name\":\"commitBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"c
omponents\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"parentHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"blockNumber\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"timestamp\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint64\",\"name\":\"gasLimit\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"numTransactions\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"numL1Messages\",\"type\":\"uint16\"}],\"internalType\":\"structIScrollChain.BlockContext[]\",\"name\":\"blocks\",\"type\":\"tuple[]\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawTrieRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"batchIndex\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"parentBatchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"l2Transactions\",\"type\":\"bytes\"}],\"internalType\":\"structIScrollChain.Batch[]\",\"name\":\"batches\",\"type\":\"tuple[]\"}],\"name\":\"commitBatches\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256[]\",\"name\":\"proof\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256[]\",\"name\":\"instances\",\"type\":\"uint256[]\"}],\"name\":\"finalizeBatchWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"getL2MessageRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalT
ype\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"isBatchFinalized\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchId\",\"type\":\"bytes32\"}],\"name\":\"revertBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + ABI: "[{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_chainId\",\"type\":\"uint32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"CommitBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"}],\"name\":\"FinalizeBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"RevertBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldMaxNumL2TxInChunk\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newMaxNumL2TxInChunk\",\"type\":\"uint256\"}],\"name\":\"UpdateMaxNumL2TxInChunk\",\"type\":\"e
vent\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"status\",\"type\":\"bool\"}],\"name\":\"UpdateSequencer\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"oldVerifier\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newVerifier\",\"type\":\"address\"}],\"name\":\"UpdateVerifier\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"_version\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_parentBatchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes[]\",\"name\":\"_chunks\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"_skippedL1MessageBitmap\",\"type\":\"bytes\"}],\"name\":\"commitBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"committedBatches\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"_prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_withdrawRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"_aggrProof\",\"type\":\"bytes\"}],\"name\":\"finalizeBatchWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"finalizedStateRoots\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\
":\"_batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"_stateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_withdrawRoot\",\"type\":\"bytes32\"}],\"name\":\"importGenesisBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_messageQueue\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_verifier\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_maxNumL2TxInChunk\",\"type\":\"uint256\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_batchIndex\",\"type\":\"uint256\"}],\"name\":\"isBatchFinalized\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"isSequencer\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastFinalizedBatchIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"layer2ChainId\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maxNumL2TxInChunk\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"messageQueue\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},
{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_count\",\"type\":\"uint256\"}],\"name\":\"revertBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_maxNumL2TxInChunk\",\"type\":\"uint256\"}],\"name\":\"updateMaxNumL2TxInChunk\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_account\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"_status\",\"type\":\"bool\"}],\"name\":\"updateSequencer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newVerifier\",\"type\":\"address\"}],\"name\":\"updateVerifier\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"verifier\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"withdrawRoots\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]\n", } // L1ScrollMessengerMetaData contains all meta data concerning the L1ScrollMessenger contract. 
var L1ScrollMessengerMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"FailedRelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"RelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"SentMessage\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"merkleProof\",\"type\":\"bytes\"}],\"internalType\":\"structIL1ScrollMessenger.L2MessageProof\",\"name\":\"proof\",\"type\":\"tuple\"}],\"name\":\"relayMessageWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"in
ternalType\":\"uint256\",\"name\":\"queueIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"oldGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"newGasLimit\",\"type\":\"uint32\"}],\"name\":\"replayMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"sendMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"xDomainMessageSender\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"FailedRelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"RelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"inte
rnalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"SentMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldFeeVault\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newFeeVault\",\"type\":\"address\"}],\"name\":\"UpdateFeeVault\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"counterpart\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"feeVault\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_counterpart\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_feeVault\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_rollup\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_messageQueue\",\"type\":\"address\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"isL1MessageRelayed\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},
{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"isL1MessageSent\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"isL2MessageExecuted\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"messageQueue\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"merkleProof\",\"type\":\"bytes\"}],\"internalType\":\"struct 
IL1ScrollMessenger.L2MessageProof\",\"name\":\"_proof\",\"type\":\"tuple\"}],\"name\":\"relayMessageWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_queueIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"_newGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"_refundAddress\",\"type\":\"address\"}],\"name\":\"replayMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"rollup\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_refundAddress\",\"type\":\"address\"}],\"name\":\"sendMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"sendMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"
bool\",\"name\":\"_status\",\"type\":\"bool\"}],\"name\":\"setPause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newFeeVault\",\"type\":\"address\"}],\"name\":\"updateFeeVault\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"xDomainMessageSender\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]\n", } // L1MessageQueueMetaData contains all meta data concerning the L1MessageQueue contract. var L1MessageQueueMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"queueIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"QueueTransaction\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"appendCrossDomainMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"ta
rget\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"appendEnforcedTransaction\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"queueIndex\",\"type\":\"uint256\"}],\"name\":\"getCrossDomainMessage\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextCrossDomainMessageIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + ABI: 
"[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"count\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"skippedBitmap\",\"type\":\"uint256\"}],\"name\":\"DequeueTransaction\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"queueIndex\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"QueueTransaction\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldGateway\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newGateway\",\"type\":\"address\"}],\"name\":\"UpdateEnforcedTxGateway\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldGasOracle\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newGasOracle\",\"type\":\"address\"}],\"name\":\"UpdateGasOracle\",\"type\":\"event\"},{\"anonymous\"
:false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_oldMaxGasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\"}],\"name\":\"UpdateMaxGasLimit\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"appendCrossDomainMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"appendEnforcedTransaction\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_calldata\",\"type\":\"bytes\"}],\"name\":\"calculateIntrinsicGasFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_queueIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"computeTransactionHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"enfor
cedTxGateway\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"gasOracle\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_queueIndex\",\"type\":\"uint256\"}],\"name\":\"getCrossDomainMessage\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_messenger\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_scrollChain\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_enforcedTxGateway\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_gasOracle\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_maxGasLimit\",\"type\":\"uint256\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maxGasLimit\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"messageQueue\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"messenger\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextCrossDomainMessageIndex\",\"outputs\":[{\"intern
alType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pendingQueueIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_count\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_skippedBitmap\",\"type\":\"uint256\"}],\"name\":\"popCrossDomainMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scrollChain\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newGateway\",\"type\":\"address\"}],\"name\":\"updateEnforcedTxGateway\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newGasOracle\",\"type\":\"address\"}],\"name\":\"updateGasOracle\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\"}],\"name\":\"updateMaxGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", } // L2GasPriceOracleMetaData contains all meta data concerning the L2GasPriceOracle contract. 
var L2GasPriceOracleMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"L2BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldWhitelist\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"UpdateWhitelist\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l2BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL2BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{
\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"updateWhitelist\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"whitelist\",\"outputs\":[{\"internalType\":\"contract IWhitelist\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"txGas\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"txGasContractCreation\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"zeroGas\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nonZeroGas\",\"type\":\"uint256\"}],\"name\":\"IntrinsicParamsUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"L2BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldWhitelist\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"UpdateWhitelist\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_mes
sage\",\"type\":\"bytes\"}],\"name\":\"calculateIntrinsicGasFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"_txGas\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"_txGasContractCreation\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"_zeroGas\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"_nonZeroGas\",\"type\":\"uint64\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"intrinsicParams\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"txGas\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"txGasContractCreation\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"zeroGas\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"nonZeroGas\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l2BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"_txGas\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"_txGasContractCreation\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"_zeroGas\",\"type\":\"uint64\"},{\"internalType\":\
"uint64\",\"name\":\"_nonZeroGas\",\"type\":\"uint64\"}],\"name\":\"setIntrinsicParams\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL2BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"updateWhitelist\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"whitelist\",\"outputs\":[{\"internalType\":\"contract IWhitelist\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]\n", } // L2ScrollMessengerMetaData contains all meta data concerning the L2ScrollMessenger contract. 
var L2ScrollMessengerMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"FailedRelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"RelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"SentMessage\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"relayMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"stateRoot
Proof\",\"type\":\"bytes\"}],\"internalType\":\"structIL2ScrollMessenger.L1MessageProof\",\"name\":\"proof\",\"type\":\"tuple\"}],\"name\":\"retryMessageWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"sendMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"xDomainMessageSender\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_blockContainer\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_gasOracle\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_messageQueue\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"FailedRelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes
32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"RelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"SentMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldFeeVault\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newFeeVault\",\"type\":\"address\"}],\"name\":\"UpdateFeeVault\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"maxFailedExecutionTimes\",\"type\":\"uint256\"}],\"name\":\"UpdateMaxFailedExecutionTimes\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"blockContainer\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"counterpart\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"feeVault\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"gasOracle\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}]
,\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_counterpart\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_feeVault\",\"type\":\"address\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"isL1MessageExecuted\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"isL2MessageSent\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"l1MessageFailedTimes\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maxFailedExecutionTimes\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"messageQueue\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_nonce\",\"ty
pe\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"}],\"name\":\"relayMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"stateRootProof\",\"type\":\"bytes\"}],\"internalType\":\"struct IL2ScrollMessenger.L1MessageProof\",\"name\":\"_proof\",\"type\":\"tuple\"}],\"name\":\"retryMessageWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"sendMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"sendMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"_status\",\"type\":\"bool\"}],\"name\":
\"setPause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newFeeVault\",\"type\":\"address\"}],\"name\":\"updateFeeVault\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_maxFailedExecutionTimes\",\"type\":\"uint256\"}],\"name\":\"updateMaxFailedExecutionTimes\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_msgHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"_proof\",\"type\":\"bytes\"}],\"name\":\"verifyMessageExecutionStatus\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_msgHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"_proof\",\"type\":\"bytes\"}],\"name\":\"verifyMessageInclusionStatus\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"xDomainMessageSender\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]\n", } // L1BlockContainerMetaData contains all meta data concerning the L1BlockContainer contract. 
var L1BlockContainerMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockHeight\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockTimestamp\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"}],\"name\":\"ImportBlock\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"}],\"name\":\"getBlockTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"}],\"name\":\"getStateRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"blockHeaderRLP\",\"type\":\"bytes\"},{\"internalType\":\"bool\",\"name\":\"updateGasPriceOracle\",\"type\":\"bool\"}],\"name\":\"importBlockHeader\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestBaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestBlockHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestBlockNumber\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\
"inputs\":[],\"name\":\"latestBlockTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockHeight\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockTimestamp\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"}],\"name\":\"ImportBlock\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_oldOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldWhitelist\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"UpdateWhitelist\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_blockHash\",\"type\":\"bytes32\"}],\"name\":\"getBlockTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_blockHash\",\"type\":\"bytes32\"}],\"name\":\"getStateRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_blockHash
\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"_blockHeaderRLP\",\"type\":\"bytes\"},{\"internalType\":\"bool\",\"name\":\"_updateGasPriceOracle\",\"type\":\"bool\"}],\"name\":\"importBlockHeader\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_startBlockHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"_startBlockHeight\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"_startBlockTimestamp\",\"type\":\"uint64\"},{\"internalType\":\"uint128\",\"name\":\"_startBlockBaseFee\",\"type\":\"uint128\"},{\"internalType\":\"bytes32\",\"name\":\"_startStateRoot\",\"type\":\"bytes32\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestBaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestBlockHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestBlockNumber\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestBlockTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"metadata\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"height\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"timestamp\",\"type\":\"uint64\"},{\"internalType\":\"uint128\",\"name\":\"baseFee\",\"type\":\"uint128\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type
\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"stateRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"updateWhitelist\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"whitelist\",\"outputs\":[{\"internalType\":\"contract IWhitelist\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]\n", } // L2MessageQueueMetaData contains all meta data concerning the L2MessageQueue contract. 
var L2MessageQueueMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"AppendMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_oldOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_messageHash\",\"type\":\"bytes32\"}],\"name\":\"appendMessage\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"branches\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"messageRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"messenger\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextMessageIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view
\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_messenger\",\"type\":\"address\"}],\"name\":\"updateMessenger\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"AppendMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_oldOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_messageHash\",\"type\":\"bytes32\"}],\"name\":\"appendMessage\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"branches\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"messageRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}
,{\"inputs\":[],\"name\":\"messenger\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextMessageIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_messenger\",\"type\":\"address\"}],\"name\":\"updateMessenger\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]\n", } // L1GasPriceOracleMetaData contains all meta data concerning the L1GasPriceOracle contract. 
var L1GasPriceOracleMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_oldOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1Fee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\"
:\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_overhead\",\"type\":\"uint256\"}],\"name\":\"setOverhead\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_scalar\",\"type\":\"uint256\"}],\"name\":\"setScalar\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_oldOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldWhitelist\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"UpdateWhitelist\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1Fee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\
":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_overhead\",\"type\":\"uint256\"}],\"name\":\"setOverhead\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_scalar\",\"type\":\"uint256\"}],\"name\":\"setScalar\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"updateWhitelist\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"whitelist\",\"outputs\":[{\"internalType\":\"contract IWhitelist\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]\n", } // IL1ScrollMessengerL2MessageProof is an auto generated low-level Go binding around an user-defined struct. type IL1ScrollMessengerL2MessageProof struct { - BatchHash common.Hash + BatchIndex *big.Int MerkleProof []byte } @@ -163,7 +163,9 @@ type L1CommitBatchEvent struct { // L1FinalizeBatchEvent represents a FinalizeBatch event raised by the ScrollChain contract. 
type L1FinalizeBatchEvent struct { - BatchHash common.Hash + BatchHash common.Hash + StateRoot common.Hash + WithdrawRoot common.Hash } // L1RevertBatchEvent represents a RevertBatch event raised by the ScrollChain contract. @@ -176,7 +178,7 @@ type L1QueueTransactionEvent struct { Sender common.Address Target common.Address Value *big.Int - QueueIndex *big.Int + QueueIndex uint64 GasLimit *big.Int Data []byte } @@ -207,15 +209,6 @@ type L2AppendMessageEvent struct { MessageHash common.Hash } -// L2ImportBlockEvent represents a ImportBlock event raised by the L1BlockContainer contract. -type L2ImportBlockEvent struct { - BlockHash common.Hash - BlockHeight *big.Int - BlockTimestamp *big.Int - BaseFee *big.Int - StateRoot common.Hash -} - // L2SentMessageEvent represents a SentMessage event raised by the L2ScrollMessenger contract. type L2SentMessageEvent struct { Sender common.Address @@ -235,8 +228,3 @@ type L2FailedRelayedMessageEvent struct { type L2RelayedMessageEvent struct { MessageHash common.Hash } - -// GetBatchCalldataLength gets the calldata bytelen of IScrollChainBatch. 
-func GetBatchCalldataLength(batch *IScrollChainBatch) uint64 { - return uint64(5*32 + len(batch.L2Transactions) + len(batch.Blocks)*8*32) -} diff --git a/bridge/abi/bridge_abi_test.go b/bridge/abi/bridge_abi_test.go index bef3bdb48..b12290068 100644 --- a/bridge/abi/bridge_abi_test.go +++ b/bridge/abi/bridge_abi_test.go @@ -16,9 +16,9 @@ func TestEventSignature(t *testing.T) { assert.Equal(L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f")) assert.Equal(L1CommitBatchEventSignature, common.HexToHash("2cdc615c74452778c0fb6184735e014c13aad2b62774fe0b09bd1dcc2cc14a62")) - assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("6be443154c959a7a1645b4392b6fa97d8e8ab6e8fd853d7085e8867083737d79")) + assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("9d3058a3cb9739a2527f22dd9a4138065844037d3004254952e2458d808cc364")) - assert.Equal(L1QueueTransactionEventSignature, common.HexToHash("bdcc7517f8fe3db6506dfd910942d0bbecaf3d6a506dadea65b0d988e75b9439")) + assert.Equal(L1QueueTransactionEventSignature, common.HexToHash("69cfcb8e6d4192b8aba9902243912587f37e550d75c1fa801491fce26717f37e")) assert.Equal(L2SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e")) assert.Equal(L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c")) @@ -35,10 +35,10 @@ func TestPackRelayL2MessageWithProof(t *testing.T) { assert.NoError(err) proof := IL1ScrollMessengerL2MessageProof{ - BatchHash: common.Hash{}, - MerkleProof: make([]byte, 0), + BatchIndex: big.NewInt(0), + MerkleProof: []byte{}, } - _, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0), proof) + _, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), []byte{}, proof) assert.NoError(err) } 
@@ -48,27 +48,12 @@ func TestPackCommitBatch(t *testing.T) { scrollChainABI, err := ScrollChainMetaData.GetAbi() assert.NoError(err) - header := IScrollChainBlockContext{ - BlockHash: common.Hash{}, - ParentHash: common.Hash{}, - BlockNumber: 0, - Timestamp: 0, - BaseFee: big.NewInt(0), - GasLimit: 0, - NumTransactions: 0, - NumL1Messages: 0, - } + version := uint8(1) + var parentBatchHeader []byte + var chunks [][]byte + var skippedL1MessageBitmap []byte - batch := IScrollChainBatch{ - Blocks: []IScrollChainBlockContext{header}, - PrevStateRoot: common.Hash{}, - NewStateRoot: common.Hash{}, - WithdrawTrieRoot: common.Hash{}, - BatchIndex: 0, - L2Transactions: make([]byte, 0), - } - - _, err = scrollChainABI.Pack("commitBatch", batch) + _, err = scrollChainABI.Pack("commitBatch", version, parentBatchHeader, chunks, skippedL1MessageBitmap) assert.NoError(err) } @@ -78,14 +63,13 @@ func TestPackFinalizeBatchWithProof(t *testing.T) { l1RollupABI, err := ScrollChainMetaData.GetAbi() assert.NoError(err) - proof := make([]*big.Int, 10) - instance := make([]*big.Int, 10) - for i := 0; i < 10; i++ { - proof[i] = big.NewInt(0) - instance[i] = big.NewInt(0) - } + batchHeader := []byte{} + prevStateRoot := common.Hash{} + postStateRoot := common.Hash{} + withdrawRoot := common.Hash{} + aggrProof := []byte{} - _, err = l1RollupABI.Pack("finalizeBatchWithProof", common.Hash{}, proof, instance) + _, err = l1RollupABI.Pack("finalizeBatchWithProof", batchHeader, prevStateRoot, postStateRoot, withdrawRoot, aggrProof) assert.NoError(err) } @@ -95,7 +79,7 @@ func TestPackRelayL1Message(t *testing.T) { l2MessengerABI, err := L2ScrollMessengerMetaData.GetAbi() assert.NoError(err) - _, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0)) + _, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), []byte{}) assert.NoError(err) } @@ -126,6 +110,6 @@ func 
TestPackImportBlock(t *testing.T) { l1BlockContainerABI := L1BlockContainerABI - _, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, make([]byte, 0), false) + _, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, []byte{}, false) assert.NoError(err) } diff --git a/bridge/cmd/msg_relayer/app/app.go b/bridge/cmd/msg_relayer/app/app.go index 962f6445c..6c8f9fdf9 100644 --- a/bridge/cmd/msg_relayer/app/app.go +++ b/bridge/cmd/msg_relayer/app/app.go @@ -7,7 +7,6 @@ import ( "os/signal" "time" - "github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/log" "github.com/urfave/cli/v2" @@ -63,30 +62,15 @@ func action(ctx *cli.Context) error { // Start metrics server. metrics.Serve(subCtx, ctx) - // Init l2geth connection - l2client, err := ethclient.Dial(cfg.L2Config.Endpoint) - if err != nil { - log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err) - return err - } - l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig) if err != nil { log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err) return err } - l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig) - if err != nil { - log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err) - return err - } // Start l1relayer process go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents) - // Start l2relayer process - go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessSavedEvents) - // Finish start all message relayer functions log.Info("Start message-relayer successfully") diff --git a/bridge/cmd/rollup_relayer/app/app.go b/bridge/cmd/rollup_relayer/app/app.go index 62e148d70..82392e967 100644 --- a/bridge/cmd/rollup_relayer/app/app.go +++ b/bridge/cmd/rollup_relayer/app/app.go @@ -76,7 +76,13 @@ func action(ctx *cli.Context) error { return err } - batchProposer := watcher.NewBatchProposer(subCtx, 
cfg.L2Config.BatchProposerConfig, l2relayer, db) + chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db) + if err != nil { + log.Error("failed to create chunkProposer", "config file", cfgFile, "error", err) + return err + } + + batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db) if err != nil { log.Error("failed to create batchProposer", "config file", cfgFile, "error", err) return err @@ -91,14 +97,14 @@ func action(ctx *cli.Context) error { log.Error("failed to get block number", "err", loopErr) return } - l2watcher.TryFetchRunningMissingBlocks(ctx, number) + l2watcher.TryFetchRunningMissingBlocks(number) }) - // Batch proposer loop - go cutils.Loop(subCtx, 2*time.Second, func() { - batchProposer.TryProposeBatch() - batchProposer.TryCommitBatches() - }) + go cutils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk) + + go cutils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch) + + go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches) go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches) diff --git a/bridge/conf/config.json b/bridge/conf/config.json index 59d8315cf..02e228848 100644 --- a/bridge/conf/config.json +++ b/bridge/conf/config.json @@ -72,19 +72,20 @@ "1414141414141414141414141414141414141414141414141414141414141414" ] }, + "chunk_proposer_config": { + "max_tx_gas_per_chunk": 1123456, + "max_l2_tx_num_per_chunk": 1123, + "max_l1_commit_gas_per_chunk": 11234567, + "max_l1_commit_calldata_size_per_chunk": 112345, + "min_l1_commit_calldata_size_per_chunk": 11234, + "chunk_timeout_sec": 300 + }, "batch_proposer_config": { - "proof_generation_freq": 1, - "batch_gas_threshold": 3000000, - "batch_tx_num_threshold": 44, - "batch_time_sec": 300, - "batch_commit_time_sec": 1200, - "batch_blocks_limit": 100, - "commit_tx_calldata_size_limit": 200000, - "commit_tx_batch_count_limit": 30, - "public_input_config": { - "max_tx_num": 44, - 
"padding_tx_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" - } + "max_chunk_num_per_batch": 112, + "max_l1_commit_gas_per_batch": 11234567, + "max_l1_commit_calldata_size_per_batch": 112345, + "min_chunk_num_per_batch": 11, + "batch_timeout_sec": 300 } }, "db_config": { diff --git a/bridge/go.mod b/bridge/go.mod index a38c2b477..554e6406e 100644 --- a/bridge/go.mod +++ b/bridge/go.mod @@ -9,14 +9,13 @@ require ( github.com/orcaman/concurrent-map v1.0.0 github.com/orcaman/concurrent-map/v2 v2.0.1 github.com/pressly/goose/v3 v3.7.0 - github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433 + github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 github.com/smartystreets/goconvey v1.8.0 github.com/stretchr/testify v1.8.2 github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa golang.org/x/sync v0.1.0 gorm.io/driver/postgres v1.5.0 gorm.io/gorm v1.25.1 - modernc.org/mathutil v1.4.1 ) require ( @@ -48,7 +47,6 @@ require ( github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-sqlite3 v1.14.14 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/rjeczalik/notify v0.9.1 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect diff --git a/bridge/go.sum b/bridge/go.sum index 5db727780..c1684a7dc 100644 --- a/bridge/go.sum +++ b/bridge/go.sum @@ -117,7 +117,6 @@ github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod 
h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -130,8 +129,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433 h1:EIR8gXpdNwHnlUlA2giFp+EoRqHGtpINLjJvo31IGM4= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY= +github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI= +github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY= github.com/scroll-tech/zktrie v0.5.3 h1:jjzQchGU6XPL5s1C5bwwivSadefSRuYASE9OL7UKAdE= github.com/scroll-tech/zktrie v0.5.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= @@ -246,7 +245,6 @@ modernc.org/cc/v3 v3.36.1 h1:CICrjwr/1M4+6OQ4HJZ/AHxjcwe67r5vPUF518MkO8A= modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs= modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo= modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8= -modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU= modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= 
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8= diff --git a/bridge/internal/config/db.go b/bridge/internal/config/db.go index af116d172..444cd5a11 100644 --- a/bridge/internal/config/db.go +++ b/bridge/internal/config/db.go @@ -1,11 +1,5 @@ package config -import ( - "encoding/json" - "os" - "path/filepath" -) - // DBConfig db config type DBConfig struct { // data source name @@ -15,19 +9,3 @@ type DBConfig struct { MaxOpenNum int `json:"maxOpenNum"` MaxIdleNum int `json:"maxIdleNum"` } - -// NewDBConfig returns a new instance of Config. -func NewDBConfig(file string) (*DBConfig, error) { - buf, err := os.ReadFile(filepath.Clean(file)) - if err != nil { - return nil, err - } - - cfg := &DBConfig{} - err = json.Unmarshal(buf, cfg) - if err != nil { - return nil, err - } - - return cfg, nil -} diff --git a/bridge/internal/config/l2.go b/bridge/internal/config/l2.go index 05c7ef965..0f0195c4e 100644 --- a/bridge/internal/config/l2.go +++ b/bridge/internal/config/l2.go @@ -4,8 +4,6 @@ import ( "github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/common" - - "scroll-tech/bridge/internal/types" ) // L2Config loads l2geth configuration items. @@ -22,30 +20,27 @@ type L2Config struct { WithdrawTrieRootSlot common.Hash `json:"withdraw_trie_root_slot,omitempty"` // The relayer config RelayerConfig *RelayerConfig `json:"relayer_config"` + // The chunk_proposer config + ChunkProposerConfig *ChunkProposerConfig `json:"chunk_proposer_config"` // The batch_proposer config BatchProposerConfig *BatchProposerConfig `json:"batch_proposer_config"` } -// BatchProposerConfig loads l2watcher batch_proposer configuration items. 
-type BatchProposerConfig struct { - // Proof generation frequency, generating proof every k blocks - ProofGenerationFreq uint64 `json:"proof_generation_freq"` - // Txnum threshold in a batch - BatchTxNumThreshold uint64 `json:"batch_tx_num_threshold"` - // Gas threshold in a batch - BatchGasThreshold uint64 `json:"batch_gas_threshold"` - // Time waited to generate a batch even if gas_threshold not met - BatchTimeSec uint64 `json:"batch_time_sec"` - // Time waited to commit batches before the calldata met CommitTxCalldataSizeLimit - BatchCommitTimeSec uint64 `json:"batch_commit_time_sec"` - // Max number of blocks in a batch - BatchBlocksLimit uint64 `json:"batch_blocks_limit"` - // Commit tx calldata size limit in bytes, target to cap the gas use of commit tx at 2M gas - CommitTxCalldataSizeLimit uint64 `json:"commit_tx_calldata_size_limit"` - // Commit tx calldata min size limit in bytes - CommitTxCalldataMinSize uint64 `json:"commit_tx_calldata_min_size,omitempty"` - // Max number of batches in a commit transaction - CommitTxBatchCountLimit uint64 `json:"commit_tx_batch_count_limit"` - // The public input hash config - PublicInputConfig *types.PublicInputHashConfig `json:"public_input_config"` +// ChunkProposerConfig loads chunk_proposer configuration items. +type ChunkProposerConfig struct { + MaxTxGasPerChunk uint64 `json:"max_tx_gas_per_chunk"` + MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"` + MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"` + MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"` + MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"` + ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"` +} + +// BatchProposerConfig loads batch_proposer configuration items. 
+type BatchProposerConfig struct { + MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"` + MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"` + MaxL1CommitCalldataSizePerBatch uint64 `json:"max_l1_commit_calldata_size_per_batch"` + MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"` + BatchTimeoutSec uint64 `json:"batch_timeout_sec"` } diff --git a/bridge/internal/controller/relayer/l2_relayer.go b/bridge/internal/controller/relayer/l2_relayer.go index 0c6fa034c..ecf185652 100644 --- a/bridge/internal/controller/relayer/l2_relayer.go +++ b/bridge/internal/controller/relayer/l2_relayer.go @@ -4,18 +4,14 @@ import ( "context" "errors" "math/big" - "runtime" "sync" "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/log" gethMetrics "github.com/scroll-tech/go-ethereum/metrics" - "golang.org/x/sync/errgroup" "gorm.io/gorm" - "modernc.org/mathutil" "scroll-tech/common/metrics" "scroll-tech/common/types" @@ -25,14 +21,11 @@ import ( "scroll-tech/bridge/internal/controller/sender" "scroll-tech/bridge/internal/orm" bridgeTypes "scroll-tech/bridge/internal/types" - "scroll-tech/bridge/internal/utils" ) var ( - bridgeL2MsgsRelayedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/total", metrics.ScrollRegistry) bridgeL2BatchesFinalizedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry) bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry) - bridgeL2MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/confirmed/total", metrics.ScrollRegistry) bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", 
metrics.ScrollRegistry) bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry) bridgeL2BatchesSkippedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry) @@ -49,9 +42,10 @@ type Layer2Relayer struct { l2Client *ethclient.Client - blockBatchOrm *orm.BlockBatch - blockTraceOrm *orm.BlockTrace - l2MessageOrm *orm.L2Message + batchOrm *orm.Batch + chunkOrm *orm.Chunk + l2BlockOrm *orm.L2Block + l2MessageOrm *orm.L2Message cfg *config.RelayerConfig @@ -75,8 +69,8 @@ type Layer2Relayer struct { processingMessage sync.Map // A list of processing batches commitment. - // key(string): confirmation ID, value([]string): batch hashes. - processingBatchesCommitment sync.Map + // key(string): confirmation ID, value(string): batch hash. + processingCommitment sync.Map // A list of processing batch finalization. // key(string): confirmation ID, value(string): batch hash. @@ -122,9 +116,10 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm. layer2Relayer := &Layer2Relayer{ ctx: ctx, - blockBatchOrm: orm.NewBlockBatch(db), - l2MessageOrm: orm.NewL2Message(db), - blockTraceOrm: orm.NewBlockTrace(db), + batchOrm: orm.NewBatch(db), + l2MessageOrm: orm.NewL2Message(db), + l2BlockOrm: orm.NewL2Block(db), + chunkOrm: orm.NewChunk(db), l2Client: l2Client, @@ -142,134 +137,18 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm. 
minGasPrice: minGasPrice, gasPriceDiff: gasPriceDiff, - cfg: cfg, - processingMessage: sync.Map{}, - processingBatchesCommitment: sync.Map{}, - processingFinalization: sync.Map{}, + cfg: cfg, + processingMessage: sync.Map{}, + processingCommitment: sync.Map{}, + processingFinalization: sync.Map{}, } go layer2Relayer.handleConfirmLoop(ctx) return layer2Relayer, nil } -const processMsgLimit = 100 - -// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain -func (r *Layer2Relayer) ProcessSavedEvents() { - batch, err := r.blockBatchOrm.GetLatestBatchByRollupStatus([]types.RollupStatus{types.RollupFinalized}) - if err != nil { - log.Error("GetLatestFinalizedBatch failed", "err", err) - return - } - - // msgs are sorted by nonce in increasing order - fields := map[string]interface{}{ - "status": int(types.MsgPending), - "height <= (?)": batch.EndBlockNumber, - } - orderByList := []string{ - "nonce ASC", - } - limit := processMsgLimit - - msgs, err := r.l2MessageOrm.GetL2Messages(fields, orderByList, limit) - if err != nil { - log.Error("Failed to fetch unprocessed L2 messages", "err", err) - return - } - - // process messages in batches - batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts()) - for size := 0; len(msgs) > 0; msgs = msgs[size:] { - if size = len(msgs); size > batchSize { - size = batchSize - } - var g errgroup.Group - for _, msg := range msgs[:size] { - msg := msg - g.Go(func() error { - return r.processSavedEvent(&msg) - }) - } - if err := g.Wait(); err != nil { - if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) { - log.Error("failed to process l2 saved event", "err", err) - } - return - } - } -} - -func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error { - // @todo fetch merkle proof from l2geth - log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height) - - // Get the block info that contains the 
message - blockInfos, err := r.blockTraceOrm.GetL2BlockInfos(map[string]interface{}{"number": msg.Height}, nil, 0) - if err != nil { - log.Error("Failed to GetL2BlockInfos from DB", "number", msg.Height) - } - if len(blockInfos) == 0 { - return errors.New("get block trace len is 0, exit") - } - - blockInfo := blockInfos[0] - if blockInfo.BatchHash == "" { - log.Error("Block has not been batched yet", "number", blockInfo.Number, "msg.nonce", msg.Nonce) - return nil - } - - // TODO: rebuild the withdraw trie to generate the merkle proof - proof := bridgeAbi.IL1ScrollMessengerL2MessageProof{ - BatchHash: common.HexToHash(blockInfo.BatchHash), - MerkleProof: make([]byte, 0), - } - from := common.HexToAddress(msg.Sender) - target := common.HexToAddress(msg.Target) - value, ok := big.NewInt(0).SetString(msg.Value, 10) - if !ok { - // @todo maybe panic? - log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height) - // TODO: need to skip this message by changing its status to MsgError - } - msgNonce := big.NewInt(int64(msg.Nonce)) - calldata := common.Hex2Bytes(msg.Calldata) - data, err := r.l1MessengerABI.Pack("relayMessageWithProof", from, target, value, msgNonce, calldata, proof) - if err != nil { - log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err) - // TODO: need to skip this message by changing its status to MsgError - return err - } - - hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, r.minGasLimitForMessageRelay) - if err != nil && errors.Is(err, ErrExecutionRevertedMessageExpired) { - return r.l2MessageOrm.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired) - } - if err != nil && errors.Is(err, ErrExecutionRevertedAlreadySuccessExecuted) { - return r.l2MessageOrm.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed) - } - if err != nil { - if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, 
sender.ErrFullPending) { - log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err) - } - return err - } - bridgeL2MsgsRelayedTotalCounter.Inc(1) - log.Info("relayMessageWithProof to layer1", "msgHash", msg.MsgHash, "txhash", hash.String()) - - // save status in db - // @todo handle db error - err = r.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String()) - if err != nil { - log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err) - return err - } - r.processingMessage.Store(msg.MsgHash, msg.MsgHash) - return nil -} - // ProcessGasPriceOracle imports gas price to layer1 func (r *Layer2Relayer) ProcessGasPriceOracle() { - batch, err := r.blockBatchOrm.GetLatestBatch() + batch, err := r.batchOrm.GetLatestBatch(r.ctx) if err != nil { log.Error("Failed to GetLatestBatch", "err", err) return @@ -300,7 +179,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() { return } - err = r.blockBatchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String()) + err = r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String()) if err != nil { log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err) return @@ -311,93 +190,119 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() { } } -// SendCommitTx sends commitBatches tx to L1. 
-func (r *Layer2Relayer) SendCommitTx(batchData []*bridgeTypes.BatchData) error { - if len(batchData) == 0 { - log.Error("SendCommitTx receives empty batch") - return nil - } - - // pack calldata - commitBatches := make([]bridgeAbi.IScrollChainBatch, len(batchData)) - for i, batch := range batchData { - commitBatches[i] = batch.Batch - } - calldata, err := r.l1RollupABI.Pack("commitBatches", commitBatches) +// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1. +func (r *Layer2Relayer) ProcessPendingBatches() { + // get pending batches from database in ascending order by their index. + pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 10) if err != nil { - log.Error("Failed to pack commitBatches", - "error", err, - "start_batch_index", commitBatches[0].BatchIndex, - "end_batch_index", commitBatches[len(commitBatches)-1].BatchIndex) - return err + log.Error("Failed to fetch pending L2 batches", "err", err) + return } - - // generate a unique txID and send transaction - var bytes []byte - for _, batch := range batchData { - bytes = append(bytes, batch.Hash().Bytes()...) 
- } - txID := crypto.Keccak256Hash(bytes).String() - txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0) - if err != nil { - if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) { - log.Error("Failed to send commitBatches tx to layer1 ", "err", err) - } - return err - } - bridgeL2BatchesCommittedTotalCounter.Inc(int64(len(commitBatches))) - log.Info("Sent the commitBatches tx to layer1", - "tx_hash", txHash.Hex(), - "start_batch_index", commitBatches[0].BatchIndex, - "end_batch_index", commitBatches[len(commitBatches)-1].BatchIndex) - - // record and sync with db, @todo handle db error - batchHashes := make([]string, len(batchData)) - for i, batch := range batchData { - batchHashes[i] = batch.Hash().Hex() - err = r.blockBatchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHashes[i], txHash.String(), types.RollupCommitting) + for _, batch := range pendingBatches { + // get current header and parent header. 
+ currentBatchHeader, err := bridgeTypes.DecodeBatchHeader(batch.BatchHeader) if err != nil { - log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batchHashes[i], "index", batch.Batch.BatchIndex, "err", err) + log.Error("Failed to decode batch header", "index", batch.Index, "error", err) + return } + parentBatch := &orm.Batch{} + if batch.Index > 0 { + parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1) + if err != nil { + log.Error("Failed to get parent batch header", "index", batch.Index-1, "error", err) + return + } + } + + // get the chunks for the batch + startChunkIndex := batch.StartChunkIndex + endChunkIndex := batch.EndChunkIndex + dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, startChunkIndex, endChunkIndex) + if err != nil { + log.Error("Failed to fetch chunks", + "start index", startChunkIndex, + "end index", endChunkIndex, "error", err) + return + } + + encodedChunks := make([][]byte, len(dbChunks)) + for i, c := range dbChunks { + var wrappedBlocks []*bridgeTypes.WrappedBlock + wrappedBlocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber) + if err != nil { + log.Error("Failed to fetch wrapped blocks", + "start number", c.StartBlockNumber, + "end number", c.EndBlockNumber, "error", err) + return + } + chunk := &bridgeTypes.Chunk{ + Blocks: wrappedBlocks, + } + var chunkBytes []byte + chunkBytes, err = chunk.Encode(c.TotalL1MessagesPoppedBefore) + if err != nil { + log.Error("Failed to encode chunk", "error", err) + return + } + encodedChunks[i] = chunkBytes + } + + calldata, err := r.l1RollupABI.Pack("commitBatch", currentBatchHeader.Version(), parentBatch.BatchHeader, encodedChunks, currentBatchHeader.SkippedL1MessageBitmap()) + if err != nil { + log.Error("Failed to pack commitBatch", "index", batch.Index, "error", err) + return + } + + // send transaction + txID := batch.Hash + "-commit" + txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, 
big.NewInt(0), calldata, 0) + if err != nil { + if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) { + log.Error("Failed to send commitBatch tx to layer1 ", "err", err) + } + return + } + + err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batch.Hash, txHash.String(), types.RollupCommitting) + if err != nil { + log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err) + return + } + bridgeL2BatchesCommittedTotalCounter.Inc(1) + r.processingCommitment.Store(txID, batch.Hash) + log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex()) } - r.processingBatchesCommitment.Store(txID, batchHashes) - return nil } // ProcessCommittedBatches submit proof to layer 1 rollup contract func (r *Layer2Relayer) ProcessCommittedBatches() { // set skipped batches in a single db operation - if count, err := r.blockBatchOrm.UpdateSkippedBatches(); err != nil { + if count, err := r.batchOrm.UpdateSkippedBatches(r.ctx); err != nil { log.Error("UpdateSkippedBatches failed", "err", err) // continue anyway } else if count > 0 { - bridgeL2BatchesSkippedTotalCounter.Inc(count) + bridgeL2BatchesSkippedTotalCounter.Inc(int64(count)) log.Info("Skipping batches", "count", count) } - // batches are sorted by batch index in increasing order - batchHashes, err := r.blockBatchOrm.GetBlockBatchesHashByRollupStatus(types.RollupCommitted, 1) + // retrieves the earliest batch whose rollup status is 'committed' + fields := map[string]interface{}{ + "rollup_status": types.RollupCommitted, + } + orderByList := []string{"index ASC"} + limit := 1 + batches, err := r.batchOrm.GetBatches(r.ctx, fields, orderByList, limit) if err != nil { log.Error("Failed to fetch committed L2 batches", "err", err) return } - if len(batchHashes) == 0 { - return - } - hash := batchHashes[0] - // @todo add support to relay multiple batches - - batches, err := 
r.blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": hash}, nil, 1) - if err != nil { - log.Error("Failed to fetch committed L2 batch", "hash", hash, "err", err) - return - } - if len(batches) == 0 { - log.Error("Unexpected result for GetBlockBatches", "hash", hash, "len", 0) + if len(batches) != 1 { + log.Warn("Unexpected result for GetBlockBatches", "number of batches", len(batches)) return } batch := batches[0] + hash := batch.Hash status := types.ProvingStatus(batch.ProvingStatus) switch status { case types.ProvingTaskUnassigned, types.ProvingTaskAssigned: @@ -409,55 +314,36 @@ func (r *Layer2Relayer) ProcessCommittedBatches() { return case types.ProvingTaskFailed, types.ProvingTaskSkipped: // note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake - if err = r.blockBatchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil { + if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil { log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err) } case types.ProvingTaskVerified: log.Info("Start to roll up zk proof", "hash", hash) success := false - rollupStatues := []types.RollupStatus{ - types.RollupFinalizing, - types.RollupFinalized, - } - previousBatch, err := r.blockBatchOrm.GetLatestBatchByRollupStatus(rollupStatues) - // skip submitting proof - if err == nil && uint64(batch.CreatedAt.Sub(previousBatch.CreatedAt).Seconds()) < r.cfg.FinalizeBatchIntervalSec { - log.Info( - "Not enough time passed, skipping", - "hash", hash, - "createdAt", batch.CreatedAt, - "lastFinalizingHash", previousBatch.Hash, - "lastFinalizingStatus", previousBatch.RollupStatus, - "lastFinalizingCreatedAt", previousBatch.CreatedAt, - ) - - if err = r.blockBatchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil { - log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err) - } else { - success = true + var parentBatchStateRoot string + if 
batch.Index > 0 { + var parentBatch *orm.Batch + parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1) + // handle unexpected db error + if err != nil { + log.Error("Failed to get batch", "index", batch.Index-1, "err", err) + return } - - return - } - - // handle unexpected db error - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - log.Error("Failed to get latest finalized batch", "err", err) - return + parentBatchStateRoot = parentBatch.StateRoot } defer func() { // TODO: need to revisit this and have a more fine-grained error handling if !success { log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash) - if err = r.blockBatchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil { + if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil { log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err) } } }() - aggProof, err := r.blockBatchOrm.GetVerifiedProofByHash(hash) + aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, hash) if err != nil { log.Warn("get verified proof by hash failed", "hash", hash, "err", err) return @@ -468,9 +354,14 @@ func (r *Layer2Relayer) ProcessCommittedBatches() { return } - proof := utils.BufferToUint256Le(aggProof.Proof) - finalPair := utils.BufferToUint256Le(aggProof.FinalPair) - data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, finalPair) + data, err := r.l1RollupABI.Pack( + "finalizeBatchWithProof", + batch.BatchHeader, + common.HexToHash(parentBatchStateRoot), + common.HexToHash(batch.StateRoot), + common.HexToHash(batch.WithdrawRoot), + aggProof.Proof, + ) if err != nil { log.Error("Pack finalizeBatchWithProof failed", "err", err) return @@ -482,17 +373,20 @@ func (r *Layer2Relayer) ProcessCommittedBatches() { finalizeTxHash := &txHash if err != nil { if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) { - 
log.Error("finalizeBatchWithProof in layer1 failed", "hash", hash, "err", err) + log.Error("finalizeBatchWithProof in layer1 failed", + "index", batch.Index, "hash", batch.Hash, "err", err) } return } bridgeL2BatchesFinalizedTotalCounter.Inc(1) - log.Info("finalizeBatchWithProof in layer1", "batch_hash", hash, "tx_hash", hash) + log.Info("finalizeBatchWithProof in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", hash) // record and sync with db, @todo handle db error - err = r.blockBatchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing) + err = r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing) if err != nil { - log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", hash, "err", err) + log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", + "index", batch.Index, "batch hash", batch.Hash, + "tx hash", finalizeTxHash.String(), "err", err) } success = true r.processingFinalization.Store(txID, hash) @@ -506,29 +400,10 @@ func (r *Layer2Relayer) ProcessCommittedBatches() { func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) { transactionType := "Unknown" - // check whether it is message relay transaction - if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok { - transactionType = "MessageRelay" - var status types.MsgStatus - if confirmation.IsSuccessful { - status = types.MsgConfirmed - } else { - status = types.MsgRelayFailed - log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation) - } - // @todo handle db error - err := r.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), status, confirmation.TxHash.String()) - if err != nil { - log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err) - } - bridgeL2MsgsRelayedConfirmedTotalCounter.Inc(1) - r.processingMessage.Delete(confirmation.ID) - } // check whether it is 
CommitBatches transaction - if batchBatches, ok := r.processingBatchesCommitment.Load(confirmation.ID); ok { + if batchHash, ok := r.processingCommitment.Load(confirmation.ID); ok { transactionType = "BatchesCommitment" - batchHashes := batchBatches.([]string) var status types.RollupStatus if confirmation.IsSuccessful { status = types.RollupCommitted @@ -536,15 +411,15 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) { status = types.RollupCommitFailed log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation) } - for _, batchHash := range batchHashes { - // @todo handle db error - err := r.blockBatchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), status) - if err != nil { - log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_hash", batchHash, "err", err) - } + // @todo handle db error + err := r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status) + if err != nil { + log.Warn("UpdateCommitTxHashAndRollupStatus failed", + "batch hash", batchHash.(string), + "tx hash", confirmation.TxHash.String(), "err", err) } - bridgeL2BatchesCommittedConfirmedTotalCounter.Inc(int64(len(batchHashes))) - r.processingBatchesCommitment.Delete(confirmation.ID) + bridgeL2BatchesCommittedConfirmedTotalCounter.Inc(1) + r.processingCommitment.Delete(confirmation.ID) } // check whether it is proof finalization transaction @@ -557,10 +432,13 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) { status = types.RollupFinalizeFailed log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation) } + // @todo handle db error - err := r.blockBatchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status) + err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status) if err != nil { - 
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", batchHash.(string), "err", err) + log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", + "batch hash", batchHash.(string), + "tx hash", confirmation.TxHash.String(), "err", err) } bridgeL2BatchesFinalizedConfirmedTotalCounter.Inc(1) r.processingFinalization.Delete(confirmation.ID) @@ -580,14 +458,14 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) { case cfm := <-r.gasOracleSender.ConfirmChan(): if !cfm.IsSuccessful { // @discuss: maybe make it pending again? - err := r.blockBatchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String()) + err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String()) if err != nil { log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err) } log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm) } else { // @todo handle db error - err := r.blockBatchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String()) + err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String()) if err != nil { log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err) } diff --git a/bridge/internal/controller/relayer/l2_relayer_test.go b/bridge/internal/controller/relayer/l2_relayer_test.go index 4bccb6e91..5ef67e7a3 100644 --- a/bridge/internal/controller/relayer/l2_relayer_test.go +++ b/bridge/internal/controller/relayer/l2_relayer_test.go @@ -2,17 +2,12 @@ package relayer import ( "context" - "encoding/json" "errors" "math/big" - "os" - "strconv" "testing" "github.com/agiledragon/gomonkey/v2" - "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" - gethTypes "github.com/scroll-tech/go-ethereum/core/types" "github.com/smartystreets/goconvey/convey" "github.com/stretchr/testify/assert" 
"gorm.io/gorm" @@ -28,20 +23,6 @@ import ( bridgeUtils "scroll-tech/bridge/internal/utils" ) -var ( - templateL2Message = []orm.L2Message{ - { - Nonce: 1, - Height: 1, - Sender: "0x596a746661dbed76a84556111c2872249b070e15", - Value: "100", - Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7", - Calldata: "testdata", - Layer2Hash: "hash0", - }, - } -) - func setupL2RelayerDB(t *testing.T) *gorm.DB { db, err := bridgeUtils.InitDB(cfg.DBConfig) assert.NoError(t, err) @@ -59,69 +40,32 @@ func testCreateNewRelayer(t *testing.T) { assert.NotNil(t, relayer) } -func testL2RelayerProcessSaveEvents(t *testing.T) { +func testL2RelayerProcessPendingBatches(t *testing.T) { db := setupL2RelayerDB(t) defer bridgeUtils.CloseDB(db) + l2Cfg := cfg.L2Config relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig) assert.NoError(t, err) - l2MessageOrm := orm.NewL2Message(db) - err = l2MessageOrm.SaveL2Messages(context.Background(), templateL2Message) + l2BlockOrm := orm.NewL2Block(db) + err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}) + assert.NoError(t, err) + chunkOrm := orm.NewChunk(db) + dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1) + assert.NoError(t, err) + dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2) + assert.NoError(t, err) + batchOrm := orm.NewBatch(db) + batchHash, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*bridgeTypes.Chunk{chunk1, chunk2}) assert.NoError(t, err) - traces := []*bridgeTypes.WrappedBlock{ - { - Header: &gethTypes.Header{ - Number: big.NewInt(int64(templateL2Message[0].Height)), - }, - Transactions: nil, - WithdrawTrieRoot: common.Hash{}, - }, - { - Header: &gethTypes.Header{ - Number: big.NewInt(int64(templateL2Message[0].Height + 1)), - }, - Transactions: nil, - WithdrawTrieRoot: common.Hash{}, - }, - } + relayer.ProcessPendingBatches() - blockTraceOrm := 
orm.NewBlockTrace(db) - assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(traces)) - blockBatchOrm := orm.NewBlockBatch(db) - parentBatch1 := &bridgeTypes.BatchInfo{ - Index: 0, - Hash: common.Hash{}.Hex(), - StateRoot: common.Hash{}.Hex(), - } - batchData1 := bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil) - batchHash := batchData1.Hash().Hex() - err = db.Transaction(func(tx *gorm.DB) error { - rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData1) - if dbTxErr != nil { - return dbTxErr - } - if rowsAffected != 1 { - dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1") - return dbTxErr - } - dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, []uint64{1}, batchHash) - if dbTxErr != nil { - return dbTxErr - } - return nil - }) + statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash}) assert.NoError(t, err) - - err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupFinalized) - assert.NoError(t, err) - - relayer.ProcessSavedEvents() - - msg, err := l2MessageOrm.GetL2MessageByNonce(templateL2Message[0].Nonce) - assert.NoError(t, err) - assert.Equal(t, types.MsgSubmitted, types.MsgStatus(msg.Status)) + assert.Equal(t, 1, len(statuses)) + assert.Equal(t, types.RollupCommitting, statuses[0]) } func testL2RelayerProcessCommittedBatches(t *testing.T) { @@ -131,44 +75,34 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) { l2Cfg := cfg.L2Config relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig) assert.NoError(t, err) - - parentBatch1 := &bridgeTypes.BatchInfo{ - Index: 0, - Hash: common.Hash{}.Hex(), - StateRoot: common.Hash{}.Hex(), - } - - blockBatchOrm := orm.NewBlockBatch(db) - batchData1 := bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil) - batchHash := batchData1.Hash().Hex() - err = db.Transaction(func(tx *gorm.DB) error 
{ - rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData1) - if dbTxErr != nil { - return dbTxErr - } - if rowsAffected != 1 { - dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1") - return dbTxErr - } - return nil - }) + batchOrm := orm.NewBatch(db) + batchHash, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2}) assert.NoError(t, err) - err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted) + err = batchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted) assert.NoError(t, err) - proof := &message.AggProof{ - Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - } - err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100) - assert.NoError(t, err) - err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified) + err = batchOrm.UpdateProvingStatus(context.Background(), batchHash, types.ProvingTaskVerified) assert.NoError(t, err) relayer.ProcessCommittedBatches() - statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{batchHash}) + statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash}) + assert.NoError(t, err) + assert.Equal(t, 1, len(statuses)) + assert.Equal(t, types.RollupFinalizationSkipped, statuses[0]) + + err = batchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted) + assert.NoError(t, err) + proof := &message.AggProof{ + Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + } + err = batchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100) + assert.NoError(t, err) + + relayer.ProcessCommittedBatches() + statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash}) assert.NoError(t, err) assert.Equal(t, 1, len(statuses)) assert.Equal(t, types.RollupFinalizing, statuses[0]) @@ -182,144 +116,63 @@ func testL2RelayerSkipBatches(t *testing.T) { relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig) assert.NoError(t, err) - blockBatchOrm := orm.NewBlockBatch(db) - createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string { - batchData := genBatchData(t, index) - err = db.Transaction(func(tx *gorm.DB) error { - rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData) - if dbTxErr != nil { - return dbTxErr - } - if rowsAffected != 1 { - dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1") - return dbTxErr - } - return nil - }) + batchOrm := orm.NewBatch(db) + createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus) string { + batchHash, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2}) assert.NoError(t, err) - batchHash := batchData.Hash().Hex() - err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, rollupStatus) + err = batchOrm.UpdateRollupStatus(context.Background(), batchHash, rollupStatus) assert.NoError(t, err) proof := &message.AggProof{ Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, } - err = blockBatchOrm.UpdateProofByHash(context.Background(), 
batchHash, proof, 100) + err = batchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100) assert.NoError(t, err) - err = blockBatchOrm.UpdateProvingStatus(batchHash, provingStatus) + err = batchOrm.UpdateProvingStatus(context.Background(), batchHash, provingStatus) assert.NoError(t, err) return batchHash } skipped := []string{ - createBatch(types.RollupCommitted, types.ProvingTaskSkipped, 1), - createBatch(types.RollupCommitted, types.ProvingTaskFailed, 2), + createBatch(types.RollupCommitted, types.ProvingTaskSkipped), + createBatch(types.RollupCommitted, types.ProvingTaskFailed), } notSkipped := []string{ - createBatch(types.RollupPending, types.ProvingTaskSkipped, 3), - createBatch(types.RollupCommitting, types.ProvingTaskSkipped, 4), - createBatch(types.RollupFinalizing, types.ProvingTaskSkipped, 5), - createBatch(types.RollupFinalized, types.ProvingTaskSkipped, 6), - createBatch(types.RollupPending, types.ProvingTaskFailed, 7), - createBatch(types.RollupCommitting, types.ProvingTaskFailed, 8), - createBatch(types.RollupFinalizing, types.ProvingTaskFailed, 9), - createBatch(types.RollupFinalized, types.ProvingTaskFailed, 10), - createBatch(types.RollupCommitted, types.ProvingTaskVerified, 11), + createBatch(types.RollupPending, types.ProvingTaskSkipped), + createBatch(types.RollupCommitting, types.ProvingTaskSkipped), + createBatch(types.RollupFinalizing, types.ProvingTaskSkipped), + createBatch(types.RollupFinalized, types.ProvingTaskSkipped), + createBatch(types.RollupPending, types.ProvingTaskFailed), + createBatch(types.RollupCommitting, types.ProvingTaskFailed), + createBatch(types.RollupFinalizing, types.ProvingTaskFailed), + createBatch(types.RollupFinalized, types.ProvingTaskFailed), + createBatch(types.RollupCommitted, types.ProvingTaskVerified), } relayer.ProcessCommittedBatches() for _, id := range skipped { - statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{id}) + statuses, err := 
batchOrm.GetRollupStatusByHashList(context.Background(), []string{id}) assert.NoError(t, err) assert.Equal(t, 1, len(statuses)) assert.Equal(t, types.RollupFinalizationSkipped, statuses[0]) } for _, id := range notSkipped { - statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{id}) + statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{id}) assert.NoError(t, err) assert.Equal(t, 1, len(statuses)) assert.NotEqual(t, types.RollupFinalizationSkipped, statuses[0]) } } -func testL2RelayerMsgConfirm(t *testing.T) { - db := setupL2RelayerDB(t) - defer bridgeUtils.CloseDB(db) - l2MessageOrm := orm.NewL2Message(db) - insertL2Messages := []orm.L2Message{ - {MsgHash: "msg-1", Nonce: 0}, - {MsgHash: "msg-2", Nonce: 1}, - } - err := l2MessageOrm.SaveL2Messages(context.Background(), insertL2Messages) - assert.NoError(t, err) - - // Create and set up the Layer2 Relayer. - l2Cfg := cfg.L2Config - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig) - assert.NoError(t, err) - - // Simulate message confirmations. - l2Relayer.processingMessage.Store("msg-1", "msg-1") - l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{ - ID: "msg-1", - IsSuccessful: true, - }) - l2Relayer.processingMessage.Store("msg-2", "msg-2") - l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{ - ID: "msg-2", - IsSuccessful: false, - }) - - // Check the database for the updated status using TryTimes. 
- assert.True(t, utils.TryTimes(5, func() bool { - fields1 := map[string]interface{}{"msg_hash": "msg-1"} - msg1, err1 := l2MessageOrm.GetL2Messages(fields1, nil, 0) - if len(msg1) != 1 { - return false - } - fields2 := map[string]interface{}{"msg_hash": "msg-2"} - msg2, err2 := l2MessageOrm.GetL2Messages(fields2, nil, 0) - if len(msg2) != 1 { - return false - } - return err1 == nil && types.MsgStatus(msg1[0].Status) == types.MsgConfirmed && - err2 == nil && types.MsgStatus(msg2[0].Status) == types.MsgRelayFailed - })) -} - func testL2RelayerRollupConfirm(t *testing.T) { db := setupL2RelayerDB(t) defer bridgeUtils.CloseDB(db) - // Insert test data. - batches := make([]*bridgeTypes.BatchData, 6) - for i := 0; i < 6; i++ { - batches[i] = genBatchData(t, uint64(i)) - } - - blockBatchOrm := orm.NewBlockBatch(db) - err := db.Transaction(func(tx *gorm.DB) error { - for _, batch := range batches { - rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batch) - if dbTxErr != nil { - return dbTxErr - } - if rowsAffected != 1 { - dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1") - return dbTxErr - } - } - return nil - }) - assert.NoError(t, err) - // Create and set up the Layer2 Relayer. 
l2Cfg := cfg.L2Config ctx, cancel := context.WithCancel(context.Background()) @@ -331,22 +184,29 @@ func testL2RelayerRollupConfirm(t *testing.T) { processingKeys := []string{"committed-1", "committed-2", "finalized-1", "finalized-2"} isSuccessful := []bool{true, false, true, false} + batchOrm := orm.NewBatch(db) + batchHashes := make([]string, len(processingKeys)) + for i := range batchHashes { + var err error + batchHashes[i], err = batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2}) + assert.NoError(t, err) + } + for i, key := range processingKeys[:2] { - batchHashes := []string{batches[i*2].Hash().Hex(), batches[i*2+1].Hash().Hex()} - l2Relayer.processingBatchesCommitment.Store(key, batchHashes) - l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{ + l2Relayer.processingCommitment.Store(key, batchHashes[i]) + l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{ ID: key, IsSuccessful: isSuccessful[i], + TxHash: common.HexToHash("0x123456789abcdef"), }) } for i, key := range processingKeys[2:] { - batchHash := batches[i+4].Hash().Hex() - l2Relayer.processingFinalization.Store(key, batchHash) + l2Relayer.processingFinalization.Store(key, batchHashes[i+2]) l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{ ID: key, IsSuccessful: isSuccessful[i+2], - TxHash: common.HexToHash("0x56789abcdef1234"), + TxHash: common.HexToHash("0x123456789abcdef"), }) } @@ -354,15 +214,13 @@ func testL2RelayerRollupConfirm(t *testing.T) { ok := utils.TryTimes(5, func() bool { expectedStatuses := []types.RollupStatus{ types.RollupCommitted, - types.RollupCommitted, - types.RollupCommitFailed, types.RollupCommitFailed, types.RollupFinalized, types.RollupFinalizeFailed, } - for i, batch := range batches[:6] { - batchInDB, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()}, nil, 0) + for i, batchHash := range batchHashes { + batchInDB, err := 
batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHash}, nil, 0) if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] { return false } @@ -376,26 +234,11 @@ func testL2RelayerGasOracleConfirm(t *testing.T) { db := setupL2RelayerDB(t) defer bridgeUtils.CloseDB(db) - // Insert test data. - batches := make([]*bridgeTypes.BatchData, 2) - for i := 0; i < 2; i++ { - batches[i] = genBatchData(t, uint64(i)) - } + batchOrm := orm.NewBatch(db) + batchHash1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1}) + assert.NoError(t, err) - blockBatchOrm := orm.NewBlockBatch(db) - err := db.Transaction(func(tx *gorm.DB) error { - for _, batch := range batches { - rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batch) - if dbTxErr != nil { - return dbTxErr - } - if rowsAffected != 1 { - dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1") - return dbTxErr - } - } - return nil - }) + batchHash2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2}) assert.NoError(t, err) // Create and set up the Layer2 Relayer. @@ -406,19 +249,27 @@ func testL2RelayerGasOracleConfirm(t *testing.T) { assert.NoError(t, err) // Simulate message confirmations. 
- isSuccessful := []bool{true, false} - for i, batch := range batches { - l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{ - ID: batch.Hash().Hex(), - IsSuccessful: isSuccessful[i], - }) + type BatchConfirmation struct { + batchHash string + isSuccessful bool } + confirmations := []BatchConfirmation{ + {batchHash: batchHash1, isSuccessful: true}, + {batchHash: batchHash2, isSuccessful: false}, + } + + for _, confirmation := range confirmations { + l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{ + ID: confirmation.batchHash, + IsSuccessful: confirmation.isSuccessful, + }) + } // Check the database for the updated status using TryTimes. ok := utils.TryTimes(5, func() bool { expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleFailed} - for i, batch := range batches { - gasOracle, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()}, nil, 0) + for i, confirmation := range confirmations { + gasOracle, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": confirmation.batchHash}, nil, 0) if err != nil || len(gasOracle) != 1 || types.GasOracleStatus(gasOracle[0].OracleStatus) != expectedStatuses[i] { return false } @@ -428,21 +279,6 @@ func testL2RelayerGasOracleConfirm(t *testing.T) { assert.True(t, ok) } -func genBatchData(t *testing.T, index uint64) *bridgeTypes.BatchData { - templateBlockTrace, err := os.ReadFile("../../../testdata/blockTrace_02.json") - assert.NoError(t, err) - // unmarshal blockTrace - wrappedBlock := &bridgeTypes.WrappedBlock{} - err = json.Unmarshal(templateBlockTrace, wrappedBlock) - assert.NoError(t, err) - wrappedBlock.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16)) - parentBatch := &bridgeTypes.BatchInfo{ - Index: index, - Hash: "0x0000000000000000000000000000000000000000", - } - return bridgeTypes.NewBatchData(parentBatch, []*bridgeTypes.WrappedBlock{wrappedBlock}, nil) -} - func 
testLayer2RelayerProcessGasPriceOracle(t *testing.T) { db := setupL2RelayerDB(t) defer bridgeUtils.CloseDB(db) @@ -451,19 +287,19 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, relayer) - var blockBatchOrm *orm.BlockBatch + var batchOrm *orm.Batch convey.Convey("Failed to GetLatestBatch", t, func() { targetErr := errors.New("GetLatestBatch error") - patchGuard := gomonkey.ApplyMethodFunc(blockBatchOrm, "GetLatestBatch", func() (*orm.BlockBatch, error) { + patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) { return nil, targetErr }) defer patchGuard.Reset() relayer.ProcessGasPriceOracle() }) - patchGuard := gomonkey.ApplyMethodFunc(blockBatchOrm, "GetLatestBatch", func() (*orm.BlockBatch, error) { - batch := orm.BlockBatch{ - OracleStatus: int(types.GasOraclePending), + patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) { + batch := orm.Batch{ + OracleStatus: int16(types.GasOraclePending), Hash: "0x0000000000000000000000000000000000000000", } return &batch, nil @@ -508,97 +344,14 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) { convey.Convey("UpdateGasOracleStatusAndOracleTxHash failed", t, func() { targetErr := errors.New("UpdateL2GasOracleStatusAndOracleTxHash error") - patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error { + patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error { return targetErr }) relayer.ProcessGasPriceOracle() }) - patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error { + patchGuard.ApplyMethodFunc(batchOrm, 
"UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error { return nil }) relayer.ProcessGasPriceOracle() } - -func testLayer2RelayerSendCommitTx(t *testing.T) { - db := setupL2RelayerDB(t) - defer bridgeUtils.CloseDB(db) - - relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig) - assert.NoError(t, err) - assert.NotNil(t, relayer) - - var batchDataList []*bridgeTypes.BatchData - convey.Convey("SendCommitTx receives empty batch", t, func() { - err = relayer.SendCommitTx(batchDataList) - assert.NoError(t, err) - }) - - parentBatch := &bridgeTypes.BatchInfo{ - Index: 0, - Hash: "0x0000000000000000000000000000000000000000", - } - - traces := []*bridgeTypes.WrappedBlock{ - { - Header: &gethTypes.Header{ - Number: big.NewInt(1000), - ParentHash: common.Hash{}, - Difficulty: big.NewInt(0), - BaseFee: big.NewInt(0), - }, - Transactions: nil, - WithdrawTrieRoot: common.Hash{}, - }, - } - - blocks := []*bridgeTypes.WrappedBlock{traces[0]} - tmpBatchData := bridgeTypes.NewBatchData(parentBatch, blocks, cfg.L2Config.BatchProposerConfig.PublicInputConfig) - batchDataList = append(batchDataList, tmpBatchData) - - var s abi.ABI - convey.Convey("Failed to pack commitBatches", t, func() { - targetErr := errors.New("commitBatches error") - patchGuard := gomonkey.ApplyMethodFunc(s, "Pack", func(name string, args ...interface{}) ([]byte, error) { - return nil, targetErr - }) - defer patchGuard.Reset() - - err = relayer.SendCommitTx(batchDataList) - assert.EqualError(t, err, targetErr.Error()) - }) - - patchGuard := gomonkey.ApplyMethodFunc(s, "Pack", func(name string, args ...interface{}) ([]byte, error) { - return nil, nil - }) - defer patchGuard.Reset() - - convey.Convey("Failed to send commitBatches tx to layer1", t, func() { - targetErr := errors.New("SendTransaction failure") - patchGuard.ApplyMethodFunc(relayer.rollupSender, "SendTransaction", func(ID string, target 
*common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) { - return common.Hash{}, targetErr - }) - err = relayer.SendCommitTx(batchDataList) - assert.EqualError(t, err, targetErr.Error()) - }) - - patchGuard.ApplyMethodFunc(relayer.rollupSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) { - return common.HexToHash("0x56789abcdef1234"), nil - }) - - var blockBatchOrm *orm.BlockBatch - convey.Convey("UpdateCommitTxHashAndRollupStatus failed", t, func() { - targetErr := errors.New("UpdateCommitTxHashAndRollupStatus failure") - patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateCommitTxHashAndRollupStatus", func(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error { - return targetErr - }) - err = relayer.SendCommitTx(batchDataList) - assert.NoError(t, err) - }) - - patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateCommitTxHashAndRollupStatus", func(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error { - return nil - }) - err = relayer.SendCommitTx(batchDataList) - assert.NoError(t, err) -} diff --git a/bridge/internal/controller/relayer/relayer_test.go b/bridge/internal/controller/relayer/relayer_test.go index 0f00adc59..a69f5f85d 100644 --- a/bridge/internal/controller/relayer/relayer_test.go +++ b/bridge/internal/controller/relayer/relayer_test.go @@ -5,8 +5,8 @@ import ( "os" "testing" + "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/ethclient" - "github.com/scroll-tech/go-ethereum/log" "github.com/stretchr/testify/assert" "scroll-tech/common/docker" @@ -24,17 +24,20 @@ var ( // l2geth client l2Cli *ethclient.Client - // block trace + // l2 block wrappedBlock1 *bridgeTypes.WrappedBlock wrappedBlock2 *bridgeTypes.WrappedBlock - // batch data - batchData1 *bridgeTypes.BatchData - batchData2 *bridgeTypes.BatchData + // chunk + chunk1 
*bridgeTypes.Chunk + chunk2 *bridgeTypes.Chunk + chunkHash1 common.Hash + chunkHash2 common.Hash ) -func setupEnv(t *testing.T) (err error) { +func setupEnv(t *testing.T) { // Load config. + var err error cfg, err = config.NewConfig("../../../conf/config.json") assert.NoError(t, err) @@ -54,40 +57,22 @@ func setupEnv(t *testing.T) (err error) { assert.NoError(t, err) templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json") - if err != nil { - return err - } - // unmarshal blockTrace + assert.NoError(t, err) wrappedBlock1 = &bridgeTypes.WrappedBlock{} - if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil { - return err - } - parentBatch1 := &bridgeTypes.BatchInfo{ - Index: 0, - Hash: "0x0cc6b102c2924402c14b2e3a19baccc316252bfdc44d9ec62e942d34e39ec729", - StateRoot: "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d", - } - batchData1 = bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil) + err = json.Unmarshal(templateBlockTrace1, wrappedBlock1) + assert.NoError(t, err) + chunk1 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1}} + chunkHash1, err = chunk1.Hash(0) + assert.NoError(t, err) templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json") - if err != nil { - return err - } - // unmarshal blockTrace + assert.NoError(t, err) wrappedBlock2 = &bridgeTypes.WrappedBlock{} - if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil { - return err - } - parentBatch2 := &bridgeTypes.BatchInfo{ - Index: batchData1.Batch.BatchIndex, - Hash: batchData1.Hash().Hex(), - StateRoot: batchData1.Batch.NewStateRoot.String(), - } - batchData2 = bridgeTypes.NewBatchData(parentBatch2, []*bridgeTypes.WrappedBlock{wrappedBlock2}, nil) - - log.Info("batchHash", "batchhash1", batchData1.Hash().Hex(), "batchhash2", batchData2.Hash().Hex()) - - return err + err = json.Unmarshal(templateBlockTrace2, wrappedBlock2) + assert.NoError(t, err) 
+ chunk2 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock2}} + chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0)) + assert.NoError(t, err) } func TestMain(m *testing.M) { @@ -99,9 +84,7 @@ func TestMain(m *testing.M) { } func TestFunctions(t *testing.T) { - if err := setupEnv(t); err != nil { - t.Fatal(err) - } + setupEnv(t) // Run l1 relayer test cases. t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer) t.Run("TestL1RelayerProcessSaveEvents", testL1RelayerProcessSaveEvents) @@ -111,12 +94,10 @@ func TestFunctions(t *testing.T) { // Run l2 relayer test cases. t.Run("TestCreateNewRelayer", testCreateNewRelayer) - t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents) + t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches) t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches) t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches) - t.Run("TestL2RelayerMsgConfirm", testL2RelayerMsgConfirm) t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm) t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm) t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle) - t.Run("TestLayer2RelayerSendCommitTx", testLayer2RelayerSendCommitTx) } diff --git a/bridge/internal/controller/watcher/batch_proposer.go b/bridge/internal/controller/watcher/batch_proposer.go index cebdf7a54..2146ff970 100644 --- a/bridge/internal/controller/watcher/batch_proposer.go +++ b/bridge/internal/controller/watcher/batch_proposer.go @@ -3,393 +3,170 @@ package watcher import ( "context" "fmt" - "math" - "sync" "time" "github.com/scroll-tech/go-ethereum/log" - gethMetrics "github.com/scroll-tech/go-ethereum/metrics" "gorm.io/gorm" - "scroll-tech/common/metrics" - "scroll-tech/common/types" - - bridgeAbi "scroll-tech/bridge/abi" "scroll-tech/bridge/internal/config" - "scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/orm" 
bridgeTypes "scroll-tech/bridge/internal/types" ) -var ( - bridgeL2BatchesGasOverThresholdTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/gas/over/threshold/total", metrics.ScrollRegistry) - bridgeL2BatchesTxsOverThresholdTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/txs/over/threshold/total", metrics.ScrollRegistry) - bridgeL2BatchesBlocksCreatedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/blocks/created/total", metrics.ScrollRegistry) - bridgeL2BatchesCommitsSentTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/commits/sent/total", metrics.ScrollRegistry) - bridgeL2BatchesOversizedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/oversized/total", metrics.ScrollRegistry) - bridgeL2BatchesTxsCreatedPerBatchGauge = gethMetrics.NewRegisteredGauge("bridge/l2/batches/txs/created/per/batch", metrics.ScrollRegistry) - bridgeL2BatchesGasCreatedPerBatchGauge = gethMetrics.NewRegisteredGauge("bridge/l2/batches/gas/created/per/batch", metrics.ScrollRegistry) - bridgeL2BatchesPayloadSizePerBatchGauge = gethMetrics.NewRegisteredGauge("bridge/l2/batches/payload/size/per/batch", metrics.ScrollRegistry) -) - -// BatchProposer sends batches commit transactions to relayer. +// BatchProposer proposes batches based on available unbatched chunks. 
type BatchProposer struct { - mutex sync.Mutex - ctx context.Context - db *gorm.DB + ctx context.Context + db *gorm.DB - batchTimeSec uint64 - batchGasThreshold uint64 - batchTxNumThreshold uint64 - batchBlocksLimit uint64 - batchCommitTimeSec uint64 - commitCalldataSizeLimit uint64 - batchDataBufferSizeLimit uint64 - commitCalldataMinSize uint64 - commitBatchCountLimit int + batchOrm *orm.Batch + chunkOrm *orm.Chunk + l2Block *orm.L2Block - proofGenerationFreq uint64 - batchDataBuffer []*bridgeTypes.BatchData - relayer *relayer.Layer2Relayer - - blockBatchOrm *orm.BlockBatch - blockTraceOrm *orm.BlockTrace - - piCfg *bridgeTypes.PublicInputHashConfig + maxChunkNumPerBatch uint64 + maxL1CommitGasPerBatch uint64 + maxL1CommitCalldataSizePerBatch uint64 + minChunkNumPerBatch uint64 + batchTimeoutSec uint64 } -// NewBatchProposer will return a new instance of BatchProposer. -func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *relayer.Layer2Relayer, db *gorm.DB) *BatchProposer { - p := &BatchProposer{ - mutex: sync.Mutex{}, - ctx: ctx, - db: db, - blockBatchOrm: orm.NewBlockBatch(db), - blockTraceOrm: orm.NewBlockTrace(db), - batchTimeSec: cfg.BatchTimeSec, - batchGasThreshold: cfg.BatchGasThreshold, - batchTxNumThreshold: cfg.BatchTxNumThreshold, - batchBlocksLimit: cfg.BatchBlocksLimit, - batchCommitTimeSec: cfg.BatchCommitTimeSec, - commitCalldataSizeLimit: cfg.CommitTxCalldataSizeLimit, - commitCalldataMinSize: cfg.CommitTxCalldataMinSize, - commitBatchCountLimit: int(cfg.CommitTxBatchCountLimit), - batchDataBufferSizeLimit: 100*cfg.CommitTxCalldataSizeLimit + 1*1024*1024, // @todo: determine the value. - proofGenerationFreq: cfg.ProofGenerationFreq, - piCfg: cfg.PublicInputConfig, - relayer: relayer, - } - - // for graceful restart. 
- p.recoverBatchDataBuffer() - - // try to commit the leftover pending batches - p.TryCommitBatches() - - return p -} - -func (p *BatchProposer) recoverBatchDataBuffer() { - // batches are sorted by batch index in increasing order - batchHashes, err := p.blockBatchOrm.GetBlockBatchesHashByRollupStatus(types.RollupPending, math.MaxInt32) - if err != nil { - log.Crit("Failed to fetch pending L2 batches", "err", err) - } - if len(batchHashes) == 0 { - return - } - log.Info("Load pending batches into batchDataBuffer") - - // helper function to cache and get BlockBatch from DB - blockBatchCache := make(map[string]orm.BlockBatch) - getBlockBatch := func(batchHash string) (*orm.BlockBatch, error) { - if blockBatch, ok := blockBatchCache[batchHash]; ok { - return &blockBatch, nil - } - blockBatches, err := p.blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 0) - if err != nil || len(blockBatches) == 0 { - return nil, err - } - blockBatchCache[batchHash] = blockBatches[0] - return &blockBatches[0], nil - } - - // recover the in-memory batchData from DB - for _, batchHash := range batchHashes { - log.Info("recover batch data from pending batch", "batch_hash", batchHash) - blockBatch, err := getBlockBatch(batchHash) - if err != nil { - log.Error("could not get BlockBatch", "batch_hash", batchHash, "error", err) - continue - } - - parentBatch, err := getBlockBatch(blockBatch.ParentHash) - if err != nil { - log.Error("could not get parent BlockBatch", "batch_hash", batchHash, "error", err) - continue - } - - whereFileds := map[string]interface{}{ - "batch_hash": batchHash, - } - orderByList := []string{ - "number ASC", - } - - blockTraces, err := p.blockTraceOrm.GetL2BlockInfos(whereFileds, orderByList, 0) - if err != nil { - log.Error("could not GetL2BlockInfos", "batch_hash", batchHash, "error", err) - continue - } - if len(blockTraces) != int(blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1) { - log.Error("the number of block info retrieved 
from DB mistmatches the batch info in the DB", - "len(blockInfos)", len(blockTraces), - "expected", blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1) - continue - } - - batchData, err := p.generateBatchData(parentBatch, blockTraces) - if err != nil { - continue - } - if batchData.Hash().Hex() != batchHash { - log.Error("the hash from recovered batch data mismatches the DB entry", - "recovered_batch_hash", batchData.Hash().Hex(), - "expected", batchHash) - continue - } - - p.batchDataBuffer = append(p.batchDataBuffer, batchData) +// NewBatchProposer creates a new BatchProposer instance. +func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB) *BatchProposer { + return &BatchProposer{ + ctx: ctx, + db: db, + batchOrm: orm.NewBatch(db), + chunkOrm: orm.NewChunk(db), + l2Block: orm.NewL2Block(db), + maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch, + maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch, + maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch, + minChunkNumPerBatch: cfg.MinChunkNumPerBatch, + batchTimeoutSec: cfg.BatchTimeoutSec, } } -// TryProposeBatch will try to propose a batch. +// TryProposeBatch tries to propose a new batches. func (p *BatchProposer) TryProposeBatch() { - p.mutex.Lock() - defer p.mutex.Unlock() - - for p.getBatchDataBufferSize() < p.batchDataBufferSizeLimit { - orderBy := []string{"number ASC"} - blockTraces, err := p.blockTraceOrm.GetUnbatchedL2Blocks(map[string]interface{}{}, orderBy, int(p.batchBlocksLimit)) - if err != nil { - log.Error("failed to get unbatched blocks", "err", err) - return - } - - batchCreated := p.proposeBatch(blockTraces) - - // while size of batchDataBuffer < commitCalldataMinSize, - // proposer keeps fetching and porposing batches. - if p.getBatchDataBufferSize() >= p.commitCalldataMinSize { - return - } - - if !batchCreated { - // wait for watcher to insert l2 traces. 
- time.Sleep(time.Second) - } - } -} - -// TryCommitBatches will try to commit the pending batches. -func (p *BatchProposer) TryCommitBatches() { - p.mutex.Lock() - defer p.mutex.Unlock() - - if len(p.batchDataBuffer) == 0 { + dbChunks, err := p.proposeBatchChunks() + if err != nil { + log.Error("proposeBatchChunks failed", "err", err) return } - - // estimate the calldata length to determine whether to commit the pending batches - index := 0 - commit := false - calldataByteLen := uint64(0) - for ; index < len(p.batchDataBuffer) && index < p.commitBatchCountLimit; index++ { - calldataByteLen += bridgeAbi.GetBatchCalldataLength(&p.batchDataBuffer[index].Batch) - if calldataByteLen > p.commitCalldataSizeLimit { - commit = true - if index == 0 { - log.Warn( - "The calldata size of one batch is larger than the threshold", - "batch_hash", p.batchDataBuffer[0].Hash().Hex(), - "calldata_size", calldataByteLen, - ) - index = 1 - } - break - } - } - if !commit && p.batchDataBuffer[0].Timestamp()+p.batchCommitTimeSec > uint64(time.Now().Unix()) { - return - } - - // Send commit tx for batchDataBuffer[0:index] - log.Info("Commit batches", "start_index", p.batchDataBuffer[0].Batch.BatchIndex, - "end_index", p.batchDataBuffer[index-1].Batch.BatchIndex) - err := p.relayer.SendCommitTx(p.batchDataBuffer[:index]) - if err != nil { - // leave the retry to the next ticker - log.Error("SendCommitTx failed", "error", err) - } else { - // pop the processed batches from the buffer - bridgeL2BatchesCommitsSentTotalCounter.Inc(1) - p.batchDataBuffer = p.batchDataBuffer[index:] + if err := p.updateBatchInfoInDB(dbChunks); err != nil { + log.Error("update batch info in db failed", "err", err) } } -func (p *BatchProposer) proposeBatch(blockTraces []orm.BlockTrace) bool { - if len(blockTraces) == 0 { - return false +func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error { + numChunks := len(dbChunks) + if numChunks <= 0 { + return nil + } + chunks, err := 
p.dbChunksToBridgeChunks(dbChunks) + if err != nil { + return err } - approximatePayloadSize := func(hash string) (uint64, error) { - traces, err := p.blockTraceOrm.GetL2WrappedBlocks(map[string]interface{}{"hash": hash}) + startChunkIndex := dbChunks[0].Index + startChunkHash := dbChunks[0].Hash + endChunkIndex := dbChunks[numChunks-1].Index + endChunkHash := dbChunks[numChunks-1].Hash + err = p.db.Transaction(func(dbTX *gorm.DB) error { + var batchHash string + batchHash, err = p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX) if err != nil { - return 0, err + return err } - if len(traces) != 1 { - return 0, fmt.Errorf("unexpected traces length, expected = 1, actual = %d", len(traces)) - } - size := 0 - for _, tx := range traces[0].Transactions { - size += len(tx.Data) - } - return uint64(size), nil - } - - firstSize, err := approximatePayloadSize(blockTraces[0].Hash) - if err != nil { - log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err) - return false - } - - if firstSize > p.commitCalldataSizeLimit { - log.Warn("oversized payload even for only 1 block", "height", blockTraces[0].Number, "size", firstSize) - // note: we should probably fail here once we can ensure this will not happen - if err := p.createBatchForBlocks(blockTraces[:1]); err != nil { - log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err) - return false - } - bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blockTraces[0].TxNum)) - bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blockTraces[0].GasUsed)) - bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(firstSize)) - bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1) - bridgeL2BatchesOversizedTotalCounter.Inc(1) - return true - } - - if blockTraces[0].GasUsed > p.batchGasThreshold { - bridgeL2BatchesGasOverThresholdTotalCounter.Inc(1) - log.Warn("gas overflow even for only 1 block", "height", blockTraces[0].Number, "gas", 
blockTraces[0].GasUsed) - if err := p.createBatchForBlocks(blockTraces[:1]); err != nil { - log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err) - } else { - bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blockTraces[0].TxNum)) - bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blockTraces[0].GasUsed)) - bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(firstSize)) - bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1) - } - return true - } - - if blockTraces[0].TxNum > p.batchTxNumThreshold { - bridgeL2BatchesTxsOverThresholdTotalCounter.Inc(1) - log.Warn("too many txs even for only 1 block", "height", blockTraces[0].Number, "tx_num", blockTraces[0].TxNum) - if err := p.createBatchForBlocks(blockTraces[:1]); err != nil { - log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err) - } else { - bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blockTraces[0].TxNum)) - bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blockTraces[0].GasUsed)) - bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(firstSize)) - bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1) - } - return true - } - - var gasUsed, txNum, payloadSize uint64 - reachThreshold := false - // add blocks into batch until reach batchGasThreshold - for i, block := range blockTraces { - size, err := approximatePayloadSize(block.Hash) + err = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batchHash, dbTX) if err != nil { - log.Error("failed to create batch", "number", block.Number, "err", err) - return false + return err } - - if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) || (payloadSize+size > p.commitCalldataSizeLimit) { - blockTraces = blockTraces[:i] - reachThreshold = true - break - } - gasUsed += block.GasUsed - txNum += block.TxNum - payloadSize += size - } - - // if too few gas gathered, but we don't want to halt, we then check the first block in the batch: - // if it's 
not old enough we will skip proposing the batch, - // otherwise we will still propose a batch - if !reachThreshold && blockTraces[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) { - return false - } - - if err := p.createBatchForBlocks(blockTraces); err != nil { - log.Error("failed to create batch", "from", blockTraces[0].Number, "to", blockTraces[len(blockTraces)-1].Number, "err", err) - } else { - bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(txNum)) - bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(gasUsed)) - bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(payloadSize)) - bridgeL2BatchesBlocksCreatedTotalCounter.Inc(int64(len(blockTraces))) - } - - return true + return nil + }) + return err } -func (p *BatchProposer) createBatchForBlocks(blocks []orm.BlockTrace) error { - lastBatch, err := p.blockBatchOrm.GetLatestBatch() +func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) { + dbChunks, err := p.chunkOrm.GetUnbatchedChunks(p.ctx) if err != nil { - // We should not receive sql.ErrNoRows error. The DB should have the batch entry that contains the genesis block. - return err + return nil, err } - batchData, err := p.generateBatchData(lastBatch, blocks) - if err != nil { - log.Error("createBatchData failed", "error", err) - return err + if len(dbChunks) == 0 { + log.Warn("No Unbatched Chunks") + return nil, nil } - if err := orm.AddBatchInfoToDB(p.db, batchData); err != nil { - log.Error("addBatchInfoToDB failed", "BatchHash", batchData.Hash(), "error", err) - return err + firstChunk := dbChunks[0] + totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize + totalL1CommitGas := firstChunk.TotalL1CommitGas + var totalChunks uint64 = 1 + + // Check if the first chunk breaks hard limits. + // If so, it indicates there are bugs in chunk-proposer, manual fix is needed. 
+ if totalL1CommitGas > p.maxL1CommitGasPerBatch { + return nil, fmt.Errorf( + "the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v", + firstChunk.StartBlockNumber, + firstChunk.EndBlockNumber, + totalL1CommitGas, + p.maxL1CommitGasPerBatch, + ) } - p.batchDataBuffer = append(p.batchDataBuffer, batchData) - return nil + if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch { + return nil, fmt.Errorf( + "the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v", + firstChunk.StartBlockNumber, + firstChunk.EndBlockNumber, + totalL1CommitCalldataSize, + p.maxL1CommitCalldataSizePerBatch, + ) + } + + for i, chunk := range dbChunks[1:] { + totalChunks++ + totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize + totalL1CommitGas += chunk.TotalL1CommitGas + if totalChunks > p.maxChunkNumPerBatch || + totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch || + totalL1CommitGas > p.maxL1CommitGasPerBatch { + return dbChunks[:i+1], nil + } + } + + var hasChunkTimeout bool + currentTimeSec := uint64(time.Now().Unix()) + if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec { + log.Warn("first block timeout", + "start block number", dbChunks[0].StartBlockNumber, + "first block timestamp", dbChunks[0].StartBlockTime, + "chunk outdated time threshold", currentTimeSec, + ) + hasChunkTimeout = true + } + + if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch { + log.Warn("The payload size of the batch is less than the minimum limit", + "chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch, + ) + return nil, nil + } + return dbChunks, nil } -func (p *BatchProposer) generateBatchData(parentBatch *orm.BlockBatch, blocks []orm.BlockTrace) (*bridgeTypes.BatchData, error) { - var wrappedBlocks []*bridgeTypes.WrappedBlock - for _, block := range blocks 
{ - trs, err := p.blockTraceOrm.GetL2WrappedBlocks(map[string]interface{}{"hash": block.Hash}) - if err != nil || len(trs) != 1 { - log.Error("Failed to GetBlockTraces", "hash", block.Hash, "err", err) +func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*bridgeTypes.Chunk, error) { + chunks := make([]*bridgeTypes.Chunk, len(dbChunks)) + for i, c := range dbChunks { + wrappedBlocks, err := p.l2Block.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber) + if err != nil { + log.Error("Failed to fetch wrapped blocks", + "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err) return nil, err } - - wrappedBlocks = append(wrappedBlocks, trs[0]) + chunks[i] = &bridgeTypes.Chunk{ + Blocks: wrappedBlocks, + } } - - parentBatchInfo := bridgeTypes.BatchInfo{ - Index: parentBatch.Index, - Hash: parentBatch.Hash, - StateRoot: parentBatch.StateRoot, - } - return bridgeTypes.NewBatchData(&parentBatchInfo, wrappedBlocks, p.piCfg), nil -} - -func (p *BatchProposer) getBatchDataBufferSize() (size uint64) { - for _, batchData := range p.batchDataBuffer { - size += bridgeAbi.GetBatchCalldataLength(&batchData.Batch) - } - return + return chunks, nil } diff --git a/bridge/internal/controller/watcher/batch_proposer_test.go b/bridge/internal/controller/watcher/batch_proposer_test.go index 532a6d1c5..e6515417d 100644 --- a/bridge/internal/controller/watcher/batch_proposer_test.go +++ b/bridge/internal/controller/watcher/batch_proposer_test.go @@ -2,207 +2,71 @@ package watcher import ( "context" - "math" - "strings" "testing" - "time" - "github.com/agiledragon/gomonkey/v2" - "github.com/scroll-tech/go-ethereum/common" - gethTtypes "github.com/scroll-tech/go-ethereum/core/types" "github.com/stretchr/testify/assert" - "gorm.io/gorm" "scroll-tech/common/types" "scroll-tech/bridge/internal/config" - "scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/orm" bridgeTypes "scroll-tech/bridge/internal/types" - 
bridgeUtils "scroll-tech/bridge/internal/utils" + "scroll-tech/bridge/internal/utils" ) -func testBatchProposerProposeBatch(t *testing.T) { +// TODO: Add unit tests that the limits are enforced correctly. +func testBatchProposer(t *testing.T) { db := setupDB(t) - defer bridgeUtils.CloseDB(db) + defer utils.CloseDB(db) - p := &BatchProposer{ - batchGasThreshold: 1000, - batchTxNumThreshold: 10, - batchTimeSec: 300, - commitCalldataSizeLimit: 500, - } + l2BlockOrm := orm.NewL2Block(db) + err := l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}) + assert.NoError(t, err) - var blockTrace *orm.BlockTrace - patchGuard := gomonkey.ApplyMethodFunc(blockTrace, "GetL2WrappedBlocks", func(fields map[string]interface{}) ([]*bridgeTypes.WrappedBlock, error) { - hash, _ := fields["hash"].(string) - if hash == "blockWithLongData" { - longData := strings.Repeat("0", 1000) - return []*bridgeTypes.WrappedBlock{{ - Transactions: []*gethTtypes.TransactionData{{ - Data: longData, - }}, - }}, nil - } - return []*bridgeTypes.WrappedBlock{{ - Transactions: []*gethTtypes.TransactionData{{ - Data: "short", - }}, - }}, nil - }) - defer patchGuard.Reset() - patchGuard.ApplyPrivateMethod(p, "createBatchForBlocks", func(*BatchProposer, []*types.BlockInfo) error { - return nil - }) + cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ + MaxTxGasPerChunk: 1000000000, + MaxL2TxNumPerChunk: 10000, + MaxL1CommitGasPerChunk: 50000000000, + MaxL1CommitCalldataSizePerChunk: 1000000, + MinL1CommitCalldataSizePerChunk: 0, + ChunkTimeoutSec: 300, + }, db) + cp.TryProposeChunk() - block1 := orm.BlockTrace{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix()) - 200} - block2 := orm.BlockTrace{Number: 2, GasUsed: 200, TxNum: 2, BlockTimestamp: uint64(time.Now().Unix())} - block3 := orm.BlockTrace{Number: 3, GasUsed: 300, TxNum: 11, BlockTimestamp: uint64(time.Now().Unix())} - block4 := orm.BlockTrace{Number: 
4, GasUsed: 1001, TxNum: 3, BlockTimestamp: uint64(time.Now().Unix())} - blockOutdated := orm.BlockTrace{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Add(-400 * time.Second).Unix())} - blockWithLongData := orm.BlockTrace{Hash: "blockWithLongData", Number: 5, GasUsed: 500, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix())} + bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + MaxChunkNumPerBatch: 10, + MaxL1CommitGasPerBatch: 50000000000, + MaxL1CommitCalldataSizePerBatch: 1000000, + MinChunkNumPerBatch: 1, + BatchTimeoutSec: 300, + }, db) + bp.TryProposeBatch() - testCases := []struct { - description string - blocks []orm.BlockTrace - expectedRes bool - }{ - {"Empty block list", []orm.BlockTrace{}, false}, - {"Single block exceeding gas threshold", []orm.BlockTrace{block4}, true}, - {"Single block exceeding transaction number threshold", []orm.BlockTrace{block3}, true}, - {"Multiple blocks meeting thresholds", []orm.BlockTrace{block1, block2, block3}, true}, - {"Multiple blocks not meeting thresholds", []orm.BlockTrace{block1, block2}, false}, - {"Outdated and valid block", []orm.BlockTrace{blockOutdated, block2}, true}, - {"Single block with long data", []orm.BlockTrace{blockWithLongData}, true}, - } + chunkOrm := orm.NewChunk(db) + chunks, err := chunkOrm.GetUnbatchedChunks(context.Background()) + assert.NoError(t, err) + assert.Empty(t, chunks) - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - assert.Equal(t, tc.expectedRes, p.proposeBatch(tc.blocks), "Failed on: %s", tc.description) - }) - } -} - -func testBatchProposerBatchGeneration(t *testing.T) { - db := setupDB(t) - subCtx, cancel := context.WithCancel(context.Background()) - defer func() { - bridgeUtils.CloseDB(db) - cancel() - }() - blockTraceOrm := orm.NewBlockTrace(db) - // Insert traces into db. 
- assert.NoError(t, blockTraceOrm.InsertWrappedBlocks([]*bridgeTypes.WrappedBlock{wrappedBlock1})) - - l2cfg := cfg.L2Config - wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db) - loopToFetchEvent(subCtx, wc) - - blockBatchOrm := orm.NewBlockBatch(db) - batch, err := blockBatchOrm.GetLatestBatch() - assert.NoError(t, err) - - // Create a new batch. - batchData := bridgeTypes.NewBatchData(&bridgeTypes.BatchInfo{ - Index: 0, - Hash: batch.Hash, - StateRoot: batch.StateRoot, - }, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil) - - relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig) - assert.NoError(t, err) - - proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - ProofGenerationFreq: 1, - BatchGasThreshold: 3000000, - BatchTxNumThreshold: 135, - BatchTimeSec: 1, - BatchBlocksLimit: 100, - CommitTxBatchCountLimit: 30, - }, relayer, db) - proposer.TryProposeBatch() - - infos, err := blockTraceOrm.GetUnbatchedL2Blocks(map[string]interface{}{}, []string{"number ASC"}, 100) - assert.NoError(t, err) - assert.Equal(t, 0, len(infos)) - - batches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchData.Hash().Hex()}, nil, 1) - assert.NoError(t, err) - assert.Equal(t, 1, len(batches)) -} - -func testBatchProposerGracefulRestart(t *testing.T) { - db := setupDB(t) - defer bridgeUtils.CloseDB(db) - - relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig) - assert.NoError(t, err) - - blockTraceOrm := orm.NewBlockTrace(db) - // Insert traces into db. - assert.NoError(t, blockTraceOrm.InsertWrappedBlocks([]*bridgeTypes.WrappedBlock{wrappedBlock2})) - - // Insert block batch into db. 
- parentBatch1 := &bridgeTypes.BatchInfo{ - Index: 0, - Hash: common.Hash{}.String(), - StateRoot: common.Hash{}.String(), - } - batchData1 := bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil) - - parentBatch2 := &bridgeTypes.BatchInfo{ - Index: batchData1.Batch.BatchIndex, - Hash: batchData1.Hash().Hex(), - StateRoot: batchData1.Batch.NewStateRoot.String(), - } - batchData2 := bridgeTypes.NewBatchData(parentBatch2, []*bridgeTypes.WrappedBlock{wrappedBlock2}, nil) - - blockBatchOrm := orm.NewBlockBatch(db) - err = db.Transaction(func(tx *gorm.DB) error { - _, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData1) - if dbTxErr != nil { - return dbTxErr - } - _, dbTxErr = blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData2) - if dbTxErr != nil { - return dbTxErr - } - numbers1 := []uint64{batchData1.Batch.Blocks[0].BlockNumber} - hash1 := batchData1.Hash().Hex() - dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, numbers1, hash1) - if dbTxErr != nil { - return dbTxErr - } - numbers2 := []uint64{batchData2.Batch.Blocks[0].BlockNumber} - hash2 := batchData2.Hash().Hex() - dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, numbers2, hash2) - if dbTxErr != nil { - return dbTxErr - } - return nil - }) - assert.NoError(t, err) - err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchData1.Hash().Hex(), types.RollupFinalized) - assert.NoError(t, err) - batchHashes, err := blockBatchOrm.GetBlockBatchesHashByRollupStatus(types.RollupPending, math.MaxInt32) - assert.NoError(t, err) - assert.Equal(t, 1, len(batchHashes)) - assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0]) - // test p.recoverBatchDataBuffer(). 
- _ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - ProofGenerationFreq: 1, - BatchGasThreshold: 3000000, - BatchTxNumThreshold: 135, - BatchTimeSec: 1, - BatchBlocksLimit: 100, - CommitTxBatchCountLimit: 30, - }, relayer, db) - - batchHashes, err = blockBatchOrm.GetBlockBatchesHashByRollupStatus(types.RollupPending, math.MaxInt32) - assert.NoError(t, err) - assert.Equal(t, 0, len(batchHashes)) - - batches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchData2.Hash().Hex()}, nil, 1) - assert.NoError(t, err) - assert.Equal(t, 1, len(batches)) + batchOrm := orm.NewBatch(db) + // get all batches. + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) + assert.NoError(t, err) + assert.Len(t, batches, 1) + assert.Equal(t, uint64(0), batches[0].StartChunkIndex) + assert.Equal(t, uint64(0), batches[0].EndChunkIndex) + assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus)) + assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus)) + + dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0) + assert.NoError(t, err) + assert.Len(t, batches, 1) + assert.Equal(t, batches[0].Hash, dbChunks[0].BatchHash) + assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(dbChunks[0].ProvingStatus)) + + blockOrm := orm.NewL2Block(db) + blocks, err := blockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0) + assert.NoError(t, err) + assert.Len(t, blocks, 2) + assert.Equal(t, dbChunks[0].Hash, blocks[0].ChunkHash) + assert.Equal(t, dbChunks[0].Hash, blocks[1].ChunkHash) } diff --git a/bridge/internal/controller/watcher/chunk_proposer.go b/bridge/internal/controller/watcher/chunk_proposer.go new file mode 100644 index 000000000..b95d496b7 --- /dev/null +++ b/bridge/internal/controller/watcher/chunk_proposer.go @@ -0,0 +1,169 @@ +package watcher + +import ( + "context" + "fmt" + "time" + 
+ "github.com/scroll-tech/go-ethereum/log" + "gorm.io/gorm" + + "scroll-tech/bridge/internal/config" + "scroll-tech/bridge/internal/orm" + bridgeTypes "scroll-tech/bridge/internal/types" +) + +// ChunkProposer proposes chunks based on available unchunked blocks. +type ChunkProposer struct { + ctx context.Context + db *gorm.DB + + chunkOrm *orm.Chunk + l2BlockOrm *orm.L2Block + + maxTxGasPerChunk uint64 + maxL2TxNumPerChunk uint64 + maxL1CommitGasPerChunk uint64 + maxL1CommitCalldataSizePerChunk uint64 + minL1CommitCalldataSizePerChunk uint64 + chunkTimeoutSec uint64 +} + +// NewChunkProposer creates a new ChunkProposer instance. +func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB) *ChunkProposer { + return &ChunkProposer{ + ctx: ctx, + db: db, + chunkOrm: orm.NewChunk(db), + l2BlockOrm: orm.NewL2Block(db), + maxTxGasPerChunk: cfg.MaxTxGasPerChunk, + maxL2TxNumPerChunk: cfg.MaxL2TxNumPerChunk, + maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk, + maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk, + minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk, + chunkTimeoutSec: cfg.ChunkTimeoutSec, + } +} + +// TryProposeChunk tries to propose a new chunk. 
+func (p *ChunkProposer) TryProposeChunk() { + proposedChunk, err := p.proposeChunk() + if err != nil { + log.Error("propose new chunk failed", "err", err) + return + } + + if err := p.updateChunkInfoInDB(proposedChunk); err != nil { + log.Error("update chunk info in orm failed", "err", err) + } +} + +func (p *ChunkProposer) updateChunkInfoInDB(chunk *bridgeTypes.Chunk) error { + if chunk == nil { + log.Warn("proposed chunk is nil, cannot update in DB") + return nil + } + + err := p.db.Transaction(func(dbTX *gorm.DB) error { + dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX) + if err != nil { + return err + } + if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil { + log.Error("failed to update chunk_hash for l2_blocks", "chunk hash", chunk.Hash, "start block", 0, "end block", 0, "err", err) + return err + } + return nil + }) + return err +} + +func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) { + blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx) + if err != nil { + return nil, err + } + + if len(blocks) == 0 { + log.Warn("no un-chunked blocks") + return nil, nil + } + + firstBlock := blocks[0] + totalTxGasUsed := firstBlock.Header.GasUsed + totalL2TxNum := firstBlock.L2TxsNum() + totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize() + totalL1CommitGas := firstBlock.EstimateL1CommitGas() + + // Check if the first block breaks hard limits. + // If so, it indicates there are bugs in sequencer, manual fix is needed. 
+ if totalL2TxNum > p.maxL2TxNumPerChunk { + return nil, fmt.Errorf( + "the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v", + firstBlock.Header.Number, + totalL2TxNum, + p.maxL2TxNumPerChunk, + ) + } + + if totalTxGasUsed > p.maxTxGasPerChunk { + return nil, fmt.Errorf( + "the first block exceeds l2 tx gas limit; block number: %v, gas used: %v, max gas limit: %v", + firstBlock.Header.Number, + totalTxGasUsed, + p.maxTxGasPerChunk, + ) + } + + if totalL1CommitGas > p.maxL1CommitGasPerChunk { + return nil, fmt.Errorf( + "the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v", + firstBlock.Header.Number, + totalL1CommitGas, + p.maxL1CommitGasPerChunk, + ) + } + + if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk { + return nil, fmt.Errorf( + "the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v", + firstBlock.Header.Number, + totalL1CommitCalldataSize, + p.maxL1CommitCalldataSizePerChunk, + ) + } + + for i, block := range blocks[1:] { + totalTxGasUsed += block.Header.GasUsed + totalL2TxNum += block.L2TxsNum() + totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize() + totalL1CommitGas += block.EstimateL1CommitGas() + if totalTxGasUsed > p.maxTxGasPerChunk || + totalL2TxNum > p.maxL2TxNumPerChunk || + totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk || + totalL1CommitGas > p.maxL1CommitGasPerChunk { + blocks = blocks[:i+1] + break + } + } + + var hasBlockTimeout bool + currentTimeSec := uint64(time.Now().Unix()) + if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec { + log.Warn("first block timeout", + "block number", blocks[0].Header.Number, + "block timestamp", blocks[0].Header.Time, + "block outdated time threshold", currentTimeSec, + ) + hasBlockTimeout = true + } + + if !hasBlockTimeout && totalL1CommitCalldataSize < 
p.minL1CommitCalldataSizePerChunk { + log.Warn("The calldata size of the chunk is less than the minimum limit", + "totalL1CommitCalldataSize", totalL1CommitCalldataSize, + "minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk, + ) + return nil, nil + } + return &bridgeTypes.Chunk{Blocks: blocks}, nil +} diff --git a/bridge/internal/controller/watcher/chunk_proposer_test.go b/bridge/internal/controller/watcher/chunk_proposer_test.go new file mode 100644 index 000000000..ade4db5fa --- /dev/null +++ b/bridge/internal/controller/watcher/chunk_proposer_test.go @@ -0,0 +1,45 @@ +package watcher + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "scroll-tech/bridge/internal/config" + "scroll-tech/bridge/internal/orm" + bridgeTypes "scroll-tech/bridge/internal/types" + "scroll-tech/bridge/internal/utils" +) + +// TODO: Add unit tests that the limits are enforced correctly. +func testChunkProposer(t *testing.T) { + db := setupDB(t) + defer utils.CloseDB(db) + + l2BlockOrm := orm.NewL2Block(db) + err := l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}) + assert.NoError(t, err) + + cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ + MaxTxGasPerChunk: 1000000000, + MaxL2TxNumPerChunk: 10000, + MaxL1CommitGasPerChunk: 50000000000, + MaxL1CommitCalldataSizePerChunk: 1000000, + MinL1CommitCalldataSizePerChunk: 0, + ChunkTimeoutSec: 300, + }, db) + cp.TryProposeChunk() + + expectedChunk := &bridgeTypes.Chunk{ + Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}, + } + expectedHash, err := expectedChunk.Hash(0) + assert.NoError(t, err) + + chunkOrm := orm.NewChunk(db) + chunks, err := chunkOrm.GetUnbatchedChunks(context.Background()) + assert.NoError(t, err) + assert.Len(t, chunks, 1) + assert.Equal(t, expectedHash.Hex(), chunks[0].Hash) +} diff --git a/bridge/internal/controller/watcher/l1_watcher.go 
b/bridge/internal/controller/watcher/l1_watcher.go index 9e906ed2c..bbe6f3a1f 100644 --- a/bridge/internal/controller/watcher/l1_watcher.go +++ b/bridge/internal/controller/watcher/l1_watcher.go @@ -43,7 +43,7 @@ type L1WatcherClient struct { l1MessageOrm *orm.L1Message l2MessageOrm *orm.L2Message l1BlockOrm *orm.L1Block - l1BatchOrm *orm.BlockBatch + batchOrm *orm.Batch // The number of new blocks to wait for a block to be confirmed confirmations rpc.BlockNumber @@ -90,7 +90,7 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig client: client, l1MessageOrm: l1MessageOrm, l1BlockOrm: l1BlockOrm, - l1BatchOrm: orm.NewBlockBatch(db), + batchOrm: orm.NewBatch(db), l2MessageOrm: orm.NewL2Message(db), confirmations: confirmations, @@ -245,7 +245,7 @@ func (w *L1WatcherClient) FetchContractEvent() error { for _, event := range rollupEvents { batchHashes = append(batchHashes, event.batchHash.String()) } - statuses, err := w.l1BatchOrm.GetRollupStatusByHashList(batchHashes) + statuses, err := w.batchOrm.GetRollupStatusByHashList(w.ctx, batchHashes) if err != nil { log.Error("Failed to GetRollupStatusByHashList", "err", err) return err @@ -261,9 +261,9 @@ func (w *L1WatcherClient) FetchContractEvent() error { // only update when db status is before event status if event.status > status { if event.status == types.RollupFinalized { - err = w.l1BatchOrm.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status) + err = w.batchOrm.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status) } else if event.status == types.RollupCommitted { - err = w.l1BatchOrm.UpdateCommitTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status) + err = w.batchOrm.UpdateCommitTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status) } if err != nil { log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err) @@ -317,7 +317,7 @@ func (w 
*L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M msgHash := common.BytesToHash(crypto.Keccak256(event.Data)) l1Messages = append(l1Messages, &orm.L1Message{ - QueueIndex: event.QueueIndex.Uint64(), + QueueIndex: event.QueueIndex, MsgHash: msgHash.String(), Height: vLog.BlockNumber, Sender: event.Sender.String(), diff --git a/bridge/internal/controller/watcher/l1_watcher_test.go b/bridge/internal/controller/watcher/l1_watcher_test.go index 875360173..49538de50 100644 --- a/bridge/internal/controller/watcher/l1_watcher_test.go +++ b/bridge/internal/controller/watcher/l1_watcher_test.go @@ -163,7 +163,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) { return nil, nil, nil, targetErr }) err := watcher.FetchContractEvent() - assert.Equal(t, err.Error(), targetErr.Error()) + assert.EqualError(t, err, targetErr.Error()) }) patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) { @@ -195,10 +195,10 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) { return nil, relayedMessageEvents, rollupEvents, nil }) - var blockBatchOrm *orm.BlockBatch + var batchOrm *orm.Batch convey.Convey("db get rollup status by hash list failure", t, func() { targetErr := errors.New("get db failure") - patchGuard.ApplyMethodFunc(blockBatchOrm, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) { + patchGuard.ApplyMethodFunc(batchOrm, "GetRollupStatusByHashList", func(context.Context, []string) ([]commonTypes.RollupStatus, error) { return nil, targetErr }) err := watcher.FetchContractEvent() @@ -206,7 +206,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) { }) convey.Convey("rollup status mismatch batch hashes length", t, func() { - patchGuard.ApplyMethodFunc(blockBatchOrm, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) { + patchGuard.ApplyMethodFunc(batchOrm, 
"GetRollupStatusByHashList", func(context.Context, []string) ([]commonTypes.RollupStatus, error) { s := []commonTypes.RollupStatus{ commonTypes.RollupFinalized, } @@ -216,7 +216,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) { assert.NoError(t, err) }) - patchGuard.ApplyMethodFunc(blockBatchOrm, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) { + patchGuard.ApplyMethodFunc(batchOrm, "GetRollupStatusByHashList", func(context.Context, []string) ([]commonTypes.RollupStatus, error) { s := []commonTypes.RollupStatus{ commonTypes.RollupPending, commonTypes.RollupCommitting, @@ -226,27 +226,27 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) { convey.Convey("db update RollupFinalized status failure", t, func() { targetErr := errors.New("UpdateFinalizeTxHashAndRollupStatus RollupFinalized failure") - patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error { + patchGuard.ApplyMethodFunc(batchOrm, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error { return targetErr }) err := watcher.FetchContractEvent() assert.Equal(t, targetErr.Error(), err.Error()) }) - patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error { + patchGuard.ApplyMethodFunc(batchOrm, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error { return nil }) convey.Convey("db update RollupCommitted status failure", t, func() { targetErr := errors.New("UpdateCommitTxHashAndRollupStatus RollupCommitted failure") - patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error { + patchGuard.ApplyMethodFunc(batchOrm, "UpdateCommitTxHashAndRollupStatus", 
func(context.Context, string, string, commonTypes.RollupStatus) error { return targetErr }) err := watcher.FetchContractEvent() assert.Equal(t, targetErr.Error(), err.Error()) }) - patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error { + patchGuard.ApplyMethodFunc(batchOrm, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error { return nil }) @@ -313,7 +313,7 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) { convey.Convey("L1QueueTransactionEventSignature success", t, func() { patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error { tmpOut := out.(*bridgeAbi.L1QueueTransactionEvent) - tmpOut.QueueIndex = big.NewInt(100) + tmpOut.QueueIndex = 100 tmpOut.Data = []byte("test data") tmpOut.Sender = common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30") tmpOut.Value = big.NewInt(1000) diff --git a/bridge/internal/controller/watcher/l2_watcher.go b/bridge/internal/controller/watcher/l2_watcher.go index 1ba655848..3ff8365d4 100644 --- a/bridge/internal/controller/watcher/l2_watcher.go +++ b/bridge/internal/controller/watcher/l2_watcher.go @@ -44,11 +44,12 @@ type L2WatcherClient struct { *ethclient.Client - db *gorm.DB - blockBatchOrm *orm.BlockBatch - blockTraceOrm *orm.BlockTrace - l1MessageOrm *orm.L1Message - l2MessageOrm *orm.L2Message + db *gorm.DB + l2BlockOrm *orm.L2Block + chunkOrm *orm.Chunk + batchOrm *orm.Batch + l1MessageOrm *orm.L1Message + l2MessageOrm *orm.L2Message confirmations rpc.BlockNumber @@ -79,8 +80,9 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat db: db, Client: client, - blockBatchOrm: orm.NewBlockBatch(db), - blockTraceOrm: orm.NewBlockTrace(db), + l2BlockOrm: orm.NewL2Block(db), + chunkOrm: orm.NewChunk(db), + batchOrm: 
orm.NewBatch(db), l1MessageOrm: orm.NewL1Message(db), l2MessageOrm: l2MessageOrm, processedMsgHeight: uint64(savedHeight), @@ -105,10 +107,10 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat } func (w *L2WatcherClient) initializeGenesis() error { - if count, err := w.blockBatchOrm.GetBatchCount(); err != nil { + if count, err := w.batchOrm.GetBatchCount(w.ctx); err != nil { return fmt.Errorf("failed to get batch count: %v", err) } else if count > 0 { - log.Info("genesis already imported") + log.Info("genesis already imported", "batch count", count) return nil } @@ -119,61 +121,74 @@ func (w *L2WatcherClient) initializeGenesis() error { log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String()) - blockTrace := &bridgeTypes.WrappedBlock{ - Header: genesis, - Transactions: nil, - WithdrawTrieRoot: common.Hash{}, - } - batchData := bridgeTypes.NewGenesisBatchData(blockTrace) - - if err = orm.AddBatchInfoToDB(w.db, batchData); err != nil { - log.Error("failed to add batch info to DB", "BatchHash", batchData.Hash(), "error", err) - return err + chunk := &bridgeTypes.Chunk{ + Blocks: []*bridgeTypes.WrappedBlock{{ + Header: genesis, + Transactions: nil, + WithdrawTrieRoot: common.Hash{}, + }}, } - batchHash := batchData.Hash().Hex() + err = w.db.Transaction(func(dbTX *gorm.DB) error { + var dbChunk *orm.Chunk + dbChunk, err = w.chunkOrm.InsertChunk(w.ctx, chunk, dbTX) + if err != nil { + return fmt.Errorf("failed to insert chunk: %v", err) + } - if err = w.blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskProved); err != nil { - return fmt.Errorf("failed to update genesis batch proving status: %v", err) + if err = w.chunkOrm.UpdateProvingStatus(w.ctx, dbChunk.Hash, types.ProvingTaskVerified, dbTX); err != nil { + return fmt.Errorf("failed to update genesis chunk proving status: %v", err) + } + + var batchHash string + batchHash, err = w.batchOrm.InsertBatch(w.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, 
[]*bridgeTypes.Chunk{chunk}, dbTX) + if err != nil { + return fmt.Errorf("failed to insert batch: %v", err) + } + + if err = w.chunkOrm.UpdateBatchHashInRange(w.ctx, 0, 0, batchHash, dbTX); err != nil { + return fmt.Errorf("failed to update batch hash for chunks: %v", err) + } + + if err = w.batchOrm.UpdateProvingStatus(w.ctx, batchHash, types.ProvingTaskVerified, dbTX); err != nil { + return fmt.Errorf("failed to update genesis batch proving status: %v", err) + } + + if err = w.batchOrm.UpdateRollupStatus(w.ctx, batchHash, types.RollupFinalized, dbTX); err != nil { + return fmt.Errorf("failed to update genesis batch rollup status: %v", err) + } + + return nil + }) + + if err != nil { + return fmt.Errorf("update genesis transaction failed: %v", err) } - if err = w.blockBatchOrm.UpdateRollupStatus(w.ctx, batchHash, types.RollupFinalized); err != nil { - return fmt.Errorf("failed to update genesis batch rollup status: %v", err) - } - - log.Info("successfully imported genesis batch") + log.Info("successfully imported genesis chunk and batch") return nil } const blockTracesFetchLimit = uint64(10) -// TryFetchRunningMissingBlocks try fetch missing blocks if inconsistent -func (w *L2WatcherClient) TryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) { - // Get newest block in DB. must have blocks at that time. - // Don't use "block_trace" table "trace" column's BlockTrace.Number, - // because it might be empty if the corresponding rollup_result is finalized/finalization_skipped - heightInDB, err := w.blockTraceOrm.GetL2BlocksLatestHeight() +// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks. +func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) { + heightInDB, err := w.l2BlockOrm.GetL2BlocksLatestHeight(w.ctx) if err != nil { log.Error("failed to GetL2BlocksLatestHeight", "err", err) return } - // Can't get trace from genesis block, so the default start number is 1. 
- var from = uint64(1) - if heightInDB > 0 { - from = uint64(heightInDB) + 1 - } - - for ; from <= blockHeight; from += blockTracesFetchLimit { + // Fetch and store block traces for missing blocks + for from := uint64(heightInDB) + 1; from <= blockHeight; from += blockTracesFetchLimit { to := from + blockTracesFetchLimit - 1 if to > blockHeight { to = blockHeight } - // Get block traces and insert into db. - if err = w.getAndStoreBlockTraces(ctx, from, to); err != nil { + if err = w.getAndStoreBlockTraces(w.ctx, from, to); err != nil { log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err) return } @@ -229,7 +244,7 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u } if len(blocks) > 0 { - if err := w.blockTraceOrm.InsertWrappedBlocks(blocks); err != nil { + if err := w.l2BlockOrm.InsertL2Blocks(w.ctx, blocks); err != nil { return fmt.Errorf("failed to batch insert BlockTraces: %v", err) } } diff --git a/bridge/internal/controller/watcher/l2_watcher_test.go b/bridge/internal/controller/watcher/l2_watcher_test.go index 0c41f49ac..ff2d2e98b 100644 --- a/bridge/internal/controller/watcher/l2_watcher_test.go +++ b/bridge/internal/controller/watcher/l2_watcher_test.go @@ -210,15 +210,15 @@ func testFetchRunningMissingBlocks(t *testing.T) { address, err := bind.WaitDeployed(context.Background(), l2Cli, tx) assert.NoError(t, err) - blockTraceOrm := orm.NewBlockTrace(db) + l2BlockOrm := orm.NewL2Block(db) ok := cutils.TryTimes(10, func() bool { latestHeight, err := l2Cli.BlockNumber(context.Background()) if err != nil { return false } wc := prepareWatcherClient(l2Cli, db, address) - wc.TryFetchRunningMissingBlocks(context.Background(), latestHeight) - fetchedHeight, err := blockTraceOrm.GetL2BlocksLatestHeight() + wc.TryFetchRunningMissingBlocks(latestHeight) + fetchedHeight, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background()) return err == nil && uint64(fetchedHeight) == latestHeight }) assert.True(t, 
ok) diff --git a/bridge/internal/controller/watcher/watcher_test.go b/bridge/internal/controller/watcher/watcher_test.go index 1d97a2ca6..6488615fe 100644 --- a/bridge/internal/controller/watcher/watcher_test.go +++ b/bridge/internal/controller/watcher/watcher_test.go @@ -115,8 +115,9 @@ func TestFunction(t *testing.T) { t.Run("TestParseBridgeEventLogsL2FailedRelayedMessageEventSignature", testParseBridgeEventLogsL2FailedRelayedMessageEventSignature) t.Run("TestParseBridgeEventLogsL2AppendMessageEventSignature", testParseBridgeEventLogsL2AppendMessageEventSignature) - // Run batch proposer test cases. - t.Run("TestBatchProposerProposeBatch", testBatchProposerProposeBatch) - t.Run("TestBatchProposerBatchGeneration", testBatchProposerBatchGeneration) - t.Run("TestBatchProposerGracefulRestart", testBatchProposerGracefulRestart) + // Run chunk-proposer test cases. + t.Run("TestChunkProposer", testChunkProposer) + + // Run batch-proposer test cases. + t.Run("TestBatchProposer", testBatchProposer) } diff --git a/bridge/internal/orm/batch.go b/bridge/internal/orm/batch.go new file mode 100644 index 000000000..4bdb5013a --- /dev/null +++ b/bridge/internal/orm/batch.go @@ -0,0 +1,378 @@ +package orm + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "scroll-tech/common/types" + "scroll-tech/common/types/message" + + bridgeTypes "scroll-tech/bridge/internal/types" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/log" + "gorm.io/gorm" +) + +const defaultBatchHeaderVersion = 0 + +// Batch represents a batch of chunks. 
+type Batch struct { + db *gorm.DB `gorm:"column:-"` + + // batch + Index uint64 `json:"index" gorm:"column:index"` + Hash string `json:"hash" gorm:"column:hash"` + StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"` + StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"` + EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"` + EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"` + StateRoot string `json:"state_root" gorm:"column:state_root"` + WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"` + BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"` + + // proof + ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"` + Proof []byte `json:"proof" gorm:"column:proof;default:NULL"` + ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"` + ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"` + ProofTimeSec int `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"` + + // rollup + RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"` + CommitTxHash string `json:"commit_tx_hash" gorm:"column:commit_tx_hash;default:NULL"` + CommittedAt *time.Time `json:"committed_at" gorm:"column:committed_at;default:NULL"` + FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"` + FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"` + + // gas oracle + OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"` + OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"` + + // metadata + CreatedAt time.Time `json:"created_at" gorm:"column:created_at"` + UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"` + DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"` +} + +// 
NewBatch creates a new Batch database instance. +func NewBatch(db *gorm.DB) *Batch { + return &Batch{db: db} +} + +// TableName returns the table name for the Batch model. +func (*Batch) TableName() string { + return "batch" +} + +// GetBatches retrieves selected batches from the database. +// The returned batches are sorted in ascending order by their index. +func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Batch, error) { + db := o.db.WithContext(ctx) + + for key, value := range fields { + db = db.Where(key, value) + } + + for _, orderBy := range orderByList { + db = db.Order(orderBy) + } + + if limit > 0 { + db = db.Limit(limit) + } + + db = db.Order("index ASC") + + var batches []*Batch + if err := db.Find(&batches).Error; err != nil { + return nil, err + } + return batches, nil +} + +// GetBatchCount retrieves the total number of batches in the database. +func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) { + var count int64 + err := o.db.WithContext(ctx).Model(&Batch{}).Count(&count).Error + if err != nil { + return 0, err + } + return uint64(count), nil +} + +// GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash. +func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.AggProof, error) { + var batch Batch + err := o.db.WithContext(ctx).Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified).First(&batch).Error + if err != nil { + return nil, err + } + + var proof message.AggProof + err = json.Unmarshal(batch.Proof, &proof) + if err != nil { + return nil, err + } + + return &proof, nil +} + +// GetLatestBatch retrieves the latest batch from the database. 
+func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) { + var latestBatch Batch + err := o.db.WithContext(ctx).Order("index desc").First(&latestBatch).Error + if err != nil { + return nil, err + } + return &latestBatch, nil +} + +// GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes. +func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) { + if len(hashes) == 0 { + return []types.RollupStatus{}, nil + } + + var batches []Batch + err := o.db.WithContext(ctx).Where("hash IN ?", hashes).Find(&batches).Error + if err != nil { + return nil, err + } + + hashToStatusMap := make(map[string]types.RollupStatus) + for _, batch := range batches { + hashToStatusMap[batch.Hash] = types.RollupStatus(batch.RollupStatus) + } + + var statuses []types.RollupStatus + for _, hash := range hashes { + status, ok := hashToStatusMap[hash] + if !ok { + return nil, fmt.Errorf("hash not found in database: %s", hash) + } + statuses = append(statuses, status) + } + + return statuses, nil +} + +// GetPendingBatches retrieves pending batches up to the specified limit. +// The returned batches are sorted in ascending order by their index. +func (o *Batch) GetPendingBatches(ctx context.Context, limit int) ([]*Batch, error) { + if limit <= 0 { + return nil, errors.New("limit must be greater than zero") + } + + var batches []*Batch + db := o.db.WithContext(ctx) + + db = db.Where("rollup_status = ?", types.RollupPending).Order("index ASC").Limit(limit) + + if err := db.Find(&batches).Error; err != nil { + return nil, err + } + return batches, nil +} + +// GetBatchByIndex retrieves the batch by the given index. 
+func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) { + var batch Batch + err := o.db.WithContext(ctx).Where("index = ?", index).First(&batch).Error + if err != nil { + return nil, err + } + return &batch, nil +} + +// InsertBatch inserts a new batch into the database. +func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*bridgeTypes.Chunk, dbTX ...*gorm.DB) (string, error) { + if len(chunks) == 0 { + return "", errors.New("invalid args") + } + + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + + parentBatch, err := o.GetLatestBatch(ctx) + if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + log.Error("failed to get the latest batch", "err", err) + return "", err + } + + var batchIndex uint64 + var parentBatchHash common.Hash + var totalL1MessagePoppedBefore uint64 + var version uint8 = defaultBatchHeaderVersion + + // if parentBatch==nil then err==gorm.ErrRecordNotFound, which means there's + // no batch record in the db, we then use default empty values for the creating batch; + // if parentBatch!=nil then err==nil, then we fill the parentBatch-related data into the creating batch + if parentBatch != nil { + batchIndex = parentBatch.Index + 1 + parentBatchHash = common.HexToHash(parentBatch.Hash) + + var parentBatchHeader *bridgeTypes.BatchHeader + parentBatchHeader, err = bridgeTypes.DecodeBatchHeader(parentBatch.BatchHeader) + if err != nil { + log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err) + return "", err + } + + totalL1MessagePoppedBefore = parentBatchHeader.TotalL1MessagePopped() + version = parentBatchHeader.Version() + } + + batchHeader, err := bridgeTypes.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks) + if err != nil { + log.Error("failed to create batch header", + "index", batchIndex, "total l1 message 
popped before", totalL1MessagePoppedBefore, + "parent hash", parentBatchHash, "number of chunks", len(chunks), "err", err) + return "", err + } + + numChunks := len(chunks) + lastChunkBlockNum := len(chunks[numChunks-1].Blocks) + + newBatch := Batch{ + Index: batchIndex, + Hash: batchHeader.Hash().Hex(), + StartChunkHash: startChunkHash, + StartChunkIndex: startChunkIndex, + EndChunkHash: endChunkHash, + EndChunkIndex: endChunkIndex, + StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(), + WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(), + BatchHeader: batchHeader.Encode(), + ProvingStatus: int16(types.ProvingTaskUnassigned), + RollupStatus: int16(types.RollupPending), + } + + if err := db.WithContext(ctx).Create(&newBatch).Error; err != nil { + log.Error("failed to insert batch", "batch", newBatch, "err", err) + return "", err + } + + return newBatch.Hash, nil +} + +// UpdateSkippedBatches updates the skipped batches in the database. +func (o *Batch) UpdateSkippedBatches(ctx context.Context) (uint64, error) { + provingStatusList := []interface{}{ + int(types.ProvingTaskSkipped), + int(types.ProvingTaskFailed), + } + result := o.db.Model(&Batch{}).Where("rollup_status", int(types.RollupCommitted)). + Where("proving_status IN (?)", provingStatusList).Update("rollup_status", int(types.RollupFinalizationSkipped)) + if result.Error != nil { + return 0, result.Error + } + return uint64(result.RowsAffected), nil +} + +// UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch. 
+func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error { + updateFields := make(map[string]interface{}) + updateFields["oracle_status"] = int(status) + updateFields["oracle_tx_hash"] = txHash + if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil { + return err + } + return nil +} + +// UpdateProvingStatus updates the proving status of a batch. +func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + + updateFields := make(map[string]interface{}) + updateFields["proving_status"] = int(status) + + switch status { + case types.ProvingTaskAssigned: + updateFields["prover_assigned_at"] = time.Now() + case types.ProvingTaskUnassigned: + updateFields["prover_assigned_at"] = nil + case types.ProvingTaskProved, types.ProvingTaskVerified: + updateFields["proved_at"] = time.Now() + default: + } + + if err := db.Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil { + return err + } + return nil +} + +// UpdateRollupStatus updates the rollup status of a batch. +func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error { + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + + updateFields := make(map[string]interface{}) + updateFields["rollup_status"] = int(status) + + switch status { + case types.RollupCommitted: + updateFields["committed_at"] = time.Now() + case types.RollupFinalized: + updateFields["finalized_at"] = time.Now() + } + if err := db.Model(&Batch{}).WithContext(ctx).Where("hash", hash).Updates(updateFields).Error; err != nil { + return err + } + return nil +} + +// UpdateCommitTxHashAndRollupStatus updates the commit transaction hash and rollup status for a batch. 
+func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error { + updateFields := make(map[string]interface{}) + updateFields["commit_tx_hash"] = commitTxHash + updateFields["rollup_status"] = int(status) + if status == types.RollupCommitted { + updateFields["committed_at"] = time.Now() + } + if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil { + return err + } + return nil +} + +// UpdateFinalizeTxHashAndRollupStatus updates the finalize transaction hash and rollup status for a batch. +func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error { + updateFields := make(map[string]interface{}) + updateFields["finalize_tx_hash"] = finalizeTxHash + updateFields["rollup_status"] = int(status) + if status == types.RollupFinalized { + updateFields["finalized_at"] = time.Now() + } + if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil { + return err + } + return nil +} + +// UpdateProofByHash updates the block batch proof by hash. +// for unit test. 
+func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error { + proofBytes, err := json.Marshal(proof) + if err != nil { + return err + } + + updateFields := make(map[string]interface{}) + updateFields["proof"] = proofBytes + updateFields["proof_time_sec"] = proofTimeSec + err = o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error + return err +} diff --git a/bridge/internal/orm/block_batch.go b/bridge/internal/orm/block_batch.go deleted file mode 100644 index 25b54c934..000000000 --- a/bridge/internal/orm/block_batch.go +++ /dev/null @@ -1,314 +0,0 @@ -package orm - -import ( - "context" - "encoding/json" - "errors" - "time" - - "github.com/scroll-tech/go-ethereum/log" - "gorm.io/gorm" - - "scroll-tech/common/types" - "scroll-tech/common/types/message" - - bridgeTypes "scroll-tech/bridge/internal/types" -) - -// BlockBatch is structure of stored block batch message -type BlockBatch struct { - db *gorm.DB `gorm:"column:-"` - - Hash string `json:"hash" gorm:"column:hash"` - Index uint64 `json:"index" gorm:"column:index"` - StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"` - StartBlockHash string `json:"start_block_hash" gorm:"column:start_block_hash"` - EndBlockNumber uint64 `json:"end_block_number" gorm:"column:end_block_number"` - EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"` - ParentHash string `json:"parent_hash" gorm:"column:parent_hash"` - StateRoot string `json:"state_root" gorm:"column:state_root"` - TotalTxNum uint64 `json:"total_tx_num" gorm:"column:total_tx_num"` - TotalL1TxNum uint64 `json:"total_l1_tx_num" gorm:"column:total_l1_tx_num"` - TotalL2Gas uint64 `json:"total_l2_gas" gorm:"column:total_l2_gas"` - ProvingStatus int `json:"proving_status" gorm:"column:proving_status;default:1"` - Proof []byte `json:"proof" gorm:"column:proof"` - ProofTimeSec uint64 `json:"proof_time_sec" 
gorm:"column:proof_time_sec;default:0"` - RollupStatus int `json:"rollup_status" gorm:"column:rollup_status;default:1"` - CommitTxHash string `json:"commit_tx_hash" gorm:"column:commit_tx_hash;default:NULL"` - OracleStatus int `json:"oracle_status" gorm:"column:oracle_status;default:1"` - OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"` - FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"` - CreatedAt time.Time `json:"created_at" gorm:"column:created_at;default:CURRENT_TIMESTAMP()"` - ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"` - ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"` - CommittedAt *time.Time `json:"committed_at" gorm:"column:committed_at;default:NULL"` - FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"` -} - -// NewBlockBatch create an blockBatchOrm instance -func NewBlockBatch(db *gorm.DB) *BlockBatch { - return &BlockBatch{db: db} -} - -// TableName define the BlockBatch table name -func (*BlockBatch) TableName() string { - return "block_batch" -} - -// GetBatchCount get the batch count -func (o *BlockBatch) GetBatchCount() (int64, error) { - var count int64 - if err := o.db.Model(&BlockBatch{}).Count(&count).Error; err != nil { - return 0, err - } - return count, nil -} - -// GetBlockBatches get the select block batches -func (o *BlockBatch) GetBlockBatches(fields map[string]interface{}, orderByList []string, limit int) ([]BlockBatch, error) { - var blockBatches []BlockBatch - db := o.db - for key, value := range fields { - db = db.Where(key, value) - } - - for _, orderBy := range orderByList { - db = db.Order(orderBy) - } - - if limit != 0 { - db = db.Limit(limit) - } - - if err := db.Find(&blockBatches).Error; err != nil { - return nil, err - } - return blockBatches, nil -} - -// GetBlockBatchesHashByRollupStatus get the block batches by rollup status 
-func (o *BlockBatch) GetBlockBatchesHashByRollupStatus(status types.RollupStatus, limit int) ([]string, error) { - var blockBatches []BlockBatch - err := o.db.Select("hash").Where("rollup_status", int(status)).Order("index ASC").Limit(limit).Find(&blockBatches).Error - if err != nil { - return nil, err - } - - var hashes []string - for _, v := range blockBatches { - hashes = append(hashes, v.Hash) - } - return hashes, nil -} - -// GetVerifiedProofByHash get verified proof and instance comments by hash -func (o *BlockBatch) GetVerifiedProofByHash(hash string) (*message.AggProof, error) { - result := o.db.Model(&BlockBatch{}).Select("proof").Where("hash", hash).Where("proving_status", int(types.ProvingTaskVerified)).Row() - if result.Err() != nil { - return nil, result.Err() - } - - var proofBytes []byte - if err := result.Scan(&proofBytes); err != nil { - return nil, err - } - - var proof message.AggProof - if err := json.Unmarshal(proofBytes, &proof); err != nil { - return nil, err - } - - return &proof, nil -} - -// GetLatestBatch get the latest batch -// because we will `initializeGenesis()` when we start the `L2Watcher`, so a batch must exist. 
-func (o *BlockBatch) GetLatestBatch() (*BlockBatch, error) { - var blockBatch BlockBatch - err := o.db.Order("index DESC").Limit(1).First(&blockBatch).Error - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return nil, err - } - return &blockBatch, nil -} - -// GetLatestBatchByRollupStatus get the latest block batch by rollup status -func (o *BlockBatch) GetLatestBatchByRollupStatus(rollupStatuses []types.RollupStatus) (*BlockBatch, error) { - var tmpRollupStatus []int - for _, v := range rollupStatuses { - tmpRollupStatus = append(tmpRollupStatus, int(v)) - } - var blockBatch BlockBatch - err := o.db.Where("rollup_status IN (?)", tmpRollupStatus).Order("index DESC").Limit(1).First(&blockBatch).Error - if err != nil { - return nil, err - } - return &blockBatch, nil -} - -// GetRollupStatusByHashList get rollup status by hash list -func (o *BlockBatch) GetRollupStatusByHashList(hashes []string) ([]types.RollupStatus, error) { - if len(hashes) == 0 { - return nil, nil - } - - var blockBatches []BlockBatch - err := o.db.Select("hash, rollup_status").Where("hash IN (?)", hashes).Find(&blockBatches).Error - if err != nil { - return nil, err - } - - var ( - statuses []types.RollupStatus - _statusMap = make(map[string]types.RollupStatus, len(hashes)) - ) - for _, _batch := range blockBatches { - _statusMap[_batch.Hash] = types.RollupStatus(_batch.RollupStatus) - } - for _, _hash := range hashes { - statuses = append(statuses, _statusMap[_hash]) - } - - return statuses, nil -} - -// InsertBlockBatchByBatchData insert a block batch data by the BatchData -func (o *BlockBatch) InsertBlockBatchByBatchData(tx *gorm.DB, batchData *bridgeTypes.BatchData) (int64, error) { - var db *gorm.DB - if tx != nil { - db = tx - } else { - db = o.db - } - - numBlocks := len(batchData.Batch.Blocks) - insertBlockBatch := BlockBatch{ - Hash: batchData.Hash().Hex(), - Index: batchData.Batch.BatchIndex, - StartBlockNumber: batchData.Batch.Blocks[0].BlockNumber, - StartBlockHash: 
batchData.Batch.Blocks[0].BlockHash.Hex(), - EndBlockNumber: batchData.Batch.Blocks[numBlocks-1].BlockNumber, - EndBlockHash: batchData.Batch.Blocks[numBlocks-1].BlockHash.Hex(), - ParentHash: batchData.Batch.ParentBatchHash.Hex(), - StateRoot: batchData.Batch.NewStateRoot.Hex(), - TotalTxNum: batchData.TotalTxNum, - TotalL1TxNum: batchData.TotalL1TxNum, - TotalL2Gas: batchData.TotalL2Gas, - CreatedAt: time.Now(), - } - result := db.Create(&insertBlockBatch) - if result.Error != nil { - log.Error("failed to insert block batch by batchData", "err", result.Error) - return 0, result.Error - } - return result.RowsAffected, nil -} - -// UpdateProvingStatus update the proving status -func (o *BlockBatch) UpdateProvingStatus(hash string, status types.ProvingStatus) error { - updateFields := make(map[string]interface{}) - updateFields["proving_status"] = int(status) - - switch status { - case types.ProvingTaskAssigned: - updateFields["prover_assigned_at"] = time.Now() - case types.ProvingTaskUnassigned: - updateFields["prover_assigned_at"] = nil - case types.ProvingTaskProved, types.ProvingTaskVerified: - updateFields["proved_at"] = time.Now() - default: - } - - if err := o.db.Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error; err != nil { - return err - } - return nil -} - -// UpdateRollupStatus update the rollup status -func (o *BlockBatch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error { - updateFields := make(map[string]interface{}) - updateFields["rollup_status"] = int(status) - - switch status { - case types.RollupCommitted: - updateFields["committed_at"] = time.Now() - case types.RollupFinalized: - updateFields["finalized_at"] = time.Now() - } - if err := o.db.Model(&BlockBatch{}).WithContext(ctx).Where("hash", hash).Updates(updateFields).Error; err != nil { - return err - } - return nil -} - -// UpdateSkippedBatches update the skipped batches -func (o *BlockBatch) UpdateSkippedBatches() (int64, error) { - 
provingStatusList := []interface{}{ - int(types.ProvingTaskSkipped), - int(types.ProvingTaskFailed), - } - result := o.db.Model(&BlockBatch{}).Where("rollup_status", int(types.RollupCommitted)). - Where("proving_status IN (?)", provingStatusList).Update("rollup_status", int(types.RollupFinalizationSkipped)) - if result.Error != nil { - return 0, result.Error - } - return result.RowsAffected, nil -} - -// UpdateCommitTxHashAndRollupStatus update the commit tx hash and rollup status -func (o *BlockBatch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error { - updateFields := make(map[string]interface{}) - updateFields["commit_tx_hash"] = commitTxHash - updateFields["rollup_status"] = int(status) - if status == types.RollupCommitted { - updateFields["committed_at"] = time.Now() - } - if err := o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error; err != nil { - return err - } - return nil -} - -// UpdateFinalizeTxHashAndRollupStatus update the finalize tx hash and rollup status -func (o *BlockBatch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error { - updateFields := make(map[string]interface{}) - updateFields["finalize_tx_hash"] = finalizeTxHash - updateFields["rollup_status"] = int(status) - if status == types.RollupFinalized { - updateFields["finalized_at"] = time.Now() - } - if err := o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error; err != nil { - return err - } - return nil -} - -// UpdateL2GasOracleStatusAndOracleTxHash update the l2 gas oracle status and oracle tx hash -func (o *BlockBatch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error { - updateFields := make(map[string]interface{}) - updateFields["oracle_status"] = int(status) - updateFields["oracle_tx_hash"] 
= txHash - if err := o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error; err != nil { - return err - } - return nil -} - -// UpdateProofByHash update the block batch proof by hash -// for unit test -func (o *BlockBatch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error { - proofBytes, err := json.Marshal(proof) - if err != nil { - return err - } - - updateFields := make(map[string]interface{}) - updateFields["proof"] = proofBytes - updateFields["proof_time_sec"] = proofTimeSec - err = o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error - if err != nil { - log.Error("failed to update proof", "err", err) - } - return err -} diff --git a/bridge/internal/orm/block_trace.go b/bridge/internal/orm/block_trace.go deleted file mode 100644 index f95c0236a..000000000 --- a/bridge/internal/orm/block_trace.go +++ /dev/null @@ -1,155 +0,0 @@ -package orm - -import ( - "encoding/json" - - "github.com/scroll-tech/go-ethereum/log" - "gorm.io/gorm" - - "scroll-tech/bridge/internal/types" -) - -// BlockTrace is structure of stored block trace message -type BlockTrace struct { - db *gorm.DB `gorm:"column:-"` - - Number uint64 `json:"number" gorm:"number"` - Hash string `json:"hash" gorm:"hash"` - ParentHash string `json:"parent_hash" gorm:"parent_hash"` - Trace string `json:"trace" gorm:"column:trace"` - BatchHash string `json:"batch_hash" gorm:"batch_hash;default:NULL"` - TxNum uint64 `json:"tx_num" gorm:"tx_num"` - GasUsed uint64 `json:"gas_used" gorm:"gas_used"` - BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"` -} - -// NewBlockTrace create an blockTraceOrm instance -func NewBlockTrace(db *gorm.DB) *BlockTrace { - return &BlockTrace{db: db} -} - -// TableName define the BlockTrace table name -func (*BlockTrace) TableName() string { - return "block_trace" -} - -// GetL2BlocksLatestHeight get the l2 blocks latest height -func (o 
*BlockTrace) GetL2BlocksLatestHeight() (int64, error) { - result := o.db.Model(&BlockTrace{}).Select("COALESCE(MAX(number), -1)").Row() - if result.Err() != nil { - return -1, result.Err() - } - var maxNumber int64 - if err := result.Scan(&maxNumber); err != nil { - return -1, err - } - return maxNumber, nil -} - -// GetL2WrappedBlocks get the l2 wrapped blocks -func (o *BlockTrace) GetL2WrappedBlocks(fields map[string]interface{}) ([]*types.WrappedBlock, error) { - var blockTraces []BlockTrace - db := o.db.Select("trace") - for key, value := range fields { - db = db.Where(key, value) - } - if err := db.Find(&blockTraces).Error; err != nil { - return nil, err - } - - var wrappedBlocks []*types.WrappedBlock - for _, v := range blockTraces { - var wrappedBlock types.WrappedBlock - if err := json.Unmarshal([]byte(v.Trace), &wrappedBlock); err != nil { - break - } - wrappedBlocks = append(wrappedBlocks, &wrappedBlock) - } - return wrappedBlocks, nil -} - -// GetL2BlockInfos get l2 block infos -func (o *BlockTrace) GetL2BlockInfos(fields map[string]interface{}, orderByList []string, limit int) ([]BlockTrace, error) { - var blockTraces []BlockTrace - db := o.db.Select("number, hash, parent_hash, batch_hash, tx_num, gas_used, block_timestamp") - for key, value := range fields { - db = db.Where(key, value) - } - - for _, orderBy := range orderByList { - db = db.Order(orderBy) - } - - if limit != 0 { - db = db.Limit(limit) - } - - if err := db.Find(&blockTraces).Error; err != nil { - return nil, err - } - return blockTraces, nil -} - -// GetUnbatchedL2Blocks get unbatched l2 blocks -func (o *BlockTrace) GetUnbatchedL2Blocks(fields map[string]interface{}, orderByList []string, limit int) ([]BlockTrace, error) { - var unbatchedBlockTraces []BlockTrace - db := o.db.Select("number, hash, parent_hash, batch_hash, tx_num, gas_used, block_timestamp").Where("batch_hash is NULL") - for key, value := range fields { - db = db.Where(key, value) - } - if err := 
db.Find(&unbatchedBlockTraces).Error; err != nil { - return nil, err - } - return unbatchedBlockTraces, nil -} - -// InsertWrappedBlocks insert block to block trace -func (o *BlockTrace) InsertWrappedBlocks(blocks []*types.WrappedBlock) error { - var blockTraces []BlockTrace - for _, block := range blocks { - number := block.Header.Number.Uint64() - hash := block.Header.Hash().String() - txNum := len(block.Transactions) - mtime := block.Header.Time - gasCost := block.Header.GasUsed - - data, err := json.Marshal(block) - if err != nil { - log.Error("failed to marshal block", "hash", hash, "err", err) - return err - } - - tmpBlockTrace := BlockTrace{ - Number: number, - Hash: hash, - ParentHash: block.Header.ParentHash.String(), - Trace: string(data), - TxNum: uint64(txNum), - GasUsed: gasCost, - BlockTimestamp: mtime, - } - blockTraces = append(blockTraces, tmpBlockTrace) - } - - if err := o.db.Create(&blockTraces).Error; err != nil { - log.Error("failed to insert blockTraces", "err", err) - return err - } - return nil -} - -// UpdateBatchHashForL2Blocks update the batch_hash of block trace -func (o *BlockTrace) UpdateBatchHashForL2Blocks(tx *gorm.DB, numbers []uint64, batchHash string) error { - var db *gorm.DB - if tx != nil { - db = tx - } else { - db = o.db - } - - err := db.Model(&BlockTrace{}).Where("number IN (?)", numbers).Update("batch_hash", batchHash).Error - if err != nil { - return err - } - return nil -} diff --git a/bridge/internal/orm/chunk.go b/bridge/internal/orm/chunk.go new file mode 100644 index 000000000..a8641a3a3 --- /dev/null +++ b/bridge/internal/orm/chunk.go @@ -0,0 +1,218 @@ +package orm + +import ( + "context" + "errors" + "time" + + "scroll-tech/common/types" + + bridgeTypes "scroll-tech/bridge/internal/types" + + "github.com/scroll-tech/go-ethereum/log" + "gorm.io/gorm" +) + +// Chunk represents a chunk of blocks in the database. 
+type Chunk struct { + db *gorm.DB `gorm:"-"` + + // chunk + Index uint64 `json:"index" gorm:"column:index"` + Hash string `json:"hash" gorm:"column:hash"` + StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"` + StartBlockHash string `json:"start_block_hash" gorm:"column:start_block_hash"` + EndBlockNumber uint64 `json:"end_block_number" gorm:"column:end_block_number"` + EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"` + StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"` + TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"` + TotalL1MessagesPoppedInChunk uint64 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"` + + // proof + ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"` + Proof []byte `json:"proof" gorm:"column:proof;default:NULL"` + ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"` + ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"` + ProofTimeSec int16 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"` + + // batch + BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"` + + // metadata + TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"` + TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"` + TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"` + TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"` + CreatedAt time.Time `json:"created_at" gorm:"column:created_at"` + UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"` + DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"` +} + +// NewChunk creates a new Chunk database instance. 
+func NewChunk(db *gorm.DB) *Chunk { + return &Chunk{db: db} +} + +// TableName returns the table name for the chunk model. +func (*Chunk) TableName() string { + return "chunk" +} + +// GetChunksInRange retrieves chunks within a given range (inclusive) from the database. +// The range is closed, i.e., it includes both start and end indices. +// The returned chunks are sorted in ascending order by their index. +func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endIndex uint64) ([]*Chunk, error) { + if startIndex > endIndex { + return nil, errors.New("start index should be less than or equal to end index") + } + + var chunks []*Chunk + db := o.db.WithContext(ctx).Where("index >= ? AND index <= ?", startIndex, endIndex) + db = db.Order("index ASC") + + if err := db.Find(&chunks).Error; err != nil { + return nil, err + } + + if startIndex+uint64(len(chunks)) != endIndex+1 { + return nil, errors.New("number of chunks not expected in the specified range") + } + + return chunks, nil +} + +// GetUnbatchedChunks retrieves unbatched chunks from the database. +func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) { + var chunks []*Chunk + err := o.db.WithContext(ctx). + Where("batch_hash IS NULL"). + Order("index asc"). + Find(&chunks).Error + if err != nil { + return nil, err + } + return chunks, nil +} + +// GetLatestChunk retrieves the latest chunk from the database. +func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) { + var latestChunk Chunk + err := o.db.WithContext(ctx). + Order("index desc"). + First(&latestChunk).Error + if err != nil { + return nil, err + } + return &latestChunk, nil +} + +// InsertChunk inserts a new chunk into the database. 
+func (o *Chunk) InsertChunk(ctx context.Context, chunk *bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Chunk, error) { + if chunk == nil || len(chunk.Blocks) == 0 { + return nil, errors.New("invalid args") + } + + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + + var chunkIndex uint64 + var totalL1MessagePoppedBefore uint64 + parentChunk, err := o.GetLatestChunk(ctx) + if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + log.Error("failed to get latest chunk", "err", err) + return nil, err + } + + // if parentChunk==nil then err==gorm.ErrRecordNotFound, which means there's + // not chunk record in the db, we then use default empty values for the creating chunk; + // if parentChunk!=nil then err=nil, then we fill the parentChunk-related data into the creating chunk + if parentChunk != nil { + chunkIndex = parentChunk.Index + 1 + totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + parentChunk.TotalL1MessagesPoppedInChunk + } + + hash, err := chunk.Hash(totalL1MessagePoppedBefore) + if err != nil { + log.Error("failed to get chunk hash", "err", err) + return nil, err + } + + var totalL2TxGas uint64 + var totalL2TxNum uint64 + var totalL1CommitCalldataSize uint64 + var totalL1CommitGas uint64 + for _, block := range chunk.Blocks { + totalL2TxGas += block.Header.GasUsed + totalL2TxNum += block.L2TxsNum() + totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize() + totalL1CommitGas += block.EstimateL1CommitGas() + } + + numBlocks := len(chunk.Blocks) + newChunk := Chunk{ + Index: chunkIndex, + Hash: hash.Hex(), + StartBlockNumber: chunk.Blocks[0].Header.Number.Uint64(), + StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(), + EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(), + EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(), + TotalL2TxGas: totalL2TxGas, + TotalL2TxNum: totalL2TxNum, + TotalL1CommitCalldataSize: totalL1CommitCalldataSize, + TotalL1CommitGas: totalL1CommitGas, + 
StartBlockTime: chunk.Blocks[0].Header.Time, + TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore, + TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore), + ProvingStatus: int16(types.ProvingTaskUnassigned), + } + + if err := db.Create(&newChunk).Error; err != nil { + log.Error("failed to insert chunk", "hash", hash, "err", err) + return nil, err + } + + return &newChunk, nil +} + +// UpdateProvingStatus updates the proving status of a chunk. +func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + + updateFields := make(map[string]interface{}) + updateFields["proving_status"] = int(status) + + switch status { + case types.ProvingTaskAssigned: + updateFields["prover_assigned_at"] = time.Now() + case types.ProvingTaskUnassigned: + updateFields["prover_assigned_at"] = nil + case types.ProvingTaskProved, types.ProvingTaskVerified: + updateFields["proved_at"] = time.Now() + default: + } + + if err := db.Model(&Chunk{}).Where("hash", hash).Updates(updateFields).Error; err != nil { + return err + } + return nil +} + +// UpdateBatchHashInRange updates the batch_hash for chunks within the specified range (inclusive). +// The range is closed, i.e., it includes both start and end indices. +func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string, dbTX ...*gorm.DB) error { + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.Model(&Chunk{}).Where("index >= ? 
AND index <= ?", startIndex, endIndex) + + if err := db.Update("batch_hash", batchHash).Error; err != nil { + return err + } + return nil +} diff --git a/bridge/internal/orm/common.go b/bridge/internal/orm/common.go deleted file mode 100644 index 92f5ebdfb..000000000 --- a/bridge/internal/orm/common.go +++ /dev/null @@ -1,38 +0,0 @@ -package orm - -import ( - "errors" - - "gorm.io/gorm" - - bridgeTypes "scroll-tech/bridge/internal/types" -) - -// AddBatchInfoToDB inserts the batch information to the BlockBatch table and updates the batch_hash -// in all blocks included in the batch. -func AddBatchInfoToDB(db *gorm.DB, batchData *bridgeTypes.BatchData) error { - blockBatch := NewBlockBatch(db) - blockTrace := NewBlockTrace(db) - err := db.Transaction(func(tx *gorm.DB) error { - rowsAffected, dbTxErr := blockBatch.InsertBlockBatchByBatchData(tx, batchData) - if dbTxErr != nil { - return dbTxErr - } - if rowsAffected != 1 { - dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1") - return dbTxErr - } - - var blockIDs = make([]uint64, len(batchData.Batch.Blocks)) - for i, block := range batchData.Batch.Blocks { - blockIDs[i] = block.BlockNumber - } - - dbTxErr = blockTrace.UpdateBatchHashForL2Blocks(tx, blockIDs, batchData.Hash().Hex()) - if dbTxErr != nil { - return dbTxErr - } - return nil - }) - return err -} diff --git a/bridge/internal/orm/l2_block.go b/bridge/internal/orm/l2_block.go new file mode 100644 index 000000000..0fc4485ce --- /dev/null +++ b/bridge/internal/orm/l2_block.go @@ -0,0 +1,208 @@ +package orm + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/scroll-tech/go-ethereum/common" + gethTypes "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/log" + "gorm.io/gorm" + + "scroll-tech/bridge/internal/types" +) + +// L2Block represents a l2 block in the database. 
+type L2Block struct { + db *gorm.DB `gorm:"column:-"` + + Number uint64 `json:"number" gorm:"number"` + Hash string `json:"hash" gorm:"hash"` + ParentHash string `json:"parent_hash" gorm:"parent_hash"` + Header string `json:"header" gorm:"header"` + Transactions string `json:"transactions" gorm:"transactions"` + WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"` + TxNum uint64 `json:"tx_num" gorm:"tx_num"` + GasUsed uint64 `json:"gas_used" gorm:"gas_used"` + BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"` + ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"` +} + +// NewL2Block creates a new L2Block instance +func NewL2Block(db *gorm.DB) *L2Block { + return &L2Block{db: db} +} + +// TableName returns the name of the "l2_block" table. +func (*L2Block) TableName() string { + return "l2_block" +} + +// GetL2BlocksLatestHeight retrieves the height of the latest L2 block. +// If the l2_block table is empty, it returns 0 to represent the genesis block height. +// In case of an error, it returns -1 along with the error. +func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (int64, error) { + var maxNumber int64 + if err := o.db.WithContext(ctx).Model(&L2Block{}).Select("COALESCE(MAX(number), 0)").Row().Scan(&maxNumber); err != nil { + return -1, err + } + + return maxNumber, nil +} + +// GetUnchunkedBlocks get the l2 blocks that have not been put into a chunk. +// The returned blocks are sorted in ascending order by their block number. +func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) { + var l2Blocks []L2Block + if err := o.db.WithContext(ctx).Select("header, transactions, withdraw_trie_root"). + Where("chunk_hash IS NULL"). + Order("number asc"). 
+ Find(&l2Blocks).Error; err != nil { + return nil, err + } + + var wrappedBlocks []*types.WrappedBlock + for _, v := range l2Blocks { + var wrappedBlock types.WrappedBlock + + if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil { + return nil, err + } + + wrappedBlock.Header = &gethTypes.Header{} + if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil { + return nil, err + } + + wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot) + wrappedBlocks = append(wrappedBlocks, &wrappedBlock) + } + + return wrappedBlocks, nil +} + +// GetL2Blocks retrieves selected L2Blocks from the database. +// The returned L2Blocks are sorted in ascending order by their block number. +func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*L2Block, error) { + db := o.db.WithContext(ctx) + + for key, value := range fields { + db = db.Where(key, value) + } + + for _, orderBy := range orderByList { + db = db.Order(orderBy) + } + + if limit > 0 { + db = db.Limit(limit) + } + + db = db.Order("number ASC") + + var l2Blocks []*L2Block + if err := db.Find(&l2Blocks).Error; err != nil { + return nil, err + } + return l2Blocks, nil +} + +// GetL2BlocksInRange retrieves the L2 blocks within the specified range (inclusive). +// The range is closed, i.e., it includes both start and end block numbers. +// The returned blocks are sorted in ascending order by their block number. +func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint64, endBlockNumber uint64) ([]*types.WrappedBlock, error) { + if startBlockNumber > endBlockNumber { + return nil, errors.New("start block number should be less than or equal to end block number") + } + + var l2Blocks []L2Block + db := o.db.WithContext(ctx) + db = db.Where("number >= ? 
AND number <= ?", startBlockNumber, endBlockNumber) + db = db.Order("number ASC") + + if err := db.Find(&l2Blocks).Error; err != nil { + return nil, err + } + + if uint64(len(l2Blocks)) != endBlockNumber-startBlockNumber+1 { + return nil, errors.New("number of blocks not expected in the specified range") + } + + var wrappedBlocks []*types.WrappedBlock + for _, v := range l2Blocks { + var wrappedBlock types.WrappedBlock + + if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil { + return nil, err + } + + wrappedBlock.Header = &gethTypes.Header{} + if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil { + return nil, err + } + + wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot) + wrappedBlocks = append(wrappedBlocks, &wrappedBlock) + } + + return wrappedBlocks, nil +} + +// InsertL2Blocks inserts l2 blocks into the "l2_block" table. +func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlock) error { + var l2Blocks []L2Block + for _, block := range blocks { + header, err := json.Marshal(block.Header) + if err != nil { + log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err) + return err + } + + txs, err := json.Marshal(block.Transactions) + if err != nil { + log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err) + return err + } + + l2Block := L2Block{ + Number: block.Header.Number.Uint64(), + Hash: block.Header.Hash().String(), + ParentHash: block.Header.ParentHash.String(), + Transactions: string(txs), + WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(), + TxNum: uint64(len(block.Transactions)), + GasUsed: block.Header.GasUsed, + BlockTimestamp: block.Header.Time, + Header: string(header), + } + l2Blocks = append(l2Blocks, l2Block) + } + + if err := o.db.WithContext(ctx).Create(&l2Blocks).Error; err != nil { + log.Error("failed to insert l2Blocks", "err", err) + return err + } + return nil +} 
+ +// UpdateChunkHashInRange updates the chunk_hash of block tx within the specified range (inclusive). +// The range is closed, i.e., it includes both start and end indices. +// This function ensures the number of rows updated must equal to (endIndex - startIndex + 1). +// If the rows affected do not match this expectation, an error is returned. +func (o *L2Block) UpdateChunkHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, chunkHash string, dbTX ...*gorm.DB) error { + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + + db = db.WithContext(ctx).Model(&L2Block{}).Where("number >= ? AND number <= ?", startIndex, endIndex) + tx := db.Update("chunk_hash", chunkHash) + + if tx.RowsAffected != int64(endIndex-startIndex+1) { + return fmt.Errorf("expected %d rows to be updated, got %d", endIndex-startIndex+1, tx.RowsAffected) + } + + return tx.Error +} diff --git a/bridge/internal/orm/migrate/migrate_test.go b/bridge/internal/orm/migrate/migrate_test.go index 8d40c92ec..74cf7d2af 100644 --- a/bridge/internal/orm/migrate/migrate_test.go +++ b/bridge/internal/orm/migrate/migrate_test.go @@ -63,7 +63,7 @@ func testResetDB(t *testing.T) { cur, err := Current(pgDB.DB) assert.NoError(t, err) // total number of tables. - assert.Equal(t, 5, int(cur)) + assert.Equal(t, 6, int(cur)) } func testMigrate(t *testing.T) { diff --git a/bridge/internal/orm/migrate/migrations/00001_block_trace.sql b/bridge/internal/orm/migrate/migrations/00001_block_trace.sql deleted file mode 100644 index 2b8fc4454..000000000 --- a/bridge/internal/orm/migrate/migrations/00001_block_trace.sql +++ /dev/null @@ -1,38 +0,0 @@ --- +goose Up --- +goose StatementBegin - --- TODO: use foreign key for batch_id? --- TODO: why tx_num is bigint? 
-create table block_trace -( - number BIGINT NOT NULL, - hash VARCHAR NOT NULL, - parent_hash VARCHAR NOT NULL, - trace JSON NOT NULL, - batch_hash VARCHAR DEFAULT NULL, - tx_num INTEGER NOT NULL, - gas_used BIGINT NOT NULL, - block_timestamp NUMERIC NOT NULL -); - -create unique index block_trace_hash_uindex - on block_trace (hash); - -create unique index block_trace_number_uindex - on block_trace (number); - -create unique index block_trace_parent_uindex - on block_trace (number, parent_hash); - -create unique index block_trace_parent_hash_uindex - on block_trace (hash, parent_hash); - -create index block_trace_batch_hash_index - on block_trace (batch_hash); - --- +goose StatementEnd - --- +goose Down --- +goose StatementBegin -drop table if exists block_trace; --- +goose StatementEnd diff --git a/bridge/internal/orm/migrate/migrations/00002_l1_message.sql b/bridge/internal/orm/migrate/migrations/00001_l1_message.sql similarity index 79% rename from bridge/internal/orm/migrate/migrations/00002_l1_message.sql rename to bridge/internal/orm/migrate/migrations/00001_l1_message.sql index bba72c857..99fec08b5 100644 --- a/bridge/internal/orm/migrate/migrations/00002_l1_message.sql +++ b/bridge/internal/orm/migrate/migrations/00001_l1_message.sql @@ -29,19 +29,6 @@ on l1_message (queue_index); create index l1_message_height_index on l1_message (height); -CREATE OR REPLACE FUNCTION update_timestamp() -RETURNS TRIGGER AS $$ -BEGIN - NEW.updated_at = CURRENT_TIMESTAMP; - RETURN NEW; -END; -$$ language 'plpgsql'; - -CREATE TRIGGER update_timestamp BEFORE UPDATE -ON l1_message FOR EACH ROW EXECUTE PROCEDURE -update_timestamp(); - - -- +goose StatementEnd -- +goose Down diff --git a/bridge/internal/orm/migrate/migrations/00003_l2_message.sql b/bridge/internal/orm/migrate/migrations/00002_l2_message.sql similarity index 79% rename from bridge/internal/orm/migrate/migrations/00003_l2_message.sql rename to bridge/internal/orm/migrate/migrations/00002_l2_message.sql index 
fa2c06d0f..f4124c283 100644 --- a/bridge/internal/orm/migrate/migrations/00003_l2_message.sql +++ b/bridge/internal/orm/migrate/migrations/00002_l2_message.sql @@ -29,19 +29,6 @@ on l2_message (nonce); create index l2_message_height_index on l2_message (height); -CREATE OR REPLACE FUNCTION update_timestamp() -RETURNS TRIGGER AS $$ -BEGIN - NEW.updated_at = CURRENT_TIMESTAMP; - RETURN NEW; -END; -$$ language 'plpgsql'; - -CREATE TRIGGER update_timestamp BEFORE UPDATE -ON l2_message FOR EACH ROW EXECUTE PROCEDURE -update_timestamp(); - - -- +goose StatementEnd -- +goose Down diff --git a/bridge/internal/orm/migrate/migrations/00005_l1_block.sql b/bridge/internal/orm/migrate/migrations/00003_l1_block.sql similarity index 100% rename from bridge/internal/orm/migrate/migrations/00005_l1_block.sql rename to bridge/internal/orm/migrate/migrations/00003_l1_block.sql diff --git a/bridge/internal/orm/migrate/migrations/00004_block_batch.sql b/bridge/internal/orm/migrate/migrations/00004_block_batch.sql deleted file mode 100644 index 4a9d3f12a..000000000 --- a/bridge/internal/orm/migrate/migrations/00004_block_batch.sql +++ /dev/null @@ -1,49 +0,0 @@ --- +goose Up --- +goose StatementBegin - -create table block_batch -( - hash VARCHAR NOT NULL, - index BIGINT NOT NULL, - start_block_number BIGINT NOT NULL, - start_block_hash VARCHAR NOT NULL, - end_block_number BIGINT NOT NULL, - end_block_hash VARCHAR NOT NULL, - parent_hash VARCHAR NOT NULL, - state_root VARCHAR NOT NULL, - total_tx_num BIGINT NOT NULL, - total_l1_tx_num BIGINT NOT NULL, - total_l2_gas BIGINT NOT NULL, - proving_status INTEGER DEFAULT 1, - proof BYTEA DEFAULT NULL, - proof_time_sec INTEGER DEFAULT 0, - rollup_status INTEGER DEFAULT 1, - commit_tx_hash VARCHAR DEFAULT NULL, - finalize_tx_hash VARCHAR DEFAULT NULL, - oracle_status INTEGER DEFAULT 1, - oracle_tx_hash VARCHAR DEFAULT NULL, - created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, - prover_assigned_at TIMESTAMP(0) DEFAULT NULL, - proved_at 
TIMESTAMP(0) DEFAULT NULL, - committed_at TIMESTAMP(0) DEFAULT NULL, - finalized_at TIMESTAMP(0) DEFAULT NULL -); - -comment -on column block_batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed'; -comment -on column block_batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped, commit_failed, finalize_failed'; -comment -on column block_batch.oracle_status is 'undefined, pending, importing, imported, failed'; - -create unique index block_batch_hash_uindex - on block_batch (hash); -create unique index block_batch_index_uindex - on block_batch (index); - --- +goose StatementEnd - --- +goose Down --- +goose StatementBegin -drop table if exists block_batch; --- +goose StatementEnd diff --git a/bridge/internal/orm/migrate/migrations/00004_l2_block.sql b/bridge/internal/orm/migrate/migrations/00004_l2_block.sql new file mode 100644 index 000000000..36438c47f --- /dev/null +++ b/bridge/internal/orm/migrate/migrations/00004_l2_block.sql @@ -0,0 +1,32 @@ +-- +goose Up +-- +goose StatementBegin + +create table l2_block +( + number BIGINT NOT NULL, + hash VARCHAR NOT NULL, + parent_hash VARCHAR NOT NULL, + header TEXT NOT NULL, + transactions TEXT NOT NULL, + withdraw_trie_root VARCHAR NOT NULL, + tx_num INTEGER NOT NULL, + gas_used BIGINT NOT NULL, + block_timestamp NUMERIC NOT NULL, + chunk_hash VARCHAR DEFAULT NULL +); + +create unique index l2_block_hash_uindex + on l2_block (hash); + +create unique index l2_block_number_uindex + on l2_block (number); + +create index l2_block_chunk_hash_index + on l2_block (chunk_hash); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +drop table if exists l2_block; +-- +goose StatementEnd diff --git a/bridge/internal/orm/migrate/migrations/00005_chunk.sql b/bridge/internal/orm/migrate/migrations/00005_chunk.sql new file mode 100644 index 000000000..37e386ad8 --- /dev/null +++ 
b/bridge/internal/orm/migrate/migrations/00005_chunk.sql @@ -0,0 +1,54 @@ +-- +goose Up +-- +goose StatementBegin + +create table chunk +( +-- chunk + index BIGINT NOT NULL, + hash VARCHAR NOT NULL, + start_block_number BIGINT NOT NULL, + start_block_hash VARCHAR NOT NULL, + end_block_number BIGINT NOT NULL, + end_block_hash VARCHAR NOT NULL, + total_l1_messages_popped_before BIGINT NOT NULL, + total_l1_messages_popped_in_chunk BIGINT NOT NULL, + start_block_time BIGINT NOT NULL, + +-- proof + proving_status SMALLINT NOT NULL DEFAULT 1, + proof BYTEA DEFAULT NULL, + prover_assigned_at TIMESTAMP(0) DEFAULT NULL, + proved_at TIMESTAMP(0) DEFAULT NULL, + proof_time_sec SMALLINT DEFAULT NULL, + +-- batch + batch_hash VARCHAR DEFAULT NULL, + +-- metadata + total_l2_tx_gas BIGINT NOT NULL, + total_l2_tx_num BIGINT NOT NULL, + total_l1_commit_calldata_size BIGINT NOT NULL, + total_l1_commit_gas BIGINT NOT NULL, + created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, + deleted_at TIMESTAMP(0) DEFAULT NULL +); + +comment +on column chunk.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed'; + +create unique index chunk_index_uindex +on chunk (index); + +create unique index chunk_hash_uindex +on chunk (hash); + +create index batch_hash_index +on chunk (batch_hash); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +drop table if exists chunk; +-- +goose StatementEnd diff --git a/bridge/internal/orm/migrate/migrations/00006_batch.sql b/bridge/internal/orm/migrate/migrations/00006_batch.sql new file mode 100644 index 000000000..1d74c82d9 --- /dev/null +++ b/bridge/internal/orm/migrate/migrations/00006_batch.sql @@ -0,0 +1,58 @@ +-- +goose Up +-- +goose StatementBegin + +create table batch +( +-- batch + index BIGINT NOT NULL, + hash VARCHAR NOT NULL, + start_chunk_index BIGINT NOT NULL, + start_chunk_hash VARCHAR NOT NULL, + end_chunk_index BIGINT NOT NULL, 
+ end_chunk_hash VARCHAR NOT NULL, + state_root VARCHAR NOT NULL, + withdraw_root VARCHAR NOT NULL, + batch_header BYTEA NOT NULL, + +-- proof + proving_status SMALLINT NOT NULL DEFAULT 1, + proof BYTEA DEFAULT NULL, + prover_assigned_at TIMESTAMP(0) DEFAULT NULL, + proved_at TIMESTAMP(0) DEFAULT NULL, + proof_time_sec INTEGER DEFAULT NULL, + +-- rollup + rollup_status SMALLINT NOT NULL DEFAULT 1, + commit_tx_hash VARCHAR DEFAULT NULL, + committed_at TIMESTAMP(0) DEFAULT NULL, + finalize_tx_hash VARCHAR DEFAULT NULL, + finalized_at TIMESTAMP(0) DEFAULT NULL, + +-- gas oracle + oracle_status SMALLINT NOT NULL DEFAULT 1, + oracle_tx_hash VARCHAR DEFAULT NULL, + +-- metadata + created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, + deleted_at TIMESTAMP(0) DEFAULT NULL +); + +create unique index batch_index_uindex +on batch (index); + +create unique index batch_hash_uindex +on batch (hash); + +comment +on column batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed'; + +comment +on column batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped, commit_failed, finalize_failed'; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +drop table if exists batch; +-- +goose StatementEnd diff --git a/bridge/internal/orm/orm_test.go b/bridge/internal/orm/orm_test.go new file mode 100644 index 000000000..066c0d635 --- /dev/null +++ b/bridge/internal/orm/orm_test.go @@ -0,0 +1,281 @@ +package orm + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/stretchr/testify/assert" + "gorm.io/gorm" + + "scroll-tech/common/docker" + "scroll-tech/common/types" + + "scroll-tech/bridge/internal/config" + "scroll-tech/bridge/internal/orm/migrate" + bridgeTypes "scroll-tech/bridge/internal/types" + "scroll-tech/bridge/internal/utils" +) + +var ( + 
base *docker.App + + db *gorm.DB + l2BlockOrm *L2Block + chunkOrm *Chunk + batchOrm *Batch + + wrappedBlock1 *bridgeTypes.WrappedBlock + wrappedBlock2 *bridgeTypes.WrappedBlock + chunk1 *bridgeTypes.Chunk + chunk2 *bridgeTypes.Chunk + chunkHash1 common.Hash + chunkHash2 common.Hash +) + +func TestMain(m *testing.M) { + t := &testing.T{} + setupEnv(t) + defer tearDownEnv(t) + m.Run() +} + +func setupEnv(t *testing.T) { + base = docker.NewDockerApp() + base.RunDBImage(t) + var err error + db, err = utils.InitDB( + &config.DBConfig{ + DSN: base.DBConfig.DSN, + DriverName: base.DBConfig.DriverName, + MaxOpenNum: base.DBConfig.MaxOpenNum, + MaxIdleNum: base.DBConfig.MaxIdleNum, + }, + ) + assert.NoError(t, err) + sqlDB, err := db.DB() + assert.NoError(t, err) + assert.NoError(t, migrate.ResetDB(sqlDB)) + + batchOrm = NewBatch(db) + chunkOrm = NewChunk(db) + l2BlockOrm = NewL2Block(db) + + templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") + if err != nil { + t.Fatalf("failed to read file: %v", err) + } + wrappedBlock1 = &bridgeTypes.WrappedBlock{} + if err = json.Unmarshal(templateBlockTrace, wrappedBlock1); err != nil { + t.Fatalf("failed to unmarshal block trace: %v", err) + } + + templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json") + if err != nil { + t.Fatalf("failed to read file: %v", err) + } + wrappedBlock2 = &bridgeTypes.WrappedBlock{} + if err = json.Unmarshal(templateBlockTrace, wrappedBlock2); err != nil { + t.Fatalf("failed to unmarshal block trace: %v", err) + } + + chunk1 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1}} + chunkHash1, err = chunk1.Hash(0) + assert.NoError(t, err) + + chunk2 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock2}} + chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0)) + assert.NoError(t, err) +} + +func tearDownEnv(t *testing.T) { + sqlDB, err := db.DB() + assert.NoError(t, err) + sqlDB.Close() + base.Free() +} + 
+func TestL2BlockOrm(t *testing.T) { + sqlDB, err := db.DB() + assert.NoError(t, err) + assert.NoError(t, migrate.ResetDB(sqlDB)) + + err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}) + assert.NoError(t, err) + + height, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background()) + assert.NoError(t, err) + assert.Equal(t, int64(3), height) + + blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background()) + assert.NoError(t, err) + assert.Len(t, blocks, 2) + assert.Equal(t, wrappedBlock1, blocks[0]) + assert.Equal(t, wrappedBlock2, blocks[1]) + + blocks, err = l2BlockOrm.GetL2BlocksInRange(context.Background(), 2, 3) + assert.NoError(t, err) + assert.Len(t, blocks, 2) + assert.Equal(t, wrappedBlock1, blocks[0]) + assert.Equal(t, wrappedBlock2, blocks[1]) + + err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 2, 2, "test hash") + assert.NoError(t, err) + + blocks, err = l2BlockOrm.GetUnchunkedBlocks(context.Background()) + assert.NoError(t, err) + assert.Len(t, blocks, 1) + assert.Equal(t, wrappedBlock2, blocks[0]) +} + +func TestChunkOrm(t *testing.T) { + sqlDB, err := db.DB() + assert.NoError(t, err) + assert.NoError(t, migrate.ResetDB(sqlDB)) + + err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}) + assert.NoError(t, err) + + dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1) + assert.NoError(t, err) + assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex()) + + dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2) + assert.NoError(t, err) + assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex()) + + chunks, err := chunkOrm.GetUnbatchedChunks(context.Background()) + assert.NoError(t, err) + assert.Len(t, chunks, 2) + assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash) + assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash) + + err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash1.Hex(), 
types.ProvingTaskVerified) + assert.NoError(t, err) + err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash2.Hex(), types.ProvingTaskAssigned) + assert.NoError(t, err) + + chunks, err = chunkOrm.GetChunksInRange(context.Background(), 0, 1) + assert.NoError(t, err) + assert.Len(t, chunks, 2) + assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash) + assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash) + assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(chunks[0].ProvingStatus)) + assert.Equal(t, types.ProvingTaskAssigned, types.ProvingStatus(chunks[1].ProvingStatus)) + + err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash") + assert.NoError(t, err) + chunks, err = chunkOrm.GetUnbatchedChunks(context.Background()) + assert.NoError(t, err) + assert.Len(t, chunks, 1) +} + +func TestBatchOrm(t *testing.T) { + sqlDB, err := db.DB() + assert.NoError(t, err) + assert.NoError(t, migrate.ResetDB(sqlDB)) + + err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}) + assert.NoError(t, err) + + dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1) + assert.NoError(t, err) + assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex()) + + dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2) + assert.NoError(t, err) + assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex()) + + hash1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1}) + assert.NoError(t, err) + + batch1, err := batchOrm.GetBatchByIndex(context.Background(), 0) + assert.NoError(t, err) + batchHeader1, err := bridgeTypes.DecodeBatchHeader(batch1.BatchHeader) + assert.NoError(t, err) + batchHash1 := batchHeader1.Hash().Hex() + assert.Equal(t, hash1, batchHash1) + + hash2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2}) + assert.NoError(t, err) + + batch2, err := 
batchOrm.GetBatchByIndex(context.Background(), 1) + assert.NoError(t, err) + batchHeader2, err := bridgeTypes.DecodeBatchHeader(batch2.BatchHeader) + assert.NoError(t, err) + batchHash2 := batchHeader2.Hash().Hex() + assert.Equal(t, hash2, batchHash2) + + count, err := batchOrm.GetBatchCount(context.Background()) + assert.NoError(t, err) + assert.Equal(t, uint64(2), count) + + pendingBatches, err := batchOrm.GetPendingBatches(context.Background(), 100) + assert.NoError(t, err) + assert.Equal(t, 2, len(pendingBatches)) + + rollupStatus, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash1, batchHash2}) + assert.NoError(t, err) + assert.Equal(t, 2, len(rollupStatus)) + assert.Equal(t, types.RollupPending, rollupStatus[0]) + assert.Equal(t, types.RollupPending, rollupStatus[1]) + + err = batchOrm.UpdateProvingStatus(context.Background(), batchHash1, types.ProvingTaskSkipped) + assert.NoError(t, err) + err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitted) + assert.NoError(t, err) + err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskFailed) + assert.NoError(t, err) + err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupCommitted) + assert.NoError(t, err) + + count, err = batchOrm.UpdateSkippedBatches(context.Background()) + assert.NoError(t, err) + assert.Equal(t, uint64(2), count) + + count, err = batchOrm.UpdateSkippedBatches(context.Background()) + assert.NoError(t, err) + assert.Equal(t, uint64(0), count) + + batch, err := batchOrm.GetBatchByIndex(context.Background(), 1) + assert.NoError(t, err) + assert.Equal(t, types.RollupFinalizationSkipped, types.RollupStatus(batch.RollupStatus)) + + err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified) + assert.NoError(t, err) + + dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1) + assert.Error(t, err, gorm.ErrRecordNotFound) 
+ assert.Nil(t, dbProof) + + err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified) + assert.NoError(t, err) + err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupFinalized) + assert.NoError(t, err) + err = batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(context.Background(), batchHash2, types.GasOracleImported, "oracleTxHash") + assert.NoError(t, err) + + updatedBatch, err := batchOrm.GetLatestBatch(context.Background()) + assert.NoError(t, err) + assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus)) + assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus)) + assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus)) + assert.Equal(t, "oracleTxHash", updatedBatch.OracleTxHash) + + err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash2, "commitTxHash", types.RollupCommitted) + assert.NoError(t, err) + updatedBatch, err = batchOrm.GetLatestBatch(context.Background()) + assert.NoError(t, err) + assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash) + assert.Equal(t, types.RollupCommitted, types.RollupStatus(updatedBatch.RollupStatus)) + + err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed) + assert.NoError(t, err) + + updatedBatch, err = batchOrm.GetLatestBatch(context.Background()) + assert.NoError(t, err) + assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash) + assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus)) +} diff --git a/bridge/internal/types/batch.go b/bridge/internal/types/batch.go deleted file mode 100644 index d41300a0e..000000000 --- a/bridge/internal/types/batch.go +++ /dev/null @@ -1,236 +0,0 @@ -package types - -import ( - "bufio" - "bytes" - "encoding/binary" - "math/big" - - "github.com/scroll-tech/go-ethereum/common" - 
"github.com/scroll-tech/go-ethereum/common/hexutil" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/crypto" - - abi "scroll-tech/bridge/abi" -) - -// PublicInputHashConfig is the configuration of how to compute the public input hash. -type PublicInputHashConfig struct { - MaxTxNum int `json:"max_tx_num"` - PaddingTxHash common.Hash `json:"padding_tx_hash"` -} - -const defaultMaxTxNum = 44 - -var defaultPaddingTxHash = [32]byte{} - -// BatchData contains info of batch to be committed. -type BatchData struct { - Batch abi.IScrollChainBatch - TxHashes []common.Hash - TotalTxNum uint64 - TotalL1TxNum uint64 - TotalL2Gas uint64 - - // cache for the BatchHash - hash *common.Hash - // The config to compute the public input hash, or the block hash. - // If it is nil, the hash calculation will use `defaultMaxTxNum` and `defaultPaddingTxHash`. - piCfg *PublicInputHashConfig -} - -// Timestamp returns the timestamp of the first block in the BlockData. -func (b *BatchData) Timestamp() uint64 { - if len(b.Batch.Blocks) == 0 { - return 0 - } - return b.Batch.Blocks[0].Timestamp -} - -// Hash calculates the hash of this batch. -func (b *BatchData) Hash() *common.Hash { - if b.hash != nil { - return b.hash - } - - buf := make([]byte, 8) - hasher := crypto.NewKeccakState() - - // 1. hash PrevStateRoot, NewStateRoot, WithdrawTrieRoot - // @todo: panic on error here. - _, _ = hasher.Write(b.Batch.PrevStateRoot[:]) - _, _ = hasher.Write(b.Batch.NewStateRoot[:]) - _, _ = hasher.Write(b.Batch.WithdrawTrieRoot[:]) - - // 2. 
hash all block contexts - for _, block := range b.Batch.Blocks { - // write BlockHash & ParentHash - _, _ = hasher.Write(block.BlockHash[:]) - _, _ = hasher.Write(block.ParentHash[:]) - // write BlockNumber - binary.BigEndian.PutUint64(buf, block.BlockNumber) - _, _ = hasher.Write(buf) - // write Timestamp - binary.BigEndian.PutUint64(buf, block.Timestamp) - _, _ = hasher.Write(buf) - // write BaseFee - var baseFee [32]byte - if block.BaseFee != nil { - baseFee = newByte32FromBytes(block.BaseFee.Bytes()) - } - _, _ = hasher.Write(baseFee[:]) - // write GasLimit - binary.BigEndian.PutUint64(buf, block.GasLimit) - _, _ = hasher.Write(buf) - // write NumTransactions - binary.BigEndian.PutUint16(buf[:2], block.NumTransactions) - _, _ = hasher.Write(buf[:2]) - // write NumL1Messages - binary.BigEndian.PutUint16(buf[:2], block.NumL1Messages) - _, _ = hasher.Write(buf[:2]) - } - - // 3. add all tx hashes - for _, txHash := range b.TxHashes { - _, _ = hasher.Write(txHash[:]) - } - - // 4. append empty tx hash up to MaxTxNum - maxTxNum := defaultMaxTxNum - paddingTxHash := common.Hash(defaultPaddingTxHash) - if b.piCfg != nil { - maxTxNum = b.piCfg.MaxTxNum - paddingTxHash = b.piCfg.PaddingTxHash - } - for i := len(b.TxHashes); i < maxTxNum; i++ { - _, _ = hasher.Write(paddingTxHash[:]) - } - - b.hash = new(common.Hash) - _, _ = hasher.Read(b.hash[:]) - - return b.hash -} - -// NewBatchData creates a BatchData given the parent batch information and the traces of the blocks -// included in this batch -func NewBatchData(parentBatch *BatchInfo, blocks []*WrappedBlock, piCfg *PublicInputHashConfig) *BatchData { - batchData := new(BatchData) - batch := &batchData.Batch - - // set BatchIndex, ParentBatchHash - batch.BatchIndex = parentBatch.Index + 1 - batch.ParentBatchHash = common.HexToHash(parentBatch.Hash) - batch.Blocks = make([]abi.IScrollChainBlockContext, len(blocks)) - - var batchTxDataBuf bytes.Buffer - batchTxDataWriter := bufio.NewWriter(&batchTxDataBuf) - - for i, 
block := range blocks { - batchData.TotalTxNum += uint64(len(block.Transactions)) - batchData.TotalL2Gas += block.Header.GasUsed - - // set baseFee to 0 when it's nil in the block header - baseFee := block.Header.BaseFee - if baseFee == nil { - baseFee = big.NewInt(0) - } - - batch.Blocks[i] = abi.IScrollChainBlockContext{ - BlockHash: block.Header.Hash(), - ParentHash: block.Header.ParentHash, - BlockNumber: block.Header.Number.Uint64(), - Timestamp: block.Header.Time, - BaseFee: baseFee, - GasLimit: block.Header.GasLimit, - NumTransactions: uint16(len(block.Transactions)), - NumL1Messages: 0, // TODO: currently use 0, will re-enable after we use l2geth to include L1 messages - } - - // fill in RLP-encoded transactions - for _, txData := range block.Transactions { - data, _ := hexutil.Decode(txData.Data) - // right now we only support legacy tx - tx := types.NewTx(&types.LegacyTx{ - Nonce: txData.Nonce, - To: txData.To, - Value: txData.Value.ToInt(), - Gas: txData.Gas, - GasPrice: txData.GasPrice.ToInt(), - Data: data, - V: txData.V.ToInt(), - R: txData.R.ToInt(), - S: txData.S.ToInt(), - }) - rlpTxData, _ := tx.MarshalBinary() - var txLen [4]byte - binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData))) - _, _ = batchTxDataWriter.Write(txLen[:]) - _, _ = batchTxDataWriter.Write(rlpTxData) - batchData.TxHashes = append(batchData.TxHashes, tx.Hash()) - } - - if i == 0 { - batch.PrevStateRoot = common.HexToHash(parentBatch.StateRoot) - } - - // set NewStateRoot & WithdrawTrieRoot from the last block - if i == len(blocks)-1 { - batch.NewStateRoot = block.Header.Root - batch.WithdrawTrieRoot = block.WithdrawTrieRoot - } - } - - if err := batchTxDataWriter.Flush(); err != nil { - panic("Buffered I/O flush failed") - } - - batch.L2Transactions = batchTxDataBuf.Bytes() - batchData.piCfg = piCfg - - return batchData -} - -// NewGenesisBatchData generates the batch that contains the genesis block. 
-func NewGenesisBatchData(genesisBlockTrace *WrappedBlock) *BatchData { - header := genesisBlockTrace.Header - if header.Number.Uint64() != 0 { - panic("invalid genesis block trace: block number is not 0") - } - - batchData := new(BatchData) - batch := &batchData.Batch - - // fill in batch information - batch.BatchIndex = 0 - batch.Blocks = make([]abi.IScrollChainBlockContext, 1) - batch.NewStateRoot = header.Root - // PrevStateRoot, WithdrawTrieRoot, ParentBatchHash should all be 0 - // L2Transactions should be empty - - // fill in block context - batch.Blocks[0] = abi.IScrollChainBlockContext{ - BlockHash: header.Hash(), - ParentHash: header.ParentHash, - BlockNumber: header.Number.Uint64(), - Timestamp: header.Time, - BaseFee: header.BaseFee, - GasLimit: header.GasLimit, - NumTransactions: 0, - NumL1Messages: 0, - } - - return batchData -} - -// newByte32FromBytes converts the bytes in big-endian encoding to 32 bytes in big-endian encoding -func newByte32FromBytes(b []byte) [32]byte { - var byte32 [32]byte - - if len(b) > 32 { - b = b[len(b)-32:] - } - - copy(byte32[32-len(b):], b) - return byte32 -} diff --git a/common/types/batch_header.go b/bridge/internal/types/batch_header.go similarity index 70% rename from common/types/batch_header.go rename to bridge/internal/types/batch_header.go index 4e797f2ed..44223091d 100644 --- a/common/types/batch_header.go +++ b/bridge/internal/types/batch_header.go @@ -6,6 +6,7 @@ import ( "math/big" "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/crypto" ) @@ -38,16 +39,16 @@ func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64 for _, chunk := range chunks { // build data hash totalL1MessagePoppedBeforeChunk := nextIndex - chunkBytes, err := chunk.Hash(totalL1MessagePoppedBeforeChunk) + chunkHash, err := chunk.Hash(totalL1MessagePoppedBeforeChunk) if err != nil { return nil, err } - dataBytes = append(dataBytes, 
chunkBytes...) + dataBytes = append(dataBytes, chunkHash.Bytes()...) // build skip bitmap for _, block := range chunk.Blocks { for _, tx := range block.Transactions { - if tx.Type != 0x7E { + if tx.Type != types.L1MessageTxType { continue } currentIndex := tx.Nonce @@ -101,6 +102,26 @@ func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64 }, nil } +// Version returns the version of the BatchHeader. +func (b *BatchHeader) Version() uint8 { + return b.version +} + +// BatchIndex returns the batch index of the BatchHeader. +func (b *BatchHeader) BatchIndex() uint64 { + return b.batchIndex +} + +// TotalL1MessagePopped returns the total number of L1 messages popped in the BatchHeader. +func (b *BatchHeader) TotalL1MessagePopped() uint64 { + return b.totalL1MessagePopped +} + +// SkippedL1MessageBitmap returns the skipped L1 message bitmap in the BatchHeader. +func (b *BatchHeader) SkippedL1MessageBitmap() []byte { + return b.skippedL1MessageBitmap +} + // Encode encodes the BatchHeader into RollupV2 BatchHeaderV0Codec Encoding. func (b *BatchHeader) Encode() []byte { batchBytes := make([]byte, 89+len(b.skippedL1MessageBitmap)) @@ -118,3 +139,20 @@ func (b *BatchHeader) Encode() []byte { func (b *BatchHeader) Hash() common.Hash { return crypto.Keccak256Hash(b.Encode()) } + +// DecodeBatchHeader attempts to decode the given byte slice into a BatchHeader. 
+func DecodeBatchHeader(data []byte) (*BatchHeader, error) { + if len(data) < 89 { + return nil, fmt.Errorf("insufficient data for BatchHeader") + } + b := &BatchHeader{ + version: data[0], + batchIndex: binary.BigEndian.Uint64(data[1:9]), + l1MessagePopped: binary.BigEndian.Uint64(data[9:17]), + totalL1MessagePopped: binary.BigEndian.Uint64(data[17:25]), + dataHash: common.BytesToHash(data[25:57]), + parentBatchHash: common.BytesToHash(data[57:89]), + skippedL1MessageBitmap: data[89:], + } + return b, nil +} diff --git a/common/types/batch_header_test.go b/bridge/internal/types/batch_header_test.go similarity index 85% rename from common/types/batch_header_test.go rename to bridge/internal/types/batch_header_test.go index f0ade6349..c8f53bee8 100644 --- a/common/types/batch_header_test.go +++ b/bridge/internal/types/batch_header_test.go @@ -11,7 +11,7 @@ import ( func TestNewBatchHeader(t *testing.T) { // Without L1 Msg - templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json") + templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") assert.NoError(t, err) wrappedBlock := &WrappedBlock{} @@ -36,7 +36,7 @@ func TestNewBatchHeader(t *testing.T) { assert.Equal(t, 0, len(batchHeader.skippedL1MessageBitmap)) // 1 L1 Msg in 1 bitmap - templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json") + templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json") assert.NoError(t, err) wrappedBlock2 := &WrappedBlock{} @@ -54,7 +54,7 @@ func TestNewBatchHeader(t *testing.T) { assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap)) // many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs - templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_05.json") + templateBlockTrace3, err := os.ReadFile("../../../common/testdata/blockTrace_05.json") assert.NoError(t, err) wrappedBlock3 := &WrappedBlock{} @@ -87,7 +87,7 @@ func TestNewBatchHeader(t 
*testing.T) { assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap)) // many sparse L1 Msgs in 1 bitmap - templateBlockTrace4, err := os.ReadFile("../testdata/blockTrace_06.json") + templateBlockTrace4, err := os.ReadFile("../../../common/testdata/blockTrace_06.json") assert.NoError(t, err) wrappedBlock4 := &WrappedBlock{} @@ -106,7 +106,7 @@ func TestNewBatchHeader(t *testing.T) { assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap)) // many L1 Msgs in each of 2 bitmaps - templateBlockTrace5, err := os.ReadFile("../testdata/blockTrace_07.json") + templateBlockTrace5, err := os.ReadFile("../../../common/testdata/blockTrace_07.json") assert.NoError(t, err) wrappedBlock5 := &WrappedBlock{} @@ -127,7 +127,7 @@ func TestNewBatchHeader(t *testing.T) { func TestBatchHeaderEncode(t *testing.T) { // Without L1 Msg - templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json") + templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") assert.NoError(t, err) wrappedBlock := &WrappedBlock{} @@ -154,7 +154,7 @@ func TestBatchHeaderEncode(t *testing.T) { assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes)) // With L1 Msg - templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json") + templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json") assert.NoError(t, err) wrappedBlock2 := &WrappedBlock{} @@ -174,7 +174,7 @@ func TestBatchHeaderEncode(t *testing.T) { func TestBatchHeaderHash(t *testing.T) { // Without L1 Msg - templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json") + templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") assert.NoError(t, err) wrappedBlock := &WrappedBlock{} @@ -199,7 +199,7 @@ func 
TestBatchHeaderHash(t *testing.T) { hash := batchHeader.Hash() assert.Equal(t, "d69da4357da0073f4093c76e49f077e21bb52f48f57ee3e1fbd9c38a2881af81", common.Bytes2Hex(hash.Bytes())) - templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json") + templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json") assert.NoError(t, err) wrappedBlock2 := &WrappedBlock{} @@ -216,7 +216,7 @@ func TestBatchHeaderHash(t *testing.T) { assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes())) // With L1 Msg - templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_04.json") + templateBlockTrace3, err := os.ReadFile("../../../common/testdata/blockTrace_04.json") assert.NoError(t, err) wrappedBlock3 := &WrappedBlock{} @@ -232,3 +232,20 @@ func TestBatchHeaderHash(t *testing.T) { hash = batchHeader.Hash() assert.Equal(t, "0ec9547c6645d5f0c1254e121f49e93f54525cfda5bfb2236440fb3470f48902", common.Bytes2Hex(hash.Bytes())) } + +func TestBatchHeaderDecode(t *testing.T) { + header := &BatchHeader{ + version: 1, + batchIndex: 10, + l1MessagePopped: 20, + totalL1MessagePopped: 30, + dataHash: common.HexToHash("0x01"), + parentBatchHash: common.HexToHash("0x02"), + skippedL1MessageBitmap: []byte{0x01, 0x02, 0x03}, + } + + encoded := header.Encode() + decoded, err := DecodeBatchHeader(encoded) + assert.NoError(t, err) + assert.Equal(t, header, decoded) +} diff --git a/bridge/internal/types/batch_test.go b/bridge/internal/types/batch_test.go deleted file mode 100644 index e423e5225..000000000 --- a/bridge/internal/types/batch_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package types - -import ( - "math/big" - "testing" - - "github.com/scroll-tech/go-ethereum/common" - gethTypes "github.com/scroll-tech/go-ethereum/core/types" - "github.com/stretchr/testify/assert" - - abi "scroll-tech/bridge/abi" -) - -func TestBatchHash(t *testing.T) { - txBytes := 
common.Hex2Bytes("02f8710582fd14808506e38dccc9825208944d496ccc28058b1d74b7a19541663e21154f9c848801561db11e24a43380c080a0d890606d7a35b2ab0f9b866d62c092d5b163f3e6a55537ae1485aac08c3f8ff7a023997be2d32f53e146b160fff0ba81e81dbb4491c865ab174d15c5b3d28c41ae") - tx := new(gethTypes.Transaction) - if err := tx.UnmarshalBinary(txBytes); err != nil { - t.Fatalf("invalid tx hex string: %s", err) - } - - batchData := new(BatchData) - batchData.TxHashes = append(batchData.TxHashes, tx.Hash()) - batchData.piCfg = &PublicInputHashConfig{ - MaxTxNum: 4, - PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"), - } - - batch := &batchData.Batch - batch.PrevStateRoot = common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000cafe") - - block := abi.IScrollChainBlockContext{ - BlockNumber: 51966, - Timestamp: 123456789, - BaseFee: new(big.Int).SetUint64(0), - GasLimit: 10000000000000000, - NumTransactions: 1, - NumL1Messages: 0, - } - batch.Blocks = append(batch.Blocks, block) - - hash := batchData.Hash() - assert.Equal(t, *hash, common.HexToHash("0xa9f2ca3175794f91226a410ba1e60fff07a405c957562675c4149b77e659d805")) - - // use a different tx hash - txBytes = common.Hex2Bytes("f8628001830f424094000000000000000000000000000000000000bbbb8080820a97a064e07cd8f939e2117724bdcbadc80dda421381cbc2a1f4e0d093d9cc5c5cf68ea03e264227f80852d88743cd9e43998f2746b619180366a87e4531debf9c3fa5dc") - tx = new(gethTypes.Transaction) - if err := tx.UnmarshalBinary(txBytes); err != nil { - t.Fatalf("invalid tx hex string: %s", err) - } - batchData.TxHashes[0] = tx.Hash() - - batchData.hash = nil // clear the cache - assert.Equal(t, *batchData.Hash(), common.HexToHash("0x398cb22bbfa1665c1b342b813267538a4c933d7f92d8bd9184aba0dd1122987b")) -} - -func TestNewGenesisBatch(t *testing.T) { - genesisBlock := &gethTypes.Header{ - UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), - Root: 
common.HexToHash("0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5"), - TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), - ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), - Difficulty: big.NewInt(1), - Number: big.NewInt(0), - GasLimit: 940000000, - GasUsed: 0, - Time: 1639724192, - Extra: common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000214f8d488aa9ebf83e30bad45fb8f9c8ee2509f5511caff794753d07e9dfb218cfc233bb62d2c57022783094e1a7edb6f069f8424bb68496a0926b130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), - BaseFee: big.NewInt(1000000000), - } - assert.Equal( - t, - genesisBlock.Hash().Hex(), - "0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c", - "wrong genesis block header", - ) - - blockTrace := &WrappedBlock{genesisBlock, nil, common.Hash{}} - batchData := NewGenesisBatchData(blockTrace) - t.Log(batchData.Batch.Blocks[0]) - batchData.piCfg = &PublicInputHashConfig{ - MaxTxNum: 25, - PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"), - } - assert.Equal( - t, - batchData.Hash().Hex(), - "0x65cf210e30f75cf8fd198df124255f73bc08d6324759e828a784fa938e7ac43d", - "wrong genesis batch hash", - ) -} diff --git a/bridge/internal/types/block.go b/bridge/internal/types/block.go index 78d417365..5955cd5c3 100644 --- a/bridge/internal/types/block.go +++ b/bridge/internal/types/block.go @@ -1,10 +1,18 @@ package types import ( + "encoding/binary" + "errors" + "math" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/common/hexutil" "github.com/scroll-tech/go-ethereum/core/types" ) +const nonZeroByteGas uint64 = 16 +const zeroByteGas uint64 = 4 + // WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash. 
type WrappedBlock struct { Header *types.Header `json:"header"` @@ -13,9 +21,116 @@ type WrappedBlock struct { WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"` } -// BatchInfo contains the BlockBatch's main info -type BatchInfo struct { - Index uint64 `json:"index"` - Hash string `json:"hash"` - StateRoot string `json:"state_root"` +// NumL1Messages returns the number of L1 messages in this block. +// This number is the sum of included and skipped L1 messages. +func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 { + var lastQueueIndex *uint64 + for _, txData := range w.Transactions { + if txData.Type == types.L1MessageTxType { + lastQueueIndex = &txData.Nonce + } + } + if lastQueueIndex == nil { + return 0 + } + // note: last queue index included before this block is totalL1MessagePoppedBefore - 1 + // TODO: cache results + return *lastQueueIndex - totalL1MessagePoppedBefore + 1 +} + +// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding. +func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) { + bytes := make([]byte, 60) + + if !w.Header.Number.IsUint64() { + return nil, errors.New("block number is not uint64") + } + if len(w.Transactions) > math.MaxUint16 { + return nil, errors.New("number of transactions exceeds max uint16") + } + + numL1Messages := w.NumL1Messages(totalL1MessagePoppedBefore) + if numL1Messages > math.MaxUint16 { + return nil, errors.New("number of L1 messages exceeds max uint16") + } + + binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64()) + binary.BigEndian.PutUint64(bytes[8:], w.Header.Time) + // TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559. 
+ binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit) + binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions))) + binary.BigEndian.PutUint16(bytes[58:], uint16(numL1Messages)) + + return bytes, nil +} + +// EstimateL1CommitCalldataSize calculates the calldata size in l1 commit approximately. +// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk). +// This needs to be adjusted in the future. +func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 { + var size uint64 + for _, txData := range w.Transactions { + if txData.Type == types.L1MessageTxType { + continue + } + size += uint64(len(txData.Data)) + } + return size +} + +// EstimateL1CommitGas calculates the calldata gas in l1 commit approximately. +// TODO: This will need to be adjusted. +// The part added here is only the calldata cost, +// but we have execution cost for verifying blocks / chunks / batches and storing the batch hash. +func (w *WrappedBlock) EstimateL1CommitGas() uint64 { + var total uint64 + for _, txData := range w.Transactions { + if txData.Type == types.L1MessageTxType { + continue + } + data, _ := hexutil.Decode(txData.Data) + tx := types.NewTx(&types.LegacyTx{ + Nonce: txData.Nonce, + To: txData.To, + Value: txData.Value.ToInt(), + Gas: txData.Gas, + GasPrice: txData.GasPrice.ToInt(), + Data: data, + V: txData.V.ToInt(), + R: txData.R.ToInt(), + S: txData.S.ToInt(), + }) + rlpTxData, _ := tx.MarshalBinary() + + for _, b := range rlpTxData { + if b == 0 { + total += zeroByteGas + } else { + total += nonZeroByteGas + } + } + + var txLen [4]byte + binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData))) + + for _, b := range txLen { + if b == 0 { + total += zeroByteGas + } else { + total += nonZeroByteGas + } + } + } + return total +} + +// L2TxsNum calculates the number of l2 txs. 
+func (w *WrappedBlock) L2TxsNum() uint64 { + var count uint64 + for _, txData := range w.Transactions { + if txData.Type != types.L1MessageTxType { + count++ + } + } + return count } diff --git a/common/types/chunk.go b/bridge/internal/types/chunk.go similarity index 85% rename from common/types/chunk.go rename to bridge/internal/types/chunk.go index f2cc66b5a..3c455f695 100644 --- a/common/types/chunk.go +++ b/bridge/internal/types/chunk.go @@ -7,6 +7,7 @@ import ( "fmt" "strings" + "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common/hexutil" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/crypto" @@ -61,10 +62,13 @@ func (c *Chunk) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) { // Append rlp-encoded l2Txs for _, txData := range block.Transactions { - if txData.Type == 0x7E { + if txData.Type == types.L1MessageTxType { continue } - data, _ := hexutil.Decode(txData.Data) + data, err := hexutil.Decode(txData.Data) + if err != nil { + return nil, err + } // right now we only support legacy tx tx := types.NewTx(&types.LegacyTx{ Nonce: txData.Nonce, @@ -77,7 +81,10 @@ func (c *Chunk) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) { R: txData.R.ToInt(), S: txData.S.ToInt(), }) - rlpTxData, _ := tx.MarshalBinary() + rlpTxData, err := tx.MarshalBinary() + if err != nil { + return nil, err + } var txLen [4]byte binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData))) l2TxDataBytes = append(l2TxDataBytes, txLen[:]...) 
@@ -91,17 +98,17 @@ func (c *Chunk) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) { } // Hash hashes the Chunk into RollupV2 Chunk Hash -func (c *Chunk) Hash(totalL1MessagePoppedBefore uint64) ([]byte, error) { +func (c *Chunk) Hash(totalL1MessagePoppedBefore uint64) (common.Hash, error) { chunkBytes, err := c.Encode(totalL1MessagePoppedBefore) if err != nil { - return nil, err + return common.Hash{}, err } numBlocks := chunkBytes[0] // concatenate block contexts var dataBytes []byte for i := 0; i < int(numBlocks); i++ { - // only first 58 bytes is needed + // only the first 58 bytes of each BlockContext are needed for the hashing process dataBytes = append(dataBytes, chunkBytes[1+60*i:60*i+59]...) } @@ -113,9 +120,9 @@ func (c *Chunk) Hash(totalL1MessagePoppedBefore uint64) ([]byte, error) { txHash := strings.TrimPrefix(txData.TxHash, "0x") hashBytes, err := hex.DecodeString(txHash) if err != nil { - return nil, err + return common.Hash{}, err } - if txData.Type == 0x7E { + if txData.Type == types.L1MessageTxType { l1TxHashes = append(l1TxHashes, hashBytes...) } else { l2TxHashes = append(l2TxHashes, hashBytes...) @@ -125,6 +132,6 @@ func (c *Chunk) Hash(totalL1MessagePoppedBefore uint64) ([]byte, error) { dataBytes = append(dataBytes, l2TxHashes...) } - hash := crypto.Keccak256Hash(dataBytes).Bytes() + hash := crypto.Keccak256Hash(dataBytes) return hash, nil } diff --git a/common/types/chunk_test.go b/bridge/internal/types/chunk_test.go similarity index 82% rename from common/types/chunk_test.go rename to bridge/internal/types/chunk_test.go index 9c3f99344..1ed1d44ed 100644 --- a/common/types/chunk_test.go +++ b/bridge/internal/types/chunk_test.go @@ -32,7 +32,7 @@ func TestChunkEncode(t *testing.T) { assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte") // Test case 3: when the chunk contains one block. 
- templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json") + templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") assert.NoError(t, err) wrappedBlock := &WrappedBlock{} @@ -50,7 +50,7 @@ func TestChunkEncode(t *testing.T) { assert.Equal(t, "0100000000000000020000000063807b2a0000000000000000000000000000000000000000000000000000000000000000000355418d1e81840002000000000073f87180843b9aec2e8307a12094c0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca28a152d02c7e14af60000008083019ecea0ab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514a034cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b00000073f87101843b9aec2e8307a1209401bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed8a152d02c7e14af60000008083019ecea0f039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316a05a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1", hexString) // Test case 4: when the chunk contains one block with 1 L1MsgTx - templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json") + templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json") assert.NoError(t, err) wrappedBlock2 := &WrappedBlock{} @@ -86,13 +86,13 @@ func TestChunkHash(t *testing.T) { chunk := &Chunk{ Blocks: []*WrappedBlock{}, } - bytes, err := chunk.Hash(0) - assert.Nil(t, bytes) + hash, err := chunk.Hash(0) assert.Error(t, err) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000000", hash.Hex()) assert.Contains(t, err.Error(), "number of blocks is 0") // Test case 2: successfully hashing a chunk on one block - templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json") + templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json") assert.NoError(t, err) wrappedBlock := &WrappedBlock{} assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock)) @@ -101,13 +101,12 @@ func TestChunkHash(t *testing.T) { wrappedBlock, }, } - bytes, err = chunk.Hash(0) - 
hexString := hex.EncodeToString(bytes) + hash, err = chunk.Hash(0) assert.NoError(t, err) - assert.Equal(t, "78c839dfc494396c16b40946f32b3f4c3e8c2d4bfd04aefcf235edec474482f8", hexString) + assert.Equal(t, "0x78c839dfc494396c16b40946f32b3f4c3e8c2d4bfd04aefcf235edec474482f8", hash.Hex()) // Test case 3: successfully hashing a chunk on two blocks - templateBlockTrace1, err := os.ReadFile("../testdata/blockTrace_03.json") + templateBlockTrace1, err := os.ReadFile("../../../common/testdata/blockTrace_03.json") assert.NoError(t, err) wrappedBlock1 := &WrappedBlock{} assert.NoError(t, json.Unmarshal(templateBlockTrace1, wrappedBlock1)) @@ -117,13 +116,12 @@ func TestChunkHash(t *testing.T) { wrappedBlock1, }, } - bytes, err = chunk.Hash(0) - hexString = hex.EncodeToString(bytes) + hash, err = chunk.Hash(0) assert.NoError(t, err) - assert.Equal(t, "aa9e494f72bc6965857856f0fae6916f27b2a6591c714a573b2fab46df03b8ae", hexString) + assert.Equal(t, "0xaa9e494f72bc6965857856f0fae6916f27b2a6591c714a573b2fab46df03b8ae", hash.Hex()) // Test case 4: successfully hashing a chunk on two blocks each with L1 and L2 txs - templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json") + templateBlockTrace2, err := os.ReadFile("../../../common/testdata/blockTrace_04.json") assert.NoError(t, err) wrappedBlock2 := &WrappedBlock{} assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2)) @@ -133,8 +131,7 @@ func TestChunkHash(t *testing.T) { wrappedBlock2, }, } - bytes, err = chunk.Hash(0) - hexString = hex.EncodeToString(bytes) + hash, err = chunk.Hash(0) assert.NoError(t, err) - assert.Equal(t, "42967825696a129e7a83f082097aca982747480956dcaa448c9296e795c9a91a", hexString) + assert.Equal(t, "0x42967825696a129e7a83f082097aca982747480956dcaa448c9296e795c9a91a", hash.Hex()) } diff --git a/bridge/mock_bridge/MockBridgeL1.sol b/bridge/mock_bridge/MockBridgeL1.sol index 209ef609a..64352e8fa 100644 --- a/bridge/mock_bridge/MockBridgeL1.sol +++ 
b/bridge/mock_bridge/MockBridgeL1.sol @@ -1,6 +1,10 @@ // SPDX-License-Identifier: UNLICENSED pragma solidity ^0.8.0; +import {BatchHeaderV0Codec} from "../../contracts/src/libraries/codec/BatchHeaderV0Codec.sol"; +import {ChunkCodec} from "../../contracts/src/libraries/codec/ChunkCodec.sol"; +import {IL1MessageQueue} from "../../contracts/src/L1/rollup/IL1MessageQueue.sol"; + contract MockBridgeL1 { /****************************** * Events from L1MessageQueue * @@ -17,7 +21,7 @@ contract MockBridgeL1 { address indexed sender, address indexed target, uint256 value, - uint256 queueIndex, + uint64 queueIndex, uint256 gasLimit, bytes data ); @@ -46,74 +50,27 @@ contract MockBridgeL1 { /// @param messageHash The hash of the message. event RelayedMessage(bytes32 indexed messageHash); - /// @dev The maximum number of transaction in on batch. - uint256 public immutable maxNumTxInBatch; - - /// @dev The hash used for padding public inputs. - bytes32 public immutable paddingTxHash; - /*************************** * Events from ScrollChain * ***************************/ - /// @notice Emitted when a new batch is commited. - /// @param batchHash The hash of the batch + /// @notice Emitted when a new batch is committed. + /// @param batchHash The hash of the batch. event CommitBatch(bytes32 indexed batchHash); - /// @notice Emitted when a batch is reverted. - /// @param batchHash The identification of the batch. - event RevertBatch(bytes32 indexed batchHash); - /// @notice Emitted when a batch is finalized. /// @param batchHash The hash of the batch - event FinalizeBatch(bytes32 indexed batchHash); + /// @param stateRoot The state root in layer 2 after this batch. + /// @param withdrawRoot The merkle root in layer2 after this batch. + event FinalizeBatch(bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot); /*********** * Structs * ***********/ - struct BlockContext { - // The hash of this block. - bytes32 blockHash; - // The parent hash of this block. 
- bytes32 parentHash; - // The height of this block. - uint64 blockNumber; - // The timestamp of this block. - uint64 timestamp; - // The base fee of this block. - // Currently, it is not used, because we disable EIP-1559. - // We keep it for future proof. - uint256 baseFee; - // The gas limit of this block. - uint64 gasLimit; - // The number of transactions in this block, both L1 & L2 txs. - uint16 numTransactions; - // The number of l1 messages in this block. - uint16 numL1Messages; - } - - struct Batch { - // The list of blocks in this batch - BlockContext[] blocks; // MAX_NUM_BLOCKS = 100, about 5 min - // The state root of previous batch. - // The first batch will use 0x0 for prevStateRoot - bytes32 prevStateRoot; - // The state root of the last block in this batch. - bytes32 newStateRoot; - // The withdraw trie root of the last block in this batch. - bytes32 withdrawTrieRoot; - // The index of the batch. - uint64 batchIndex; - // The parent batch hash. - bytes32 parentBatchHash; - // Concatenated raw data of RLP encoded L2 txs - bytes l2Transactions; - } - struct L2MessageProof { - // The hash of the batch where the message belongs to. - bytes32 batchHash; + // The index of the batch where the message belongs to. + uint256 batchIndex; // Concatenation of merkle proof for withdraw merkle trie. bytes merkleProof; } @@ -125,14 +82,7 @@ contract MockBridgeL1 { /// @notice Message nonce, used to avoid relay attack. 
uint256 public messageNonce; - /*************** - * Constructor * - ***************/ - - constructor() { - maxNumTxInBatch = 44; - paddingTxHash = 0x0000000000000000000000000000000000000000000000000000000000000000; - } + mapping(uint256 => bytes32) public committedBatches; /*********************************** * Functions from L2GasPriceOracle * @@ -154,7 +104,7 @@ contract MockBridgeL1 { bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, target, value, messageNonce, message); { address _sender = applyL1ToL2Alias(address(this)); - emit QueueTransaction(_sender, target, 0, messageNonce, gasLimit, _xDomainCalldata); + emit QueueTransaction(_sender, target, 0, uint64(messageNonce), gasLimit, _xDomainCalldata); } emit SentMessage(msg.sender, target, value, messageNonce, gasLimit, message); @@ -178,37 +128,65 @@ contract MockBridgeL1 { * Functions from ScrollChain * ******************************/ - function commitBatch(Batch memory _batch) external { - _commitBatch(_batch); - } + function commitBatch( + uint8 /*version*/, + bytes calldata /*parentBatchHeader*/, + bytes[] memory chunks, + bytes calldata /*skippedL1MessageBitmap*/ + ) external { + // check whether the batch is empty + uint256 _chunksLength = chunks.length; + require(_chunksLength > 0, "batch is empty"); - function commitBatches(Batch[] memory _batches) external { - for (uint256 i = 0; i < _batches.length; i++) { - _commitBatch(_batches[i]); + uint256 dataPtr; + assembly { + dataPtr := mload(0x40) + mstore(0x40, add(dataPtr, mul(_chunksLength, 32))) } - } - function revertBatch(bytes32 _batchHash) external { - emit RevertBatch(_batchHash); + for (uint256 i = 0; i < _chunksLength; i++) { + _commitChunk(dataPtr, chunks[i]); + + unchecked { + dataPtr += 32; + } + } + + bytes32 _dataHash; + assembly { + let dataLen := mul(_chunksLength, 0x20) + _dataHash := keccak256(sub(dataPtr, dataLen), dataLen) + } + + bytes memory paddedData = new bytes(89); + assembly { + mstore(add(paddedData, 57), 
_dataHash) + } + + uint256 batchPtr; + assembly { + batchPtr := add(paddedData, 32) + } + bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, 89); + committedBatches[0] = _batchHash; + emit CommitBatch(_batchHash); } function finalizeBatchWithProof( - bytes32 _batchHash, - uint256[] memory, - uint256[] memory + bytes calldata /*batchHeader*/, + bytes32 /*prevStateRoot*/, + bytes32 postStateRoot, + bytes32 withdrawRoot, + bytes calldata /*aggrProof*/ ) external { - emit FinalizeBatch(_batchHash); + bytes32 _batchHash = committedBatches[0]; + emit FinalizeBatch(_batchHash, postStateRoot, withdrawRoot); } /********************** * Internal Functions * **********************/ - function _commitBatch(Batch memory _batch) internal { - bytes32 _batchHash = _computePublicInputHash(_batch); - emit CommitBatch(_batchHash); - } - /// @dev Internal function to generate the correct cross domain calldata for a message. /// @param _sender Message sender address. /// @param _target Target contract address. @@ -234,6 +212,10 @@ contract MockBridgeL1 { ); } + /// @notice Utility function that converts the address in the L1 that submitted a tx to + /// the inbox to the msg.sender viewed in the L2 + /// @param l1Address the address in the L1 that triggered the tx to L2 + /// @return l2Address L2 address as viewed in msg.sender function applyL1ToL2Alias(address l1Address) internal pure returns (address l2Address) { uint160 offset = uint160(0x1111000000000000000000000000000000001111); unchecked { @@ -241,140 +223,67 @@ contract MockBridgeL1 { } } - /// @dev Internal function to compute the public input hash. - /// @param batch The batch to compute. - function _computePublicInputHash(Batch memory batch) - internal - view - returns ( - bytes32 - ) - { - uint256 publicInputsPtr; - // 1. 
append prevStateRoot, newStateRoot and withdrawTrieRoot to public inputs - { - bytes32 prevStateRoot = batch.prevStateRoot; - bytes32 newStateRoot = batch.newStateRoot; - bytes32 withdrawTrieRoot = batch.withdrawTrieRoot; - // number of bytes in public inputs: 32 * 3 + 124 * blocks + 32 * MAX_NUM_TXS - uint256 publicInputsSize = 32 * 3 + batch.blocks.length * 124 + 32 * maxNumTxInBatch; - assembly { - publicInputsPtr := mload(0x40) - mstore(0x40, add(publicInputsPtr, publicInputsSize)) - mstore(publicInputsPtr, prevStateRoot) - publicInputsPtr := add(publicInputsPtr, 0x20) - mstore(publicInputsPtr, newStateRoot) - publicInputsPtr := add(publicInputsPtr, 0x20) - mstore(publicInputsPtr, withdrawTrieRoot) - publicInputsPtr := add(publicInputsPtr, 0x20) + function _commitChunk( + uint256 memPtr, + bytes memory _chunk + ) internal pure { + uint256 chunkPtr; + uint256 startDataPtr; + uint256 dataPtr; + uint256 blockPtr; + + assembly { + dataPtr := mload(0x40) + startDataPtr := dataPtr + chunkPtr := add(_chunk, 0x20) // skip chunkLength + blockPtr := add(chunkPtr, 1) // skip numBlocks + } + + uint256 _numBlocks = ChunkCodec.validateChunkLength(chunkPtr, _chunk.length); + + // concatenate block contexts + uint256 _totalTransactionsInChunk; + for (uint256 i = 0; i < _numBlocks; i++) { + dataPtr = ChunkCodec.copyBlockContext(chunkPtr, dataPtr, i); + uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr); + unchecked { + _totalTransactionsInChunk += _numTransactionsInBlock; + blockPtr += ChunkCodec.BLOCK_CONTEXT_LENGTH; } } - uint64 numTransactionsInBatch; - BlockContext memory _block; - // 2. append block information to public inputs. - for (uint256 i = 0; i < batch.blocks.length; i++) { - // validate blocks, we won't check first block against previous batch. 
- { - BlockContext memory _currentBlock = batch.blocks[i]; - if (i > 0) { - require(_block.blockHash == _currentBlock.parentHash, "Parent hash mismatch"); - require(_block.blockNumber + 1 == _currentBlock.blockNumber, "Block number mismatch"); - } - _block = _currentBlock; - } + assembly { + mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes + blockPtr := add(chunkPtr, 1) // reset block ptr + } - // append blockHash and parentHash to public inputs - { - bytes32 blockHash = _block.blockHash; - bytes32 parentHash = _block.parentHash; + // concatenate tx hashes + uint256 l2TxPtr = ChunkCodec.l2TxPtr(chunkPtr, _numBlocks); + while (_numBlocks > 0) { + // concatenate l2 transaction hashes + uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr); + for (uint256 j = 0; j < _numTransactionsInBlock; j++) { + bytes32 txHash; + (txHash, l2TxPtr) = ChunkCodec.loadL2TxHash(l2TxPtr); assembly { - mstore(publicInputsPtr, blockHash) - publicInputsPtr := add(publicInputsPtr, 0x20) - mstore(publicInputsPtr, parentHash) - publicInputsPtr := add(publicInputsPtr, 0x20) + mstore(dataPtr, txHash) + dataPtr := add(dataPtr, 0x20) } } - // append blockNumber and blockTimestamp to public inputs - { - uint256 blockNumber = _block.blockNumber; - uint256 blockTimestamp = _block.timestamp; - assembly { - mstore(publicInputsPtr, shl(192, blockNumber)) - publicInputsPtr := add(publicInputsPtr, 0x8) - mstore(publicInputsPtr, shl(192, blockTimestamp)) - publicInputsPtr := add(publicInputsPtr, 0x8) - } - } - // append baseFee to public inputs - { - uint256 baseFee = _block.baseFee; - assembly { - mstore(publicInputsPtr, baseFee) - publicInputsPtr := add(publicInputsPtr, 0x20) - } - } - uint64 numTransactionsInBlock = _block.numTransactions; - // gasLimit, numTransactions and numL1Messages to public inputs - { - uint256 gasLimit = _block.gasLimit; - uint256 numL1MessagesInBlock = _block.numL1Messages; - assembly { - mstore(publicInputsPtr, 
shl(192, gasLimit)) - publicInputsPtr := add(publicInputsPtr, 0x8) - mstore(publicInputsPtr, shl(240, numTransactionsInBlock)) - publicInputsPtr := add(publicInputsPtr, 0x2) - mstore(publicInputsPtr, shl(240, numL1MessagesInBlock)) - publicInputsPtr := add(publicInputsPtr, 0x2) - } - } - numTransactionsInBatch += numTransactionsInBlock; - } - require(numTransactionsInBatch <= maxNumTxInBatch, "Too many transactions in batch"); - // 3. append transaction hash to public inputs. - uint256 _l2TxnPtr; - { - bytes memory l2Transactions = batch.l2Transactions; - assembly { - _l2TxnPtr := add(l2Transactions, 0x20) - } - } - for (uint256 i = 0; i < batch.blocks.length; i++) { - uint256 numL1MessagesInBlock = batch.blocks[i].numL1Messages; - require(numL1MessagesInBlock == 0); - uint256 numTransactionsInBlock = batch.blocks[i].numTransactions; - for (uint256 j = numL1MessagesInBlock; j < numTransactionsInBlock; ++j) { - bytes32 hash; - assembly { - let txPayloadLength := shr(224, mload(_l2TxnPtr)) - _l2TxnPtr := add(_l2TxnPtr, 4) - _l2TxnPtr := add(_l2TxnPtr, txPayloadLength) - hash := keccak256(sub(_l2TxnPtr, txPayloadLength), txPayloadLength) - mstore(publicInputsPtr, hash) - publicInputsPtr := add(publicInputsPtr, 0x20) - } + unchecked { + _numBlocks -= 1; + blockPtr += ChunkCodec.BLOCK_CONTEXT_LENGTH; } } - // 4. append padding transaction to public inputs. - bytes32 txHashPadding = paddingTxHash; - for (uint256 i = numTransactionsInBatch; i < maxNumTxInBatch; i++) { - assembly { - mstore(publicInputsPtr, txHashPadding) - publicInputsPtr := add(publicInputsPtr, 0x20) - } - } + // check chunk has correct length + require(l2TxPtr - chunkPtr == _chunk.length, "incomplete l2 transaction data"); - // 5. 
compute public input hash - bytes32 publicInputHash; - { - uint256 publicInputsSize = 32 * 3 + batch.blocks.length * 124 + 32 * maxNumTxInBatch; - assembly { - publicInputHash := keccak256(sub(publicInputsPtr, publicInputsSize), publicInputsSize) - } + // compute data hash and store to memory + assembly { + let dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr)) + mstore(memPtr, dataHash) } - - return publicInputHash; } } diff --git a/bridge/mock_bridge/MockBridgeL2.sol b/bridge/mock_bridge/MockBridgeL2.sol index a37e83756..e79e45676 100644 --- a/bridge/mock_bridge/MockBridgeL2.sol +++ b/bridge/mock_bridge/MockBridgeL2.sol @@ -11,24 +11,6 @@ contract MockBridgeL2 { /// @param messageHash The hash of the corresponding message. event AppendMessage(uint256 index, bytes32 messageHash); - /******************************** - * Events from L1BlockContainer * - ********************************/ - - /// @notice Emitted when a block is imported. - /// @param blockHash The hash of the imported block. - /// @param blockHeight The height of the imported block. - /// @param blockTimestamp The timestamp of the imported block. - /// @param baseFee The base fee of the imported block. - /// @param stateRoot The state root of the imported block. 
- event ImportBlock( - bytes32 indexed blockHash, - uint256 blockHeight, - uint256 blockTimestamp, - uint256 baseFee, - bytes32 stateRoot - ); - /********************************* * Events from L2ScrollMessenger * *********************************/ diff --git a/bridge/tests/bridge_test.go b/bridge/tests/bridge_test.go index 7f6665bdd..bf691d95b 100644 --- a/bridge/tests/bridge_test.go +++ b/bridge/tests/bridge_test.go @@ -63,9 +63,9 @@ func setupDB(t *testing.T) *gorm.DB { func TestMain(m *testing.M) { base = docker.NewDockerApp() bridgeApp = bcmd.NewBridgeApp(base, "../conf/config.json") + defer bridgeApp.Free() + defer base.Free() m.Run() - bridgeApp.Free() - base.Free() } func setupEnv(t *testing.T) { @@ -129,6 +129,10 @@ func prepareContracts(t *testing.T) { func TestFunction(t *testing.T) { setupEnv(t) + // process start test + t.Run("TestProcessStart", testProcessStart) + t.Run("TestProcessStartEnableMetrics", testProcessStartEnableMetrics) + // l1 rollup and watch rollup events t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch) @@ -136,7 +140,7 @@ func TestFunction(t *testing.T) { t.Run("TestRelayL1MessageSucceed", testRelayL1MessageSucceed) // l2 message - t.Run("TestRelayL2MessageSucceed", testRelayL2MessageSucceed) + // TODO: add a "user relay l2msg Succeed" test // l1/l2 gas oracle t.Run("TestImportL1GasPrice", testImportL1GasPrice) diff --git a/bridge/tests/gas_oracle_test.go b/bridge/tests/gas_oracle_test.go index 643881576..c00bb435a 100644 --- a/bridge/tests/gas_oracle_test.go +++ b/bridge/tests/gas_oracle_test.go @@ -8,7 +8,6 @@ import ( "github.com/scroll-tech/go-ethereum/common" gethTypes "github.com/scroll-tech/go-ethereum/core/types" "github.com/stretchr/testify/assert" - "gorm.io/gorm" "scroll-tech/common/types" @@ -72,47 +71,37 @@ func testImportL2GasPrice(t *testing.T) { l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig) assert.NoError(t, err) - // add fake blocks - 
traces := []*bridgeTypes.WrappedBlock{ - { - Header: &gethTypes.Header{ - Number: big.NewInt(1), - ParentHash: common.Hash{}, - Difficulty: big.NewInt(0), - BaseFee: big.NewInt(0), + // add fake chunk + chunk := &bridgeTypes.Chunk{ + Blocks: []*bridgeTypes.WrappedBlock{ + { + Header: &gethTypes.Header{ + Number: big.NewInt(1), + ParentHash: common.Hash{}, + Difficulty: big.NewInt(0), + BaseFee: big.NewInt(0), + }, + Transactions: nil, + WithdrawTrieRoot: common.Hash{}, }, - Transactions: nil, - WithdrawTrieRoot: common.Hash{}, }, } + chunkHash, err := chunk.Hash(0) + assert.NoError(t, err) - blockTraceOrm := orm.NewBlockTrace(db) - assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(traces)) - - parentBatch := &bridgeTypes.BatchInfo{ - Index: 0, - Hash: "0x0000000000000000000000000000000000000000", - } - batchData := bridgeTypes.NewBatchData(parentBatch, []*bridgeTypes.WrappedBlock{traces[0]}, l2Cfg.BatchProposerConfig.PublicInputConfig) - blockBatchOrm := orm.NewBlockBatch(db) - err = db.Transaction(func(tx *gorm.DB) error { - _, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData) - if dbTxErr != nil { - return dbTxErr - } - return nil - }) + batchOrm := orm.NewBatch(db) + _, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*bridgeTypes.Chunk{chunk}) assert.NoError(t, err) // check db status - batch, err := blockBatchOrm.GetLatestBatch() + batch, err := batchOrm.GetLatestBatch(context.Background()) assert.NoError(t, err) assert.Empty(t, batch.OracleTxHash) assert.Equal(t, types.GasOracleStatus(batch.OracleStatus), types.GasOraclePending) // relay gas price l2Relayer.ProcessGasPriceOracle() - batch, err = blockBatchOrm.GetLatestBatch() + batch, err = batchOrm.GetLatestBatch(context.Background()) assert.NoError(t, err) assert.NotEmpty(t, batch.OracleTxHash) assert.Equal(t, types.GasOracleStatus(batch.OracleStatus), types.GasOracleImporting) diff --git a/bridge/tests/l2_message_relay_test.go 
b/bridge/tests/l2_message_relay_test.go deleted file mode 100644 index f37e2f219..000000000 --- a/bridge/tests/l2_message_relay_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package tests - -import ( - "context" - "errors" - "math/big" - "testing" - - "github.com/scroll-tech/go-ethereum/accounts/abi/bind" - "github.com/scroll-tech/go-ethereum/common" - gethTypes "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/rpc" - "github.com/stretchr/testify/assert" - "gorm.io/gorm" - - "scroll-tech/common/types" - "scroll-tech/common/types/message" - - "scroll-tech/bridge/internal/controller/relayer" - "scroll-tech/bridge/internal/controller/watcher" - "scroll-tech/bridge/internal/orm" - bridgeTypes "scroll-tech/bridge/internal/types" - "scroll-tech/bridge/internal/utils" -) - -func testRelayL2MessageSucceed(t *testing.T) { - db := setupDB(t) - defer utils.CloseDB(db) - - prepareContracts(t) - - l2Cfg := bridgeApp.Config.L2Config - - // Create L2Watcher - confirmations := rpc.LatestBlockNumber - l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db) - - // Create L2Relayer - l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig) - assert.NoError(t, err) - - // Create L1Watcher - l1Cfg := bridgeApp.Config.L1Config - l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db) - - // send message through l2 messenger contract - nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{}) - assert.NoError(t, err) - sendTx, err := l2MessengerInstance.SendMessage(l2Auth, l1Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0)) - assert.NoError(t, err) - sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx) - assert.NoError(t, 
err) - if sendReceipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil { - t.Fatalf("Call failed") - } - - // l2 watch process events - l2Watcher.FetchContractEvent() - l2MessageOrm := orm.NewL2Message(db) - blockTraceOrm := orm.NewBlockTrace(db) - blockBatchOrm := orm.NewBlockBatch(db) - - // check db status - msg, err := l2MessageOrm.GetL2MessageByNonce(nonce.Uint64()) - assert.NoError(t, err) - assert.Equal(t, types.MsgStatus(msg.Status), types.MsgPending) - assert.Equal(t, msg.Sender, l2Auth.From.String()) - assert.Equal(t, msg.Target, l1Auth.From.String()) - - // add fake blocks - traces := []*bridgeTypes.WrappedBlock{ - { - Header: &gethTypes.Header{ - Number: sendReceipt.BlockNumber, - ParentHash: common.Hash{}, - Difficulty: big.NewInt(0), - BaseFee: big.NewInt(0), - }, - Transactions: nil, - WithdrawTrieRoot: common.Hash{}, - }, - } - assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(traces)) - - parentBatch := &bridgeTypes.BatchInfo{ - Index: 0, - Hash: "0x0000000000000000000000000000000000000000", - } - batchData := bridgeTypes.NewBatchData(parentBatch, []*bridgeTypes.WrappedBlock{traces[0]}, l2Cfg.BatchProposerConfig.PublicInputConfig) - batchHash := batchData.Hash().String() - // add fake batch - err = db.Transaction(func(tx *gorm.DB) error { - rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData) - if dbTxErr != nil { - return dbTxErr - } - if rowsAffected != 1 { - dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1") - return dbTxErr - } - var blockIDs = make([]uint64, len(batchData.Batch.Blocks)) - for i, block := range batchData.Batch.Blocks { - blockIDs[i] = block.BlockNumber - } - dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, blockIDs, batchHash) - if dbTxErr != nil { - return dbTxErr - } - return nil - }) - assert.NoError(t, err) - - // add dummy proof - proof := &message.AggProof{ - Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 
22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - } - err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100) - assert.NoError(t, err) - err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified) - assert.NoError(t, err) - - // process pending batch and check status - assert.NoError(t, l2Relayer.SendCommitTx([]*bridgeTypes.BatchData{batchData})) - - blockBatches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 1) - assert.NoError(t, err) - assert.Equal(t, 1, len(blockBatches)) - assert.NotEmpty(t, blockBatches[0].CommitTxHash) - assert.Equal(t, types.RollupCommitting, types.RollupStatus(blockBatches[0].RollupStatus)) - - commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(blockBatches[0].CommitTxHash)) - assert.NoError(t, err) - commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx) - assert.NoError(t, err) - assert.Equal(t, len(commitTxReceipt.Logs), 1) - - // fetch CommitBatch rollup events - err = l1Watcher.FetchContractEvent() - assert.NoError(t, err) - statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{batchHash}) - assert.NoError(t, err) - assert.Equal(t, 1, len(statuses)) - assert.Equal(t, types.RollupCommitted, statuses[0]) - - // process committed batch and check status - l2Relayer.ProcessCommittedBatches() - - blockBatchWithFinalizeTxHash, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 1) - assert.NoError(t, err) - assert.Equal(t, 1, len(blockBatchWithFinalizeTxHash)) - assert.NotEmpty(t, blockBatchWithFinalizeTxHash[0].FinalizeTxHash) - assert.Equal(t, types.RollupFinalizing, types.RollupStatus(blockBatchWithFinalizeTxHash[0].RollupStatus)) - - finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), 
common.HexToHash(blockBatchWithFinalizeTxHash[0].FinalizeTxHash)) - assert.NoError(t, err) - finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx) - assert.NoError(t, err) - assert.Equal(t, len(finalizeTxReceipt.Logs), 1) - - // fetch FinalizeBatch events - err = l1Watcher.FetchContractEvent() - assert.NoError(t, err) - statuses, err = blockBatchOrm.GetRollupStatusByHashList([]string{batchHash}) - assert.NoError(t, err) - assert.Equal(t, 1, len(statuses)) - assert.Equal(t, types.RollupFinalized, statuses[0]) - - // process l2 messages - l2Relayer.ProcessSavedEvents() - - l2Messages, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"nonce": nonce.Uint64()}, nil, 1) - assert.NoError(t, err) - assert.Equal(t, 1, len(l2Messages)) - assert.NotEmpty(t, l2Messages[0].Layer1Hash) - assert.Equal(t, types.MsgStatus(l2Messages[0].Status), types.MsgSubmitted) - - relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(l2Messages[0].Layer1Hash)) - assert.NoError(t, err) - relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx) - assert.NoError(t, err) - assert.Equal(t, len(relayTxReceipt.Logs), 1) - - // fetch message relayed events - err = l1Watcher.FetchContractEvent() - assert.NoError(t, err) - msg, err = l2MessageOrm.GetL2MessageByNonce(nonce.Uint64()) - assert.NoError(t, err) - assert.Equal(t, types.MsgStatus(msg.Status), types.MsgConfirmed) -} diff --git a/bridge/tests/process_start_test.go b/bridge/tests/process_start_test.go new file mode 100644 index 000000000..5b0275c45 --- /dev/null +++ b/bridge/tests/process_start_test.go @@ -0,0 +1,58 @@ +package tests + +import ( + "crypto/rand" + "math/big" + "strconv" + "testing" + + _ "scroll-tech/bridge/cmd/event_watcher/app" + _ "scroll-tech/bridge/cmd/gas_oracle/app" + _ "scroll-tech/bridge/cmd/msg_relayer/app" + _ "scroll-tech/bridge/cmd/rollup_relayer/app" + + cutils "scroll-tech/common/utils" + + "scroll-tech/bridge/internal/utils" 
+ + "github.com/stretchr/testify/assert" +) + +func testProcessStart(t *testing.T) { + db := setupDB(t) + defer utils.CloseDB(db) + + bridgeApp.RunApp(t, cutils.EventWatcherApp) + bridgeApp.RunApp(t, cutils.GasOracleApp) + bridgeApp.RunApp(t, cutils.MessageRelayerApp) + bridgeApp.RunApp(t, cutils.RollupRelayerApp) + + bridgeApp.WaitExit() +} + +func testProcessStartEnableMetrics(t *testing.T) { + db := setupDB(t) + defer utils.CloseDB(db) + + port, err := rand.Int(rand.Reader, big.NewInt(2000)) + assert.NoError(t, err) + svrPort := strconv.FormatInt(port.Int64()+50000, 10) + bridgeApp.RunApp(t, cutils.EventWatcherApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort) + + port, err = rand.Int(rand.Reader, big.NewInt(2000)) + assert.NoError(t, err) + svrPort = strconv.FormatInt(port.Int64()+50000, 10) + bridgeApp.RunApp(t, cutils.GasOracleApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort) + + port, err = rand.Int(rand.Reader, big.NewInt(2000)) + assert.NoError(t, err) + svrPort = strconv.FormatInt(port.Int64()+50000, 10) + bridgeApp.RunApp(t, cutils.MessageRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort) + + port, err = rand.Int(rand.Reader, big.NewInt(2000)) + assert.NoError(t, err) + svrPort = strconv.FormatInt(port.Int64()+50000, 10) + bridgeApp.RunApp(t, cutils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort) + + bridgeApp.WaitExit() +} diff --git a/bridge/tests/rollup_test.go b/bridge/tests/rollup_test.go index 9eac1f04c..dc29129b2 100644 --- a/bridge/tests/rollup_test.go +++ b/bridge/tests/rollup_test.go @@ -2,7 +2,6 @@ package tests import ( "context" - "errors" "math/big" "testing" @@ -10,11 +9,11 @@ import ( "github.com/scroll-tech/go-ethereum/common" gethTypes "github.com/scroll-tech/go-ethereum/core/types" "github.com/stretchr/testify/assert" - "gorm.io/gorm" "scroll-tech/common/types" "scroll-tech/common/types/message" + 
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/orm" @@ -37,15 +36,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) { l1Cfg := bridgeApp.Config.L1Config l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db) - blockTraceOrm := orm.NewBlockTrace(db) - // add some blocks to db var wrappedBlocks []*bridgeTypes.WrappedBlock - var parentHash common.Hash - for i := 1; i <= 10; i++ { + for i := 0; i < 10; i++ { header := gethTypes.Header{ Number: big.NewInt(int64(i)), - ParentHash: parentHash, + ParentHash: common.Hash{}, Difficulty: big.NewInt(0), BaseFee: big.NewInt(0), } @@ -54,56 +50,47 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) { Transactions: nil, WithdrawTrieRoot: common.Hash{}, }) - parentHash = header.Hash() - } - assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(wrappedBlocks)) - - parentBatch := &bridgeTypes.BatchInfo{ - Index: 0, - Hash: "0x0000000000000000000000000000000000000000", } - tmpWrapBlocks := []*bridgeTypes.WrappedBlock{ - wrappedBlocks[0], - wrappedBlocks[1], - } - batchData := bridgeTypes.NewBatchData(parentBatch, tmpWrapBlocks, l2Cfg.BatchProposerConfig.PublicInputConfig) - - batchHash := batchData.Hash().String() - - blockBatchOrm := orm.NewBlockBatch(db) - err = db.Transaction(func(tx *gorm.DB) error { - rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData) - if dbTxErr != nil { - return dbTxErr - } - if rowsAffected != 1 { - dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1") - return dbTxErr - } - var blockIDs = make([]uint64, len(batchData.Batch.Blocks)) - for i, block := range batchData.Batch.Blocks { - blockIDs[i] = block.BlockNumber - } - dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, blockIDs, batchHash) - if 
dbTxErr != nil { - return dbTxErr - } - return nil - }) + l2BlockOrm := orm.NewL2Block(db) + err = l2BlockOrm.InsertL2Blocks(context.Background(), wrappedBlocks) assert.NoError(t, err) - // process pending batch and check status - assert.NoError(t, l2Relayer.SendCommitTx([]*bridgeTypes.BatchData{batchData})) + cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ + MaxTxGasPerChunk: 1000000000, + MaxL2TxNumPerChunk: 10000, + MaxL1CommitGasPerChunk: 50000000000, + MaxL1CommitCalldataSizePerChunk: 1000000, + MinL1CommitCalldataSizePerChunk: 0, + ChunkTimeoutSec: 300, + }, db) + cp.TryProposeChunk() - blockBatches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 1) + chunkOrm := orm.NewChunk(db) + chunks, err := chunkOrm.GetUnbatchedChunks(context.Background()) assert.NoError(t, err) - assert.Equal(t, 1, len(blockBatches)) - assert.NotEmpty(t, true, blockBatches[0].CommitTxHash) - assert.NotEmpty(t, true, blockBatches[0].RollupStatus) - assert.Equal(t, types.RollupStatus(blockBatches[0].RollupStatus), types.RollupCommitting) + assert.Len(t, chunks, 1) - commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(blockBatches[0].CommitTxHash)) + bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + MaxChunkNumPerBatch: 10, + MaxL1CommitGasPerBatch: 50000000000, + MaxL1CommitCalldataSizePerBatch: 1000000, + MinChunkNumPerBatch: 1, + BatchTimeoutSec: 300, + }, db) + bp.TryProposeBatch() + + l2Relayer.ProcessPendingBatches() + + batchOrm := orm.NewBatch(db) + batch, err := batchOrm.GetLatestBatch(context.Background()) + assert.NoError(t, err) + batchHash := batch.Hash + assert.NotEmpty(t, batch.CommitTxHash) + assert.Equal(t, types.RollupCommitting, types.RollupStatus(batch.RollupStatus)) + + assert.NoError(t, err) + commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.CommitTxHash)) assert.NoError(t, err) 
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx) assert.NoError(t, err) @@ -112,7 +99,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) { // fetch rollup events err = l1Watcher.FetchContractEvent() assert.NoError(t, err) - statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{batchHash}) + statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash}) assert.NoError(t, err) assert.Equal(t, 1, len(statuses)) assert.Equal(t, types.RollupCommitted, statuses[0]) @@ -122,25 +109,24 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) { Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, } - err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100) + err = batchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100) assert.NoError(t, err) - err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified) + err = batchOrm.UpdateProvingStatus(context.Background(), batchHash, types.ProvingTaskVerified) assert.NoError(t, err) // process committed batch and check status l2Relayer.ProcessCommittedBatches() - statuses, err = blockBatchOrm.GetRollupStatusByHashList([]string{batchHash}) + statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash}) assert.NoError(t, err) assert.Equal(t, 1, len(statuses)) assert.Equal(t, types.RollupFinalizing, statuses[0]) - blockBatches, err = blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 1) + batch, err = batchOrm.GetLatestBatch(context.Background()) assert.NoError(t, err) - assert.Equal(t, 1, len(blockBatches)) - assert.NotEmpty(t, blockBatches[0].FinalizeTxHash) + assert.NotEmpty(t, batch.FinalizeTxHash) - finalizeTx, _, err := 
l1Client.TransactionByHash(context.Background(), common.HexToHash(blockBatches[0].FinalizeTxHash)) + finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.FinalizeTxHash)) assert.NoError(t, err) finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx) assert.NoError(t, err) @@ -149,7 +135,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) { // fetch rollup events err = l1Watcher.FetchContractEvent() assert.NoError(t, err) - statuses, err = blockBatchOrm.GetRollupStatusByHashList([]string{batchHash}) + statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash}) assert.NoError(t, err) assert.Equal(t, 1, len(statuses)) assert.Equal(t, types.RollupFinalized, statuses[0]) diff --git a/common/go.mod b/common/go.mod index afe5c5363..4f2812fdb 100644 --- a/common/go.mod +++ b/common/go.mod @@ -10,7 +10,7 @@ require ( github.com/mattn/go-isatty v0.0.18 github.com/modern-go/reflect2 v1.0.2 github.com/orcaman/concurrent-map v1.0.0 - github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433 + github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 github.com/stretchr/testify v1.8.2 github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa ) diff --git a/common/go.sum b/common/go.sum index 468dbe7dd..844a7c590 100644 --- a/common/go.sum +++ b/common/go.sum @@ -361,8 +361,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433 h1:EIR8gXpdNwHnlUlA2giFp+EoRqHGtpINLjJvo31IGM4= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433/go.mod 
h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY= +github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI= +github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY= github.com/scroll-tech/zktrie v0.5.3 h1:jjzQchGU6XPL5s1C5bwwivSadefSRuYASE9OL7UKAdE= github.com/scroll-tech/zktrie v0.5.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= diff --git a/common/version/version.go b/common/version/version.go index 98d0c3297..2eb98bd0c 100644 --- a/common/version/version.go +++ b/common/version/version.go @@ -5,7 +5,7 @@ import ( "runtime/debug" ) -var tag = "v3.4.0" +var tag = "v4.0.0" var commit = func() string { if info, ok := debug.ReadBuildInfo(); ok { diff --git a/contracts/src/L1/gateways/EnforcedTxGateway.sol b/contracts/src/L1/gateways/EnforcedTxGateway.sol index 5bf62b544..79ddc8eae 100644 --- a/contracts/src/L1/gateways/EnforcedTxGateway.sol +++ b/contracts/src/L1/gateways/EnforcedTxGateway.sol @@ -154,7 +154,7 @@ contract EnforcedTxGateway is OwnableUpgradeable, ReentrancyGuardUpgradeable, Pa } // append transaction - IL1MessageQueue(messageQueue).appendEnforcedTransaction(_sender, _target, _value, _gasLimit, _data); + IL1MessageQueue(_messageQueue).appendEnforcedTransaction(_sender, _target, _value, _gasLimit, _data); // refund fee to `_refundAddress` unchecked { diff --git a/coordinator/go.mod b/coordinator/go.mod index fc099ef9d..77dd219ed 100644 --- a/coordinator/go.mod +++ b/coordinator/go.mod @@ -6,7 +6,7 @@ require ( github.com/agiledragon/gomonkey/v2 v2.9.0 github.com/orcaman/concurrent-map v1.0.0 github.com/patrickmn/go-cache v2.1.0+incompatible - github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433 + github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 github.com/stretchr/testify 
v1.8.2 github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa golang.org/x/exp v0.0.0-20230206171751-46f607a40771 diff --git a/coordinator/go.sum b/coordinator/go.sum index b147588d5..10c6d1b11 100644 --- a/coordinator/go.sum +++ b/coordinator/go.sum @@ -99,8 +99,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433 h1:EIR8gXpdNwHnlUlA2giFp+EoRqHGtpINLjJvo31IGM4= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY= +github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI= +github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY= github.com/scroll-tech/zktrie v0.5.3 h1:jjzQchGU6XPL5s1C5bwwivSadefSRuYASE9OL7UKAdE= github.com/scroll-tech/zktrie v0.5.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= diff --git a/coordinator/manager_test.go b/coordinator/manager_test.go index c2786ce53..cb6768414 100644 --- a/coordinator/manager_test.go +++ b/coordinator/manager_test.go @@ -655,6 +655,9 @@ func testListRollers(t *testing.T) { // test ListRollers if one roller closed. 
roller3.close() + // wait coordinator free completely + time.Sleep(time.Second * 5) + rollers, err = rollerManager.ListRollers() assert.NoError(t, err) var newRollersName []string diff --git a/database/go.mod b/database/go.mod index f9db9d722..858d5231d 100644 --- a/database/go.mod +++ b/database/go.mod @@ -7,7 +7,7 @@ require ( github.com/lib/pq v1.10.7 github.com/mattn/go-sqlite3 v1.14.14 github.com/pressly/goose/v3 v3.7.0 - github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433 + github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 github.com/stretchr/testify v1.8.2 github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa ) diff --git a/database/go.sum b/database/go.sum index a66e66c5d..d6c43d82b 100644 --- a/database/go.sum +++ b/database/go.sum @@ -39,8 +39,8 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433 h1:EIR8gXpdNwHnlUlA2giFp+EoRqHGtpINLjJvo31IGM4= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY= +github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI= +github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= diff --git a/go.work.sum b/go.work.sum index 
71c16cfbd..7ad13fdca 100644 --- a/go.work.sum +++ b/go.work.sum @@ -43,10 +43,13 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/ClickHouse/clickhouse-go/v2 v2.2.0 h1:dj00TDKY+xwuTJdbpspCSmTLFyWzRJerTHwaBxut1C0= +github.com/ClickHouse/clickhouse-go/v2 v2.2.0/go.mod h1:8f2XZUi7XoeU+uPIytSi1cvx8fmJxi7vIgqpvYTF1+o= github.com/CloudyKit/jet/v3 v3.0.0 h1:1PwO5w5VCtlUUl+KTOBsTGZlhjWkcybsGaAau52tOy8= github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= @@ -54,12 +57,15 @@ github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af h1:wVe6/Ea46ZMeNkQjjBW6xcqyQA/j5e0D6GytH95g0gQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db h1:nxAtV4VajJDhKysp2kdcJZsq8Ss1xSA0vZTkVHHJd0E= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA= github.com/avast/retry-go/v4 v4.1.0 h1:CwudD9anYv6JMVnDuTRlK6kLo4dBamiL+F3U8YDiyfg= +github.com/avast/retry-go/v4 v4.1.0/go.mod h1:HqmLvS2VLdStPCGDFjSuZ9pzlTqVRldCI4w2dO4m1Ms= github.com/aws/aws-sdk-go-v2 v1.2.0 h1:BS+UYpbsElC82gB+2E2jiCBg36i8HlubTB/dO/moQ9c= github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -100,6 +106,7 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3 github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -112,6 +119,7 @@ github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu 
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM= +github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572 h1:+R8G1+Ftumd0DaveLgMIjrFPcAS4G8MsVXWXiyZL5BY= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= @@ -120,6 +128,7 @@ github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1 github.com/consensys/gnark-crypto v0.9.1-0.20230105202408-1a7a29904a7c h1:llSLg4o9EgH3SrXky+Q5BqEYqV76NGKo07K5Ps2pIKo= github.com/consensys/gnark-crypto v0.9.1-0.20230105202408-1a7a29904a7c/go.mod h1:CkbdF9hbRidRJYMRzmfX8TMOr95I2pYXRHF18MzRrvA= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= github.com/coreos/go-etcd v2.0.0+incompatible h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= @@ -138,13 +147,17 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPc github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/denisenkom/go-mssqldb v0.12.2 h1:1OcPn5GBIobjWNd+8yjfHNIaFX14B1pWI3F9HZy5KXw= +github.com/denisenkom/go-mssqldb v0.12.2/go.mod 
h1:lnIw1mZukFRZDJYQ0Pb833QS2IaC3l5HkEfra2LJ+sk= github.com/dgraph-io/badger v1.6.0 h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Evo= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 h1:akOQj8IVgoeFfBTzGOEQakCYshWD6RNo1M5pivFXt70= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4= github.com/djherbis/atime v1.1.0 h1:rgwVbP/5by8BvvjBNrbh64Qz33idKT3pSnMSJsxhi0g= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= @@ -155,6 +168,7 @@ github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/ github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= +github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= @@ -191,17 +205,22 @@ github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732 h1:AB7YjNrzlVHsY github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732/go.mod h1:o/XfIXWi4/GqbQirfRm5uTbXMG5NpqxkxblnbZ+QM9I= github.com/getkin/kin-openapi v0.61.0 h1:6awGqF5nG5zkVpMsAih1QH4VgzS8phTxECUWIFo7zko= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd h1:r04MMPyLHj/QwZuMJ5+7tJcBr1AQjpiAK/rZWRrQT7o= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= github.com/go-chi/chi/v5 v5.0.0 h1:DBPx88FjZJH3FsICfDAfIfnb7XxKIYVGG6lOPlhENAg= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72 h1:b+9H1GAsx5RsjvDFLoS5zkNBzIQMuVKUYQDmxU3N5XE= github.com/go-kit/kit v0.12.0/go.mod 
h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= @@ -209,20 +228,27 @@ github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dT github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= +github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= +github.com/goccy/go-json v0.9.11/go.mod 
h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= @@ -235,6 +261,7 @@ github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= @@ -242,14 
+269,15 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc h1:DLpL8pWq0v4JYoRpEh github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gotestyourself/gotestyourself v1.4.0 h1:CDSlSIuRL/Fsc72Ln5lMybtrCvSRDddsHsDRG/nP7Rg= github.com/gotestyourself/gotestyourself v1.4.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= @@ -258,14 +286,11 @@ github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gt github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU= github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/holiman/uint256 
v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= -github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150 h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8= -github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91 h1:KyZDvZ/GGn+r+Y3DKZ7UOQ/TP4xV6HNkrwiVMB1GnNY= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= github.com/iden3/go-iden3-crypto v0.0.12/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo= github.com/iden3/go-iden3-crypto v0.0.14/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/influxdata/flux v0.65.1 h1:77BcVUCzvN5HMm8+j9PRBQ4iZcu98Dl4Y9rf+J5vhnc= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385 h1:ED4e5Cc3z5vSN2Tz2GkOHN7vs4Sxe2yds6CXvDnvZFE= @@ -274,37 +299,43 @@ github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6 h1:UzJnB7VRL github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9 h1:MHTrDWmQpHq/hkq+7cw9oYAt2PqUw52TZazRA0N7PGE= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 h1:+TUUmaFa4YD1Q+7bH9o5NCHQGPMqZCYJiNW6lIIS9z4= github.com/iris-contrib/blackfriday v2.0.0+incompatible h1:o5sHQHHm0ToHUlAJSTjW9UWicjJSDDauOOQ2AHuIVp4= +github.com/iris-contrib/httpexpect/v2 v2.12.1/go.mod h1:7+RB6W5oNClX7PTwJgJnsQP3ZuUUYB3u61KCqeSgZ88= github.com/iris-contrib/jade v1.1.4 h1:WoYdfyJFfZIUgqNAeOyRfTNQZOksSlZ6+FnXR3AEpX0= +github.com/iris-contrib/jade v1.1.4/go.mod h1:EDqR+ur9piDl6DUgs6qRrlfzmlx/D5UybogqrXvJTBE= 
github.com/iris-contrib/pongo2 v0.0.1 h1:zGP7pW51oi5eQZMIlGA3I+FHY9/HOQWDB+572yin0to= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= +github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= +github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w= +github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.17.0 h1:Hsx+baY8/zU2WtPLQyZi8WbecgcsWEeyoK1jvg/WgIo= +github.com/jackc/pgx/v4 v4.17.0/go.mod h1:Gd6RmOhtFLTu8cp/Fhq4kP195KrshxYJH3oW8AWJ1pw= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U= github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= -github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89 h1:12K8AlpT0/6QUXSfV0yi4Q0jkbq8NDtIKFtF61AoqV0= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 
-github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jsternberg/zap-logfmt v1.0.0 h1:0Dz2s/eturmdUS34GM82JwNEdQ9hPoJgqptcEKcbpzY= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef h1:2jNeR4YUziVtswNP9sEFAI913cVrzH85T+8Q6LpYbT0= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM= github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4= github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kataras/jwt v0.1.8 h1:u71baOsYD22HWeSOg32tCHbczPjdCk7V4MMeJqTtmGk= +github.com/kataras/jwt v0.1.8/go.mod h1:Q5j2IkcIHnfwy+oNY3TVWuEBJNw0ADgCcXK9CaZwV4o= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= 
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= @@ -312,70 +343,80 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfM github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8= github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= -github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= -github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= -github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg= -github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTjegPdCiOxVOJtRIy7/8RldvMTsyPYH10= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= -github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/labstack/echo/v4 v4.2.1 h1:LF5Iq7t/jrtUuSutNuiEWtB5eiHfZ5gSe2pcu5exjQw= github.com/labstack/echo/v4 v4.9.0 h1:wPOF1CE6gvt/kmbMR4dGzWvHMPT+sAEUJOwOTtvITVY= +github.com/labstack/echo/v4 v4.9.0/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks= github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= github.com/labstack/gommon v0.3.1 h1:OomWaJXm7xR6L1HmEtGyQf26TEn7V6X88mktX9kee9o= +github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod 
h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd h1:HvFwW+cm9bCbZ/+vuGNq7CRWXql8c0y8nGeYpqmpvmk= github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2 h1:JAEbJn3j/FrhdWA9jW8B5ajsLIjeuEHLi8xE4fk997o= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEHAFYJcPTf0wY5q0exFNJZVWa1U= github.com/mattn/goveralls v0.0.2 h1:7eJB6EqsPhRVxvwEXGnqdO2sJI0PTsrWoTMXEk9/OQc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae 
h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/nats-server/v2 v2.9.11/go.mod h1:b0oVuxSlkvS3ZjMkncFeACGyZohbO4XhSqW1Lt7iRRY= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86 h1:D6paGObi5Wud7xg83MaEFyjxQB1W5bz5d0IFppr+ymk= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c h1:bY6ktFuJkt+ZXkX0RChQch2FtHpWQLVS8Qo1YasiIVk= +github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= github.com/onsi/ginkgo/v2 v2.8.1 h1:xFTEVwOFa1D/Ty24Ws1npBWkDYEV9BqZrsDxVrVkrrU= github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc= github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= 
+github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= +github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM= github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs= github.com/paulmach/orb v0.7.1 h1:Zha++Z5OX/l168sqHK3k4z18LDvr+YAO/VjK0ReQ9rU= +github.com/paulmach/orb v0.7.1/go.mod h1:FWRlTgl88VI1RBx/MkrwWDRhQ96ctqMCh8boXhmqB/A= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5 h1:tFwafIEMf0B7NlcxV/zJ6leBIa81D3hgGSgsE5hCkOQ= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/protolambda/bls12-381-util 
v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= +github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= github.com/scroll-tech/go-ethereum v1.10.14-0.20221202061207-804e7edc23ba/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0= github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0= github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0= @@ -383,7 +424,7 @@ github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6/go.mod h1:eW+eyNdMoO0MyuczCc9xWSnW8dPJ0kOy5xsxgOKYEaA= github.com/scroll-tech/go-ethereum v1.10.14-0.20230306131930-03b4de32b78b/go.mod h1:f9ygxrxL7WRCTzuloV+t/UlcxMq3AL+gcNU60liiNNU= github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04/go.mod h1:jH8c08L9K8Hieaf0r/ur2P/cpesn4dFhmLm2Mmoi8kI= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230508165858-27a3830afa61/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY= +github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI= github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY= github.com/scroll-tech/zktrie 
v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/scroll-tech/zktrie v0.5.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= @@ -391,11 +432,16 @@ github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPO github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.23.2 h1:PAWSuiAszn7IhPMBtXsbSCafej7PqUOvY6YywlQUExU= +github.com/shirou/gopsutil/v3 v3.23.2/go.mod h1:gv0aQw33GLo3pG8SiWKiQrbDzbRY1K80RyZJ7V4Th1M= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636 h1:aSISeOcal5irEhJd1M+IrApc0PdcN7e7Aj4yuEnOrfQ= +github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU= +github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= @@ -404,17 +450,8 @@ github.com/spf13/cast v1.3.0 
h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344 h1:m+8fKfQwCAy1QjzINvKe/pYtLjo2dl59x2w9YSEJxuY= -github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= -github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= @@ -422,22 +459,28 @@ github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hM github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= +github.com/ugorji/go/codec 
v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= github.com/valyala/fasthttp v1.40.0 h1:CRq/00MfruPGFLTQKY8b+8SfdK60TxNztjRMnH0t1Yc= +github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I= github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc= github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6 h1:YdYsPAZ2pC6Tow/nPZOPQ96O3hm/ToAkGsPLzedXERk= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow= github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw= +go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo= go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc= +go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo= go.uber.org/atomic v1.3.2 
h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= @@ -463,18 +506,15 @@ golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= +golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -491,8 +531,6 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= gonum.org/v1/gonum v0.6.0 h1:DJy6UzXbahnGUf1ujUNkh/NEtK14qMo2nvlBPs4U5yw= @@ -501,6 +539,7 @@ gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b h1:Qh4dB5D/WpoUUp3lSod7qgoy google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f h1:2wh8dWY8959cBGQvk1RD+/eQBgRYYDaZ+hT0/zsARoA= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 h1:R1r5J0u6Cx+RNl/6mezTw6oA14cmKC96FeUwL6A9bd4= google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= @@ -509,20 +548,21 @@ google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/l google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 
h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= -gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= -gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= -gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gotest.tools v1.4.0 h1:BjtEgfuw8Qyd+jPvQz8CfoxiO/UjFEidWinwEXZiWv0= gotest.tools v1.4.0/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= -honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.1/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/opt v0.1.3/go.mod 
h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.2/go.mod h1:OYajnUAcI/MX+XD/Wx7v1bbdvcQSvxgtb0gC+u3d3eg= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= diff --git a/roller/go.mod b/roller/go.mod index e6f8f0a4c..33b7d3bcf 100644 --- a/roller/go.mod +++ b/roller/go.mod @@ -3,7 +3,7 @@ module scroll-tech/roller go 1.19 require ( - github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433 + github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 github.com/stretchr/testify v1.8.2 github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa go.etcd.io/bbolt v1.3.7 diff --git a/roller/go.sum b/roller/go.sum index 9b23cd234..891e0069d 100644 --- a/roller/go.sum +++ b/roller/go.sum @@ -88,8 +88,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433 h1:EIR8gXpdNwHnlUlA2giFp+EoRqHGtpINLjJvo31IGM4= -github.com/scroll-tech/go-ethereum v1.10.14-0.20230607142419-983d63024433/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY= +github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI= +github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY= github.com/scroll-tech/zktrie v0.5.3 
h1:jjzQchGU6XPL5s1C5bwwivSadefSRuYASE9OL7UKAdE= github.com/scroll-tech/zktrie v0.5.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= diff --git a/tests/integration-test/go.mod b/tests/integration-test/go.mod index 2a2baa2f9..edbd75bf6 100644 --- a/tests/integration-test/go.mod +++ b/tests/integration-test/go.mod @@ -30,7 +30,7 @@ require ( github.com/tklauser/go-sysconf v0.3.11 // indirect github.com/tklauser/numcpus v0.6.0 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - golang.org/x/crypto v0.9.0 // indirect + golang.org/x/crypto v0.10.0 // indirect golang.org/x/sys v0.9.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect diff --git a/tests/integration-test/go.sum b/tests/integration-test/go.sum index da14c2a03..8d3fa9198 100644 --- a/tests/integration-test/go.sum +++ b/tests/integration-test/go.sum @@ -1,54 +1,110 @@ github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod 
h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= +github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk= +github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= +github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/mattn/go-runewidth v0.0.14 
h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= +github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= +github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI= github.com/scroll-tech/zktrie v0.5.3 h1:jjzQchGU6XPL5s1C5bwwivSadefSRuYASE9OL7UKAdE= +github.com/scroll-tech/zktrie v0.5.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/shirou/gopsutil v3.21.11+incompatible 
h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= +gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tests/integration-test/integration_test.go b/tests/integration-test/integration_test.go index 1782f984e..f97950c13 100644 --- a/tests/integration-test/integration_test.go +++ b/tests/integration-test/integration_test.go @@ -12,13 +12,8 @@ import ( "github.com/stretchr/testify/assert" bcmd "scroll-tech/bridge/cmd" - _ "scroll-tech/bridge/cmd/event_watcher/app" - _ "scroll-tech/bridge/cmd/gas_oracle/app" - _ "scroll-tech/bridge/cmd/msg_relayer/app" - _ "scroll-tech/bridge/cmd/rollup_relayer/app" 
"scroll-tech/common/docker" - "scroll-tech/common/utils" rapp "scroll-tech/roller/cmd/app" @@ -52,19 +47,12 @@ func TestStartProcess(t *testing.T) { // Reset db. assert.NoError(t, migrate.ResetDB(base.DBClient(t))) - // Run bridge apps. - bridgeApp.RunApp(t, utils.EventWatcherApp) - bridgeApp.RunApp(t, utils.GasOracleApp) - bridgeApp.RunApp(t, utils.MessageRelayerApp) - bridgeApp.RunApp(t, utils.RollupRelayerApp) - // Run coordinator app. coordinatorApp.RunApp(t) // Run roller app. rollerApp.RunApp(t) // Free apps. - bridgeApp.WaitExit() rollerApp.WaitExit() coordinatorApp.WaitExit() } @@ -75,50 +63,22 @@ func TestMonitorMetrics(t *testing.T) { // Reset db. assert.NoError(t, migrate.ResetDB(base.DBClient(t))) - port1, _ := rand.Int(rand.Reader, big.NewInt(2000)) - svrPort1 := strconv.FormatInt(port1.Int64()+50000, 10) - bridgeApp.RunApp(t, utils.EventWatcherApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort1) - - port2, _ := rand.Int(rand.Reader, big.NewInt(2000)) - svrPort2 := strconv.FormatInt(port2.Int64()+50000, 10) - bridgeApp.RunApp(t, utils.GasOracleApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort2) - - port3, _ := rand.Int(rand.Reader, big.NewInt(2000)) - svrPort3 := strconv.FormatInt(port3.Int64()+50000, 10) - bridgeApp.RunApp(t, utils.MessageRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort3) - - port4, _ := rand.Int(rand.Reader, big.NewInt(2000)) - svrPort4 := strconv.FormatInt(port4.Int64()+50000, 10) - bridgeApp.RunApp(t, utils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort4) - // Start coordinator process with metrics server. 
- port5, _ := rand.Int(rand.Reader, big.NewInt(2000)) - svrPort5 := strconv.FormatInt(port5.Int64()+52000, 10) - coordinatorApp.RunApp(t, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort5) + port, _ := rand.Int(rand.Reader, big.NewInt(2000)) + svrPort := strconv.FormatInt(port.Int64()+52000, 10) + coordinatorApp.RunApp(t, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort) - // Get bridge monitor metrics. - resp, err := http.Get("http://localhost:" + svrPort1) + // Get coordinator monitor metrics. + resp, err := http.Get("http://localhost:" + svrPort) assert.NoError(t, err) defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) assert.NoError(t, err) bodyStr := string(body) assert.Equal(t, 200, resp.StatusCode) - assert.Equal(t, true, strings.Contains(bodyStr, "bridge_l1_msgs_sync_height")) - assert.Equal(t, true, strings.Contains(bodyStr, "bridge_l2_msgs_sync_height")) - - // Get coordinator monitor metrics. - resp, err = http.Get("http://localhost:" + svrPort5) - assert.NoError(t, err) - defer resp.Body.Close() - body, err = ioutil.ReadAll(resp.Body) - assert.NoError(t, err) - bodyStr = string(body) - assert.Equal(t, 200, resp.StatusCode) assert.Equal(t, true, strings.Contains(bodyStr, "coordinator_sessions_timeout_total")) assert.Equal(t, true, strings.Contains(bodyStr, "coordinator_rollers_disconnects_total")) // Exit. - bridgeApp.WaitExit() coordinatorApp.WaitExit() }