mirror of
https://github.com/scroll-tech/scroll.git
synced 2026-01-12 07:28:08 -05:00
Compare commits: v4.0.1 ... fix_insert (17 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 2b7b7dab86 |  |
|  | 075d25ac87 |  |
|  | f87dca41e6 |  |
|  | 507ee571f6 |  |
|  | 9651e1ca6e |  |
|  | 672c2dd49c |  |
|  | 3d9fce26b6 |  |
|  | 95124ce70e |  |
|  | f8d4855f26 |  |
|  | e303fafefc |  |
|  | f00c400993 |  |
|  | bb6428848f |  |
|  | df97200a41 |  |
|  | c8146ebb1a |  |
|  | 5390ec93b4 |  |
|  | 3ce671098a |  |
|  | 7e9fb0c667 |  |
Makefile (13 changed lines)
@@ -1,7 +1,5 @@
 .PHONY: check update dev_docker build_test_docker run_test_docker clean

-ZKP_VERSION=release-1220
-
 help: ## Display this help message
 	@grep -h \
 		-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -40,16 +38,5 @@ build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac

 run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
 	docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app scroll_test_image
-
-test_zkp: ## Test zkp prove and verify, roller/prover generates the proof and coordinator/verifier verifies it
-	mkdir -p test_params
-	wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params19 -O ./test_params/params19
-	wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params26 -O ./test_params/params26
-	wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_seed -O test_seed
-	rm -rf ./roller/assets/test_params && mv test_params ./roller/assets/ && mv test_seed ./roller/assets/
-	cd ./roller && make test-gpu-prover
-	rm -rf ./coordinator/assets/test_params && mv ./roller/assets/test_params ./coordinator/assets/ && mv ./roller/assets/agg_proof ./coordinator/assets/
-	cd ./coordinator && make test-gpu-verifier
-
 clean: ## Empty out the bin folder
 	@rm -rf build/bin
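The removed test_zkp target simply staged proving params and a test seed next to the roller and coordinator assets before running the GPU prover/verifier tests. For reference, a minimal Go sketch of the same download step — URL and file names are taken from the Makefile above; error handling is simplified and not part of this repository:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
)

// download fetches url into dst, mirroring `wget <url> -O <dst>`.
func download(url, dst string) error {
	if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil {
		return err
	}
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("GET %s: %s", url, resp.Status)
	}
	f, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, resp.Body)
	return err
}

func main() {
	base := "https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-1220"
	for _, name := range []string{"test_params/params19", "test_params/params26", "test_seed"} {
		if err := download(base+"/"+name, "./"+name); err != nil {
			panic(err)
		}
	}
}
```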
@@ -43,6 +43,8 @@ var (

 	// ScrollChainABI holds information about ScrollChain's context and available invokable methods.
 	ScrollChainABI *abi.ABI
+	// ScrollChainV2ABI holds information about ScrollChainV2's context and available invokable methods.
+	ScrollChainV2ABI *abi.ABI
 	// L1ScrollMessengerABI holds information about L1ScrollMessenger's context and available invokable methods.
 	L1ScrollMessengerABI *abi.ABI
 	// L1MessageQueueABI holds information about L1MessageQueue contract's context and available invokable methods.
@@ -116,6 +118,7 @@ func init() {

 	// scroll monorepo
 	ScrollChainABI, _ = ScrollChainMetaData.GetAbi()
+	ScrollChainV2ABI, _ = ScrollChainV2MetaData.GetAbi()
 	L1ScrollMessengerABI, _ = L1ScrollMessengerMetaData.GetAbi()
 	L1MessageQueueABI, _ = L1MessageQueueMetaData.GetAbi()
 	L2GasPriceOracleABI, _ = L2GasPriceOracleMetaData.GetAbi()
@@ -198,6 +201,11 @@ var ScrollChainMetaData = &bind.MetaData{
 	ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"CommitBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"FinalizeBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"RevertBatch\",\"type\":\"event\"},{\"inputs\":[{\"components\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"parentHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"blockNumber\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"timestamp\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint64\",\"name\":\"gasLimit\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"numTransactions\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"numL1Messages\",\"type\":\"uint16\"}],\"internalType\":\"structIScrollChain.BlockContext[]\",\"name\":\"blocks\",\"type\":\"tuple[]\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawTrieRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"batchIndex\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"parentBatchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"l2Transactions\",\"type\":\"bytes\"}],\"internalType\":\"structIScrollChain.Batch\",\"name\":\"batch\",\"type\":\"tuple\"}],\"name\":\"commitBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"parentHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"blockNumber\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"timestamp\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint64\",\"name\":\"gasLimit\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"numTransactions\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"numL1Messages\",\"type\":\"uint16\"}],\"internalType\":\"structIScrollChain.BlockContext[]\",\"name\":\"blocks\",\"type\":\"tuple[]\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawTrieRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"batchIndex\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"parentBatchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"l2Transactions\",\"type\":\"bytes\"}],\"internalType\":\"structIScrollChain.Batch[]\",\"name\":\"batches\",\"type\":\"tuple[]\"}],\"name\":\"commitBatches\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256[]\",\"name\":\"proof\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256[]\",\"name\":\"instances\",\"type\":\"uint256[]\"}],\"name\":\"finalizeBatchWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"getL2MessageRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"isBatchFinalized\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchId\",\"type\":\"bytes32\"}],\"name\":\"revertBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
 }

+// ScrollChainV2MetaData contains all meta data concerning the ScrollChainV2 contract.
+var ScrollChainV2MetaData = &bind.MetaData{
+	ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"CommitBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"}],\"name\":\"FinalizeBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"RevertBatch\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"parentBatchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes[]\",\"name\":\"chunks\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"skippedL1MessageBitmap\",\"type\":\"bytes\"}],\"name\":\"commitBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"committedBatches\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"aggrProof\",\"type\":\"bytes\"}],\"name\":\"finalizeBatchWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"finalizedStateRoots\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"isBatchFinalized\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"count\",\"type\":\"uint256\"}],\"name\":\"revertBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"withdrawRoots\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
+}
+
 // L1ScrollMessengerMetaData contains all meta data concerning the L1ScrollMessenger contract.
 var L1ScrollMessengerMetaData = &bind.MetaData{
 	ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"FailedRelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"RelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"SentMessage\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"merkleProof\",\"type\":\"bytes\"}],\"internalType\":\"structIL1ScrollMessenger.L2MessageProof\",\"name\":\"proof\",\"type\":\"tuple\"}],\"name\":\"relayMessageWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"queueIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"oldGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"newGasLimit\",\"type\":\"uint32\"}],\"name\":\"replayMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"sendMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"xDomainMessageSender\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
@@ -75,7 +75,7 @@ func (c *CrossMsgFetcher) Start() {
 			return
 		case <-tick.C:
 			c.mu.Lock()
-			c.forwardFetchAndSaveMissingEvents(0)
+			c.forwardFetchAndSaveMissingEvents(1)
 			c.mu.Unlock()
 		}
 	}
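The fetcher serializes its periodic work behind a mutex so ticks cannot overlap. A self-contained sketch of the same ticker-plus-mutex pattern — the names and the 2-second interval here are illustrative, not the repository's actual values:

```go
package main

import (
	"context"
	"sync"
	"time"
)

type fetcher struct {
	mu sync.Mutex
}

func (f *fetcher) forwardFetchAndSaveMissingEvents(confirmations uint64) {
	// placeholder: fetch logs above the confirmed height and persist them
}

func (f *fetcher) start(ctx context.Context) {
	tick := time.NewTicker(2 * time.Second)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-tick.C:
			f.mu.Lock()
			f.forwardFetchAndSaveMissingEvents(1) // the diff bumps this argument from 0 to 1
			f.mu.Unlock()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	(&fetcher{}).start(ctx)
}
```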
@@ -12,6 +12,7 @@ import (

 	backendabi "bridge-history-api/abi"
 	"bridge-history-api/db"
+	"bridge-history-api/db/orm"
 	"bridge-history-api/utils"
 )
@@ -104,6 +105,14 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
 		log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
 		return err
 	}
+	for i := range depositL1CrossMsgs {
+		for _, msgHash := range msgHashes {
+			if depositL1CrossMsgs[i].Layer1Hash == msgHash.TxHash.Hex() {
+				depositL1CrossMsgs[i].MsgHash = msgHash.MsgHash.Hex()
+				break
+			}
+		}
+	}
 	dbTx, err := database.Beginx()
 	if err != nil {
 		log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
@@ -120,11 +129,6 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
 		dbTx.Rollback()
 		log.Crit("l1FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
 	}
-	err = updateL1CrossMsgMsgHash(ctx, dbTx, database, msgHashes)
-	if err != nil {
-		dbTx.Rollback()
-		log.Crit("l1FetchAndSaveEvents: Failed to update msgHash in L1 cross msg", "err", err)
-	}
 	err = dbTx.Commit()
 	if err != nil {
 		// if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
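The net effect of this pair of hunks is that msg_hash is attached to each cross message before insertion, replacing the old post-insert UPDATE. The added matching loop is O(n·m); a map keyed by transaction hash does the same join in one pass. A sketch under the same assumption the original `break` makes (at most one SentMessage per transaction); the types mirror those in the diff but this is not the repository's code:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

type MsgHashWrapper struct {
	MsgHash common.Hash
	TxHash  common.Hash
}

type CrossMsg struct {
	Layer1Hash string
	MsgHash    string
}

// attachMsgHashes indexes the parsed message hashes by tx hash, then does a
// single pass over the cross messages instead of a nested loop.
func attachMsgHashes(msgs []CrossMsg, hashes []MsgHashWrapper) {
	byTx := make(map[string]common.Hash, len(hashes))
	for _, h := range hashes {
		byTx[h.TxHash.Hex()] = h.MsgHash // assumes one SentMessage per tx, like the break above
	}
	for i := range msgs {
		if h, ok := byTx[msgs[i].Layer1Hash]; ok {
			msgs[i].MsgHash = h.Hex()
		}
	}
}

func main() {
	tx := common.HexToHash("0x01")
	msgs := []CrossMsg{{Layer1Hash: tx.Hex()}}
	attachMsgHashes(msgs, []MsgHashWrapper{{MsgHash: common.HexToHash("0x02"), TxHash: tx}})
	fmt.Println(msgs[0].MsgHash)
}
```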
@@ -157,11 +161,22 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
 		log.Warn("Failed to get l2 event logs", "err", err)
 		return err
 	}
-	depositL2CrossMsgs, msgHashes, relayedMsg, l2sentMsgs, err := utils.ParseBackendL2EventLogs(logs)
+	depositL2CrossMsgs, relayedMsg, L2SentMsgWrappers, err := utils.ParseBackendL2EventLogs(logs)
 	if err != nil {
 		log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
 		return err
 	}
+	var l2SentMsgs []*orm.L2SentMsg
+	for i := range depositL2CrossMsgs {
+		for _, l2SentMsgWrapper := range L2SentMsgWrappers {
+			if depositL2CrossMsgs[i].Layer2Hash == l2SentMsgWrapper.TxHash.Hex() {
+				depositL2CrossMsgs[i].MsgHash = l2SentMsgWrapper.L2SentMsg.MsgHash
+				l2SentMsgWrapper.L2SentMsg.TxSender = depositL2CrossMsgs[i].Sender
+				l2SentMsgs = append(l2SentMsgs, l2SentMsgWrapper.L2SentMsg)
+				break
+			}
+		}
+	}
 	dbTx, err := database.Beginx()
 	if err != nil {
 		log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
@@ -179,16 +194,12 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
 		log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
 	}

-	err = updateL2CrossMsgMsgHash(ctx, dbTx, database, msgHashes)
-	if err != nil {
-		dbTx.Rollback()
-		log.Crit("l2FetchAndSaveEvents: Failed to update msgHash in L2 cross msg", "err", err)
-	}
-
-	err = database.BatchInsertL2SentMsgDBTx(dbTx, l2sentMsgs)
-	if err != nil {
-		dbTx.Rollback()
-		log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
+	if len(l2SentMsgs) > 0 {
+		err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs)
+		if err != nil {
+			dbTx.Rollback()
+			log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
+		}
 	}

 	err = dbTx.Commit()
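Both fetchers wrap one fetch window's inserts in a single sqlx transaction, so a failure rolls back the whole window rather than leaving partial rows. A minimal, self-contained sketch of that Beginx/Rollback/Commit flow — DSN, table columns, and values are placeholders, not from this repository:

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func main() {
	db, err := sqlx.Open("postgres", "postgres://user:pass@localhost/bridge?sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	tx, err := db.Beginx()
	if err != nil {
		log.Fatal(err)
	}
	// All inserts for one window share the transaction.
	if _, err = tx.Exec(`INSERT INTO relayed_msg(msg_hash, height) VALUES ($1, $2)`, "0xabc", 1); err != nil {
		tx.Rollback()
		log.Fatal(err)
	}
	if err = tx.Commit(); err != nil {
		log.Fatal(err)
	}
}
```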
@@ -3,7 +3,7 @@
 create table cross_message
 (
     id              BIGSERIAL PRIMARY KEY,
-    msg_hash        VARCHAR NOT NULL DEFAULT '',
+    msg_hash        VARCHAR NOT NULL,
     height          BIGINT  NOT NULL,
     sender          VARCHAR NOT NULL,
     target          VARCHAR NOT NULL,
@@ -12,30 +12,30 @@ create table cross_message
     layer2_hash     VARCHAR NOT NULL DEFAULT '',
     layer1_token    VARCHAR NOT NULL DEFAULT '',
     layer2_token    VARCHAR NOT NULL DEFAULT '',
-    token_id        BIGINT NOT NULL DEFAULT 0,
     asset           SMALLINT NOT NULL,
     msg_type        SMALLINT NOT NULL,
-    is_deleted      BOOLEAN NOT NULL DEFAULT FALSE,
+    token_ids       TEXT NOT NULL DEFAULT '',
+    token_amounts   TEXT NOT NULL DEFAULT '',
     block_timestamp TIMESTAMP(0) DEFAULT NULL,
     created_at      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
     updated_at      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
     deleted_at      TIMESTAMP(0) DEFAULT NULL
 );

+create unique index uk_msg_hash_msg_type
+    on cross_message (msg_hash, msg_type) where deleted_at IS NULL;
+
 comment
     on column cross_message.asset is 'ETH, ERC20, ERC721, ERC1155';

 comment
     on column cross_message.msg_type is 'unknown, l1msg, l2msg';

-comment
-    on column cross_message.is_deleted is 'NotDeleted false, Deleted true';
-CREATE INDEX valid_l1_msg_index ON cross_message (layer1_hash, is_deleted);
+CREATE INDEX idx_l1_msg_index ON cross_message (layer1_hash, deleted_at);

-CREATE INDEX valid_l2_msg_index ON cross_message (layer2_hash, is_deleted);
+CREATE INDEX idx_l2_msg_index ON cross_message (layer2_hash, deleted_at);

-CREATE INDEX valid_height_index ON cross_message (height, msg_type, is_deleted);
+CREATE INDEX idx_height_msg_type_index ON cross_message (height, msg_type, deleted_at);

 CREATE OR REPLACE FUNCTION update_timestamp()
     RETURNS TRIGGER AS $$
@@ -49,22 +49,6 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
     ON cross_message FOR EACH ROW EXECUTE PROCEDURE
     update_timestamp();

-CREATE OR REPLACE FUNCTION deleted_at_trigger()
-    RETURNS TRIGGER AS $$
-BEGIN
-    IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
-        UPDATE cross_message SET deleted_at = NOW() WHERE id = NEW.id;
-    END IF;
-    RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE TRIGGER deleted_at_trigger
-    AFTER UPDATE ON cross_message
-    FOR EACH ROW
-    EXECUTE FUNCTION deleted_at_trigger();
-
 -- +goose StatementEnd

 -- +goose Down
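These migrations replace the is_deleted flag plus its deleted_at trigger with a bare deleted_at column: callers soft-delete by setting deleted_at directly, and the partial unique indexes (`where deleted_at IS NULL`) only constrain live rows, so the same key can be re-inserted after a soft delete. A hedged Go sketch of the resulting behavior — the DSN and literal values are placeholders:

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func main() {
	db := sqlx.MustOpen("postgres", "postgres://user:pass@localhost/bridge?sslmode=disable") // placeholder DSN

	// Soft delete: no trigger involved anymore, deleted_at is set directly.
	db.MustExec(`UPDATE cross_message SET deleted_at = current_timestamp WHERE height > $1 AND msg_type = $2`, 100, 1)

	// uk_msg_hash_msg_type only covers rows with deleted_at IS NULL, so
	// re-inserting the same (msg_hash, msg_type) after the soft delete succeeds.
	_, err := db.Exec(`INSERT INTO cross_message(msg_hash, height, sender, target, amount, asset, msg_type)
	                   VALUES ($1, $2, $3, $4, $5, $6, $7)`,
		"0xdeadbeef", 101, "0xsender", "0xtarget", "0", 0, 1)
	if err != nil {
		log.Fatal(err)
	}
}
```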
@@ -7,17 +7,17 @@ create table relayed_msg
     height      BIGINT  NOT NULL,
     layer1_hash VARCHAR NOT NULL DEFAULT '',
     layer2_hash VARCHAR NOT NULL DEFAULT '',
-    is_deleted  BOOLEAN NOT NULL DEFAULT FALSE,
     created_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
     updated_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
     deleted_at  TIMESTAMP(0) DEFAULT NULL
 );

-comment
-    on column relayed_msg.is_deleted is 'NotDeleted, Deleted';
-create unique index relayed_msg_hash_uindex
-    on relayed_msg (msg_hash);
+create unique index uk_msg_hash_l1_hash_l2_hash
+    on relayed_msg (msg_hash, layer1_hash, layer2_hash) where deleted_at IS NULL;
+
+CREATE INDEX idx_l1_msg_relayed_msg ON relayed_msg (layer1_hash, deleted_at);
+
+CREATE INDEX idx_l2_msg_relayed_msg ON relayed_msg (layer2_hash, deleted_at);

 CREATE OR REPLACE FUNCTION update_timestamp()
     RETURNS TRIGGER AS $$
@@ -31,22 +31,6 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
     ON relayed_msg FOR EACH ROW EXECUTE PROCEDURE
     update_timestamp();

-CREATE OR REPLACE FUNCTION deleted_at_trigger()
-    RETURNS TRIGGER AS $$
-BEGIN
-    IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
-        UPDATE relayed_msg SET deleted_at = NOW() WHERE id = NEW.id;
-    END IF;
-    RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE TRIGGER deleted_at_trigger
-    AFTER UPDATE ON relayed_msg
-    FOR EACH ROW
-    EXECUTE FUNCTION deleted_at_trigger();
-
 -- +goose StatementEnd

 -- +goose Down
@@ -3,6 +3,7 @@
 create table l2_sent_msg
 (
     id          BIGSERIAL PRIMARY KEY,
+    tx_sender   VARCHAR NOT NULL,
     sender      VARCHAR NOT NULL,
     target      VARCHAR NOT NULL,
     value       VARCHAR NOT NULL,
@@ -12,14 +13,16 @@ create table l2_sent_msg
     batch_index BIGINT NOT NULL DEFAULT 0,
     msg_proof   TEXT NOT NULL DEFAULT '',
     msg_data    TEXT NOT NULL DEFAULT '',
-    is_deleted  BOOLEAN NOT NULL DEFAULT FALSE,
     created_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
     updated_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
     deleted_at  TIMESTAMP(0) DEFAULT NULL
 );

-comment
-    on column l2_sent_msg.is_deleted is 'NotDeleted, Deleted';
+create unique index uk_msg_hash
+    on l2_sent_msg (msg_hash) where deleted_at IS NULL;
+
+create unique index uk_nonce
+    on l2_sent_msg (nonce) where deleted_at IS NULL;

 CREATE OR REPLACE FUNCTION update_timestamp()
     RETURNS TRIGGER AS $$
@@ -33,22 +36,6 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
     ON l2_sent_msg FOR EACH ROW EXECUTE PROCEDURE
     update_timestamp();

-CREATE OR REPLACE FUNCTION deleted_at_trigger()
-    RETURNS TRIGGER AS $$
-BEGIN
-    IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
-        UPDATE l2_sent_msg SET deleted_at = NOW() WHERE id = NEW.id;
-    END IF;
-    RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE TRIGGER deleted_at_trigger
-    AFTER UPDATE ON l2_sent_msg
-    FOR EACH ROW
-    EXECUTE FUNCTION deleted_at_trigger();
-
 -- +goose StatementEnd

 -- +goose Down
@@ -8,12 +8,17 @@ create table rollup_batch
     start_block_number BIGINT NOT NULL,
     end_block_number   BIGINT NOT NULL,
     batch_hash         VARCHAR NOT NULL,
-    is_deleted         BOOLEAN NOT NULL DEFAULT FALSE,
     created_at         TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
     updated_at         TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
     deleted_at         TIMESTAMP(0) DEFAULT NULL
 );

+create unique index uk_batch_index
+    on rollup_batch (batch_index) where deleted_at IS NULL;
+
+create unique index uk_batch_hash
+    on rollup_batch (batch_hash) where deleted_at IS NULL;
+
 CREATE OR REPLACE FUNCTION update_timestamp()
     RETURNS TRIGGER AS $$
 BEGIN
@@ -26,21 +31,6 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
     ON rollup_batch FOR EACH ROW EXECUTE PROCEDURE
     update_timestamp();

-CREATE OR REPLACE FUNCTION deleted_at_trigger()
-    RETURNS TRIGGER AS $$
-BEGIN
-    IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
-        UPDATE rollup_batch SET deleted_at = NOW() WHERE id = NEW.id;
-    END IF;
-    RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE TRIGGER deleted_at_trigger
-    AFTER UPDATE ON rollup_batch
-    FOR EACH ROW
-    EXECUTE FUNCTION deleted_at_trigger();
-
 -- +goose StatementEnd

 -- +goose Down
@@ -2,7 +2,6 @@ package orm

 import (
 	"database/sql"
-	"fmt"

 	"github.com/ethereum/go-ethereum/log"
 	"github.com/jmoiron/sqlx"
@@ -40,14 +39,6 @@ func (b *rollupBatchOrm) BatchInsertRollupBatchDBTx(dbTx *sqlx.Tx, batches []*Ro
 			"start_block_number": batch.StartBlockNumber,
 			"end_block_number":   batch.EndBlockNumber,
 		}
-		var exists bool
-		err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM rollup_batch WHERE batch_index = $1 AND NOT is_deleted)`, batch.BatchIndex).Scan(&exists)
-		if err != nil {
-			return err
-		}
-		if exists {
-			return fmt.Errorf("BatchInsertRollupBatchDBTx: batch index %v already exists at height %v", batch.BatchIndex, batch.CommitHeight)
-		}
 	}
 	_, err = dbTx.NamedExec(`insert into rollup_batch(commit_height, batch_index, batch_hash, start_block_number, end_block_number) values(:commit_height, :batch_index, :batch_hash, :start_block_number, :end_block_number);`, batchMaps)
 	if err != nil {
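Dropping the per-row SELECT EXISTS pre-check is the right call once the partial unique indexes exist: the pre-check races under concurrent writers, while the index enforces uniqueness atomically and surfaces duplicates as a constraint violation at insert time. A hedged sketch of handling that violation — the function is illustrative, not the repository's code, though Postgres error code 23505 (unique_violation) is standard:

```go
package main

import (
	"errors"
	"log"

	"github.com/jmoiron/sqlx"
	"github.com/lib/pq"
)

func insertBatches(tx *sqlx.Tx, batchMaps []map[string]interface{}) error {
	_, err := tx.NamedExec(`insert into rollup_batch(commit_height, batch_index, batch_hash, start_block_number, end_block_number)
	                        values(:commit_height, :batch_index, :batch_hash, :start_block_number, :end_block_number);`, batchMaps)
	var pqErr *pq.Error
	if errors.As(err, &pqErr) && pqErr.Code == "23505" { // unique_violation
		log.Printf("duplicate batch rejected by uk_batch_index/uk_batch_hash: %v", pqErr.Detail)
	}
	return err
}
```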
@@ -40,31 +40,24 @@ const (

 // CrossMsg represents a cross message from layer 1 to layer 2
 type CrossMsg struct {
-	ID          uint64     `json:"id" db:"id"`
-	MsgHash     string     `json:"msg_hash" db:"msg_hash"`
-	Height      uint64     `json:"height" db:"height"`
-	Sender      string     `json:"sender" db:"sender"`
-	Target      string     `json:"target" db:"target"`
-	Amount      string     `json:"amount" db:"amount"`
-	Layer1Hash  string     `json:"layer1_hash" db:"layer1_hash"`
-	Layer2Hash  string     `json:"layer2_hash" db:"layer2_hash"`
-	Layer1Token string     `json:"layer1_token" db:"layer1_token"`
-	Layer2Token string     `json:"layer2_token" db:"layer2_token"`
-	TokenID     uint64     `json:"token_id" db:"token_id"`
-	Asset       int        `json:"asset" db:"asset"`
-	MsgType     int        `json:"msg_type" db:"msg_type"`
-	IsDeleted   bool       `json:"is_deleted" db:"is_deleted"`
-	Timestamp   *time.Time `json:"timestamp" db:"block_timestamp"`
-	CreatedAt   *time.Time `json:"created_at" db:"created_at"`
-	UpdatedAt   *time.Time `json:"updated_at" db:"updated_at"`
-	DeletedAt   *time.Time `json:"deleted_at" db:"deleted_at"`
-}
-
-type RelayedMsg struct {
-	MsgHash    string `json:"msg_hash" db:"msg_hash"`
-	Height     uint64 `json:"height" db:"height"`
-	Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
-	Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
-}
+	ID           uint64     `json:"id" db:"id"`
+	MsgHash      string     `json:"msg_hash" db:"msg_hash"`
+	Height       uint64     `json:"height" db:"height"`
+	Sender       string     `json:"sender" db:"sender"`
+	Target       string     `json:"target" db:"target"`
+	Amount       string     `json:"amount" db:"amount"`
+	Layer1Hash   string     `json:"layer1_hash" db:"layer1_hash"`
+	Layer2Hash   string     `json:"layer2_hash" db:"layer2_hash"`
+	Layer1Token  string     `json:"layer1_token" db:"layer1_token"`
+	Layer2Token  string     `json:"layer2_token" db:"layer2_token"`
+	TokenIDs     string     `json:"token_ids" db:"token_ids"`
+	TokenAmounts string     `json:"token_amounts" db:"token_amounts"`
+	Asset        int        `json:"asset" db:"asset"`
+	MsgType      int        `json:"msg_type" db:"msg_type"`
+	Timestamp    *time.Time `json:"timestamp" db:"block_timestamp"`
+	CreatedAt    *time.Time `json:"created_at" db:"created_at"`
+	UpdatedAt    *time.Time `json:"updated_at" db:"updated_at"`
+	DeletedAt    *time.Time `json:"deleted_at" db:"deleted_at"`
+}

 // L1CrossMsgOrm provides operations on l1_cross_message table
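The db tags on the reshaped struct are what sqlx uses to map query results back into fields. A minimal sketch of reading a row via StructScan — the struct here is trimmed to three fields for illustration, and the DSN is a placeholder:

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

type CrossMsg struct {
	MsgHash  string `db:"msg_hash"`
	Height   uint64 `db:"height"`
	TokenIDs string `db:"token_ids"`
}

func main() {
	db := sqlx.MustOpen("postgres", "postgres://user:pass@localhost/bridge?sslmode=disable") // placeholder DSN
	row := db.QueryRowx(`SELECT msg_hash, height, token_ids FROM cross_message WHERE deleted_at IS NULL LIMIT 1`)
	var m CrossMsg
	if err := row.StructScan(&m); err != nil {
		log.Fatal(err)
	}
	log.Printf("msg %s at height %d (token ids %q)", m.MsgHash, m.Height, m.TokenIDs)
}
```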
@@ -4,7 +4,6 @@ import (
 	"context"
 	"database/sql"
 	"errors"
-	"fmt"
 	"time"

 	"github.com/ethereum/go-ethereum/common"
@@ -23,7 +22,7 @@ func NewL1CrossMsgOrm(db *sqlx.DB) L1CrossMsgOrm {

 func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, error) {
 	result := &CrossMsg{}
-	row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer1_hash = $1 AND msg_type = $2 AND NOT is_deleted;`, l1Hash.String(), Layer1Msg)
+	row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer1_hash = $1 AND msg_type = $2 AND deleted_at IS NULL;`, l1Hash.String(), Layer1Msg)
 	if err := row.StructScan(result); err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
 			return nil, nil
@@ -37,7 +36,7 @@ func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, erro

 // Warning: return empty slice if no data found
 func (l *l1CrossMsgOrm) GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error) {
 	var results []*CrossMsg
-	rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = 1 AND NOT is_deleted;`, sender.String(), Layer1Msg)
+	rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = 1 AND deleted_at IS NULL;`, sender.String(), Layer1Msg)

 	for rows.Next() {
 		msg := &CrossMsg{}
@@ -66,22 +65,15 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
 			"target":       msg.Target,
 			"amount":       msg.Amount,
 			"asset":        msg.Asset,
+			"msg_hash":     msg.MsgHash,
 			"layer1_hash":  msg.Layer1Hash,
 			"layer1_token": msg.Layer1Token,
 			"layer2_token": msg.Layer2Token,
-			"token_id":     msg.TokenID,
+			"token_ids":    msg.TokenIDs,
 			"msg_type":     Layer1Msg,
 		}
-		var exists bool
-		err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM cross_message WHERE layer1_hash = $1 AND NOT is_deleted)`, msg.Layer1Hash).Scan(&exists)
-		if err != nil {
-			return err
-		}
-		if exists {
-			return fmt.Errorf("BatchInsertL1CrossMsgDBTx: l1 cross msg layer1Hash %v already exists at height %v", msg.Layer1Hash, msg.Height)
-		}
 	}
-	_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_id, amount, msg_type) values(:height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type);`, messageMaps)
+	_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_ids, amount, msg_type, msg_hash) values(:height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type, :msg_hash);`, messageMaps)
 	if err != nil {
 		log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "err", err)
 		return err
@@ -92,7 +84,7 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro

 // UpdateL1CrossMsgHashDBTx update l1 cross msg hash in db, no need to check msg_type since layer1_hash wont be empty if its layer1 msg
 func (l *l1CrossMsgOrm) UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l1Hash, msgHash common.Hash) error {
-	if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer1_hash = ? AND NOT is_deleted;"), msgHash.String(), l1Hash.String()); err != nil {
+	if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l1Hash.String()); err != nil {
 		return err
 	}
 	return nil
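These helpers write the query with `?` placeholders and let Rebind translate them into the driver's bindvar style ($1, $2 on Postgres). A tiny standalone sketch of what that call does:

```go
package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
)

func main() {
	// sqlx.Rebind converts ?-style bindvars to the target style; DOLLAR is
	// what the Postgres driver uses.
	q := sqlx.Rebind(sqlx.DOLLAR, "update cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;")
	fmt.Println(q) // update cross_message set msg_hash = $1 where layer1_hash = $2 AND deleted_at IS NULL;
}
```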
@@ -100,7 +92,7 @@ func (l *l1CrossMsgOrm) UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx
 }

 func (l *l1CrossMsgOrm) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error {
-	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.l1_cross_message set msg_hash = ? where layer1_hash = ? AND NOT is_deleted;"), msgHash.String(), l1Hash.String()); err != nil {
+	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.l1_cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l1Hash.String()); err != nil {
 		return err
 	}
 	return nil
@@ -108,7 +100,7 @@ func (l *l1CrossMsgOrm) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHas
 }

 func (l *l1CrossMsgOrm) GetLatestL1ProcessedHeight() (int64, error) {
-	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND NOT is_deleted ORDER BY id DESC LIMIT 1;`, Layer1Msg)
+	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND deleted_at IS NULL ORDER BY id DESC LIMIT 1;`, Layer1Msg)
 	var result sql.NullInt64
 	if err := row.Scan(&result); err != nil {
 		if err == sql.ErrNoRows || !result.Valid {
@@ -123,21 +115,21 @@ func (l *l1CrossMsgOrm) GetLatestL1ProcessedHeight() (int64, error) {
 }

 func (l *l1CrossMsgOrm) DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
-	if _, err := l.db.Exec(`UPDATE cross_message SET is_deleted = true WHERE height > $1 AND msg_type = $2;`, height, Layer1Msg); err != nil {
+	if _, err := l.db.Exec(`UPDATE cross_message SET deleted_at = current_timestamp WHERE height > $1 AND msg_type = $2;`, height, Layer1Msg); err != nil {
 		return err
 	}
 	return nil
 }

 func (l *l1CrossMsgOrm) UpdateL1BlockTimestamp(height uint64, timestamp time.Time) error {
-	if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND NOT is_deleted`, timestamp, height, Layer1Msg); err != nil {
+	if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND deleted_at IS NULL`, timestamp, height, Layer1Msg); err != nil {
 		return err
 	}
 	return nil
 }

 func (l *l1CrossMsgOrm) GetL1EarliestNoBlockTimestampHeight() (uint64, error) {
-	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND NOT is_deleted ORDER BY height ASC LIMIT 1;`, Layer1Msg)
+	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND deleted_at IS NULL ORDER BY height ASC LIMIT 1;`, Layer1Msg)
 	var result uint64
 	if err := row.Scan(&result); err != nil {
 		if err == sql.ErrNoRows {
@@ -4,7 +4,6 @@ import (
 	"context"
 	"database/sql"
 	"errors"
-	"fmt"
 	"time"

 	"github.com/ethereum/go-ethereum/common"
@@ -23,7 +22,7 @@ func NewL2CrossMsgOrm(db *sqlx.DB) L2CrossMsgOrm {

 func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error) {
 	result := &CrossMsg{}
-	row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
+	row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND deleted_at IS NULL;`, l2Hash.String())
 	if err := row.StructScan(result); err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
 			return nil, nil
@@ -37,7 +36,7 @@ func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, erro

 // Warning: return empty slice if no data found
 func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error) {
 	var results []*CrossMsg
-	rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND NOT is_deleted;`, sender.String(), Layer2Msg)
+	rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND deleted_at IS NULL;`, sender.String(), Layer2Msg)

 	for rows.Next() {
 		msg := &CrossMsg{}
@@ -56,7 +55,7 @@ func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossM
 }

 func (l *l2CrossMsgOrm) DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error {
-	_, err := dbTx.Exec(`UPDATE cross_message SET is_deleted = true where height > $1 AND msg_type = $2 ;`, height, Layer2Msg)
+	_, err := dbTx.Exec(`UPDATE cross_message SET deleted_at = current_timestamp where height > $1 AND msg_type = $2 ;`, height, Layer2Msg)
 	if err != nil {
 		log.Error("DeleteL1CrossMsgAfterHeightDBTx: failed to delete", "height", height, "err", err)
 		return err
@@ -72,29 +71,21 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
 	var err error
 	messageMaps := make([]map[string]interface{}, len(messages))
 	for i, msg := range messages {
-
 		messageMaps[i] = map[string]interface{}{
 			"height":       msg.Height,
 			"sender":       msg.Sender,
 			"target":       msg.Target,
 			"asset":        msg.Asset,
+			"msg_hash":     msg.MsgHash,
 			"layer2_hash":  msg.Layer2Hash,
 			"layer1_token": msg.Layer1Token,
 			"layer2_token": msg.Layer2Token,
-			"token_id":     msg.TokenID,
+			"token_ids":    msg.TokenIDs,
 			"amount":       msg.Amount,
 			"msg_type":     Layer2Msg,
 		}
-		var exists bool
-		err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted)`, msg.Layer2Hash).Scan(&exists)
-		if err != nil {
-			return err
-		}
-		if exists {
-			return fmt.Errorf("BatchInsertL2CrossMsgDBTx: l2 cross msg layer2Hash %v already exists at height %v", msg.Layer2Hash, msg.Height)
-		}
 	}
-	_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_id, amount, msg_type) values(:height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type);`, messageMaps)
+	_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_ids, amount, msg_type, msg_hash) values(:height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type, :msg_hash);`, messageMaps)
 	if err != nil {
 		log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "err", err)
 		return err
@@ -103,21 +94,21 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
 }

 func (l *l2CrossMsgOrm) UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error {
-	if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer2_hash = ? AND NOT is_deleted;"), msgHash.String(), l2Hash.String()); err != nil {
+	if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update cross_message set msg_hash = ? where layer2_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l2Hash.String()); err != nil {
 		return err
 	}
 	return nil
 }

 func (l *l2CrossMsgOrm) UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error {
-	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer2_hash = ? AND NOT is_deleted;"), msgHash.String(), l2Hash.String()); err != nil {
+	if _, err := l.db.ExecContext(ctx, l.db.Rebind("update cross_message set msg_hash = ? where layer2_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l2Hash.String()); err != nil {
 		return err
 	}
 	return nil
 }

 func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
-	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND NOT is_deleted ORDER BY id DESC LIMIT 1;`, Layer2Msg)
+	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND deleted_at IS NULL ORDER BY id DESC LIMIT 1;`, Layer2Msg)
 	var result sql.NullInt64
 	if err := row.Scan(&result); err != nil {
 		if err == sql.ErrNoRows || !result.Valid {
@@ -132,14 +123,14 @@ func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
 }

 func (l *l2CrossMsgOrm) UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error {
-	if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND NOT is_deleted`, timestamp, height, Layer2Msg); err != nil {
+	if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND deleted_at IS NULL`, timestamp, height, Layer2Msg); err != nil {
 		return err
 	}
 	return nil
 }

 func (l *l2CrossMsgOrm) GetL2EarliestNoBlockTimestampHeight() (uint64, error) {
-	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND NOT is_deleted ORDER BY height ASC LIMIT 1;`, Layer2Msg)
+	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND deleted_at IS NULL ORDER BY height ASC LIMIT 1;`, Layer2Msg)
 	var result uint64
 	if err := row.Scan(&result); err != nil {
 		if err == sql.ErrNoRows {
@@ -3,7 +3,6 @@ package orm
 import (
 	"context"
 	"database/sql"
-	"fmt"
 	"time"

 	"github.com/ethereum/go-ethereum/log"
@@ -12,6 +11,7 @@ import (

 type L2SentMsg struct {
 	ID         uint64     `json:"id" db:"id"`
+	TxSender   string     `json:"tx_sender" db:"tx_sender"`
 	MsgHash    string     `json:"msg_hash" db:"msg_hash"`
 	Sender     string     `json:"sender" db:"sender"`
 	Target     string     `json:"target" db:"target"`
@@ -21,7 +21,6 @@ type L2SentMsg struct {
 	BatchIndex uint64     `json:"batch_index" db:"batch_index"`
 	MsgProof   string     `json:"msg_proof" db:"msg_proof"`
 	MsgData    string     `json:"msg_data" db:"msg_data"`
-	IsDeleted  bool       `json:"is_deleted" db:"is_deleted"`
 	CreatedAt  *time.Time `json:"created_at" db:"created_at"`
 	UpdatedAt  *time.Time `json:"updated_at" db:"updated_at"`
 	DeletedAt  *time.Time `json:"deleted_at" db:"deleted_at"`
@@ -38,7 +37,7 @@ func NewL2SentMsgOrm(db *sqlx.DB) L2SentMsgOrm {

 func (l *l2SentMsgOrm) GetL2SentMsgByHash(msgHash string) (*L2SentMsg, error) {
 	result := &L2SentMsg{}
-	row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE msg_hash = $1 AND NOT is_deleted;`, msgHash)
+	row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msgHash)
 	if err := row.StructScan(result); err != nil {
 		return nil, err
 	}
@@ -53,6 +52,7 @@ func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2Sen
 	messageMaps := make([]map[string]interface{}, len(messages))
 	for i, msg := range messages {
 		messageMaps[i] = map[string]interface{}{
+			"tx_sender": msg.TxSender,
 			"sender":    msg.Sender,
 			"target":    msg.Target,
 			"value":     msg.Value,
@@ -63,25 +63,17 @@ func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2Sen
 			"msg_proof": msg.MsgProof,
 			"msg_data":  msg.MsgData,
 		}
-		var exists bool
-		err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM l2_sent_msg WHERE (msg_hash = $1 OR nonce = $2) AND NOT is_deleted)`, msg.MsgHash, msg.Nonce).Scan(&exists)
-		if err != nil {
-			return err
-		}
-		if exists {
-			return fmt.Errorf("BatchInsertL2SentMsgDBTx: l2 sent msg_hash %v already exists at height %v", msg.MsgHash, msg.Height)
-		}
 	}
-	_, err = dbTx.NamedExec(`insert into l2_sent_msg(sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
+	_, err = dbTx.NamedExec(`insert into l2_sent_msg(tx_sender, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:tx_sender, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
 	if err != nil {
-		log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "msg_Hash", "err", err)
+		log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err)
 		return err
 	}
 	return err
 }

 func (l *l2SentMsgOrm) GetLatestSentMsgHeightOnL2() (int64, error) {
-	row := l.db.QueryRow(`SELECT height FROM l2_sent_msg WHERE NOT is_deleted ORDER BY nonce DESC LIMIT 1;`)
+	row := l.db.QueryRow(`SELECT height FROM l2_sent_msg WHERE deleted_at IS NULL ORDER BY nonce DESC LIMIT 1;`)
 	var result sql.NullInt64
 	if err := row.Scan(&result); err != nil {
 		if err == sql.ErrNoRows || !result.Valid {
@@ -96,14 +88,14 @@ func (l *l2SentMsgOrm) GetLatestSentMsgHeightOnL2() (int64, error) {
 }

 func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error {
-	if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND NOT is_deleted;"), proof, batch_index, msgHash); err != nil {
+	if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND deleted_at IS NULL;"), proof, batch_index, msgHash); err != nil {
 		return err
 	}
 	return nil
 }

 func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {
-	row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE msg_proof != '' AND NOT is_deleted ORDER BY batch_index DESC LIMIT 1;`)
+	row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE batch_index !=0 AND deleted_at IS NULL ORDER BY batch_index DESC LIMIT 1;`)
 	var result sql.NullInt64
 	if err := row.Scan(&result); err != nil {
 		if err == sql.ErrNoRows || !result.Valid {
@@ -119,7 +111,7 @@ func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {

 func (l *l2SentMsgOrm) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error) {
 	var results []*L2SentMsg
-	rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE height >= $1 AND height <= $2 AND NOT is_deleted ORDER BY nonce ASC;`, startHeight, endHeight)
+	rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE height >= $1 AND height <= $2 AND deleted_at IS NULL ORDER BY nonce ASC;`, startHeight, endHeight)
 	if err != nil {
 		return nil, err
 	}
@@ -135,7 +127,7 @@ func (l *l2SentMsgOrm) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight u

 func (l *l2SentMsgOrm) GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error) {
 	result := &L2SentMsg{}
-	row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE nonce = $1 AND NOT is_deleted;`, nonce)
+	row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE nonce = $1 AND deleted_at IS NULL;`, nonce)
 	err := row.StructScan(result)
 	if err != nil {
 		return nil, err
@@ -145,7 +137,7 @@ func (l *l2SentMsgOrm) GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error)

 func (l *l2SentMsgOrm) GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error) {
 	result := &L2SentMsg{}
-	row := l.db.QueryRowx(`select * from l2_sent_msg where height <= $1 AND NOT is_deleted order by nonce desc limit 1`, endBlockNumber)
+	row := l.db.QueryRowx(`select * from l2_sent_msg where height <= $1 AND deleted_at IS NULL order by nonce desc limit 1`, endBlockNumber)
 	err := row.StructScan(result)
 	if err != nil {
 		return nil, err
@@ -154,6 +146,6 @@ func (l *l2SentMsgOrm) GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2Sen
 }

 func (l *l2SentMsgOrm) DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
-	_, err := dbTx.Exec(`UPDATE l2_sent_msg SET is_deleted = true WHERE height > $1;`, height)
+	_, err := dbTx.Exec(`UPDATE l2_sent_msg SET deleted_at = current_timestamp WHERE height > $1;`, height)
 	return err
 }
@@ -8,6 +8,13 @@ import (
 	"github.com/jmoiron/sqlx"
 )

+type RelayedMsg struct {
+	MsgHash    string `json:"msg_hash" db:"msg_hash"`
+	Height     uint64 `json:"height" db:"height"`
+	Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
+	Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
+}
+
 type relayedMsgOrm struct {
 	db *sqlx.DB
 }
@@ -33,7 +40,7 @@ func (l *relayedMsgOrm) BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*Rel
 	}
 	_, err = dbTx.NamedExec(`insert into relayed_msg(msg_hash, height, layer1_hash, layer2_hash) values(:msg_hash, :height, :layer1_hash, :layer2_hash);`, messageMaps)
 	if err != nil {
-		log.Error("BatchInsertRelayedMsgDBTx: failed to insert l1 cross msgs", "msg_Hashe", "err", err)
+		log.Error("BatchInsertRelayedMsgDBTx: failed to insert relayed msgs", "err", err)
 		return err
 	}
 	return nil
@@ -41,7 +48,7 @@ func (l *relayedMsgOrm) BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*Rel

 func (l *relayedMsgOrm) GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error) {
 	result := &RelayedMsg{}
-	row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND NOT is_deleted;`, msg_hash)
+	row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msg_hash)
 	if err := row.StructScan(result); err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
 			return nil, nil
@@ -52,7 +59,7 @@ func (l *relayedMsgOrm) GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error
 }

 func (l *relayedMsgOrm) GetLatestRelayedHeightOnL1() (int64, error) {
-	row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer1_hash != '' AND NOT is_deleted ORDER BY height DESC LIMIT 1;`)
+	row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer1_hash != '' AND deleted_at IS NULL ORDER BY height DESC LIMIT 1;`)
 	var result sql.NullInt64
 	if err := row.Scan(&result); err != nil {
 		if err == sql.ErrNoRows || !result.Valid {
@@ -67,7 +74,7 @@ func (l *relayedMsgOrm) GetLatestRelayedHeightOnL1() (int64, error) {
 }

 func (l *relayedMsgOrm) GetLatestRelayedHeightOnL2() (int64, error) {
-	row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer2_hash != '' AND NOT is_deleted ORDER BY height DESC LIMIT 1;`)
+	row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer2_hash != '' AND deleted_at IS NULL ORDER BY height DESC LIMIT 1;`)
 	var result sql.NullInt64
 	if err := row.Scan(&result); err != nil {
 		if err == sql.ErrNoRows || !result.Valid {
@@ -82,11 +89,11 @@ func (l *relayedMsgOrm) GetLatestRelayedHeightOnL2() (int64, error) {
 }

 func (l *relayedMsgOrm) DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
-	_, err := dbTx.Exec(`UPDATE relayed_msg SET is_deleted = true WHERE height > $1 AND layer1_hash != '';`, height)
+	_, err := dbTx.Exec(`UPDATE relayed_msg SET deleted_at = current_timestamp WHERE height > $1 AND layer1_hash != '';`, height)
 	return err
 }

 func (l *relayedMsgOrm) DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
-	_, err := dbTx.Exec(`UPDATE relayed_msg SET is_deleted = true WHERE height > $1 AND layer2_hash != '';`, height)
+	_, err := dbTx.Exec(`UPDATE relayed_msg SET deleted_at = current_timestamp WHERE height > $1 AND layer2_hash != '';`, height)
 	return err
 }
@@ -68,7 +68,7 @@ func (o *ormFactory) Beginx() (*sqlx.Tx, error) {

 func (o *ormFactory) GetTotalCrossMsgCountByAddress(sender string) (uint64, error) {
 	var count uint64
-	row := o.DB.QueryRowx(`SELECT COUNT(*) FROM cross_message WHERE sender = $1 AND NOT is_deleted;`, sender)
+	row := o.DB.QueryRowx(`SELECT COUNT(*) FROM cross_message WHERE sender = $1 AND deleted_at IS NULL;`, sender)
 	if err := row.Scan(&count); err != nil {
 		return 0, err
 	}
@@ -78,7 +78,7 @@ func (o *ormFactory) GetTotalCrossMsgCountByAddress(sender string) (uint64, erro
 func (o *ormFactory) GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error) {
 	para := sender
 	var results []*orm.CrossMsg
-	rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND NOT is_deleted ORDER BY block_timestamp DESC NULLS FIRST, id DESC LIMIT $2 OFFSET $3;`, para, limit, offset)
+	rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND deleted_at IS NULL ORDER BY block_timestamp DESC NULLS FIRST, id DESC LIMIT $2 OFFSET $3;`, para, limit, offset)
 	if err != nil || rows == nil {
 		return nil, err
 	}
@@ -77,7 +77,7 @@ func GetCrossTxClaimInfo(msgHash string, db db.OrmFactory) *UserClaimInfo {
 		Value:      l2sentMsg.Value,
 		Nonce:      strconv.FormatUint(l2sentMsg.Nonce, 10),
 		Message:    l2sentMsg.MsgData,
-		Proof:      l2sentMsg.MsgProof,
+		Proof:      "0x" + l2sentMsg.MsgProof,
 		BatchHash:  batch.BatchHash,
 		BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
 	}
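Proofs are stored in msg_proof without a 0x prefix, so the claim-info response now prepends one before handing it to clients. A tiny illustrative helper (not the repository's code) that makes the same fix idempotent in case a proof ever arrives already prefixed:

```go
package main

import (
	"fmt"
	"strings"
)

// ensureHexPrefix mirrors the `"0x" + l2sentMsg.MsgProof` change above.
func ensureHexPrefix(s string) string {
	if strings.HasPrefix(s, "0x") {
		return s
	}
	return "0x" + s
}

func main() {
	fmt.Println(ensureHexPrefix("abcd"))   // 0xabcd
	fmt.Println(ensureHexPrefix("0xabcd")) // 0xabcd
}
```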
|
||||
|
||||
@@ -18,6 +18,11 @@ type MsgHashWrapper struct {
|
||||
TxHash common.Hash
|
||||
}
|
||||
|
||||
type L2SentMsgWrapper struct {
|
||||
L2SentMsg *orm.L2SentMsg
|
||||
TxHash common.Hash
|
||||
}
|
||||
|
||||
type CachedParsedTxCalldata struct {
|
||||
CallDataIndex uint64
|
||||
BatchIndices []uint64
|
||||
@@ -81,7 +86,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
                Layer1Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
-               TokenID:     event.TokenID.Uint64(),
+               TokenIDs:    event.TokenID.String(),
            })
        case backendabi.L1DepositERC1155Sig:
            event := backendabi.ERC1155MessageEvent{}
@@ -98,7 +103,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
                Layer1Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
-               TokenID:     event.TokenID.Uint64(),
+               TokenIDs:    event.TokenID.String(),
                Amount:      event.Amount.String(),
            })
        case backendabi.L1SentMessageEventSignature:
@@ -131,15 +136,14 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
    return l1CrossMsg, msgHashes, relayedMsgs, nil
}

-func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrapper, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
+func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []L2SentMsgWrapper, error) {
    // Need to use the contract ABI to parse event logs.
    // Can only be tested after we have our contracts set up.

    var l2CrossMsg []*orm.CrossMsg
    // used to confirm finalized L1 messages
    var relayedMsgs []*orm.RelayedMsg
-   var l2SentMsg []*orm.L2SentMsg
-   var msgHashes []MsgHashWrapper
+   var l2SentMsg []L2SentMsgWrapper
    for _, vlog := range logs {
        switch vlog.Topics[0] {
        case backendabi.L2WithdrawETHSig:
@@ -147,7 +151,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
            err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
            if err != nil {
                log.Warn("Failed to unpack WithdrawETH event", "err", err)
-               return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
+               return l2CrossMsg, relayedMsgs, l2SentMsg, err
            }
            l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
                Height: vlog.BlockNumber,
@@ -162,7 +166,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
            err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
            if err != nil {
                log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
-               return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
+               return l2CrossMsg, relayedMsgs, l2SentMsg, err
            }
            l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
                Height: vlog.BlockNumber,
@@ -179,7 +183,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
            err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
            if err != nil {
                log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
-               return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
+               return l2CrossMsg, relayedMsgs, l2SentMsg, err
            }
            l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
                Height: vlog.BlockNumber,
@@ -189,14 +193,14 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
                Layer2Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
-               TokenID:     event.TokenID.Uint64(),
+               TokenIDs:    event.TokenID.String(),
            })
        case backendabi.L2WithdrawERC1155Sig:
            event := backendabi.ERC1155MessageEvent{}
            err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
            if err != nil {
                log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
-               return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
+               return l2CrossMsg, relayedMsgs, l2SentMsg, err
            }
            l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
                Height: vlog.BlockNumber,
@@ -206,7 +210,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
                Layer2Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
-               TokenID:     event.TokenID.Uint64(),
+               TokenIDs:    event.TokenID.String(),
                Amount:      event.Amount.String(),
            })
        case backendabi.L2SentMessageEventSignature:
@@ -214,27 +218,28 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
            err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
            if err != nil {
                log.Warn("Failed to unpack SentMessage event", "err", err)
-               return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
+               return l2CrossMsg, relayedMsgs, l2SentMsg, err
            }
            msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
-           msgHashes = append(msgHashes, MsgHashWrapper{
-               MsgHash: msgHash,
-               TxHash:  vlog.TxHash})
-           l2SentMsg = append(l2SentMsg, &orm.L2SentMsg{
-               Sender:  event.Sender.Hex(),
-               Target:  event.Target.Hex(),
-               Value:   event.Value.String(),
-               MsgHash: msgHash.Hex(),
-               Height:  vlog.BlockNumber,
-               Nonce:   event.MessageNonce.Uint64(),
-               MsgData: hexutil.Encode(event.Message),
-           })
+           l2SentMsg = append(l2SentMsg,
+               L2SentMsgWrapper{
+                   TxHash: vlog.TxHash,
+                   L2SentMsg: &orm.L2SentMsg{
+                       Sender:  event.Sender.Hex(),
+                       Target:  event.Target.Hex(),
+                       Value:   event.Value.String(),
+                       MsgHash: msgHash.Hex(),
+                       Height:  vlog.BlockNumber,
+                       Nonce:   event.MessageNonce.Uint64(),
+                       MsgData: hexutil.Encode(event.Message),
+                   },
+               })
        case backendabi.L2RelayedMessageEventSignature:
            event := backendabi.L2RelayedMessageEvent{}
            err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
            if err != nil {
                log.Warn("Failed to unpack RelayedMessage event", "err", err)
-               return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
+               return l2CrossMsg, relayedMsgs, l2SentMsg, err
            }
            relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
                MsgHash: event.MessageHash.String(),
@@ -244,7 +249,7 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe

        }
    }
-   return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, nil
+   return l2CrossMsg, relayedMsgs, l2SentMsg, nil
}

func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
@@ -3,6 +3,7 @@ package utils
import (
    "bytes"
    "context"
+   "encoding/binary"
    "errors"
    "fmt"
    "math/big"
@@ -62,6 +63,53 @@ func ComputeMessageHash(
    return common.BytesToHash(crypto.Keccak256(data))
}
type commitBatchArgs struct {
    Version                uint8
    ParentBatchHeader      []byte
    Chunks                 [][]byte
    SkippedL1MessageBitmap []byte
}

// GetBatchRangeFromCalldataV2 finds the batch index and the block range from commitBatch calldata, both inclusive.
func GetBatchRangeFromCalldataV2(calldata []byte) (uint64, uint64, uint64, error) {
    method := backendabi.ScrollChainV2ABI.Methods["commitBatch"]
    values, err := method.Inputs.Unpack(calldata[4:])
    if err != nil {
        return 0, 0, 0, err
    }
    args := commitBatchArgs{}
    err = method.Inputs.Copy(&args, values)
    if err != nil {
        return 0, 0, 0, err
    }

    var startBlock uint64
    var finishBlock uint64

    // decode batchIndex from ParentBatchHeader
    if len(args.ParentBatchHeader) < 9 {
        return 0, 0, 0, errors.New("invalid parent batch header")
    }
    batchIndex := binary.BigEndian.Uint64(args.ParentBatchHeader[1:9]) + 1

    // decode blocks from chunk, assuming that there is no empty chunk
    // | 1 byte     | 60 bytes | ... | 60 bytes |
    // | num blocks | block 1  | ... | block n  |
    if len(args.Chunks) == 0 {
        return 0, 0, 0, errors.New("invalid chunks")
    }
    chunk := args.Chunks[0]
    block := chunk[1:61] // first block in chunk
    startBlock = binary.BigEndian.Uint64(block[0:8])

    chunk = args.Chunks[len(args.Chunks)-1]
    lastBlockIndex := int(chunk[0]) - 1
    block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
    finishBlock = binary.BigEndian.Uint64(block[0:8])

    return batchIndex, startBlock, finishBlock, err
}
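For intuition, here is a standalone sketch of the chunk layout this function relies on: one byte for the block count, then 60 bytes per block, with the block number big-endian in the first 8 bytes of each block. The byte values are synthetic:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    // Build a synthetic chunk holding 2 blocks, numbered 10 and 11.
    chunk := []byte{2}
    for _, n := range []uint64{10, 11} {
        block := make([]byte, 60)
        binary.BigEndian.PutUint64(block[0:8], n)
        chunk = append(chunk, block...)
    }

    // Same index arithmetic as GetBatchRangeFromCalldataV2.
    first := binary.BigEndian.Uint64(chunk[1:9])
    last := int(chunk[0]) - 1
    finish := binary.BigEndian.Uint64(chunk[1+last*60 : 1+last*60+8])
    fmt.Println(first, finish) // prints: 10 11
}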
// GetBatchRangeFromCalldataV1 finds the block range from calldata, both inclusive.
func GetBatchRangeFromCalldataV1(calldata []byte) ([]uint64, []uint64, []uint64, error) {
    var batchIndices []uint64

@@ -20,6 +20,22 @@ func TestKeccak2(t *testing.T) {
    assert.Equal(t, "0xc0ffbd7f501bd3d49721b0724b2bff657cb2378f15d5a9b97cd7ea5bf630d512", c.Hex())
}

func TestGetBatchRangeFromCalldataV2(t *testing.T) {
    // single chunk
    batchIndex, start, finish, err := utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003d0100000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000100000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000"))
    assert.NoError(t, err)
    assert.Equal(t, start, uint64(1))
    assert.Equal(t, finish, uint64(1))
    assert.Equal(t, batchIndex, uint64(1))

    // multiple chunks
    batchIndex, start, finish, err = utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000007900000000000000000100000000000000010000000000000001038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000004c01000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030000000000010000000001000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b403000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005000300000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00050000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012c01000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa800000000000000000000000000000000000000000000000000000000000000aa"))
    assert.NoError(t, err)
    assert.Equal(t, start, uint64(10))
    assert.Equal(t, finish, uint64(20))
    assert.Equal(t, batchIndex, uint64(2))
}

func TestGetBatchRangeFromCalldataV1(t *testing.T) {
    calldata, err := os.ReadFile("../testdata/commit-batches-0x3095e91db7ba4a6fbf4654d607db322e58ff5579c502219c8024acaea74cf311.txt")
    assert.NoError(t, err)
File diff suppressed because one or more lines are too long
@@ -73,6 +73,19 @@ func TestPackFinalizeBatchWithProof(t *testing.T) {
    assert.NoError(err)
}

+func TestPackImportGenesisBatch(t *testing.T) {
+   assert := assert.New(t)
+
+   l1RollupABI, err := ScrollChainMetaData.GetAbi()
+   assert.NoError(err)
+
+   batchHeader := []byte{}
+   stateRoot := common.Hash{}
+
+   _, err = l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
+   assert.NoError(err)
+}
+
func TestPackRelayL1Message(t *testing.T) {
    assert := assert.New(t)
@@ -83,7 +83,7 @@ func action(ctx *cli.Context) error {
        log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
        return err
    }
-   l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
+   l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */)
    if err != nil {
        log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
        return err
@@ -31,6 +31,7 @@ func init() {
    app.Usage = "The Scroll Rollup Relayer"
    app.Version = version.Version
    app.Flags = append(app.Flags, cutils.CommonFlags...)
+   app.Flags = append(app.Flags, cutils.RollupRelayerFlags...)
    app.Commands = []*cli.Command{}
    app.Before = func(ctx *cli.Context) error {
        return cutils.LogSetup(ctx)
@@ -70,7 +71,8 @@ func action(ctx *cli.Context) error {
        return err
    }

-   l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
+   initGenesis := ctx.Bool(cutils.ImportGenesisFlag.Name)
+   l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
    if err != nil {
        log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
        return err
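The diff reads `cutils.ImportGenesisFlag` but never shows its declaration. It is presumably a urfave/cli boolean along these lines (names and usage text assumed, not taken from the repo):

// Hypothetical sketch of the flag declaration; the real one lives in the
// bridge utils package and may differ.
var ImportGenesisFlag = &cli.BoolFlag{
    Name:  "import-genesis",
    Usage: "Import and commit the genesis batch before starting the relayer",
    Value: false,
}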
@@ -3,8 +3,10 @@ package relayer
import (
    "context"
    "errors"
    "fmt"
    "math/big"
    "sync"
    "time"

    "github.com/scroll-tech/go-ethereum/accounts/abi"
    "github.com/scroll-tech/go-ethereum/common"
@@ -42,10 +44,10 @@ type Layer2Relayer struct {

    l2Client *ethclient.Client

-   batchOrm     *orm.Batch
-   chunkOrm     *orm.Chunk
-   l2BlockOrm   *orm.L2Block
-   l2MessageOrm *orm.L2Message
+   db         *gorm.DB
+   batchOrm   *orm.Batch
+   chunkOrm   *orm.Chunk
+   l2BlockOrm *orm.L2Block

    cfg *config.RelayerConfig

@@ -78,7 +80,7 @@ type Layer2Relayer struct {
}

// NewLayer2Relayer will return a new instance of Layer2Relayer
-func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
+func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool) (*Layer2Relayer, error) {
    // @todo use different sender for relayer, block commit and proof finalize
    messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
    if err != nil {
@@ -115,11 +117,11 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.

    layer2Relayer := &Layer2Relayer{
        ctx: ctx,
+       db:  db,

-       batchOrm:     orm.NewBatch(db),
-       l2MessageOrm: orm.NewL2Message(db),
-       l2BlockOrm:   orm.NewL2Block(db),
-       chunkOrm:     orm.NewChunk(db),
+       batchOrm:   orm.NewBatch(db),
+       l2BlockOrm: orm.NewL2Block(db),
+       chunkOrm:   orm.NewChunk(db),

        l2Client: l2Client,
@@ -142,10 +144,127 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
        processingCommitment:   sync.Map{},
        processingFinalization: sync.Map{},
    }

    // Initialize genesis before we do anything else
    if initGenesis {
        if err := layer2Relayer.initializeGenesis(); err != nil {
            return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
        }
    }

    go layer2Relayer.handleConfirmLoop(ctx)
    return layer2Relayer, nil
}

func (r *Layer2Relayer) initializeGenesis() error {
    if count, err := r.batchOrm.GetBatchCount(r.ctx); err != nil {
        return fmt.Errorf("failed to get batch count: %v", err)
    } else if count > 0 {
        log.Info("genesis already imported", "batch count", count)
        return nil
    }

    genesis, err := r.l2Client.HeaderByNumber(r.ctx, big.NewInt(0))
    if err != nil {
        return fmt.Errorf("failed to retrieve L2 genesis header: %v", err)
    }

    log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())

    chunk := &bridgeTypes.Chunk{
        Blocks: []*bridgeTypes.WrappedBlock{{
            Header:           genesis,
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
        }},
    }

    err = r.db.Transaction(func(dbTX *gorm.DB) error {
        var dbChunk *orm.Chunk
        dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, dbTX)
        if err != nil {
            return fmt.Errorf("failed to insert chunk: %v", err)
        }

        if err = r.chunkOrm.UpdateProvingStatus(r.ctx, dbChunk.Hash, types.ProvingTaskVerified, dbTX); err != nil {
            return fmt.Errorf("failed to update genesis chunk proving status: %v", err)
        }

        var batch *orm.Batch
        batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*bridgeTypes.Chunk{chunk}, dbTX)
        if err != nil {
            return fmt.Errorf("failed to insert batch: %v", err)
        }

        if err = r.chunkOrm.UpdateBatchHashInRange(r.ctx, 0, 0, batch.Hash, dbTX); err != nil {
            return fmt.Errorf("failed to update batch hash for chunks: %v", err)
        }

        if err = r.batchOrm.UpdateProvingStatus(r.ctx, batch.Hash, types.ProvingTaskVerified, dbTX); err != nil {
            return fmt.Errorf("failed to update genesis batch proving status: %v", err)
        }

        if err = r.batchOrm.UpdateRollupStatus(r.ctx, batch.Hash, types.RollupFinalized, dbTX); err != nil {
            return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
        }

        // commit genesis batch on L1
        // note: we do this inside the DB transaction so that we can revert all DB changes if this step fails
        return r.commitGenesisBatch(batch.Hash, batch.BatchHeader, common.HexToHash(batch.StateRoot))
    })

    if err != nil {
        return fmt.Errorf("update genesis transaction failed: %v", err)
    }

    log.Info("successfully imported genesis chunk and batch")

    return nil
}

func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
    // encode "importGenesisBatch" transaction calldata
    calldata, err := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
    if err != nil {
        return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, err)
    }

    // submit genesis batch to L1 rollup contract
    txHash, err := r.rollupSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
    if err != nil {
        return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
    }
    log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash.String(), "batchHash", batchHash)

    // wait for confirmation
    // we assume that no other transactions are sent before initializeGenesis completes
    ticker := time.NewTicker(10 * time.Second)
    defer ticker.Stop()

    for {
        select {
        // print progress
        case <-ticker.C:
            log.Info("Waiting for confirmation", "pending count", r.rollupSender.PendingCount())

        // timeout
        case <-time.After(5 * time.Minute):
            return fmt.Errorf("import genesis timeout after 5 minutes, original txHash: %v", txHash.String())

        // handle confirmation
        case confirmation := <-r.rollupSender.ConfirmChan():
            if confirmation.ID != batchHash {
                return fmt.Errorf("unexpected import genesis confirmation id, expected: %v, got: %v", batchHash, confirmation.ID)
            }
            if !confirmation.IsSuccessful {
                return fmt.Errorf("import genesis batch tx failed")
            }
            log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String())
            return nil
        }
    }
}
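The import above is deliberately idempotent and atomic: the batch-count check makes reruns a no-op, and the L1 submission runs inside the same DB transaction so a failed commit rolls back every row. A condensed sketch of that shape (gorm, with the ORM details stubbed out; not the repo's actual signatures):

// importGenesisOnce mirrors the structure of initializeGenesis above.
func importGenesisOnce(db *gorm.DB, alreadyImported func() (bool, error),
    writeRows func(tx *gorm.DB) error, commitOnL1 func() error) error {
    if done, err := alreadyImported(); err != nil {
        return err
    } else if done {
        return nil // reruns are a no-op
    }
    return db.Transaction(func(tx *gorm.DB) error {
        if err := writeRows(tx); err != nil {
            return err
        }
        // Returning an error here (e.g. a failed L1 commit) rolls back
        // every row written above.
        return commitOnL1()
    })
}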
// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
    batch, err := r.batchOrm.GetLatestBatch(r.ctx)
@@ -400,7 +519,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {

func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
    transactionType := "Unknown"

    // check whether it is CommitBatches transaction
    if batchHash, ok := r.processingCommitment.Load(confirmation.ID); ok {
        transactionType = "BatchesCommitment"
@@ -35,7 +35,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewRelayer(t *testing.T) {
    db := setupL2RelayerDB(t)
    defer bridgeUtils.CloseDB(db)
-   relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
+   relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
    assert.NoError(t, err)
    assert.NotNil(t, relayer)
}
@@ -45,7 +45,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
    defer bridgeUtils.CloseDB(db)

    l2Cfg := cfg.L2Config
-   relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
+   relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
    assert.NoError(t, err)

    l2BlockOrm := orm.NewL2Block(db)
@@ -57,12 +57,12 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
    dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
    assert.NoError(t, err)
    batchOrm := orm.NewBatch(db)
-   batchHash, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*bridgeTypes.Chunk{chunk1, chunk2})
+   batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*bridgeTypes.Chunk{chunk1, chunk2})
    assert.NoError(t, err)

    relayer.ProcessPendingBatches()

-   statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
+   statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
    assert.NoError(t, err)
    assert.Equal(t, 1, len(statuses))
    assert.Equal(t, types.RollupCommitting, statuses[0])
@@ -73,36 +73,36 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
    defer bridgeUtils.CloseDB(db)

    l2Cfg := cfg.L2Config
-   relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
+   relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
    assert.NoError(t, err)
    batchOrm := orm.NewBatch(db)
-   batchHash, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
+   batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
    assert.NoError(t, err)

-   err = batchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
+   err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
    assert.NoError(t, err)

-   err = batchOrm.UpdateProvingStatus(context.Background(), batchHash, types.ProvingTaskVerified)
+   err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
    assert.NoError(t, err)

    relayer.ProcessCommittedBatches()

-   statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
+   statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
    assert.NoError(t, err)
    assert.Equal(t, 1, len(statuses))
    assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])

-   err = batchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
+   err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
    assert.NoError(t, err)
    proof := &message.AggProof{
        Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
        FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
    }
-   err = batchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
+   err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
    assert.NoError(t, err)

    relayer.ProcessCommittedBatches()
-   statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
+   statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
    assert.NoError(t, err)
    assert.Equal(t, 1, len(statuses))
    assert.Equal(t, types.RollupFinalizing, statuses[0])
@@ -113,26 +113,26 @@ func testL2RelayerSkipBatches(t *testing.T) {
    defer bridgeUtils.CloseDB(db)

    l2Cfg := cfg.L2Config
-   relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
+   relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
    assert.NoError(t, err)

    batchOrm := orm.NewBatch(db)
    createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus) string {
-       batchHash, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
+       batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
        assert.NoError(t, err)

-       err = batchOrm.UpdateRollupStatus(context.Background(), batchHash, rollupStatus)
+       err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, rollupStatus)
        assert.NoError(t, err)

        proof := &message.AggProof{
            Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
            FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
        }
-       err = batchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
+       err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
        assert.NoError(t, err)
-       err = batchOrm.UpdateProvingStatus(context.Background(), batchHash, provingStatus)
+       err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, provingStatus)
        assert.NoError(t, err)
-       return batchHash
+       return batch.Hash
    }

    skipped := []string{
@@ -177,7 +177,7 @@ func testL2RelayerRollupConfirm(t *testing.T) {
    l2Cfg := cfg.L2Config
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
-   l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
+   l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
    assert.NoError(t, err)

    // Simulate message confirmations.
@@ -187,9 +187,9 @@ func testL2RelayerRollupConfirm(t *testing.T) {
    batchOrm := orm.NewBatch(db)
    batchHashes := make([]string, len(processingKeys))
    for i := range batchHashes {
-       var err error
-       batchHashes[i], err = batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
+       batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
        assert.NoError(t, err)
+       batchHashes[i] = batch.Hash
    }

    for i, key := range processingKeys[:2] {
@@ -235,17 +235,17 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
    defer bridgeUtils.CloseDB(db)

    batchOrm := orm.NewBatch(db)
-   batchHash1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
+   batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
    assert.NoError(t, err)

-   batchHash2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
+   batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
    assert.NoError(t, err)

    // Create and set up the Layer2 Relayer.
    l2Cfg := cfg.L2Config
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
-   l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
+   l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
    assert.NoError(t, err)

    // Simulate message confirmations.
@@ -255,8 +255,8 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
    }

    confirmations := []BatchConfirmation{
-       {batchHash: batchHash1, isSuccessful: true},
-       {batchHash: batchHash2, isSuccessful: false},
+       {batchHash: batch1.Hash, isSuccessful: true},
+       {batchHash: batch2.Hash, isSuccessful: false},
    }

    for _, confirmation := range confirmations {
@@ -283,7 +283,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
    db := setupL2RelayerDB(t)
    defer bridgeUtils.CloseDB(db)

-   relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
+   relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
    assert.NoError(t, err)
    assert.NotNil(t, relayer)
@@ -72,14 +72,13 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
    endChunkIndex := dbChunks[numChunks-1].Index
    endChunkHash := dbChunks[numChunks-1].Hash
    err = p.db.Transaction(func(dbTX *gorm.DB) error {
-       var batchHash string
-       batchHash, err = p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
-       if err != nil {
-           return err
+       batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
+       if dbErr != nil {
+           return dbErr
        }
-       err = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batchHash, dbTX)
-       if err != nil {
-           return err
+       dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
+       if dbErr != nil {
+           return dbErr
        }
        return nil
    })
@@ -106,15 +106,6 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
        )
    }

-   if totalTxGasUsed > p.maxTxGasPerChunk {
-       return nil, fmt.Errorf(
-           "the first block exceeds l2 tx gas limit; block number: %v, gas used: %v, max gas limit: %v",
-           firstBlock.Header.Number,
-           totalTxGasUsed,
-           p.maxTxGasPerChunk,
-       )
-   }
-
    if totalL1CommitGas > p.maxL1CommitGasPerChunk {
        return nil, fmt.Errorf(
            "the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
@@ -133,6 +124,16 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
        )
    }

+   // Check if the first block breaks any soft limits.
+   if totalTxGasUsed > p.maxTxGasPerChunk {
+       log.Warn(
+           "The first block in chunk exceeds l2 tx gas limit",
+           "block number", firstBlock.Header.Number,
+           "gas used", totalTxGasUsed,
+           "max gas limit", p.maxTxGasPerChunk,
+       )
+   }
+
    for i, block := range blocks[1:] {
        totalTxGasUsed += block.Header.GasUsed
        totalL2TxNum += block.L2TxsNum()
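The proposer change above downgrades the per-chunk tx-gas bound from a hard limit (fail the proposal) to a soft limit (log and continue), while the L1 commit-gas bound stays hard. A minimal sketch of the distinction, with hypothetical names rather than the actual proposer code:

package proposer

import (
    "fmt"

    "github.com/scroll-tech/go-ethereum/log"
)

// checkFirstBlockLimits sketches the hard/soft split.
func checkFirstBlockLimits(txGas, commitGas, maxTxGas, maxCommitGas uint64) error {
    if commitGas > maxCommitGas {
        // Hard limit: a block that cannot fit in one L1 commit tx can never
        // be committed, so the proposal must fail.
        return fmt.Errorf("first block exceeds l1 commit gas limit: %d > %d", commitGas, maxCommitGas)
    }
    if txGas > maxTxGas {
        // Soft limit: the chunk is still valid and provable, so warn and proceed.
        log.Warn("first block exceeds l2 tx gas limit", "gas used", txGas, "max", maxTxGas)
    }
    return nil
}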
@@ -24,10 +24,9 @@ import (
)

var (
-   bridgeL1MsgsSyncHeightGauge           = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
-   bridgeL1MsgsSentEventsTotalCounter    = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
-   bridgeL1MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/events/total", metrics.ScrollRegistry)
-   bridgeL1MsgsRollupEventsTotalCounter  = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
+   bridgeL1MsgsSyncHeightGauge          = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
+   bridgeL1MsgsSentEventsTotalCounter   = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
+   bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
)

type rollupEvent struct {
@@ -41,7 +40,6 @@ type L1WatcherClient struct {
    ctx          context.Context
    client       *ethclient.Client
    l1MessageOrm *orm.L1Message
-   l2MessageOrm *orm.L2Message
    l1BlockOrm   *orm.L1Block
    batchOrm     *orm.Batch

@@ -91,7 +89,6 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
    l1MessageOrm: l1MessageOrm,
    l1BlockOrm:   l1BlockOrm,
    batchOrm:     orm.NewBatch(db),
-   l2MessageOrm: orm.NewL2Message(db),
    confirmations: confirmations,

    messengerAddress: messengerAddress,
@@ -227,18 +224,16 @@ func (w *L1WatcherClient) FetchContractEvent() error {
    }
    log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))

-   sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
+   sentMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
    if err != nil {
        log.Error("Failed to parse emitted events log", "err", err)
        return err
    }
    sentMessageCount := int64(len(sentMessageEvents))
-   relayedMessageCount := int64(len(relayedMessageEvents))
    rollupEventCount := int64(len(rollupEvents))
    bridgeL1MsgsSentEventsTotalCounter.Inc(sentMessageCount)
-   bridgeL1MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
    bridgeL1MsgsRollupEventsTotalCounter.Inc(rollupEventCount)
-   log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RelayedMessageCount", relayedMessageCount, "RollupEventCount", rollupEventCount)
+   log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RollupEventCount", rollupEventCount)

    // use rollup event to update rollup results db status
    var batchHashes []string
@@ -272,21 +267,6 @@ func (w *L1WatcherClient) FetchContractEvent() error {
        }
    }

-   // Update relayed message first to make sure we don't forget to update submitted message.
-   // Since, we always start sync from the latest unprocessed message.
-   for _, msg := range relayedMessageEvents {
-       var msgStatus types.MsgStatus
-       if msg.isSuccessful {
-           msgStatus = types.MsgConfirmed
-       } else {
-           msgStatus = types.MsgFailed
-       }
-       if err = w.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
-           log.Error("Failed to update layer1 status and layer2 hash", "err", err)
-           return err
-       }
-   }
-
    if err = w.l1MessageOrm.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
        return err
    }
@@ -298,11 +278,10 @@ func (w *L1WatcherClient) FetchContractEvent() error {
    return nil
}
-func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
+func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1Message, []rollupEvent, error) {
    // Need to use the contract ABI to parse event logs.
    // Can only be tested after we have our contracts set up.
    var l1Messages []*orm.L1Message
-   var relayedMessages []relayedMessage
    var rollupEvents []rollupEvent
    for _, vLog := range logs {
        switch vLog.Topics[0] {
@@ -311,7 +290,7 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
            err := utils.UnpackLog(w.messageQueueABI, &event, "QueueTransaction", vLog)
            if err != nil {
                log.Warn("Failed to unpack layer1 QueueTransaction event", "err", err)
-               return l1Messages, relayedMessages, rollupEvents, err
+               return l1Messages, rollupEvents, err
            }

            msgHash := common.BytesToHash(crypto.Keccak256(event.Data))
@@ -327,38 +306,12 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
                GasLimit:   event.GasLimit.Uint64(),
                Layer1Hash: vLog.TxHash.Hex(),
            })
-       case bridgeAbi.L1RelayedMessageEventSignature:
-           event := bridgeAbi.L1RelayedMessageEvent{}
-           err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
-           if err != nil {
-               log.Warn("Failed to unpack layer1 RelayedMessage event", "err", err)
-               return l1Messages, relayedMessages, rollupEvents, err
-           }
-
-           relayedMessages = append(relayedMessages, relayedMessage{
-               msgHash:      event.MessageHash,
-               txHash:       vLog.TxHash,
-               isSuccessful: true,
-           })
-       case bridgeAbi.L1FailedRelayedMessageEventSignature:
-           event := bridgeAbi.L1FailedRelayedMessageEvent{}
-           err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
-           if err != nil {
-               log.Warn("Failed to unpack layer1 FailedRelayedMessage event", "err", err)
-               return l1Messages, relayedMessages, rollupEvents, err
-           }
-
-           relayedMessages = append(relayedMessages, relayedMessage{
-               msgHash:      event.MessageHash,
-               txHash:       vLog.TxHash,
-               isSuccessful: false,
-           })
        case bridgeAbi.L1CommitBatchEventSignature:
            event := bridgeAbi.L1CommitBatchEvent{}
            err := utils.UnpackLog(w.scrollChainABI, &event, "CommitBatch", vLog)
            if err != nil {
                log.Warn("Failed to unpack layer1 CommitBatch event", "err", err)
-               return l1Messages, relayedMessages, rollupEvents, err
+               return l1Messages, rollupEvents, err
            }

            rollupEvents = append(rollupEvents, rollupEvent{
@@ -371,7 +324,7 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
            err := utils.UnpackLog(w.scrollChainABI, &event, "FinalizeBatch", vLog)
            if err != nil {
                log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err)
-               return l1Messages, relayedMessages, rollupEvents, err
+               return l1Messages, rollupEvents, err
            }

            rollupEvents = append(rollupEvents, rollupEvent{
@@ -384,5 +337,5 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
        }
    }

-   return l1Messages, relayedMessages, rollupEvents, nil
+   return l1Messages, rollupEvents, nil
}
@@ -159,14 +159,14 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {

    convey.Convey("parse bridge event logs failure", t, func() {
        targetErr := errors.New("parse log failure")
-       patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
-           return nil, nil, nil, targetErr
+       patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []rollupEvent, error) {
+           return nil, nil, targetErr
        })
        err := watcher.FetchContractEvent()
        assert.EqualError(t, err, targetErr.Error())
    })

-   patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
+   patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []rollupEvent, error) {
        rollupEvents := []rollupEvent{
            {
                batchHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
@@ -179,20 +179,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
                status:    commonTypes.RollupCommitted,
            },
        }

-       relayedMessageEvents := []relayedMessage{
-           {
-               msgHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
-               txHash:       common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
-               isSuccessful: true,
-           },
-           {
-               msgHash:      common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
-               txHash:       common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"),
-               isSuccessful: false,
-           },
-       }
-       return nil, relayedMessageEvents, rollupEvents, nil
+       return nil, rollupEvents, nil
    })

    var batchOrm *orm.Batch
@@ -250,20 +237,6 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
        return nil
    })

-   var l2MessageOrm *orm.L2Message
-   convey.Convey("db update layer2 status and layer1 hash failure", t, func() {
-       targetErr := errors.New("UpdateLayer2StatusAndLayer1Hash failure")
-       patchGuard.ApplyMethodFunc(l2MessageOrm, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
-           return targetErr
-       })
-       err := watcher.FetchContractEvent()
-       assert.Equal(t, targetErr.Error(), err.Error())
-   })
-
-   patchGuard.ApplyMethodFunc(l2MessageOrm, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
-       return nil
-   })
-
    var l1MessageOrm *orm.L1Message
    convey.Convey("db save l1 message failure", t, func() {
        targetErr := errors.New("SaveL1Messages failure")
@@ -303,10 +276,9 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
        })
        defer patchGuard.Reset()

-       l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
+       l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
        assert.EqualError(t, err, targetErr.Error())
        assert.Empty(t, l2Messages)
-       assert.Empty(t, relayedMessages)
        assert.Empty(t, rollupEvents)
    })

@@ -323,102 +295,14 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
        })
        defer patchGuard.Reset()

-       l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
+       l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
        assert.NoError(t, err)
-       assert.Empty(t, relayedMessages)
        assert.Empty(t, rollupEvents)
        assert.Len(t, l2Messages, 1)
        assert.Equal(t, l2Messages[0].Value, big.NewInt(1000).String())
    })
}

-func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
-   watcher, db := setupL1Watcher(t)
-   defer utils.CloseDB(db)
-
-   logs := []types.Log{
-       {
-           Topics:      []common.Hash{bridgeAbi.L1RelayedMessageEventSignature},
-           BlockNumber: 100,
-           TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
-       },
-   }
-
-   convey.Convey("unpack RelayedMessage log failure", t, func() {
-       targetErr := errors.New("UnpackLog RelayedMessage failure")
-       patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
-           return targetErr
-       })
-       defer patchGuard.Reset()
-
-       l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
-       assert.EqualError(t, err, targetErr.Error())
-       assert.Empty(t, l2Messages)
-       assert.Empty(t, relayedMessages)
-       assert.Empty(t, rollupEvents)
-   })
-
-   convey.Convey("L1RelayedMessageEventSignature success", t, func() {
-       msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
-       patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
-           tmpOut := out.(*bridgeAbi.L1RelayedMessageEvent)
-           tmpOut.MessageHash = msgHash
-           return nil
-       })
-       defer patchGuard.Reset()
-
-       l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
-       assert.NoError(t, err)
-       assert.Empty(t, l2Messages)
-       assert.Empty(t, rollupEvents)
-       assert.Len(t, relayedMessages, 1)
-       assert.Equal(t, relayedMessages[0].msgHash, msgHash)
-   })
-}
-
-func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T) {
-   watcher, db := setupL1Watcher(t)
-   defer utils.CloseDB(db)
-   logs := []types.Log{
-       {
-           Topics:      []common.Hash{bridgeAbi.L1FailedRelayedMessageEventSignature},
-           BlockNumber: 100,
-           TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
-       },
-   }
-
-   convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
-       targetErr := errors.New("UnpackLog FailedRelayedMessage failure")
-       patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
-           return targetErr
-       })
-       defer patchGuard.Reset()
-
-       l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
-       assert.EqualError(t, err, targetErr.Error())
-       assert.Empty(t, l2Messages)
-       assert.Empty(t, relayedMessages)
-       assert.Empty(t, rollupEvents)
-   })
-
-   convey.Convey("L1FailedRelayedMessageEventSignature success", t, func() {
-       msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
-       patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
-           tmpOut := out.(*bridgeAbi.L1FailedRelayedMessageEvent)
-           tmpOut.MessageHash = msgHash
-           return nil
-       })
-       defer patchGuard.Reset()
-
-       l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
-       assert.NoError(t, err)
-       assert.Empty(t, l2Messages)
-       assert.Empty(t, rollupEvents)
-       assert.Len(t, relayedMessages, 1)
-       assert.Equal(t, relayedMessages[0].msgHash, msgHash)
-   })
-}
-
func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
    watcher, db := setupL1Watcher(t)
    defer utils.CloseDB(db)
@@ -437,10 +321,9 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
        })
        defer patchGuard.Reset()

-       l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
+       l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
        assert.EqualError(t, err, targetErr.Error())
        assert.Empty(t, l2Messages)
-       assert.Empty(t, relayedMessages)
        assert.Empty(t, rollupEvents)
    })

@@ -453,10 +336,9 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
        })
        defer patchGuard.Reset()

-       l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
+       l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
        assert.NoError(t, err)
        assert.Empty(t, l2Messages)
-       assert.Empty(t, relayedMessages)
        assert.Len(t, rollupEvents, 1)
        assert.Equal(t, rollupEvents[0].batchHash, msgHash)
        assert.Equal(t, rollupEvents[0].status, commonTypes.RollupCommitted)
@@ -481,10 +363,9 @@ func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
        })
        defer patchGuard.Reset()

-       l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
+       l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
        assert.EqualError(t, err, targetErr.Error())
        assert.Empty(t, l2Messages)
-       assert.Empty(t, relayedMessages)
        assert.Empty(t, rollupEvents)
    })

@@ -497,10 +378,9 @@ func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
        })
        defer patchGuard.Reset()

-       l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
+       l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
        assert.NoError(t, err)
        assert.Empty(t, l2Messages)
-       assert.Empty(t, relayedMessages)
        assert.Len(t, rollupEvents, 1)
        assert.Equal(t, rollupEvents[0].batchHash, msgHash)
        assert.Equal(t, rollupEvents[0].status, commonTypes.RollupFinalized)
@@ -2,7 +2,6 @@ package watcher

import (
    "context"
    "errors"
    "fmt"
    "math/big"

@@ -32,8 +31,6 @@ var (
    bridgeL2MsgsSyncHeightGauge      = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
    bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
    bridgeL2BlocksFetchedGapGauge    = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
-   bridgeL2MsgsSentEventsTotalCounter   = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry)
-   bridgeL2MsgsAppendEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry)
    bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)
@@ -44,12 +41,8 @@ type L2WatcherClient struct {

    *ethclient.Client

-   db           *gorm.DB
    l2BlockOrm   *orm.L2Block
-   chunkOrm     *orm.Chunk
-   batchOrm     *orm.Batch
    l1MessageOrm *orm.L1Message
    l2MessageOrm *orm.L2Message

    confirmations rpc.BlockNumber

@@ -68,24 +61,29 @@ type L2WatcherClient struct {

// NewL2WatcherClient takes an l2geth client and creates a new L2WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
    l2MessageOrm := orm.NewL2Message(db)
-   savedHeight, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
-   if err != nil {
+   l1MessageOrm := orm.NewL1Message(db)
+   var savedHeight uint64
+   l1msg, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
+   if err != nil || l1msg == nil {
        log.Warn("fetch height from db failed", "err", err)
        savedHeight = 0
    } else {
+       receipt, err := client.TransactionReceipt(ctx, common.HexToHash(l1msg.Layer2Hash))
+       if err != nil || receipt == nil {
+           log.Warn("get tx from l2 failed", "err", err)
+           savedHeight = 0
+       } else {
+           savedHeight = receipt.BlockNumber.Uint64()
+       }
    }

    w := L2WatcherClient{
        ctx:    ctx,
-       db:     db,
        Client: client,

        l2BlockOrm:   orm.NewL2Block(db),
-       chunkOrm:     orm.NewChunk(db),
-       batchOrm:     orm.NewBatch(db),
        l1MessageOrm: orm.NewL1Message(db),
        l2MessageOrm: l2MessageOrm,
-       processedMsgHeight: uint64(savedHeight),
+       processedMsgHeight: savedHeight,
        confirmations: confirmations,

        messengerAddress: messengerAddress,
@@ -98,78 +96,9 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
        stopped: 0,
    }

-   // Initialize genesis before we do anything else
-   if err := w.initializeGenesis(); err != nil {
-       panic(fmt.Sprintf("failed to initialize L2 genesis batch, err: %v", err))
-   }
-
    return &w
}

-func (w *L2WatcherClient) initializeGenesis() error {
-   if count, err := w.batchOrm.GetBatchCount(w.ctx); err != nil {
-       return fmt.Errorf("failed to get batch count: %v", err)
-   } else if count > 0 {
-       log.Info("genesis already imported", "batch count", count)
-       return nil
-   }
-
-   genesis, err := w.HeaderByNumber(w.ctx, big.NewInt(0))
-   if err != nil {
-       return fmt.Errorf("failed to retrieve L2 genesis header: %v", err)
-   }
-
-   log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
-
-   chunk := &bridgeTypes.Chunk{
-       Blocks: []*bridgeTypes.WrappedBlock{{
-           Header:           genesis,
-           Transactions:     nil,
-           WithdrawTrieRoot: common.Hash{},
-       }},
-   }
-
-   err = w.db.Transaction(func(dbTX *gorm.DB) error {
-       var dbChunk *orm.Chunk
-       dbChunk, err = w.chunkOrm.InsertChunk(w.ctx, chunk, dbTX)
-       if err != nil {
-           return fmt.Errorf("failed to insert chunk: %v", err)
-       }
-
-       if err = w.chunkOrm.UpdateProvingStatus(w.ctx, dbChunk.Hash, types.ProvingTaskVerified, dbTX); err != nil {
-           return fmt.Errorf("failed to update genesis chunk proving status: %v", err)
-       }
-
-       var batchHash string
-       batchHash, err = w.batchOrm.InsertBatch(w.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*bridgeTypes.Chunk{chunk}, dbTX)
-       if err != nil {
-           return fmt.Errorf("failed to insert batch: %v", err)
-       }
-
-       if err = w.chunkOrm.UpdateBatchHashInRange(w.ctx, 0, 0, batchHash, dbTX); err != nil {
-           return fmt.Errorf("failed to update batch hash for chunks: %v", err)
-       }
-
-       if err = w.batchOrm.UpdateProvingStatus(w.ctx, batchHash, types.ProvingTaskVerified, dbTX); err != nil {
-           return fmt.Errorf("failed to update genesis batch proving status: %v", err)
-       }
-
-       if err = w.batchOrm.UpdateRollupStatus(w.ctx, batchHash, types.RollupFinalized, dbTX); err != nil {
-           return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
-       }
-
-       return nil
-   })
-
-   if err != nil {
-       return fmt.Errorf("update genesis transaction failed: %v", err)
-   }
-
-   log.Info("successfully imported genesis chunk and batch")
-
-   return nil
-}
-
const blockTracesFetchLimit = uint64(10)
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
@@ -201,10 +130,20 @@ func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
    txsData := make([]*gethTypes.TransactionData, len(txs))
    for i, tx := range txs {
        v, r, s := tx.RawSignatureValues()

+       nonce := tx.Nonce()
+
+       // We need QueueIndex in `NewBatchHeader`. However, `TransactionData`
+       // does not have this field. Since `L1MessageTx` does not have a nonce,
+       // we reuse this field for storing the queue index.
+       if msg := tx.AsL1MessageTx(); msg != nil {
+           nonce = msg.QueueIndex
+       }
+
        txsData[i] = &gethTypes.TransactionData{
            Type:   tx.Type(),
            TxHash: tx.Hash().String(),
-           Nonce:  tx.Nonce(),
+           Nonce:  nonce,
            ChainId:  (*hexutil.Big)(tx.ChainId()),
            Gas:      tx.Gas(),
            GasPrice: (*hexutil.Big)(tx.GasPrice()),
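Because the queue index is smuggled through the `Nonce` field, a downstream consumer should check the transaction type before interpreting it. A sketch of such a helper (not repo code; the `L1MessageTxType` constant is assumed from the scroll-tech/go-ethereum fork):

// queueIndexOf recovers the queue index that txsToTxsData stored in the
// Nonce field, returning false for ordinary transactions.
func queueIndexOf(txData *gethTypes.TransactionData) (uint64, bool) {
    if txData.Type == gethTypes.L1MessageTxType {
        return txData.Nonce, true // an L1 queue index, not an account nonce
    }
    return 0, false
}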
@@ -302,17 +241,15 @@ func (w *L2WatcherClient) FetchContractEvent() {
		}
		log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))

		sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
		relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
		if err != nil {
			log.Error("failed to parse emitted event log", "err", err)
			return
		}

		sentMessageCount := int64(len(sentMessageEvents))
		relayedMessageCount := int64(len(relayedMessageEvents))
		bridgeL2MsgsSentEventsTotalCounter.Inc(sentMessageCount)
		bridgeL2MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
		log.Info("L2 events types", "SentMessageCount", sentMessageCount, "RelayedMessageCount", relayedMessageCount)
		log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)

		// Update relayed messages first to make sure we don't forget to update submitted messages,
		// since we always start syncing from the latest unprocessed message.
@@ -329,71 +266,24 @@ func (w *L2WatcherClient) FetchContractEvent() {
			}
		}

		if err = w.l2MessageOrm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
			log.Error("failed to save l2 messages", "err", err)
			return
		}

		w.processedMsgHeight = uint64(to)
		bridgeL2MsgsSyncHeightGauge.Update(to)
	}
}

func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]orm.L2Message, []relayedMessage, error) {
func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]relayedMessage, error) {
	// Need to use the contract ABI to parse the event log.
	// Can only be tested after we have our contracts set up.

	var l2Messages []orm.L2Message
	var relayedMessages []relayedMessage
	var lastAppendMsgHash common.Hash
	var lastAppendMsgNonce uint64
	for _, vLog := range logs {
		switch vLog.Topics[0] {
		case bridgeAbi.L2SentMessageEventSignature:
			event := bridgeAbi.L2SentMessageEvent{}
			err := utils.UnpackLog(w.messengerABI, &event, "SentMessage", vLog)
			if err != nil {
				log.Error("failed to unpack layer2 SentMessage event", "err", err)
				return l2Messages, relayedMessages, err
			}

			computedMsgHash := utils.ComputeMessageHash(
				event.Sender,
				event.Target,
				event.Value,
				event.MessageNonce,
				event.Message,
			)

			// The `AppendMessage` event is always emitted before the `SentMessage` event,
			// so they should always match; just double-check.
			if event.MessageNonce.Uint64() != lastAppendMsgNonce {
				errMsg := fmt.Sprintf("l2 message nonces mismatch: AppendMessage.nonce=%v, SentMessage.nonce=%v, tx_hash=%v",
					lastAppendMsgNonce, event.MessageNonce.Uint64(), vLog.TxHash.Hex())
				return l2Messages, relayedMessages, errors.New(errMsg)
			}
			if computedMsgHash != lastAppendMsgHash {
				errMsg := fmt.Sprintf("l2 message hashes mismatch: AppendMessage.msg_hash=%v, SentMessage.msg_hash=%v, tx_hash=%v",
					lastAppendMsgHash.Hex(), computedMsgHash.Hex(), vLog.TxHash.Hex())
				return l2Messages, relayedMessages, errors.New(errMsg)
			}

			l2Messages = append(l2Messages, orm.L2Message{
				Nonce:      event.MessageNonce.Uint64(),
				MsgHash:    computedMsgHash.String(),
				Height:     vLog.BlockNumber,
				Sender:     event.Sender.String(),
				Value:      event.Value.String(),
				Target:     event.Target.String(),
				Calldata:   common.Bytes2Hex(event.Message),
				Layer2Hash: vLog.TxHash.Hex(),
			})
		case bridgeAbi.L2RelayedMessageEventSignature:
			event := bridgeAbi.L2RelayedMessageEvent{}
			err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
			if err != nil {
				log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err)
				return l2Messages, relayedMessages, err
				return relayedMessages, err
			}

			relayedMessages = append(relayedMessages, relayedMessage{
@@ -406,7 +296,7 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]orm.L2Me
			err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
			if err != nil {
				log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err)
				return l2Messages, relayedMessages, err
				return relayedMessages, err
			}

			relayedMessages = append(relayedMessages, relayedMessage{
@@ -414,21 +304,9 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]orm.L2Me
				txHash:       vLog.TxHash,
				isSuccessful: false,
			})
		case bridgeAbi.L2AppendMessageEventSignature:
			event := bridgeAbi.L2AppendMessageEvent{}
			err := utils.UnpackLog(w.messageQueueABI, &event, "AppendMessage", vLog)
			if err != nil {
				log.Warn("Failed to unpack layer2 AppendMessage event", "err", err)
				return l2Messages, relayedMessages, err
			}

			lastAppendMsgHash = event.MessageHash
			lastAppendMsgNonce = event.Index.Uint64()
			bridgeL2MsgsAppendEventsTotalCounter.Inc(1)
		default:
			log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
		}
	}

	return l2Messages, relayedMessages, nil
	return relayedMessages, nil
}

@@ -21,7 +21,6 @@ import (
	"github.com/smartystreets/goconvey/convey"
	"github.com/stretchr/testify/assert"

	"scroll-tech/common/types"
	cutils "scroll-tech/common/utils"

	bridgeAbi "scroll-tech/bridge/abi"
@@ -67,137 +66,6 @@ func testCreateNewWatcherAndStop(t *testing.T) {
	assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
}

func testMonitorBridgeContract(t *testing.T) {
	wc, db := setupL2Watcher(t)
	subCtx, cancel := context.WithCancel(context.Background())
	defer func() {
		cancel()
		defer utils.CloseDB(db)
	}()

	loopToFetchEvent(subCtx, wc)

	previousHeight, err := l2Cli.BlockNumber(context.Background())
	assert.NoError(t, err)

	auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])

	// deploy mock bridge
	_, tx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
	assert.NoError(t, err)
	address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
	assert.NoError(t, err)

	rc := prepareWatcherClient(l2Cli, db, address)
	loopToFetchEvent(subCtx, rc)
	// Call the mock_bridge instance's sendMessage to trigger emitted events
	toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
	message := []byte("testbridgecontract")
	fee := big.NewInt(0)
	gasLimit := big.NewInt(1)

	tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
	assert.NoError(t, err)
	receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
	if err != nil || receipt.Status != gethTypes.ReceiptStatusSuccessful {
		t.Fatalf("Call failed")
	}

	// extra block mined
	toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
	message = []byte("testbridgecontract")
	tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
	assert.NoError(t, err)
	receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
	if err != nil || receipt.Status != gethTypes.ReceiptStatusSuccessful {
		t.Fatalf("Call failed")
	}

	l2MessageOrm := orm.NewL2Message(db)
	// check if we successfully stored events
	assert.True(t, cutils.TryTimes(10, func() bool {
		height, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
		return err == nil && height > int64(previousHeight)
	}))

	// check l2 messages.
	assert.True(t, cutils.TryTimes(10, func() bool {
		msgs, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"status": types.MsgPending}, nil, 0)
		return err == nil && len(msgs) == 2
	}))
}

func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
	_, db := setupL2Watcher(t)
	subCtx, cancel := context.WithCancel(context.Background())
	defer func() {
		cancel()
		defer utils.CloseDB(db)
	}()

	previousHeight, err := l2Cli.BlockNumber(context.Background()) // shadow the global previousHeight
	assert.NoError(t, err)

	auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])

	_, trx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
	assert.NoError(t, err)
	address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
	assert.NoError(t, err)

	wc := prepareWatcherClient(l2Cli, db, address)
	loopToFetchEvent(subCtx, wc)

	// Call the mock_bridge instance's sendMessage to trigger emitted events multiple times
	numTransactions := 4
	var tx *gethTypes.Transaction
	for i := 0; i < numTransactions; i++ {
		addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
		nonce, nonceErr := l2Cli.PendingNonceAt(context.Background(), addr)
		assert.NoError(t, nonceErr)
		auth.Nonce = big.NewInt(int64(nonce))
		toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
		message := []byte("testbridgecontract")
		fee := big.NewInt(0)
		gasLimit := big.NewInt(1)
		tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
		assert.NoError(t, err)
	}

	receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
	if err != nil || receipt.Status != gethTypes.ReceiptStatusSuccessful {
		t.Fatalf("Call failed")
	}

	// extra block mined
	addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
	nonce, nonceErr := l2Cli.PendingNonceAt(context.Background(), addr)
	assert.NoError(t, nonceErr)
	auth.Nonce = big.NewInt(int64(nonce))
	toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
	message := []byte("testbridgecontract")
	fee := big.NewInt(0)
	gasLimit := big.NewInt(1)
	tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
	assert.NoError(t, err)
	receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
	if err != nil || receipt.Status != gethTypes.ReceiptStatusSuccessful {
		t.Fatalf("Call failed")
	}

	l2MessageOrm := orm.NewL2Message(db)
	// check if we successfully stored events
	assert.True(t, cutils.TryTimes(10, func() bool {
		height, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
		return err == nil && height > int64(previousHeight)
	}))

	assert.True(t, cutils.TryTimes(10, func() bool {
		msgs, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"status": types.MsgPending}, nil, 0)
		return err == nil && len(msgs) == 5
	}))
}

func testFetchRunningMissingBlocks(t *testing.T) {
	_, db := setupL2Watcher(t)
	defer utils.CloseDB(db)
@@ -244,57 +112,6 @@ func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
	go cutils.Loop(subCtx, 2*time.Second, watcher.FetchContractEvent)
}

func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
	watcher, db := setupL2Watcher(t)
	defer utils.CloseDB(db)

	logs := []gethTypes.Log{
		{
			Topics: []common.Hash{
				bridgeAbi.L2SentMessageEventSignature,
			},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}

	convey.Convey("unpack SentMessage log failure", t, func() {
		targetErr := errors.New("UnpackLog SentMessage failure")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
			return targetErr
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, targetErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

	convey.Convey("L2SentMessageEventSignature success", t, func() {
		tmpSendAddr := common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
		tmpTargetAddr := common.HexToAddress("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		tmpValue := big.NewInt(1000)
		tmpMessageNonce := big.NewInt(100)
		tmpMessage := []byte("test for L2SentMessageEventSignature")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
			tmpOut := out.(*bridgeAbi.L2SentMessageEvent)
			tmpOut.Sender = tmpSendAddr
			tmpOut.Value = tmpValue
			tmpOut.Target = tmpTargetAddr
			tmpOut.MessageNonce = tmpMessageNonce
			tmpOut.Message = tmpMessage
			return nil
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.Error(t, err)
		assert.Empty(t, relayedMessages)
		assert.Empty(t, l2Messages)
	})
}

func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
	watcher, db := setupL2Watcher(t)
	defer utils.CloseDB(db)
@@ -314,9 +131,8 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, targetErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

@@ -329,9 +145,8 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Len(t, relayedMessages, 1)
		assert.Equal(t, relayedMessages[0].msgHash, msgHash)
	})
@@ -356,9 +171,8 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, targetErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

@@ -371,51 +185,9 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Len(t, relayedMessages, 1)
		assert.Equal(t, relayedMessages[0].msgHash, msgHash)
	})
}

func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
	watcher, db := setupL2Watcher(t)
	defer utils.CloseDB(db)
	logs := []gethTypes.Log{
		{
			Topics:      []common.Hash{bridgeAbi.L2AppendMessageEventSignature},
			BlockNumber: 100,
			TxHash:      common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
		},
	}

	convey.Convey("unpack AppendMessage log failure", t, func() {
		targetErr := errors.New("UnpackLog AppendMessage failure")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
			return targetErr
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.EqualError(t, err, targetErr.Error())
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})

	convey.Convey("L2AppendMessageEventSignature success", t, func() {
		msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
		patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
			tmpOut := out.(*bridgeAbi.L2AppendMessageEvent)
			tmpOut.MessageHash = msgHash
			tmpOut.Index = big.NewInt(100)
			return nil
		})
		defer patchGuard.Reset()

		l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
		assert.NoError(t, err)
		assert.Empty(t, l2Messages)
		assert.Empty(t, relayedMessages)
	})
}

@@ -100,20 +100,14 @@ func TestFunction(t *testing.T) {
	t.Run("TestL1WatcherClientFetchBlockHeader", testL1WatcherClientFetchBlockHeader)
	t.Run("TestL1WatcherClientFetchContractEvent", testL1WatcherClientFetchContractEvent)
	t.Run("TestParseBridgeEventLogsL1QueueTransactionEventSignature", testParseBridgeEventLogsL1QueueTransactionEventSignature)
	t.Run("TestParseBridgeEventLogsL1RelayedMessageEventSignature", testParseBridgeEventLogsL1RelayedMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL1FailedRelayedMessageEventSignature", testParseBridgeEventLogsL1FailedRelayedMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL1CommitBatchEventSignature", testParseBridgeEventLogsL1CommitBatchEventSignature)
	t.Run("TestParseBridgeEventLogsL1FinalizeBatchEventSignature", testParseBridgeEventLogsL1FinalizeBatchEventSignature)

	// Run l2 watcher test cases.
	t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
	t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
	t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
	t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)
	t.Run("TestParseBridgeEventLogsL2SentMessageEventSignature", testParseBridgeEventLogsL2SentMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL2RelayedMessageEventSignature", testParseBridgeEventLogsL2RelayedMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL2FailedRelayedMessageEventSignature", testParseBridgeEventLogsL2FailedRelayedMessageEventSignature)
	t.Run("TestParseBridgeEventLogsL2AppendMessageEventSignature", testParseBridgeEventLogsL2AppendMessageEventSignature)

	// Run chunk-proposer test cases.
	t.Run("TestChunkProposer", testChunkProposer)

@@ -189,9 +189,9 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
}

// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*bridgeTypes.Chunk, dbTX ...*gorm.DB) (string, error) {
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
	if len(chunks) == 0 {
		return "", errors.New("invalid args")
		return nil, errors.New("invalid args")
	}

	db := o.db
@@ -202,7 +202,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
	parentBatch, err := o.GetLatestBatch(ctx)
	if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
		log.Error("failed to get the latest batch", "err", err)
		return "", err
		return nil, err
	}

	var batchIndex uint64
@@ -221,7 +221,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
		parentBatchHeader, err = bridgeTypes.DecodeBatchHeader(parentBatch.BatchHeader)
		if err != nil {
			log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
			return "", err
			return nil, err
		}

		totalL1MessagePoppedBefore = parentBatchHeader.TotalL1MessagePopped()
@@ -233,7 +233,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
		log.Error("failed to create batch header",
			"index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
			"parent hash", parentBatchHash, "number of chunks", len(chunks), "err", err)
		return "", err
		return nil, err
	}

	numChunks := len(chunks)
@@ -255,10 +255,10 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex

	if err := db.WithContext(ctx).Create(&newBatch).Error; err != nil {
		log.Error("failed to insert batch", "batch", newBatch, "err", err)
		return "", err
		return nil, err
	}

	return newBatch.Hash, nil
	return &newBatch, nil
}
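
With this signature change, callers migrate from receiving a hash string to receiving the whole persisted row. A minimal sketch of the updated call pattern, mirroring the TestBatchOrm hunk further down:

```go
// Before: hash, err := batchOrm.InsertBatch(ctx, start, end, startHash, endHash, chunks)
// After: the full row comes back, so the hash is just one field of it.
batch, err := batchOrm.InsertBatch(ctx, start, end, startHash, endHash, chunks)
if err != nil {
	return err
}
batchHash := batch.Hash // other fields (Index, BatchHeader, ...) are also available
```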

// UpdateSkippedBatches updates the skipped batches in the database.
@@ -52,6 +52,16 @@ func (m *L1Message) GetLayer1LatestWatchedHeight() (int64, error) {
	return -1, nil
}

// GetLayer1LatestMessageWithLayer2Hash returns the latest L1 message that has a layer2 hash
func (m *L1Message) GetLayer1LatestMessageWithLayer2Hash() (*L1Message, error) {
	var msg *L1Message
	err := m.db.Where("layer2_hash IS NOT NULL").Order("queue_index DESC").First(&msg).Error
	if err != nil {
		return nil, err
	}
	return msg, nil
}
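
A hedged sketch of how this getter might be used to resume relaying. The `QueueIndex` field and the surrounding names are assumptions: the `queue_index DESC` ordering implies such a field exists, but it is not shown in this hunk.

```go
// Hypothetical caller: resume from the last L1 message already relayed to L2.
latest, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
	return err
}
var nextQueueIndex uint64
if latest != nil {
	nextQueueIndex = latest.QueueIndex + 1 // QueueIndex is an assumed field name
}
_ = nextQueueIndex // feed into the next fetch/relay cycle
```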

// GetL1MessagesByStatus fetches a list of unprocessed messages with the given msg status
func (m *L1Message) GetL1MessagesByStatus(status types.MsgStatus, limit uint64) ([]L1Message, error) {
	var msgs []L1Message
@@ -1,127 +0,0 @@
package orm

import (
	"context"

	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/types"
)

// L2Message is the structure of a stored layer2 bridge message
type L2Message struct {
	db *gorm.DB `gorm:"column:-"`

	Nonce      uint64 `json:"nonce" gorm:"column:nonce"`
	MsgHash    string `json:"msg_hash" gorm:"column:msg_hash"`
	Height     uint64 `json:"height" gorm:"column:height"`
	Sender     string `json:"sender" gorm:"column:sender"`
	Value      string `json:"value" gorm:"column:value"`
	Target     string `json:"target" gorm:"column:target"`
	Calldata   string `json:"calldata" gorm:"column:calldata"`
	Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash"`
	Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash;default:NULL"`
	Proof      string `json:"proof" gorm:"column:proof;default:NULL"`
	Status     int    `json:"status" gorm:"column:status;default:1"`
}

// NewL2Message creates an L2Message instance
func NewL2Message(db *gorm.DB) *L2Message {
	return &L2Message{db: db}
}

// TableName defines the L2Message table name
func (*L2Message) TableName() string {
	return "l2_message"
}

// GetL2Messages fetches a list of messages matching the given fields
func (m *L2Message) GetL2Messages(fields map[string]interface{}, orderByList []string, limit int) ([]L2Message, error) {
	var l2MsgList []L2Message
	db := m.db
	for key, value := range fields {
		db = db.Where(key, value)
	}

	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}

	if limit != 0 {
		db = db.Limit(limit)
	}

	if err := db.Find(&l2MsgList).Error; err != nil {
		return nil, err
	}
	return l2MsgList, nil
}

// GetLayer2LatestWatchedHeight returns the latest height stored in the table
func (m *L2Message) GetLayer2LatestWatchedHeight() (int64, error) {
	// @note This is not strictly correct, since we may not have messages in some blocks.
	// But it is only called at startup, so some redundancy is acceptable.
	result := m.db.Model(&L2Message{}).Select("COALESCE(MAX(height), -1)").Row()
	if result.Err() != nil {
		return -1, result.Err()
	}

	var maxNumber int64
	if err := result.Scan(&maxNumber); err != nil {
		return 0, err
	}
	return maxNumber, nil
}

// GetL2MessageByNonce fetches a message by nonce
// for unit test
func (m *L2Message) GetL2MessageByNonce(nonce uint64) (*L2Message, error) {
	var msg L2Message
	err := m.db.Where("nonce", nonce).First(&msg).Error
	if err != nil {
		return nil, err
	}
	return &msg, nil
}

// SaveL2Messages batch-saves a list of layer2 messages
func (m *L2Message) SaveL2Messages(ctx context.Context, messages []L2Message) error {
	if len(messages) == 0 {
		return nil
	}

	err := m.db.WithContext(ctx).Create(&messages).Error
	if err != nil {
		nonces := make([]uint64, 0, len(messages))
		heights := make([]uint64, 0, len(messages))
		for _, msg := range messages {
			nonces = append(nonces, msg.Nonce)
			heights = append(heights, msg.Height)
		}
		log.Error("failed to insert layer2Messages", "nonces", nonces, "heights", heights, "err", err)
	}
	return err
}

// UpdateLayer2Status updates the message status, given the message hash
func (m *L2Message) UpdateLayer2Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
	err := m.db.Model(&L2Message{}).WithContext(ctx).Where("msg_hash", msgHash).Update("status", int(status)).Error
	if err != nil {
		return err
	}
	return nil
}

// UpdateLayer2StatusAndLayer1Hash updates the message status and layer1 transaction hash, given the message hash
func (m *L2Message) UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer1Hash string) error {
	updateFields := map[string]interface{}{
		"status":      int(status),
		"layer1_hash": layer1Hash,
	}
	err := m.db.Model(&L2Message{}).WithContext(ctx).Where("msg_hash", msgHash).Updates(updateFields).Error
	if err != nil {
		return err
	}
	return nil
}
@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	// total number of tables.
	assert.Equal(t, 6, int(cur))
	assert.Equal(t, 5, int(cur))
}

func testMigrate(t *testing.T) {
@@ -30,4 +30,4 @@ on l1_block (number);
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_block;
-- +goose StatementEnd
-- +goose StatementEnd
@@ -1,37 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table l2_message
(
    nonce       BIGINT  NOT NULL,
    msg_hash    VARCHAR NOT NULL,
    height      BIGINT  NOT NULL,
    sender      VARCHAR NOT NULL,
    target      VARCHAR NOT NULL,
    value       VARCHAR NOT NULL,
    calldata    TEXT    NOT NULL,
    layer2_hash VARCHAR NOT NULL,
    layer1_hash VARCHAR DEFAULT NULL,
    proof       TEXT    DEFAULT NULL,
    status      INTEGER DEFAULT 1,
    created_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l2_message_hash_uindex
    on l2_message (msg_hash);

create unique index l2_message_nonce_uindex
    on l2_message (nonce);

create index l2_message_height_index
    on l2_message (height);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l2_message;
-- +goose StatementEnd
@@ -188,20 +188,22 @@ func TestBatchOrm(t *testing.T) {
	assert.NoError(t, err)
	assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())

	hash1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
	batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
	assert.NoError(t, err)
	hash1 := batch1.Hash

	batch1, err := batchOrm.GetBatchByIndex(context.Background(), 0)
	batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
	assert.NoError(t, err)
	batchHeader1, err := bridgeTypes.DecodeBatchHeader(batch1.BatchHeader)
	assert.NoError(t, err)
	batchHash1 := batchHeader1.Hash().Hex()
	assert.Equal(t, hash1, batchHash1)

	hash2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
	batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
	assert.NoError(t, err)
	hash2 := batch2.Hash

	batch2, err := batchOrm.GetBatchByIndex(context.Background(), 1)
	batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
	assert.NoError(t, err)
	batchHeader2, err := bridgeTypes.DecodeBatchHeader(batch2.BatchHeader)
	assert.NoError(t, err)

@@ -36,7 +36,7 @@ func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64
	// the next queue index that we need to process
	nextIndex := totalL1MessagePoppedBefore

	for _, chunk := range chunks {
	for chunkID, chunk := range chunks {
		// build data hash
		totalL1MessagePoppedBeforeChunk := nextIndex
		chunkHash, err := chunk.Hash(totalL1MessagePoppedBeforeChunk)
@@ -46,7 +46,7 @@ func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64
		dataBytes = append(dataBytes, chunkHash.Bytes()...)

		// build skip bitmap
		for _, block := range chunk.Blocks {
		for blockID, block := range chunk.Blocks {
			for _, tx := range block.Transactions {
				if tx.Type != types.L1MessageTxType {
					continue
@@ -54,7 +54,7 @@ func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64
				currentIndex := tx.Nonce

				if currentIndex < nextIndex {
					return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d", nextIndex, currentIndex)
					return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d. Batch index: %d, chunk index in batch: %d, block index in chunk: %d, block hash: %v, transaction hash: %v", nextIndex, currentIndex, batchIndex, chunkID, blockID, block.Header.Hash(), tx.TxHash)
				}

				// mark skipped messages
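
The check above relies on the queue-index convention from `txsToTxsData`: for L1 messages, `tx.Nonce` carries the queue index, which may only move forward. A toy illustration of the rule, assuming the usual skip-bitmap semantics implied by the surrounding comments and made-up index values:

```go
// Toy illustration of the monotonicity rule enforced in NewBatchHeader:
// queue indices may jump forward (skipped ones go into the bitmap) but
// must never go backwards.
nextIndex := uint64(5)
for _, currentIndex := range []uint64{5, 8, 9} {
	if currentIndex < nextIndex {
		// would fail with "unexpected batch payload, expected queue index: ..., got: ..."
		break
	}
	// indices in [nextIndex, currentIndex) would be marked skipped here
	nextIndex = currentIndex + 1
}
```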
@@ -68,7 +68,7 @@ func testImportL2GasPrice(t *testing.T) {
	prepareContracts(t)

	l2Cfg := bridgeApp.Config.L2Config
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
	assert.NoError(t, err)

	// add fake chunk
@@ -29,7 +29,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {

	// Create L2Relayer
	l2Cfg := bridgeApp.Config.L2Config
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
	assert.NoError(t, err)

	// Create L1Watcher
@@ -5,8 +5,6 @@ import (
	"database/sql"
	"fmt"
	"time"

	"scroll-tech/common/types/message"
)

// L1BlockStatus represents the current l1 block processing status
@@ -161,11 +159,18 @@ type RollerStatus struct {

// SessionInfo is the assigned-roller info of a proof generation session
type SessionInfo struct {
	ID             string                   `json:"id"`
	Rollers        map[string]*RollerStatus `json:"rollers"`
	StartTimestamp int64                    `json:"start_timestamp"`
	Attempts       uint8                    `json:"attempts,omitempty"`
	ProveType      message.ProveType        `json:"prove_type,omitempty"`
	ID              int        `json:"id" db:"id"`
	TaskID          string     `json:"task_id" db:"task_id"`
	RollerPublicKey string     `json:"roller_public_key" db:"roller_public_key"`
	ProveType       int16      `json:"prove_type" db:"prove_type"`
	RollerName      string     `json:"roller_name" db:"roller_name"`
	ProvingStatus   int16      `json:"proving_status" db:"proving_status"`
	FailureType     int16      `json:"failure_type" db:"failure_type"`
	Reward          uint64     `json:"reward" db:"reward"`
	Proof           []byte     `json:"proof" db:"proof"`
	CreatedAt       *time.Time `json:"created_at" db:"created_at"`
	UpdatedAt       *time.Time `json:"updated_at" db:"updated_at"`
	DeletedAt       *time.Time `json:"deleted_at" db:"deleted_at"`
}
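
SessionInfo is now one persisted row per roller assignment instead of an in-memory aggregate keyed by task. A minimal sketch of constructing such a row; the persistence call is a hypothetical name, only the field names above come from the diff:

```go
// Hedged sketch: one SessionInfo row per (task, roller) assignment.
now := time.Now()
row := &types.SessionInfo{
	TaskID:          taskID,
	RollerPublicKey: pk,
	RollerName:      name,
	ProveType:       int16(message.BasicProve),
	ProvingStatus:   int16(status),
	CreatedAt:       &now,
}
// hypothetical ORM call; the real method name is not shown in this hunk
if err := m.orm.InsertSessionInfo(row); err != nil {
	log.Error("failed to persist session info", "task id", taskID, "err", err)
}
```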

// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
@@ -16,6 +16,10 @@ var (
		&MetricsAddr,
		&MetricsPort,
	}
	// RollupRelayerFlags contains flags only used in the rollup-relayer
	RollupRelayerFlags = []cli.Flag{
		&ImportGenesisFlag,
	}
	// ConfigFileFlag loads a json-type config file.
	ConfigFileFlag = cli.StringFlag{
		Name: "config",
@@ -66,4 +70,10 @@ var (
		Category: "METRICS",
		Value:    6060,
	}
	// ImportGenesisFlag imports the genesis batch during startup
	ImportGenesisFlag = cli.BoolFlag{
		Name:  "import-genesis",
		Usage: "Import genesis batch into L1 contract during startup",
		Value: false,
	}
)
@@ -5,7 +5,7 @@ import (
	"runtime/debug"
)

var tag = "v4.0.1"
var tag = "v4.0.7-fix"

var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
@@ -71,7 +71,7 @@ Reference testnet [run_deploy_contracts.sh](https://github.com/scroll-tech/testn

## Deployment using Foundry

Note: The Foundry scripts take parameters like `CHAIN_ID_L2` and `L1_ZK_ROLLUP_PROXY_ADDR` as environment variables.
Note: The Foundry scripts take parameters like `CHAIN_ID_L2` and `L1_SCROLL_CHAIN_PROXY_ADDR` as environment variables.

```bash
# allexport
@@ -101,4 +101,4 @@ $ source .env.l2_addresses
# Initialize contracts
$ forge script scripts/foundry/InitializeL1BridgeContracts.s.sol:InitializeL1BridgeContracts --rpc-url $SCROLL_L1_RPC --broadcast
$ forge script scripts/foundry/InitializeL2BridgeContracts.s.sol:InitializeL2BridgeContracts --rpc-url $SCROLL_L2_RPC --broadcast
```
```
@@ -11,7 +11,7 @@ async function main() {

  const ScrollChainCommitmentVerifier = await ethers.getContractFactory("ScrollChainCommitmentVerifier", deployer);

  const L1ScrollChainAddress = process.env.L1_ZK_ROLLUP_PROXY_ADDR!;
  const L1ScrollChainAddress = process.env.L1_SCROLL_CHAIN_PROXY_ADDR!;
  let PoseidonUnit2Address = process.env.POSEIDON_UNIT2_ADDR;

  if (!PoseidonUnit2Address) {
@@ -7,18 +7,18 @@ import {console} from "forge-std/console.sol";
import {ProxyAdmin} from "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol";
import {TransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";

import {EnforcedTxGateway} from "../../src/L1/gateways/EnforcedTxGateway.sol";
import {L1CustomERC20Gateway} from "../../src/L1/gateways/L1CustomERC20Gateway.sol";
import {L1ERC1155Gateway} from "../../src/L1/gateways/L1ERC1155Gateway.sol";
import {L1ERC721Gateway} from "../../src/L1/gateways/L1ERC721Gateway.sol";
import {L1ETHGateway} from "../../src/L1/gateways/L1ETHGateway.sol";
import {L1GatewayRouter} from "../../src/L1/gateways/L1GatewayRouter.sol";
import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
import {L1ScrollMessenger} from "../../src/L1/L1ScrollMessenger.sol";
import {L1StandardERC20Gateway} from "../../src/L1/gateways/L1StandardERC20Gateway.sol";
import {L1WETHGateway} from "../../src/L1/gateways/L1WETHGateway.sol";
import {EnforcedTxGateway} from "../../src/L1/gateways/EnforcedTxGateway.sol";
import {RollupVerifier} from "../../src/libraries/verifier/RollupVerifier.sol";
import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
import {L2GasPriceOracle} from "../../src/L1/rollup/L2GasPriceOracle.sol";
import {MultipleVersionRollupVerifier} from "../../src/L1/rollup/MultipleVersionRollupVerifier.sol";
import {ScrollChain} from "../../src/L1/rollup/ScrollChain.sol";
import {Whitelist} from "../../src/L2/predeploys/Whitelist.sol";

@@ -29,14 +29,14 @@ contract DeployL1BridgeContracts is Script {

    address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
    address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
    address L1_ZKEVM_VERIFIER_ADDR = vm.envAddress("L1_ZKEVM_VERIFIER_ADDR");

    ProxyAdmin proxyAdmin;

    function run() external {
        vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);

        // note: the RollupVerifier library is deployed implicitly

        deployMultipleVersionRollupVerifier();
        deployProxyAdmin();
        deployL1Whitelist();
        deployL1MessageQueue();
@@ -55,6 +55,12 @@ contract DeployL1BridgeContracts is Script {
        vm.stopBroadcast();
    }

    function deployMultipleVersionRollupVerifier() internal {
        MultipleVersionRollupVerifier rollupVerifier = new MultipleVersionRollupVerifier(L1_ZKEVM_VERIFIER_ADDR);

        logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));
    }

    function deployProxyAdmin() internal {
        proxyAdmin = new ProxyAdmin();

@@ -76,8 +82,8 @@ contract DeployL1BridgeContracts is Script {
            new bytes(0)
        );

        logAddress("L1_ZK_ROLLUP_IMPLEMENTATION_ADDR", address(impl));
        logAddress("L1_ZK_ROLLUP_PROXY_ADDR", address(proxy));
        logAddress("L1_SCROLL_CHAIN_IMPLEMENTATION_ADDR", address(impl));
        logAddress("L1_SCROLL_CHAIN_PROXY_ADDR", address(proxy));
    }

    function deployL1MessageQueue() internal {
@@ -170,8 +176,8 @@ contract DeployL1BridgeContracts is Script {
            new bytes(0)
        );

        logAddress("ENFORCED_TX_GATEWAY_IMPLEMENTATION_ADDR", address(impl));
        logAddress("ENFORCED_TX_GATEWAY_PROXY_ADDR", address(proxy));
        logAddress("L1_ENFORCED_TX_GATEWAY_IMPLEMENTATION_ADDR", address(impl));
        logAddress("L1_ENFORCED_TX_GATEWAY_PROXY_ADDR", address(proxy));
    }

    function deployL1CustomERC20Gateway() internal {
@@ -1,36 +1,36 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.10;

import { Script } from "forge-std/Script.sol";
import { console } from "forge-std/console.sol";
import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";

import { ScrollChainCommitmentVerifier } from "../../src/L1/rollup/ScrollChainCommitmentVerifier.sol";
import {ScrollChainCommitmentVerifier} from "../../src/L1/rollup/ScrollChainCommitmentVerifier.sol";

contract DeployScrollChainCommitmentVerifier is Script {
    uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
    uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");

    address L1_ZK_ROLLUP_PROXY_ADDR = vm.envAddress("L1_ZK_ROLLUP_PROXY_ADDR");
    address L1_SCROLL_CHAIN_PROXY_ADDR = vm.envAddress("L1_SCROLL_CHAIN_PROXY_ADDR");

    address POSEIDON_UNIT2_ADDR = vm.envAddress("POSEIDON_UNIT2_ADDR");
    address POSEIDON_UNIT2_ADDR = vm.envAddress("POSEIDON_UNIT2_ADDR");

    function run() external {
        vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);
    function run() external {
        vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);

        deployScrollChainCommitmentVerifier();
        deployScrollChainCommitmentVerifier();

        vm.stopBroadcast();
    }
        vm.stopBroadcast();
    }

    function deployScrollChainCommitmentVerifier() internal {
        ScrollChainCommitmentVerifier verifier = new ScrollChainCommitmentVerifier(
            POSEIDON_UNIT2_ADDR,
            L1_ZK_ROLLUP_PROXY_ADDR
        );
    function deployScrollChainCommitmentVerifier() internal {
        ScrollChainCommitmentVerifier verifier = new ScrollChainCommitmentVerifier(
            POSEIDON_UNIT2_ADDR,
            L1_SCROLL_CHAIN_PROXY_ADDR
        );

        logAddress("L1_SCROLL_CHAIN_COMMITMENT_VERIFIER", address(verifier));
    }
        logAddress("L1_SCROLL_CHAIN_COMMITMENT_VERIFIER", address(verifier));
    }

    function logAddress(string memory name, address addr) internal view {
        console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
    }
    function logAddress(string memory name, address addr) internal view {
        console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
    }
}
@@ -20,14 +20,13 @@ contract InitializeL1BridgeContracts is Script {
    uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");

    uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");
    uint256 MAX_L2_TX_IN_CHUNK = vm.envOr("MAX_L2_TX_IN_CHUNK", uint256(44));
    uint256 MAX_L2_TX_IN_CHUNK = vm.envUint("MAX_L2_TX_IN_CHUNK");
    address L1_ROLLUP_OPERATOR_ADDR = vm.envAddress("L1_ROLLUP_OPERATOR_ADDR");

    address L1_FEE_VAULT_ADDR = vm.envAddress("L1_FEE_VAULT_ADDR");

    address L1_WHITELIST_ADDR = vm.envAddress("L1_WHITELIST_ADDR");
    address L1_ZK_ROLLUP_PROXY_ADDR = vm.envAddress("L1_ZK_ROLLUP_PROXY_ADDR");
    address L1_ROLLUP_VERIFIER_ADDR = vm.envAddress("L1_ROLLUP_VERIFIER_ADDR");
    address L1_SCROLL_CHAIN_PROXY_ADDR = vm.envAddress("L1_SCROLL_CHAIN_PROXY_ADDR");
    address L1_MESSAGE_QUEUE_PROXY_ADDR = vm.envAddress("L1_MESSAGE_QUEUE_PROXY_ADDR");
    address L2_GAS_PRICE_ORACLE_PROXY_ADDR = vm.envAddress("L2_GAS_PRICE_ORACLE_PROXY_ADDR");
    address L1_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L1_SCROLL_MESSENGER_PROXY_ADDR");
@@ -38,7 +37,8 @@ contract InitializeL1BridgeContracts is Script {
    address L1_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ETH_GATEWAY_PROXY_ADDR");
    address L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
    address L1_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_WETH_GATEWAY_PROXY_ADDR");
    address ENFORCED_TX_GATEWAY_PROXY_ADDR = vm.envAddress("ENFORCED_TX_GATEWAY_PROXY_ADDR");
    address L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR = vm.envAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR");
    address L1_ENFORCED_TX_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ENFORCED_TX_GATEWAY_PROXY_ADDR");

    address L2_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L2_SCROLL_MESSENGER_PROXY_ADDR");
    address L2_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L2_GATEWAY_ROUTER_PROXY_ADDR");
@@ -55,8 +55,12 @@ contract InitializeL1BridgeContracts is Script {
        vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);

        // initialize ScrollChain
        ScrollChain(L1_ZK_ROLLUP_PROXY_ADDR).initialize(L1_MESSAGE_QUEUE_PROXY_ADDR, L1_ROLLUP_VERIFIER_ADDR, MAX_L2_TX_IN_CHUNK);
        ScrollChain(L1_ZK_ROLLUP_PROXY_ADDR).updateSequencer(L1_ROLLUP_OPERATOR_ADDR, true);
        ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).initialize(
            L1_MESSAGE_QUEUE_PROXY_ADDR,
            L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR,
            MAX_L2_TX_IN_CHUNK
        );
        ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).updateSequencer(L1_ROLLUP_OPERATOR_ADDR, true);

        // initialize L2GasPriceOracle
        L2GasPriceOracle(L2_GAS_PRICE_ORACLE_PROXY_ADDR).initialize(0, 0, 0, 0);
@@ -65,8 +69,8 @@ contract InitializeL1BridgeContracts is Script {
        // initialize L1MessageQueue
        L1MessageQueue(L1_MESSAGE_QUEUE_PROXY_ADDR).initialize(
            L1_SCROLL_MESSENGER_PROXY_ADDR,
            L1_ZK_ROLLUP_PROXY_ADDR,
            ENFORCED_TX_GATEWAY_PROXY_ADDR,
            L1_SCROLL_CHAIN_PROXY_ADDR,
            L1_ENFORCED_TX_GATEWAY_PROXY_ADDR,
            L2_GAS_PRICE_ORACLE_PROXY_ADDR,
            10000000
        );
@@ -75,13 +79,13 @@ contract InitializeL1BridgeContracts is Script {
        L1ScrollMessenger(payable(L1_SCROLL_MESSENGER_PROXY_ADDR)).initialize(
            L2_SCROLL_MESSENGER_PROXY_ADDR,
            L1_FEE_VAULT_ADDR,
            L1_ZK_ROLLUP_PROXY_ADDR,
            L1_SCROLL_CHAIN_PROXY_ADDR,
            L1_MESSAGE_QUEUE_PROXY_ADDR
        );

        // initialize EnforcedTxGateway
        EnforcedTxGateway(payable(ENFORCED_TX_GATEWAY_PROXY_ADDR)).initialize(
            L2_SCROLL_MESSENGER_PROXY_ADDR,
        EnforcedTxGateway(payable(L1_ENFORCED_TX_GATEWAY_PROXY_ADDR)).initialize(
            L1_MESSAGE_QUEUE_PROXY_ADDR,
            L1_FEE_VAULT_ADDR
        );

@@ -15,7 +15,7 @@ async function main() {

  const [deployer] = await ethers.getSigners();

  const rollupAddr = process.env.L1_ZK_ROLLUP_PROXY_ADDR || addressFile.get("ScrollChain.proxy") || "0x";
  const rollupAddr = process.env.L1_SCROLL_CHAIN_PROXY_ADDR || addressFile.get("ScrollChain.proxy") || "0x";
  console.log("Using rollup proxy address:", rollupAddr);

  const ScrollChain = await ethers.getContractAt("ScrollChain", rollupAddr, deployer);
@@ -51,10 +51,12 @@ func (m *Manager) ListRollers() ([]*RollerInfo, error) {
			PublicKey: pk,
		}
		for id, sess := range m.sessions {
			if _, ok := sess.info.Rollers[pk]; ok {
				info.ActiveSessionStartTime = time.Unix(sess.info.StartTimestamp, 0)
				info.ActiveSession = id
				break
			for _, sessionInfo := range sess.sessionInfos {
				if sessionInfo.RollerPublicKey == pk {
					info.ActiveSessionStartTime = *sessionInfo.CreatedAt
					info.ActiveSession = id
					break
				}
			}
		}
		res = append(res, info)
@@ -66,14 +68,14 @@ func (m *Manager) ListRollers() ([]*RollerInfo, error) {
func newSessionInfo(sess *session, status types.ProvingStatus, errMsg string, finished bool) *SessionInfo {
	now := time.Now()
	var nameList []string
	for pk := range sess.info.Rollers {
		nameList = append(nameList, sess.info.Rollers[pk].Name)
	for _, sessionInfo := range sess.sessionInfos {
		nameList = append(nameList, sessionInfo.RollerName)
	}
	info := SessionInfo{
		ID: sess.info.ID,
		ID: sess.taskID,
		Status:          status.String(),
		AssignedRollers: nameList,
		StartTime: time.Unix(sess.info.StartTimestamp, 0),
		StartTime: *sess.sessionInfos[0].CreatedAt,
		Error: errMsg,
	}
	if finished {

File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -57,7 +57,8 @@ type rollerProofStatus struct {
|
||||
|
||||
// Contains all the information on an ongoing proof generation session.
|
||||
type session struct {
|
||||
info *types.SessionInfo
|
||||
taskID string
|
||||
sessionInfos []*types.SessionInfo
|
||||
// finish channel is used to pass the public key of the rollers who finished proving process.
|
||||
finishChan chan rollerProofStatus
|
||||
}
|
||||
@@ -248,24 +249,21 @@ func (m *Manager) restorePrevSessions() {
|
||||
log.Error("failed to recover roller session info from db", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
sessionInfosMaps := make(map[string][]*types.SessionInfo)
|
||||
for _, v := range prevSessions {
|
||||
log.Info("restore roller info for session", "session start time", v.CreatedAt, "session id", v.TaskID, "roller name",
|
||||
v.RollerName, "prove type", v.ProveType, "public key", v.RollerPublicKey, "proof status", v.ProvingStatus)
|
||||
sessionInfosMaps[v.TaskID] = append(sessionInfosMaps[v.TaskID], v)
|
||||
}
|
||||
|
||||
for taskID, sessionInfos := range sessionInfosMaps {
|
||||
sess := &session{
|
||||
info: v,
|
||||
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
|
||||
taskID: taskID,
|
||||
sessionInfos: sessionInfos,
|
||||
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
|
||||
}
|
||||
m.sessions[sess.info.ID] = sess
|
||||
|
||||
log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
|
||||
for _, roller := range sess.info.Rollers {
|
||||
log.Info(
|
||||
"restore roller info for session",
|
||||
"session id", sess.info.ID,
|
||||
"roller name", roller.Name,
|
||||
"prove type", sess.info.ProveType,
|
||||
"public key", roller.PublicKey,
|
||||
"proof status", roller.Status)
|
||||
}
|
||||
|
||||
m.sessions[taskID] = sess
|
||||
go m.CollectProofs(sess)
|
||||
}
|
||||
|
||||
@@ -287,36 +285,40 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
if !ok {
|
||||
return fmt.Errorf("proof generation session for id %v does not existID", msg.ID)
|
||||
}
|
||||
proofTime := time.Since(time.Unix(sess.info.StartTimestamp, 0))
|
||||
|
||||
var tmpSessionInfo *types.SessionInfo
|
||||
for _, si := range sess.sessionInfos {
|
||||
// get the send session info of this proof msg
|
||||
if si.TaskID == msg.ID && si.RollerPublicKey == pk {
|
||||
tmpSessionInfo = si
|
||||
}
|
||||
}
|
||||
|
||||
if tmpSessionInfo == nil {
|
||||
return fmt.Errorf("proof generation session for id %v pk:%s does not existID", msg.ID, pk)
|
||||
}
|
||||
|
||||
proofTime := time.Since(*tmpSessionInfo.CreatedAt)
|
||||
proofTimeSec := uint64(proofTime.Seconds())
|
||||
|
||||
// Ensure this roller is eligible to participate in the session.
|
||||
roller, ok := sess.info.Rollers[pk]
|
||||
if !ok {
|
||||
return fmt.Errorf("roller %s %s (%s) is not eligible to partake in proof session %v", roller.Name, sess.info.ProveType, roller.PublicKey, msg.ID)
|
||||
}
|
||||
if roller.Status == types.RollerProofValid {
|
||||
if types.RollerProveStatus(tmpSessionInfo.ProvingStatus) == types.RollerProofValid {
|
||||
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
|
||||
// TODO: Defend invalid proof resubmissions by one of the following two methods:
|
||||
// (i) slash the roller for each submission of invalid proof
|
||||
// (ii) set the maximum failure retry times
|
||||
log.Warn(
|
||||
"roller has already submitted valid proof in proof session",
|
||||
"roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey,
|
||||
"prove type", sess.info.ProveType,
|
||||
"roller name", tmpSessionInfo.RollerName,
|
||||
"roller pk", tmpSessionInfo.RollerPublicKey,
|
||||
"prove type", tmpSessionInfo.ProveType,
|
||||
"proof id", msg.ID,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
log.Info(
|
||||
"handling zk proof",
|
||||
"proof id", msg.ID,
|
||||
"roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey,
|
||||
"prove type", sess.info.ProveType,
|
||||
"proof time", proofTimeSec,
|
||||
)
|
||||
|
||||
log.Info("handling zk proof", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName, "roller pk",
|
||||
tmpSessionInfo.RollerPublicKey, "prove type", tmpSessionInfo.ProveType, "proof time", proofTimeSec)
|
||||
|
||||
defer func() {
|
||||
// TODO: maybe we should use db tx for the whole process?
|
||||
@@ -344,12 +346,12 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
|
||||
if msg.Status != message.StatusOk {
|
||||
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsGeneratedFailedTimeTimer(roller.PublicKey, proofTime)
|
||||
m.updateMetricRollerProofsGeneratedFailedTimeTimer(tmpSessionInfo.RollerPublicKey, proofTime)
|
||||
log.Info(
|
||||
"proof generated by roller failed",
|
||||
"proof id", msg.ID,
|
||||
"roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey,
|
||||
"roller name", tmpSessionInfo.RollerName,
|
||||
"roller pk", tmpSessionInfo.RollerPublicKey,
|
||||
"prove type", msg.Type,
|
||||
"proof time", proofTimeSec,
|
||||
"error", msg.Error,
|
||||
@@ -383,8 +385,8 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
if verifyErr != nil {
|
||||
// TODO: this is only a temp workaround for testnet, we should return err in real cases
|
||||
success = false
|
||||
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName,
|
||||
"roller pk", tmpSessionInfo.RollerPublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
// TODO: Roller needs to be slashed if proof is invalid.
|
||||
}
|
||||
|
||||
@@ -411,18 +413,32 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
}
|
||||
|
||||
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(roller.PublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec)
|
||||
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(tmpSessionInfo.RollerPublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName,
|
||||
"roller pk", tmpSessionInfo.RollerPublicKey, "prove type", msg.Type, "proof time", proofTimeSec)
|
||||
} else {
|
||||
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsVerifiedFailedTimeTimer(roller.PublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
m.updateMetricRollerProofsVerifiedFailedTimeTimer(tmpSessionInfo.RollerPublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName,
|
||||
"roller pk", tmpSessionInfo.RollerPublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkAttempts use the count of session info to check the attempts
|
||||
func (m *Manager) checkAttemptsExceeded(hash string) bool {
|
||||
sessionInfos, err := m.orm.GetSessionInfosByHashes([]string{hash})
|
||||
if err != nil {
|
||||
log.Error("get session info error", "hash id", hash, "error", err)
|
||||
return true
|
||||
}
|
||||
|
||||
if len(sessionInfos) >= int(m.cfg.SessionAttempts) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CollectProofs collects proofs corresponding to a proof generation session.
|
||||
func (m *Manager) CollectProofs(sess *session) {
|
||||
coordinatorSessionsActiveNumberGauge.Inc(1)
|
||||
@@ -432,48 +448,47 @@ func (m *Manager) CollectProofs(sess *session) {
 	select {
 	//Execute after timeout, set in config.json. Consider all rollers failed.
 	case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
 		// Check if session can be replayed
-		if sess.info.Attempts < m.cfg.SessionAttempts {
+		if !m.checkAttemptsExceeded(sess.taskID) {
 			var success bool
-			if sess.info.ProveType == message.AggregatorProve {
+			if message.ProveType(sess.sessionInfos[0].ProveType) == message.AggregatorProve {
 				success = m.StartAggProofGenerationSession(nil, sess)
-			} else if sess.info.ProveType == message.BasicProve {
+			} else if message.ProveType(sess.sessionInfos[0].ProveType) == message.BasicProve {
 				success = m.StartBasicProofGenerationSession(nil, sess)
 			}
 			if success {
 				m.mu.Lock()
-				for pk := range sess.info.Rollers {
-					m.freeTaskIDForRoller(pk, sess.info.ID)
+				for _, v := range sess.sessionInfos {
+					m.freeTaskIDForRoller(v.RollerPublicKey, v.TaskID)
 				}
 				m.mu.Unlock()
-				log.Info("Retrying session", "session id:", sess.info.ID)
+				log.Info("Retrying session", "session id:", sess.taskID)
 				return
 			}
 		}
 		// record failed session.
 		errMsg := "proof generation session ended without receiving any valid proofs"
 		m.addFailedSession(sess, errMsg)
-		log.Warn(errMsg, "session id", sess.info.ID)
+		log.Warn(errMsg, "session id", sess.taskID)
 		// Set status as skipped.
 		// Note that this is only a workaround for testnet here.
 		// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
 		// so as to re-distribute the task in the future
-		if sess.info.ProveType == message.BasicProve {
-			if err := m.orm.UpdateProvingStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
-				log.Error("fail to reset basic task_status as Unassigned", "id", sess.info.ID, "err", err)
+		if message.ProveType(sess.sessionInfos[0].ProveType) == message.BasicProve {
+			if err := m.orm.UpdateProvingStatus(sess.taskID, types.ProvingTaskFailed); err != nil {
+				log.Error("fail to reset basic task_status as Unassigned", "id", sess.taskID, "err", err)
 			}
 		}
-		if sess.info.ProveType == message.AggregatorProve {
-			if err := m.orm.UpdateAggTaskStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
-				log.Error("fail to reset aggregator task_status as Unassigned", "id", sess.info.ID, "err", err)
+		if message.ProveType(sess.sessionInfos[0].ProveType) == message.AggregatorProve {
+			if err := m.orm.UpdateAggTaskStatus(sess.taskID, types.ProvingTaskFailed); err != nil {
+				log.Error("fail to reset aggregator task_status as Unassigned", "id", sess.taskID, "err", err)
 			}
 		}

 		m.mu.Lock()
-		for pk := range sess.info.Rollers {
-			m.freeTaskIDForRoller(pk, sess.info.ID)
+		for _, v := range sess.sessionInfos {
+			m.freeTaskIDForRoller(v.RollerPublicKey, v.TaskID)
 		}
-		delete(m.sessions, sess.info.ID)
+		delete(m.sessions, sess.taskID)
 		m.mu.Unlock()
 		coordinatorSessionsTimeoutTotalCounter.Inc(1)
 		return
@@ -481,7 +496,12 @@ func (m *Manager) CollectProofs(sess *session) {
 	//Execute after one of the roller finishes sending proof, return early if all rollers had sent results.
 	case ret := <-sess.finishChan:
 		m.mu.Lock()
-		sess.info.Rollers[ret.pk].Status = ret.status
+		for idx := range sess.sessionInfos {
+			if sess.sessionInfos[idx].RollerPublicKey == ret.pk {
+				sess.sessionInfos[idx].ProvingStatus = int16(ret.status)
+			}
+		}

 		if sess.isSessionFailed() {
 			if ret.typ == message.BasicProve {
 				if err := m.orm.UpdateProvingStatus(ret.id, types.ProvingTaskFailed); err != nil {
@@ -493,12 +513,13 @@ func (m *Manager) CollectProofs(sess *session) {
 					log.Error("failed to update aggregator proving_status as failed", "msg.ID", ret.id, "error", err)
 				}
 			}

 			coordinatorSessionsFailedTotalCounter.Inc(1)
 		}
-		if err := m.orm.SetSessionInfo(sess.info); err != nil {
+
+		if err := m.orm.UpdateSessionInfoProvingStatus(m.ctx, ret.typ, ret.id, ret.pk, ret.status); err != nil {
 			log.Error("db set session info fail", "pk", ret.pk, "error", err)
 		}

 		//Check if all rollers have finished their tasks, and rollers with valid results are indexed by public key.
 		finished, validRollers := sess.isRollersFinished()
@@ -508,11 +529,10 @@ func (m *Manager) CollectProofs(sess *session) {
 			randIndex := rand.Int63n(int64(len(validRollers)))
 			_ = validRollers[randIndex]
 			// TODO: reward winner

-			for pk := range sess.info.Rollers {
-				m.freeTaskIDForRoller(pk, sess.info.ID)
+			for _, sessionInfo := range sess.sessionInfos {
+				m.freeTaskIDForRoller(sessionInfo.RollerPublicKey, sessionInfo.TaskID)
+				delete(m.sessions, sessionInfo.TaskID)
 			}
-			delete(m.sessions, sess.info.ID)
 			m.mu.Unlock()

 			coordinatorSessionsSuccessTotalCounter.Inc(1)
@@ -528,14 +548,16 @@ func (m *Manager) CollectProofs(sess *session) {
 // validRollers also records the public keys of rollers who have finished their tasks correctly as index.
 func (s *session) isRollersFinished() (bool, []string) {
 	var validRollers []string
-	for pk, roller := range s.info.Rollers {
-		if roller.Status == types.RollerProofValid {
-			validRollers = append(validRollers, pk)
+	for _, sessionInfo := range s.sessionInfos {
+		if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofValid {
+			validRollers = append(validRollers, sessionInfo.RollerPublicKey)
 			continue
 		}
-		if roller.Status == types.RollerProofInvalid {
+
+		if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofInvalid {
 			continue
 		}

 		// Some rollers are still proving.
 		return false, nil
 	}
@@ -543,8 +565,8 @@ func (s *session) isRollersFinished() (bool, []string) {
 }

 func (s *session) isSessionFailed() bool {
-	for _, roller := range s.info.Rollers {
-		if roller.Status != types.RollerProofInvalid {
+	for _, sessionInfo := range s.sessionInfos {
+		if types.RollerProveStatus(sessionInfo.ProvingStatus) != types.RollerProofInvalid {
 			return false
 		}
 	}
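Read together, the two helpers encode the session's terminal states: a session counts as failed only once every roller has returned an invalid proof, a single valid proof makes the session finishable with at least one winner candidate, and any roller still proving keeps it open.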
@@ -573,7 +595,7 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
 	if task != nil {
 		taskID = task.Hash
 	} else {
-		taskID = prevSession.info.ID
+		taskID = prevSession.taskID
 	}
 	if m.GetNumberOfIdleRollers(message.BasicProve) == 0 {
 		log.Warn("no idle basic roller when starting proof generation session", "id", taskID)
@@ -612,7 +634,7 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
 	}

 	// Dispatch task to basic rollers.
-	rollers := make(map[string]*types.RollerStatus)
+	var sessionInfos []*types.SessionInfo
 	for i := 0; i < int(m.cfg.RollersPerSession); i++ {
 		roller := m.selectRoller(message.BasicProve)
 		if roller == nil {
@@ -626,10 +648,27 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
 			continue
 		}
 		m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
-		rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
+		now := time.Now()
+		tmpSessionInfo := types.SessionInfo{
+			TaskID:          taskID,
+			RollerPublicKey: roller.PublicKey,
+			ProveType:       int16(message.BasicProve),
+			RollerName:      roller.Name,
+			CreatedAt:       &now,
+			ProvingStatus:   int16(types.RollerAssigned),
+		}
+		// Store session info.
+		if err = m.orm.SetSessionInfo(&tmpSessionInfo); err != nil {
+			log.Error("db set session info fail", "session id", taskID, "error", err)
+			return false
+		}
+		sessionInfos = append(sessionInfos, &tmpSessionInfo)
+		log.Info("assigned proof to roller", "session id", taskID, "session type", message.BasicProve, "roller name", roller.Name,
+			"roller pk", roller.PublicKey, "proof status", tmpSessionInfo.ProvingStatus)
 	}
 	// No roller assigned.
-	if len(rollers) == 0 {
+	if len(sessionInfos) == 0 {
 		log.Error("no roller assigned", "id", taskID, "number of idle basic rollers", m.GetNumberOfIdleRollers(message.BasicProve))
 		return false
 	}
@@ -642,33 +681,9 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS

 	// Create a proof generation session.
 	sess := &session{
-		info: &types.SessionInfo{
-			ID:             taskID,
-			Rollers:        rollers,
-			ProveType:      message.BasicProve,
-			StartTimestamp: time.Now().Unix(),
-			Attempts:       1,
-		},
-		finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
-	}
-	if prevSession != nil {
-		sess.info.Attempts += prevSession.info.Attempts
-	}
-
-	for _, roller := range sess.info.Rollers {
-		log.Info(
-			"assigned proof to roller",
-			"session id", sess.info.ID,
-			"session type", sess.info.ProveType,
-			"roller name", roller.Name,
-			"roller pk", roller.PublicKey,
-			"proof status", roller.Status)
-	}
-
-	// Store session info.
-	if err = m.orm.SetSessionInfo(sess.info); err != nil {
-		log.Error("db set session info fail", "session id", sess.info.ID, "error", err)
-		return false
+		taskID:       taskID,
+		sessionInfos: sessionInfos,
+		finishChan:   make(chan rollerProofStatus, proofAndPkBufferSize),
 	}

 	m.mu.Lock()
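Taken together with CollectProofs above, the constructor implies the coordinator's unexported session struct has been slimmed down to roughly the following shape (a sketch: only the three fields visible in this diff are listed, the real struct may carry more):

    // Inferred from the diff; field names are exactly those used above.
    type session struct {
        taskID       string                 // replaces the old info.ID
        sessionInfos []*types.SessionInfo   // one entry per assigned roller, replaces info.Rollers
        finishChan   chan rollerProofStatus // unchanged
    }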
@@ -685,7 +700,7 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
 	if task != nil {
 		taskID = task.ID
 	} else {
-		taskID = prevSession.info.ID
+		taskID = prevSession.taskID
 	}
 	if m.GetNumberOfIdleRollers(message.AggregatorProve) == 0 {
 		log.Warn("no idle common roller when starting proof generation session", "id", taskID)
@@ -715,7 +730,7 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
 	}

 	// Dispatch task to aggregator rollers.
-	rollers := make(map[string]*types.RollerStatus)
+	var sessionInfos []*types.SessionInfo
 	for i := 0; i < int(m.cfg.RollersPerSession); i++ {
 		roller := m.selectRoller(message.AggregatorProve)
 		if roller == nil {
@@ -732,11 +747,29 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
 			log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
 			continue
 		}

+		now := time.Now()
+		tmpSessionInfo := types.SessionInfo{
+			TaskID:          taskID,
+			RollerPublicKey: roller.PublicKey,
+			ProveType:       int16(message.AggregatorProve),
+			RollerName:      roller.Name,
+			CreatedAt:       &now,
+			ProvingStatus:   int16(types.RollerAssigned),
+		}
+		// Store session info.
+		if err = m.orm.SetSessionInfo(&tmpSessionInfo); err != nil {
+			log.Error("db set session info fail", "session id", taskID, "error", err)
+			return false
+		}
+
 		m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
-		rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
+		sessionInfos = append(sessionInfos, &tmpSessionInfo)
+		log.Info("assigned proof to roller", "session id", taskID, "session type", message.AggregatorProve, "roller name", roller.Name,
+			"roller pk", roller.PublicKey, "proof status", tmpSessionInfo.ProvingStatus)
 	}
 	// No roller assigned.
-	if len(rollers) == 0 {
+	if len(sessionInfos) == 0 {
 		log.Error("no roller assigned", "id", taskID, "number of idle aggregator rollers", m.GetNumberOfIdleRollers(message.AggregatorProve))
 		return false
 	}
@@ -749,33 +782,9 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio

 	// Create a proof generation session.
 	sess := &session{
-		info: &types.SessionInfo{
-			ID:             taskID,
-			Rollers:        rollers,
-			ProveType:      message.AggregatorProve,
-			StartTimestamp: time.Now().Unix(),
-			Attempts:       1,
-		},
-		finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
-	}
-	if prevSession != nil {
-		sess.info.Attempts += prevSession.info.Attempts
-	}
-
-	for _, roller := range sess.info.Rollers {
-		log.Info(
-			"assigned proof to roller",
-			"session id", sess.info.ID,
-			"session type", sess.info.ProveType,
-			"roller name", roller.Name,
-			"roller pk", roller.PublicKey,
-			"proof status", roller.Status)
-	}
-
-	// Store session info.
-	if err = m.orm.SetSessionInfo(sess.info); err != nil {
-		log.Error("db set session info fail", "session id", sess.info.ID, "error", err)
-		return false
+		taskID:       taskID,
+		sessionInfos: sessionInfos,
+		finishChan:   make(chan rollerProofStatus, proofAndPkBufferSize),
 	}

 	m.mu.Lock()
@@ -789,7 +798,7 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
 func (m *Manager) addFailedSession(sess *session, errMsg string) {
 	m.mu.Lock()
 	defer m.mu.Unlock()
-	m.failedSessionInfos[sess.info.ID] = newSessionInfo(sess, types.ProvingTaskFailed, errMsg, true)
+	m.failedSessionInfos[sess.taskID] = newSessionInfo(sess, types.ProvingTaskFailed, errMsg, true)
 }

 // VerifyToken verifies pubkey for token and expiration time

@@ -53,8 +53,8 @@ func (m *Manager) reloadRollerAssignedTasks(pubkey string) *cmap.ConcurrentMap {
 	defer m.mu.RUnlock()
 	taskIDs := cmap.New()
 	for id, sess := range m.sessions {
-		for pk, roller := range sess.info.Rollers {
-			if pk == pubkey && roller.Status == types.RollerAssigned {
+		for _, sessionInfo := range sess.sessionInfos {
+			if sessionInfo.RollerPublicKey == pubkey && sessionInfo.ProvingStatus == int16(types.RollerAssigned) {
 				taskIDs.Set(id, struct{}{})
 			}
 		}

@@ -4,6 +4,7 @@ package verifier_test

 import (
 	"encoding/json"
+	"flag"
 	"io"
 	"os"
 	"testing"
@@ -16,23 +17,23 @@ import (
 	"github.com/stretchr/testify/assert"
 )

-const (
-	paramsPath = "../assets/test_params"
-	aggVkPath  = "../assets/agg_vk"
-	proofPath  = "../assets/agg_proof"
+var (
+	paramsPath = flag.String("params", "/assets/test_params", "params dir")
+	aggVkPath  = flag.String("vk", "/assets/agg_vk", "aggregation proof verification key path")
+	proofPath  = flag.String("proof", "/assets/agg_proof", "aggregation proof path")
 )

 func TestFFI(t *testing.T) {
 	as := assert.New(t)
 	cfg := &config.VerifierConfig{
 		MockMode:   false,
-		ParamsPath: paramsPath,
-		AggVkPath:  aggVkPath,
+		ParamsPath: *paramsPath,
+		AggVkPath:  *aggVkPath,
 	}
 	v, err := verifier.NewVerifier(cfg)
 	as.NoError(err)

-	f, err := os.Open(proofPath)
+	f, err := os.Open(*proofPath)
 	as.NoError(err)
 	byt, err := io.ReadAll(f)
 	as.NoError(err)

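Promoting the fixture locations from constants to flags means the FFI test can be pointed at arbitrary assets at run time; flags placed after -args are passed through to the test binary's flag set, so an invocation would look something like go test -run TestFFI ./verifier -args -params=./assets/test_params -vk=./assets/agg_vk -proof=./assets/agg_proof (paths here are illustrative).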
@@ -3,12 +3,21 @@

 create table session_info
 (
-    hash         VARCHAR NOT NULL,
-    rollers_info BYTEA   NOT NULL
-);
+    id                BIGSERIAL PRIMARY KEY,
+    task_id           VARCHAR NOT NULL,
+    roller_public_key VARCHAR NOT NULL,
+    prove_type        SMALLINT DEFAULT 0,
+    roller_name       VARCHAR NOT NULL,
+    proving_status    SMALLINT DEFAULT 1,
+    failure_type      SMALLINT DEFAULT 0,
+    reward            BIGINT DEFAULT 0,
+    proof             BYTEA DEFAULT NULL,
+    created_at        TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    updated_at        TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    deleted_at        TIMESTAMP(0) DEFAULT NULL,
+    CONSTRAINT uk_session_unique UNIQUE (task_id, roller_public_key)
+);

-create unique index session_info_hash_uindex
-    on session_info (hash);

 -- +goose StatementEnd
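The migration replaces the single JSON-blob row per session with one row per (task, roller) pair; the UNIQUE (task_id, roller_public_key) constraint is what makes the ON CONFLICT upsert in SetSessionInfo below well-defined. For orientation, the types.SessionInfo struct used throughout the Go code above would map onto this schema roughly as follows; this is a sketch inferred from the diff (field names and types follow their usage, while the db tags, and whether updated_at/deleted_at are mapped at all, are assumptions):

    // Sketch of types.SessionInfo as implied by the new schema; the db tags
    // are assumed from the sqlx StructScan usage in the ORM code below.
    type SessionInfo struct {
        ID              int64      `db:"id"`
        TaskID          string     `db:"task_id"`
        RollerPublicKey string     `db:"roller_public_key"`
        ProveType       int16      `db:"prove_type"`
        RollerName      string     `db:"roller_name"`
        ProvingStatus   int16      `db:"proving_status"`
        FailureType     int16      `db:"failure_type"`
        Reward          int64      `db:"reward"`
        Proof           []byte     `db:"proof"`
        CreatedAt       *time.Time `db:"created_at"`
    }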

@@ -44,6 +44,7 @@ type BlockTraceOrm interface {

 type SessionInfoOrm interface {
 	GetSessionInfosByHashes(hashes []string) ([]*types.SessionInfo, error)
 	SetSessionInfo(rollersInfo *types.SessionInfo) error
+	UpdateSessionInfoProvingStatus(ctx context.Context, proveType message.ProveType, taskID string, pk string, status types.RollerProveStatus) error
 }

 // AggTaskOrm is aggregator task

@@ -1,11 +1,12 @@
 package orm

 import (
-	"encoding/json"
+	"context"

 	"github.com/jmoiron/sqlx"

 	"scroll-tech/common/types"
+	"scroll-tech/common/types/message"
 )

 type sessionInfoOrm struct {
@@ -23,7 +24,7 @@ func (o *sessionInfoOrm) GetSessionInfosByHashes(hashes []string) ([]*types.Sess
 	if len(hashes) == 0 {
 		return nil, nil
 	}
-	query, args, err := sqlx.In("SELECT rollers_info FROM session_info WHERE hash IN (?);", hashes)
+	query, args, err := sqlx.In("SELECT * FROM session_info WHERE task_id IN (?);", hashes)
 	if err != nil {
 		return nil, err
 	}
@@ -35,15 +36,11 @@ func (o *sessionInfoOrm) GetSessionInfosByHashes(hashes []string) ([]*types.Sess
 	var sessionInfos []*types.SessionInfo
 	for rows.Next() {
-		var infoBytes []byte
-		if err = rows.Scan(&infoBytes); err != nil {
+		var sessionInfo types.SessionInfo
+		if err = rows.StructScan(&sessionInfo); err != nil {
 			return nil, err
 		}
-		sessionInfo := &types.SessionInfo{}
-		if err = json.Unmarshal(infoBytes, sessionInfo); err != nil {
-			return nil, err
-		}
-		sessionInfos = append(sessionInfos, sessionInfo)
+		sessionInfos = append(sessionInfos, &sessionInfo)
 	}
 	if err = rows.Err(); err != nil {
 		return nil, err
@@ -53,11 +50,16 @@ func (o *sessionInfoOrm) GetSessionInfosByHashes(hashes []string) ([]*types.Sess
 }

 func (o *sessionInfoOrm) SetSessionInfo(rollersInfo *types.SessionInfo) error {
-	infoBytes, err := json.Marshal(rollersInfo)
-	if err != nil {
-		return err
-	}
-	sqlStr := "INSERT INTO session_info (hash, rollers_info) VALUES ($1, $2) ON CONFLICT (hash) DO UPDATE SET rollers_info = EXCLUDED.rollers_info;"
-	_, err = o.db.Exec(sqlStr, rollersInfo.ID, infoBytes)
+	sqlStr := "INSERT INTO session_info (task_id, roller_public_key, prove_type, roller_name, proving_status, failure_type, reward, proof, created_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) ON CONFLICT (task_id, roller_public_key) DO UPDATE SET proving_status = EXCLUDED.proving_status;"
+	_, err := o.db.Exec(sqlStr, rollersInfo.TaskID, rollersInfo.RollerPublicKey, rollersInfo.ProveType, rollersInfo.RollerName,
+		rollersInfo.ProvingStatus, rollersInfo.FailureType, rollersInfo.Reward, rollersInfo.Proof, rollersInfo.CreatedAt)
 	return err
 }
+
+// UpdateSessionInfoProvingStatus updates the session info proving status.
+func (o *sessionInfoOrm) UpdateSessionInfoProvingStatus(ctx context.Context, proveType message.ProveType, taskID string, pk string, status types.RollerProveStatus) error {
+	if _, err := o.db.ExecContext(ctx, o.db.Rebind("update session_info set proving_status = ? where prove_type = ? and task_id = ? and roller_public_key = ?;"), int(status), int(proveType), taskID, pk); err != nil {
+		return err
+	}
+	return nil
+}

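A short usage sketch of the two methods above, given a sessionInfoOrm value o (the task hash and roller key are made up; statuses and types come from the diff). SetSessionInfo is an idempotent upsert keyed on (task_id, roller_public_key), while UpdateSessionInfoProvingStatus touches only the proving_status column:

    now := time.Now()
    info := &types.SessionInfo{
        TaskID:          "task-0xabc", // hypothetical task hash
        RollerPublicKey: "pk-1",       // hypothetical roller key
        ProveType:       int16(message.BasicProve),
        RollerName:      "roller-1",
        ProvingStatus:   int16(types.RollerAssigned),
        CreatedAt:       &now,
    }
    _ = o.SetSessionInfo(info) // first call inserts the row

    // Same key again: hits ON CONFLICT and only updates proving_status,
    // instead of failing on the unique constraint.
    info.ProvingStatus = int16(types.RollerProofValid)
    _ = o.SetSessionInfo(info)

    // Or flip the status directly, without rewriting the whole row.
    _ = o.UpdateSessionInfoProvingStatus(context.Background(),
        message.BasicProve, "task-0xabc", "pk-1", types.RollerProofValid)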
@@ -410,31 +410,29 @@ func testOrmSessionInfo(t *testing.T) {
 	assert.NoError(t, err)
 	assert.Equal(t, 0, len(sessionInfos))

+	now := time.Now()
 	sessionInfo := types.SessionInfo{
-		ID: batchHash,
-		Rollers: map[string]*types.RollerStatus{
-			"0": {
-				PublicKey: "0",
-				Name:      "roller-0",
-				Status:    types.RollerAssigned,
-			},
-		},
-		StartTimestamp: time.Now().Unix()}
+		TaskID:          batchHash,
+		RollerName:      "roller-0",
+		RollerPublicKey: "0",
+		ProvingStatus:   int16(types.RollerAssigned),
+		CreatedAt:       &now,
+	}

 	// insert
 	assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
 	sessionInfos, err = ormSession.GetSessionInfosByHashes(hashes)
 	assert.NoError(t, err)
 	assert.Equal(t, 1, len(sessionInfos))
-	assert.Equal(t, sessionInfo, *sessionInfos[0])
+	assert.Equal(t, sessionInfo.RollerName, sessionInfos[0].RollerName)

 	// update
-	sessionInfo.Rollers["0"].Status = types.RollerProofValid
+	sessionInfo.ProvingStatus = int16(types.RollerProofValid)
 	assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
 	sessionInfos, err = ormSession.GetSessionInfosByHashes(hashes)
 	assert.NoError(t, err)
 	assert.Equal(t, 1, len(sessionInfos))
-	assert.Equal(t, sessionInfo, *sessionInfos[0])
+	assert.Equal(t, sessionInfo.ProvingStatus, sessionInfos[0].ProvingStatus)

 	// delete
 	assert.NoError(t, ormBatch.UpdateProvingStatus(batchHash, types.ProvingTaskVerified))

File diff suppressed because one or more lines are too long
@@ -4,6 +4,7 @@ package prover_test

 import (
 	"encoding/json"
+	"flag"
 	"io"
 	"os"
 	"path/filepath"
@@ -16,23 +17,23 @@ import (
 	"scroll-tech/roller/prover"
 )

-const (
-	paramsPath    = "../assets/test_params"
-	seedPath      = "../assets/test_seed"
-	tracesPath    = "../assets/traces"
-	proofDumpPath = "agg_proof"
+var (
+	paramsPath    = flag.String("params", "/assets/test_params", "params dir")
+	seedPath      = flag.String("seed", "/assets/test_seed", "seed path")
+	tracesPath    = flag.String("traces", "/assets/traces", "traces dir")
+	proofDumpPath = flag.String("dump", "/assets/agg_proof", "the path proofs dump to")
 )

 func TestFFI(t *testing.T) {
 	as := assert.New(t)
 	cfg := &config.ProverConfig{
-		ParamsPath: paramsPath,
-		SeedPath:   seedPath,
+		ParamsPath: *paramsPath,
+		SeedPath:   *seedPath,
 	}
 	prover, err := prover.NewProver(cfg)
 	as.NoError(err)

-	files, err := os.ReadDir(tracesPath)
+	files, err := os.ReadDir(*tracesPath)
 	as.NoError(err)

 	traces := make([]*types.BlockTrace, 0)
@@ -41,7 +42,7 @@ func TestFFI(t *testing.T) {
 			f   *os.File
 			byt []byte
 		)
-		f, err = os.Open(filepath.Join(tracesPath, file.Name()))
+		f, err = os.Open(filepath.Join(*tracesPath, file.Name()))
 		as.NoError(err)
 		byt, err = io.ReadAll(f)
 		as.NoError(err)
@@ -54,10 +55,10 @@ func TestFFI(t *testing.T) {
 	t.Log("prove success")

 	// dump the proof
-	os.RemoveAll(proofDumpPath)
+	os.RemoveAll(*proofDumpPath)
 	proofByt, err := json.Marshal(proof)
 	as.NoError(err)
-	proofFile, err := os.Create(proofDumpPath)
+	proofFile, err := os.Create(*proofDumpPath)
 	as.NoError(err)
 	_, err = proofFile.Write(proofByt)
 	as.NoError(err)

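The prover test gets the same flag treatment, and -dump now controls where the generated proof is written, so a local run can place agg_proof exactly where the verifier test's -proof flag expects to read it, e.g. go test -run TestFFI ./prover -args -params=./assets/test_params -seed=./assets/test_seed -traces=./assets/traces -dump=./assets/agg_proof (paths illustrative).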
@@ -231,30 +231,38 @@ func (r *Roller) prove() error {
 		var traces []*types.BlockTrace
 		traces, err = r.getSortedTracesByHashes(task.Task.BlockHashes)
 		if err != nil {
-			return err
-		}
-		// If FFI panic during Prove, the roller will restart and re-enter prove() function,
-		// the proof will not be submitted.
-		var proof *message.AggProof
-		proof, err = r.prover.Prove(task.Task.ID, traces)
-		if err != nil {
 			proofMsg = &message.ProofDetail{
 				Status: message.StatusProofError,
-				Error:  err.Error(),
+				Error:  "get traces failed",
 				ID:     task.Task.ID,
 				Type:   task.Task.Type,
-				Proof:  &message.AggProof{},
+				Proof:  nil,
 			}
-			log.Error("prove block failed!", "task-id", task.Task.ID)
+			log.Error("get traces failed!", "task-id", task.Task.ID, "err", err)
 		} else {
-			proofMsg = &message.ProofDetail{
-				Status: message.StatusOk,
-				ID:     task.Task.ID,
-				Type:   task.Task.Type,
-				Proof:  proof,
+			// If FFI panic during Prove, the roller will restart and re-enter prove() function,
+			// the proof will not be submitted.
+			var proof *message.AggProof
+			proof, err = r.prover.Prove(task.Task.ID, traces)
+			if err != nil {
+				proofMsg = &message.ProofDetail{
+					Status: message.StatusProofError,
+					Error:  err.Error(),
+					ID:     task.Task.ID,
+					Type:   task.Task.Type,
+					Proof:  nil,
+				}
+				log.Error("prove block failed!", "task-id", task.Task.ID)
+			} else {
+				proofMsg = &message.ProofDetail{
+					Status: message.StatusOk,
+					ID:     task.Task.ID,
+					Type:   task.Task.Type,
+					Proof:  proof,
+				}
+				log.Info("prove block successfully!", "task-id", task.Task.ID)
 			}
-			log.Info("prove block successfully!", "task-id", task.Task.ID)
 		}
 	} else {
 		// when the roller has more than 3 times panic,
@@ -264,7 +272,7 @@ func (r *Roller) prove() error {
 			Error:  "zk proving panic",
 			ID:     task.Task.ID,
 			Type:   task.Task.Type,
-			Proof:  &message.AggProof{},
+			Proof:  nil,
 		}
 	}
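Net effect of the roller change: a trace-fetch failure no longer aborts prove() with a bare error; every outcome now yields a ProofDetail, with Proof set to nil rather than an empty &message.AggProof{} on failure, so the coordinator always learns the task's fate. A condensed sketch of the new control flow (a fragment reusing the diff's own identifiers, not a standalone program):

    traces, terr := r.getSortedTracesByHashes(task.Task.BlockHashes)
    if terr != nil {
        // trace fetch failed: report a proof error rather than returning early
        proofMsg = &message.ProofDetail{Status: message.StatusProofError,
            Error: "get traces failed", ID: task.Task.ID, Type: task.Task.Type, Proof: nil}
    } else if proof, perr := r.prover.Prove(task.Task.ID, traces); perr != nil {
        proofMsg = &message.ProofDetail{Status: message.StatusProofError,
            Error: perr.Error(), ID: task.Task.ID, Type: task.Task.Type, Proof: nil}
    } else {
        proofMsg = &message.ProofDetail{Status: message.StatusOk,
            ID: task.Task.ID, Type: task.Task.Type, Proof: proof}
    }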