Compare commits


5 Commits

16 changed files with 155 additions and 184 deletions

View File

@@ -29,6 +29,25 @@ jobs:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: roller
compile:
if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
@@ -47,13 +66,6 @@ jobs:
- name: Test
run: |
make roller
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: roller
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest

View File

@@ -9,6 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
)
type l2CrossMsgOrm struct {
@@ -143,7 +144,7 @@ func (l *l2CrossMsgOrm) GetL2EarliestNoBlockTimestampHeight() (uint64, error) {
func (l *l2CrossMsgOrm) GetL2CrossMsgByMsgHashList(msgHashList []string) ([]*CrossMsg, error) {
var results []*CrossMsg
- rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE msg_hash in ($1) AND msg_type = $2 AND deleted_at IS NULL;`, msgHashList, Layer2Msg)
+ rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE msg_hash in ($1) AND msg_type = $2 AND deleted_at IS NULL;`, pq.Array(msgHashList), Layer2Msg)
for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {
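For reference, a minimal self-contained sketch of binding a Go string slice as a single Postgres array parameter with pq.Array. The `= ANY($1)` form is the usage shown in lib/pq's documentation; the function name is illustrative and not part of this repository.

package ormsketch

import (
	"github.com/jmoiron/sqlx"
	"github.com/lib/pq"
)

// FindByMsgHashes binds msgHashes as one Postgres array parameter via pq.Array,
// using the `= ANY($1)` form documented by lib/pq (illustrative sketch only).
func FindByMsgHashes(db *sqlx.DB, msgHashes []string) (*sqlx.Rows, error) {
	return db.Queryx(
		`SELECT * FROM cross_message WHERE msg_hash = ANY($1) AND deleted_at IS NULL`,
		pq.Array(msgHashes),
	)
}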

View File

@@ -29,7 +29,6 @@ var (
bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesSkippedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
)
// Layer2Relayer is responsible for
@@ -394,15 +393,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// ProcessCommittedBatches submit proof to layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() {
// set skipped batches in a single db operation
if count, err := r.batchOrm.UpdateSkippedBatches(r.ctx); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
} else if count > 0 {
bridgeL2BatchesSkippedTotalCounter.Inc(int64(count))
log.Info("Skipping batches", "count", count)
}
// retrieves the earliest batch whose rollup status is 'committed'
fields := map[string]interface{}{
"rollup_status": types.RollupCommitted,
@@ -430,11 +420,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case types.ProvingTaskFailed, types.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
success := false
@@ -454,8 +439,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash)
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Info("Failed to upload the proof, change rollup status to RollupFinalizeFailed", "hash", hash)
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizeFailed); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
}

View File

@@ -90,7 +90,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
- assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
+ assert.Equal(t, types.RollupFinalizeFailed, statuses[0])
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err)
@@ -108,67 +108,6 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
assert.Equal(t, types.RollupFinalizing, statuses[0])
}
func testL2RelayerSkipBatches(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus) string {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, rollupStatus)
assert.NoError(t, err)
proof := &message.AggProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, provingStatus)
assert.NoError(t, err)
return batch.Hash
}
skipped := []string{
createBatch(types.RollupCommitted, types.ProvingTaskSkipped),
createBatch(types.RollupCommitted, types.ProvingTaskFailed),
}
notSkipped := []string{
createBatch(types.RollupPending, types.ProvingTaskSkipped),
createBatch(types.RollupCommitting, types.ProvingTaskSkipped),
createBatch(types.RollupFinalizing, types.ProvingTaskSkipped),
createBatch(types.RollupFinalized, types.ProvingTaskSkipped),
createBatch(types.RollupPending, types.ProvingTaskFailed),
createBatch(types.RollupCommitting, types.ProvingTaskFailed),
createBatch(types.RollupFinalizing, types.ProvingTaskFailed),
createBatch(types.RollupFinalized, types.ProvingTaskFailed),
createBatch(types.RollupCommitted, types.ProvingTaskVerified),
}
relayer.ProcessCommittedBatches()
for _, id := range skipped {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{id})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
}
for _, id := range notSkipped {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{id})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.NotEqual(t, types.RollupFinalizationSkipped, statuses[0])
}
}
func testL2RelayerRollupConfirm(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)

View File

@@ -97,7 +97,6 @@ func TestFunctions(t *testing.T) {
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)

View File

@@ -343,6 +343,20 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
if gasTipCap.Cmp(feeData.gasTipCap) < 0 {
gasTipCap = feeData.gasTipCap
}
// adjust for rising basefee
adjBaseFee := big.NewInt(0)
if feeGas := atomic.LoadUint64(&s.baseFeePerGas); feeGas != 0 {
adjBaseFee.SetUint64(feeGas)
}
adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
currentGasFeeCap := new(big.Int).Add(gasTipCap, adjBaseFee)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}
// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}
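
For reference, a standalone sketch of the fee-cap escalation above, worked with illustrative numbers; the 11/10 escalation ratio and the concrete values are assumptions for the example, not this repository's configuration.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	gasTipCap := big.NewInt(2)        // escalated tip (wei), for illustration
	gasFeeCap := big.NewInt(900)      // previously escalated fee cap
	baseFeePerGas := big.NewInt(1000) // latest observed base fee
	maxGasPrice := big.NewInt(10000)  // configured ceiling

	num, den := big.NewInt(11), big.NewInt(10) // assumed escalation ratio

	// adjust for a rising base fee: adjBaseFee = baseFee * num / den = 1100
	adjBaseFee := new(big.Int).Mul(baseFeePerGas, num)
	adjBaseFee.Div(adjBaseFee, den)

	// the cap must cover tip + escalated base fee ...
	currentGasFeeCap := new(big.Int).Add(gasTipCap, adjBaseFee) // 1102
	if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
		gasFeeCap = currentGasFeeCap
	}
	// ... but never exceed the configured maximum
	if gasFeeCap.Cmp(maxGasPrice) > 0 {
		gasFeeCap = maxGasPrice
	}
	fmt.Println(gasFeeCap) // prints 1102
}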

View File

@@ -65,6 +65,7 @@ func TestSender(t *testing.T) {
t.Run("test min gas limit", testMinGasLimit)
t.Run("test resubmit transaction", testResubmitTransaction)
t.Run("test resubmit transaction with rising base fee", testResubmitTransactionWithRisingBaseFee)
t.Run("test check pending transaction", testCheckPendingTransaction)
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
@@ -154,6 +155,43 @@ func testResubmitTransaction(t *testing.T) {
}
}
func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
txType := "DynamicFeeTx"
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
auth := s.auths.getAccount()
tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
s.baseFeePerGas = 1000
feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
assert.NoError(t, err)
// bump the basefee by 10x
s.baseFeePerGas *= 10
// resubmit and check that the gas fee has been adjusted accordingly
newTx, err := s.resubmitTransaction(feeData, auth, tx)
assert.NoError(t, err)
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
adjBaseFee := new(big.Int)
adjBaseFee.SetUint64(s.baseFeePerGas)
adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
expectedGasFeeCap := new(big.Int).Add(feeData.gasTipCap, adjBaseFee)
if expectedGasFeeCap.Cmp(maxGasPrice) > 0 {
expectedGasFeeCap = maxGasPrice
}
assert.Equal(t, expectedGasFeeCap.Int64(), newTx.GasFeeCap().Int64())
s.Stop()
}
func testCheckPendingTransaction(t *testing.T) {
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig

View File

@@ -280,25 +280,6 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
return &newBatch, nil
}
// UpdateSkippedBatches updates the skipped batches in the database.
func (o *Batch) UpdateSkippedBatches(ctx context.Context) (uint64, error) {
provingStatusList := []interface{}{
int(types.ProvingTaskSkipped),
int(types.ProvingTaskFailed),
}
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("rollup_status", int(types.RollupCommitted))
db = db.Where("proving_status IN (?)", provingStatusList)
result := db.Update("rollup_status", int(types.RollupFinalizationSkipped))
if result.Error != nil {
return 0, fmt.Errorf("Batch.UpdateSkippedBatches error: %w", result.Error)
}
return uint64(result.RowsAffected), nil
}
// UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch.
func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
updateFields := make(map[string]interface{})

View File

@@ -202,27 +202,6 @@ func TestBatchOrm(t *testing.T) {
assert.Equal(t, types.RollupPending, rollupStatus[0])
assert.Equal(t, types.RollupPending, rollupStatus[1])
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash1, types.ProvingTaskSkipped)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitted)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskFailed)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupCommitted)
assert.NoError(t, err)
count, err = batchOrm.UpdateSkippedBatches(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(2), count)
count, err = batchOrm.UpdateSkippedBatches(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(0), count)
batch, err := batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizationSkipped, types.RollupStatus(batch.RollupStatus))
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)

View File

@@ -138,8 +138,6 @@ const (
ProvingStatusUndefined ProvingStatus = iota
// ProvingTaskUnassigned : proving_task is not assigned to be proved
ProvingTaskUnassigned
// ProvingTaskSkipped : proving_task is skipped for proof generation
ProvingTaskSkipped
// ProvingTaskAssigned : proving_task is assigned to be proved
ProvingTaskAssigned
// ProvingTaskProved : proof has been returned by prover
@@ -154,8 +152,6 @@ func (ps ProvingStatus) String() string {
switch ps {
case ProvingTaskUnassigned:
return "unassigned"
case ProvingTaskSkipped:
return "skipped"
case ProvingTaskAssigned:
return "assigned"
case ProvingTaskProved:
@@ -210,8 +206,6 @@ const (
RollupFinalizing
// RollupFinalized : finalize transaction is confirmed to layer1
RollupFinalized
// RollupFinalizationSkipped : batch finalization is skipped
RollupFinalizationSkipped
// RollupCommitFailed : rollup commit transaction confirmed but failed
RollupCommitFailed
// RollupFinalizeFailed : rollup finalize transaction is confirmed but failed
@@ -230,8 +224,6 @@ func (s RollupStatus) String() string {
return "RollupFinalizing"
case RollupFinalized:
return "RollupFinalized"
case RollupFinalizationSkipped:
return "RollupFinalizationSkipped"
case RollupCommitFailed:
return "RollupCommitFailed"
case RollupFinalizeFailed:

View File

@@ -52,11 +52,6 @@ func TestProvingStatus(t *testing.T) {
ProvingTaskUnassigned,
"unassigned",
},
{
"ProvingTaskSkipped",
ProvingTaskSkipped,
"skipped",
},
{
"ProvingTaskAssigned",
ProvingTaskAssigned,

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.0.17"
var tag = "v4.0.20"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -66,10 +66,16 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
mapping(bytes32 => ReplayState) public replayStates;
/// @notice Mapping from queue index to previous replay queue index.
///
/// @dev If a message `x` was replayed 3 times with index `q1`, `q2` and `q3`, the
/// value of `prevReplayIndex` and `replayStates` will be `replayStates[hash(x)].lastIndex = q3`,
- /// `replayStates[hash(x)].times = 3`, `prevReplayIndex[q3] = q2`, `prevReplayIndex[q2] = q1`
- /// and `prevReplayIndex[q1] = x`.
+ /// `replayStates[hash(x)].times = 3`, `prevReplayIndex[q3] = q2`, `prevReplayIndex[q2] = q1`,
+ /// `prevReplayIndex[q1] = x` and `prevReplayIndex[x]=nil`.
+ ///
+ /// @dev The index `x` that `prevReplayIndex[x]=nil` is used as the termination of the list.
+ /// Usually we use `0` to represent `nil`, but we cannot distinguish it with the first message
+ /// with index zero. So a nonzero offset `1` is added to the value of `prevReplayIndex[x]` to
+ /// avoid such situation.
mapping(uint256 => uint256) public prevReplayIndex;
/***************
@@ -200,11 +206,13 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
ReplayState memory _replayState = replayStates[_xDomainCalldataHash];
// update the replayed message chain.
- if (_replayState.lastIndex == 0) {
- // the message has not been replayed before.
- prevReplayIndex[_nextQueueIndex] = _messageNonce;
- } else {
- prevReplayIndex[_nextQueueIndex] = _replayState.lastIndex;
+ unchecked {
+ if (_replayState.lastIndex == 0) {
+ // the message has not been replayed before.
+ prevReplayIndex[_nextQueueIndex] = _messageNonce + 1;
+ } else {
+ prevReplayIndex[_nextQueueIndex] = _replayState.lastIndex + 1;
+ }
+ }
_replayState.lastIndex = uint128(_nextQueueIndex);
@@ -265,6 +273,9 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
IL1MessageQueue(_messageQueue).dropCrossDomainMessage(_lastIndex);
_lastIndex = prevReplayIndex[_lastIndex];
if (_lastIndex == 0) break;
+ unchecked {
+ _lastIndex = _lastIndex - 1;
+ }
}
isL1MessageDropped[_xDomainCalldataHash] = true;
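To make the offset convention described in the comments above concrete, here is a small sketch (in Go, purely for illustration; the contract itself is Solidity) of storing previousIndex + 1 in the mapping and walking the replay chain back to the original message, mirroring the drop loop. The queue indices used are made up for the example.

package main

import "fmt"

func main() {
	// prevReplayIndex stores previousIndex+1 so that 0 can mean "no previous
	// entry" even though a real queue index can be 0.
	prevReplayIndex := map[uint64]uint64{}

	link := func(newIndex, prevIndex uint64) {
		prevReplayIndex[newIndex] = prevIndex + 1 // store with +1 offset
	}

	// message originally enqueued at index 0, then replayed at indices 5, 7, 9
	link(5, 0)
	link(7, 5)
	link(9, 7)

	// walk the chain from the last replay back to the original message
	idx := uint64(9)
	for {
		fmt.Println("drop queue index", idx)
		next := prevReplayIndex[idx]
		if next == 0 { // 0 means nil: no previous entry
			break
		}
		idx = next - 1 // undo the +1 offset
	}
	// drops indices 9, 7, 5, 0
}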

View File

@@ -144,9 +144,9 @@ contract L1ScrollMessengerTest is L1GatewayTestBase {
(_replayTimes, _lastIndex) = l1Messenger.replayStates(hash);
assertEq(_replayTimes, i + 1);
assertEq(_lastIndex, i + 3);
- assertEq(l1Messenger.prevReplayIndex(i + 3), i + 2);
+ assertEq(l1Messenger.prevReplayIndex(i + 3), i + 2 + 1);
for (uint256 j = 0; j <= i; j++) {
- assertEq(l1Messenger.prevReplayIndex(i + 3 - j), i + 2 - j);
+ assertEq(l1Messenger.prevReplayIndex(i + 3 - j), i + 2 - j + 1);
}
}
}
@@ -239,51 +239,76 @@ contract L1ScrollMessengerTest is L1GatewayTestBase {
l1Messenger.updateMaxReplayTimes(10);
// replay 3 times
for (uint256 i = 0; i < 3; i++) {
l1Messenger.replayMessage(address(this), address(0), 0, 0, new bytes(0), 0, address(0));
}
assertEq(messageQueue.nextCrossDomainMessageIndex(), 4);
// replay 1 time
l1Messenger.replayMessage(address(this), address(0), 0, 0, new bytes(0), 0, address(0));
assertEq(messageQueue.nextCrossDomainMessageIndex(), 2);
// only first 3 are skipped
// skip all 2 messages
hevm.startPrank(address(rollup));
messageQueue.popCrossDomainMessage(0, 4, 0x7);
assertEq(messageQueue.pendingQueueIndex(), 4);
messageQueue.popCrossDomainMessage(0, 2, 0x3);
assertEq(messageQueue.pendingQueueIndex(), 2);
hevm.stopPrank();
// message already dropped or executed, revert
hevm.expectRevert("message already dropped or executed");
l1Messenger.dropMessage(address(this), address(0), 0, 0, new bytes(0));
// send one message with nonce 4 and replay 4 times
l1Messenger.sendMessage(address(0), 0, new bytes(0), 0);
for (uint256 i = 0; i < 4; i++) {
l1Messenger.replayMessage(address(this), address(0), 0, 4, new bytes(0), 0, address(0));
}
assertEq(messageQueue.nextCrossDomainMessageIndex(), 9);
// skip all 5 messages
hevm.startPrank(address(rollup));
messageQueue.popCrossDomainMessage(4, 5, 0x1f);
assertEq(messageQueue.pendingQueueIndex(), 9);
hevm.stopPrank();
for (uint256 i = 4; i < 9; ++i) {
for (uint256 i = 0; i < 2; ++i) {
assertGt(uint256(messageQueue.getCrossDomainMessage(i)), 0);
}
hevm.expectEmit(false, false, false, true);
emit OnDropMessageCalled(new bytes(0));
l1Messenger.dropMessage(address(this), address(0), 0, 4, new bytes(0));
for (uint256 i = 4; i < 9; ++i) {
l1Messenger.dropMessage(address(this), address(0), 0, 0, new bytes(0));
for (uint256 i = 0; i < 2; ++i) {
assertEq(messageQueue.getCrossDomainMessage(i), bytes32(0));
}
// send one message with nonce 2 and replay 3 times
l1Messenger.sendMessage(address(0), 0, new bytes(0), 0);
assertEq(messageQueue.nextCrossDomainMessageIndex(), 3);
for (uint256 i = 0; i < 3; i++) {
l1Messenger.replayMessage(address(this), address(0), 0, 2, new bytes(0), 0, address(0));
}
assertEq(messageQueue.nextCrossDomainMessageIndex(), 6);
// only first 3 are skipped
hevm.startPrank(address(rollup));
messageQueue.popCrossDomainMessage(2, 4, 0x7);
assertEq(messageQueue.pendingQueueIndex(), 6);
hevm.stopPrank();
// message already dropped or executed, revert
hevm.expectRevert("message already dropped or executed");
l1Messenger.dropMessage(address(this), address(0), 0, 2, new bytes(0));
// send one message with nonce 6 and replay 4 times
l1Messenger.sendMessage(address(0), 0, new bytes(0), 0);
for (uint256 i = 0; i < 4; i++) {
l1Messenger.replayMessage(address(this), address(0), 0, 6, new bytes(0), 0, address(0));
}
assertEq(messageQueue.nextCrossDomainMessageIndex(), 11);
// skip all 5 messages
hevm.startPrank(address(rollup));
messageQueue.popCrossDomainMessage(6, 5, 0x1f);
assertEq(messageQueue.pendingQueueIndex(), 11);
hevm.stopPrank();
for (uint256 i = 6; i < 11; ++i) {
assertGt(uint256(messageQueue.getCrossDomainMessage(i)), 0);
}
hevm.expectEmit(false, false, false, true);
emit OnDropMessageCalled(new bytes(0));
l1Messenger.dropMessage(address(this), address(0), 0, 6, new bytes(0));
for (uint256 i = 6; i < 11; ++i) {
assertEq(messageQueue.getCrossDomainMessage(i), bytes32(0));
}
// Message already dropped, revert
hevm.expectRevert("Message already dropped");
l1Messenger.dropMessage(address(this), address(0), 0, 4, new bytes(0));
l1Messenger.dropMessage(address(this), address(0), 0, 0, new bytes(0));
hevm.expectRevert("Message already dropped");
l1Messenger.dropMessage(address(this), address(0), 0, 6, new bytes(0));
// replay dropped message, revert
hevm.expectRevert("Message already dropped");
l1Messenger.replayMessage(address(this), address(0), 0, 4, new bytes(0), 0, address(0));
l1Messenger.replayMessage(address(this), address(0), 0, 0, new bytes(0), 0, address(0));
hevm.expectRevert("Message already dropped");
l1Messenger.replayMessage(address(this), address(0), 0, 6, new bytes(0), 0, address(0));
}
function onDropMessage(bytes memory message) external payable {

View File

@@ -35,7 +35,7 @@ create table chunk
);
comment
- on column chunk.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';
+ on column chunk.proving_status is 'undefined, unassigned, assigned, proved, verified, failed';
create unique index chunk_index_uindex
on chunk (index);

View File

@@ -49,10 +49,10 @@ comment
on column batch.chunk_proofs_status is 'undefined, pending, ready';
comment
- on column batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';
+ on column batch.proving_status is 'undefined, unassigned, assigned, proved, verified, failed';
comment
- on column batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped, commit_failed, finalize_failed';
+ on column batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, commit_failed, finalize_failed';
-- +goose StatementEnd