Compare commits

..

19 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| HAOYUatHZ | 40aca8558b | skip finalization sequence check | 2024-04-03 11:21:44 +08:00 |
| HAOYUatHZ | bd26d2d86c | Merge commit 'c1afa61d' into 4844-env1-add-append | 2024-04-01 15:51:49 +08:00 |
| HAOYUatHZ | 66ad90b547 | fix | 2024-03-28 22:51:39 +08:00 |
| HAOYUatHZ | 669456f8cc | add queueTransaction to import message hashes directly | 2024-03-27 16:04:46 +08:00 |
| Péter Garamvölgyi | c1afa61da8 | skip batch proof for local testing | 2024-03-21 16:42:57 +01:00 |
| Péter Garamvölgyi | 122c6f0577 | fix batch proof for local testing | 2024-03-21 15:40:19 +01:00 |
| zimpha | cccec61b7e | merge with develop branch | 2024-03-19 17:01:15 +08:00 |
| zimpha | c3e8a3ffa6 | fix solc version | 2024-03-19 12:23:50 +08:00 |
| zimpha | 722b77c269 | fix ci for contracts | 2024-03-19 12:07:31 +08:00 |
| zimpha | 800f3c2674 | add decorator in codec | 2024-03-19 11:51:26 +08:00 |
| zimpha | 055b70ebc9 | use version and batch index to choose verifier | 2024-03-18 12:27:22 +08:00 |
| zimpha | 7764f3ae7a | fix comments | 2024-03-18 10:13:33 +08:00 |
| zimpha | 25f32b5967 | fix comments | 2024-03-15 15:23:18 +08:00 |
| zimpha | 3bb7c63621 | Merge remote-tracking branch 'origin/develop' into feat/4844_contract_v1_codec | 2024-03-15 15:19:38 +08:00 |
| zimpha | febec99f79 | add blob tests | 2024-03-15 15:19:17 +08:00 |
| zimpha | cdb061aa0c | use ethers-v6 and fix unit tests | 2024-03-15 00:16:53 +08:00 |
| zimpha | 45de8e491c | integrate 4844 encoding | 2024-03-14 00:35:58 +08:00 |
| zimpha | 6490b70893 | Merge remote-tracking branch 'origin/develop' into feat/4844_contract_v1_codec | 2024-03-12 17:47:15 +08:00 |
| zimpha | b876b5649d | add v1 codec | 2024-03-06 15:01:04 +08:00 |
62 changed files with 1672 additions and 3685 deletions

View File

@@ -19,7 +19,7 @@ CAPELLA_FORK_VERSION: 0x20000092
MAX_WITHDRAWALS_PER_PAYLOAD: 16
# Deneb
DENEB_FORK_EPOCH: 0
DENEB_FORK_EPOCH: 1
DENEB_FORK_VERSION: 0x20000093
# Time parameters

View File

@@ -19,7 +19,7 @@ services:
command:
- testnet
- generate-genesis
- --fork=deneb
- --fork=capella
- --num-validators=64
- --genesis-time-delay=3
- --output-ssz=/data/consensus/genesis.ssz

View File

@@ -15,7 +15,7 @@
"archimedesBlock": 0,
"shanghaiBlock": 0,
"clique": {
"period": 1,
"period": 3,
"epoch": 30000
},
"scroll": {

View File

@@ -1,7 +1,6 @@
package forks
import (
"math"
"math/big"
"sort"
@@ -9,47 +8,30 @@ import (
)
// CollectSortedForkHeights returns a sorted set of block numbers that one or more forks are activated on
func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]bool, map[string]uint64) {
type nameFork struct {
name string
block *big.Int
}
forkHeightNameMap := make(map[uint64]string)
for _, fork := range []nameFork{
{name: "homestead", block: config.HomesteadBlock},
{name: "daoFork", block: config.DAOForkBlock},
{name: "eip150", block: config.EIP150Block},
{name: "eip155", block: config.EIP155Block},
{name: "eip158", block: config.EIP158Block},
{name: "byzantium", block: config.ByzantiumBlock},
{name: "constantinople", block: config.ConstantinopleBlock},
{name: "petersburg", block: config.PetersburgBlock},
{name: "istanbul", block: config.IstanbulBlock},
{name: "muirGlacier", block: config.MuirGlacierBlock},
{name: "berlin", block: config.BerlinBlock},
{name: "london", block: config.LondonBlock},
{name: "arrowGlacier", block: config.ArrowGlacierBlock},
{name: "archimedes", block: config.ArchimedesBlock},
{name: "shanghai", block: config.ShanghaiBlock},
{name: "banach", block: config.BanachBlock},
} {
if fork.block == nil {
continue
}
height := fork.block.Uint64()
// only keep the latest fork at each height, discard the rest
forkHeightNameMap[height] = fork.name
}
func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]bool) {
forkHeightsMap := make(map[uint64]bool)
forkNameHeightMap := make(map[string]uint64)
for height, name := range forkHeightNameMap {
forkHeightsMap[height] = true
forkNameHeightMap[name] = height
for _, fork := range []*big.Int{
config.HomesteadBlock,
config.DAOForkBlock,
config.EIP150Block,
config.EIP155Block,
config.EIP158Block,
config.ByzantiumBlock,
config.ConstantinopleBlock,
config.PetersburgBlock,
config.IstanbulBlock,
config.MuirGlacierBlock,
config.BerlinBlock,
config.LondonBlock,
config.ArrowGlacierBlock,
config.ArchimedesBlock,
config.ShanghaiBlock,
} {
if fork == nil {
continue
} else if height := fork.Uint64(); height != 0 {
forkHeightsMap[height] = true
}
}
var forkHeights []uint64
@@ -59,7 +41,7 @@ func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]
sort.Slice(forkHeights, func(i, j int) bool {
return forkHeights[i] < forkHeights[j]
})
return forkHeights, forkHeightsMap, forkNameHeightMap
return forkHeights, forkHeightsMap
}
// BlocksUntilFork returns the number of blocks until the next fork
@@ -72,17 +54,3 @@ func BlocksUntilFork(blockHeight uint64, forkHeights []uint64) uint64 {
}
return 0
}
// BlockRange returns the block range of the hard fork
// Requires forkHeights to be in ascending order
func BlockRange(currentForkHeight uint64, forkHeights []uint64) (from, to uint64) {
to = math.MaxInt64
for _, height := range forkHeights {
if currentForkHeight < height {
to = height
return
}
from = height
}
return
}
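
The refactor above drops the fork-name bookkeeping and the `BlockRange` helper, leaving `CollectSortedForkHeights` with just two return values. A minimal, self-contained sketch of the remaining dedupe-and-sort logic (operating directly on optional `*big.Int` heights rather than a `params.ChainConfig`, which is an assumption made here for illustration):

```go
package main

import (
	"fmt"
	"math/big"
	"sort"
)

// collectForkHeights mirrors the reduced CollectSortedForkHeights shown above:
// nil fork blocks are skipped, height 0 is ignored, duplicate heights collapse
// into one entry, and the result is returned sorted together with a lookup set.
func collectForkHeights(forks ...*big.Int) ([]uint64, map[uint64]bool) {
	heightsSet := make(map[uint64]bool)
	for _, fork := range forks {
		if fork == nil {
			continue
		}
		if height := fork.Uint64(); height != 0 {
			heightsSet[height] = true
		}
	}
	heights := make([]uint64, 0, len(heightsSet))
	for height := range heightsSet {
		heights = append(heights, height)
	}
	sort.Slice(heights, func(i, j int) bool { return heights[i] < heights[j] })
	return heights, heightsSet
}

func main() {
	// e.g. eip155 at 4, eip158 and byzantium both at 3, one unset fork, one genesis fork at 0
	heights, set := collectForkHeights(big.NewInt(4), big.NewInt(3), big.NewInt(3), nil, big.NewInt(0))
	fmt.Println(heights) // [3 4]
	fmt.Println(set)     // map[3:true 4:true]
}
```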

View File

@@ -1,7 +1,6 @@
package forks
import (
"math"
"math/big"
"testing"
@@ -10,27 +9,20 @@ import (
)
func TestCollectSortedForkBlocks(t *testing.T) {
l, m, n := CollectSortedForkHeights(&params.ChainConfig{
l, m := CollectSortedForkHeights(&params.ChainConfig{
EIP155Block: big.NewInt(4),
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(3),
ConstantinopleBlock: big.NewInt(0),
})
require.Equal(t, l, []uint64{
0,
3,
4,
})
require.Equal(t, map[uint64]bool{
3: true,
4: true,
0: true,
}, m)
require.Equal(t, map[string]uint64{
"eip155": 4,
"byzantium": 3,
"constantinople": 0,
}, n)
}
func TestBlocksUntilFork(t *testing.T) {
@@ -72,71 +64,3 @@ func TestBlocksUntilFork(t *testing.T) {
})
}
}
func TestBlockRange(t *testing.T) {
tests := []struct {
name string
forkHeight uint64
forkHeights []uint64
expectedFrom uint64
expectedTo uint64
}{
{
name: "ToInfinite",
forkHeight: 300,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 300,
expectedTo: math.MaxInt64,
},
{
name: "To300",
forkHeight: 200,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 200,
expectedTo: 300,
},
{
name: "To200",
forkHeight: 100,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 100,
expectedTo: 200,
},
{
name: "To100",
forkHeight: 0,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 0,
expectedTo: 100,
},
{
name: "To200-1",
forkHeight: 100,
forkHeights: []uint64{100, 200},
expectedFrom: 100,
expectedTo: 200,
},
{
name: "To2",
forkHeight: 1,
forkHeights: []uint64{1, 2},
expectedFrom: 1,
expectedTo: 2,
},
{
name: "ToInfinite-1",
forkHeight: 0,
forkHeights: []uint64{0},
expectedFrom: 0,
expectedTo: math.MaxInt64,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
from, to := BlockRange(test.forkHeight, test.forkHeights)
require.Equal(t, test.expectedFrom, from)
require.Equal(t, test.expectedTo, to)
})
}
}

View File

@@ -442,9 +442,9 @@ func EstimateBatchL1CommitGas(b *encoding.Batch) (uint64, error) {
}
// EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately.
func EstimateBatchL1CommitCalldataSize(b *encoding.Batch) (uint64, error) {
func EstimateBatchL1CommitCalldataSize(c *encoding.Batch) (uint64, error) {
var totalL1CommitCalldataSize uint64
for _, chunk := range b.Chunks {
for _, chunk := range c.Chunks {
chunkL1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk)
if err != nil {
return 0, err

View File

@@ -210,10 +210,6 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
return nil, fmt.Errorf("too many chunks in batch")
}
if len(batch.Chunks) == 0 {
return nil, fmt.Errorf("too few chunks in batch")
}
// batch data hash
dataHash, err := computeBatchDataHash(batch.Chunks, batch.TotalL1MessagePoppedBefore)
if err != nil {
@@ -288,6 +284,9 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
// the raw (un-padded) blob payload
blobBytes := make([]byte, metadataLength)
// the number of chunks that contain at least one L2 transaction
numNonEmptyChunks := 0
// challenge digest preimage
// 1 hash for metadata and 1 for each chunk
challengePreimage := make([]byte, (1+MaxNumChunks)*32)
@@ -295,47 +294,42 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
// the challenge point z
var z kzg4844.Point
// the chunk data hash used for calculating the challenge preimage
var chunkDataHash common.Hash
// blob metadata: num_chunks
binary.BigEndian.PutUint16(blobBytes[0:], uint16(len(chunks)))
// encode blob metadata and L2 transactions,
// and simultaneously also build challenge preimage
for chunkID, chunk := range chunks {
currentChunkStartIndex := len(blobBytes)
hasL2Tx := false
for _, block := range chunk.Blocks {
for _, tx := range block.Transactions {
if tx.Type != types.L1MessageTxType {
hasL2Tx = true
// encode L2 txs into blob payload
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
if err != nil {
return nil, nil, err
}
blobBytes = append(blobBytes, rlpTxData...)
continue
}
}
}
// blob metadata: chunki_size
if chunkSize := len(blobBytes) - currentChunkStartIndex; chunkSize != 0 {
binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(chunkSize))
chunkSize := len(blobBytes) - currentChunkStartIndex
binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(chunkSize))
if hasL2Tx {
numNonEmptyChunks++
}
// challenge: compute chunk data hash
chunkDataHash = crypto.Keccak256Hash(blobBytes[currentChunkStartIndex:])
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
hash := crypto.Keccak256Hash(blobBytes[currentChunkStartIndex:])
copy(challengePreimage[32+chunkID*32:], hash[:])
}
// if we have fewer than MaxNumChunks chunks, the rest
// of the blob metadata is correctly initialized to 0,
// but we need to add padding to the challenge preimage
for chunkID := len(chunks); chunkID < MaxNumChunks; chunkID++ {
// use the last chunk's data hash as padding
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
}
// blob metadata: num_chunks
binary.BigEndian.PutUint16(blobBytes[0:], uint16(numNonEmptyChunks))
// challenge: compute metadata hash
hash := crypto.Keccak256Hash(blobBytes[0:metadataLength])
@@ -449,55 +443,8 @@ func (b *DABatch) BlobDataProof() ([]byte, error) {
return BlobDataProofArgs.Pack(values...)
}
// Blob returns the blob of the batch.
func (b *DABatch) Blob() *kzg4844.Blob {
return b.blob
}
// DecodeFromCalldata attempts to decode a DABatch and an array of DAChunks from the provided calldata byte slice.
func DecodeFromCalldata(data []byte) (*DABatch, []*DAChunk, error) {
// TODO: implement this function.
return nil, nil, nil
}
// EstimateChunkL1CommitBlobSize estimates the size of the L1 commit blob for a single chunk.
func EstimateChunkL1CommitBlobSize(c *encoding.Chunk) (uint64, error) {
metadataSize := uint64(2 + 4*MaxNumChunks) // over-estimate: adding metadata length
chunkDataSize, err := chunkL1CommitBlobDataSize(c)
if err != nil {
return 0, err
}
paddedSize := ((metadataSize + chunkDataSize + 30) / 31) * 32
return paddedSize, nil
}
// EstimateBatchL1CommitBlobSize estimates the total size of the L1 commit blob for a batch.
func EstimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, error) {
metadataSize := uint64(2 + 4*MaxNumChunks)
var batchDataSize uint64
for _, c := range b.Chunks {
chunkDataSize, err := chunkL1CommitBlobDataSize(c)
if err != nil {
return 0, err
}
batchDataSize += chunkDataSize
}
paddedSize := ((metadataSize + batchDataSize + 30) / 31) * 32
return paddedSize, nil
}
func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
var dataSize uint64
for _, block := range c.Blocks {
for _, tx := range block.Transactions {
if tx.Type != types.L1MessageTxType {
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
if err != nil {
return 0, err
}
dataSize += uint64(len(rlpTxData))
}
}
}
return dataSize, nil
}
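
The removed blob-size estimators all pad with `((metadataSize + dataSize + 30) / 31) * 32`: every 31 bytes of payload are mapped into one 32-byte BLS field element, so the byte count is rounded up to whole 31-byte groups and each group is widened by one byte. A tiny standalone check of that arithmetic (the `maxNumChunks` constant and the sample sizes are illustrative assumptions):

```go
package main

import "fmt"

// maxNumChunks is an illustrative stand-in for the codec's MaxNumChunks constant.
const maxNumChunks = 15

// paddedBlobSize applies the padding formula from the removed estimators:
// metadata is num_chunks (2 bytes) plus a 4-byte size slot per chunk, and the
// total is rounded up to whole 31-byte groups, each stored as 32 bytes.
func paddedBlobSize(dataSize uint64) uint64 {
	metadataSize := uint64(2 + 4*maxNumChunks)
	return ((metadataSize + dataSize + 30) / 31) * 32
}

func main() {
	for _, size := range []uint64{0, 1, 1_000, 100_000} {
		fmt.Printf("payload %7d bytes -> padded blob size %7d bytes\n", size, paddedBlobSize(size))
	}
}
```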

File diff suppressed because one or more lines are too long

View File

@@ -8,17 +8,6 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
)
// CodecVersion defines the version of encoder and decoder.
type CodecVersion int
const (
// CodecV0 represents the version 0 of the encoder and decoder.
CodecV0 CodecVersion = iota
// CodecV1 represents the version 1 of the encoder and decoder.
CodecV1
)
// Block represents an L2 block.
type Block struct {
Header *types.Header
@@ -220,3 +209,8 @@ func (b *Batch) WithdrawRoot() common.Hash {
lastChunkBlockNum := len(b.Chunks[numChunks-1].Blocks)
return b.Chunks[len(b.Chunks)-1].Blocks[lastChunkBlockNum-1].WithdrawRoot
}
// NumChunks gets the number of chunks of the batch.
func (b *Batch) NumChunks() uint64 {
return uint64(len(b.Chunks))
}

View File

@@ -75,6 +75,7 @@ func TestUtilFunctions(t *testing.T) {
assert.Equal(t, uint64(240000), chunk3.L2GasUsed())
// Test Batch methods
assert.Equal(t, uint64(3), batch.NumChunks())
assert.Equal(t, block6.Header.Root, batch.StateRoot())
assert.Equal(t, block6.WithdrawRoot, batch.WithdrawRoot())
}

View File

@@ -3,16 +3,11 @@ package utils
import (
"context"
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"math/big"
"os"
"path/filepath"
"time"
"github.com/modern-go/reflect2"
"github.com/scroll-tech/go-ethereum/core"
)
// TryTimes runs the given function repeatedly until it returns true.
@@ -64,17 +59,3 @@ func RandomURL() string {
id, _ := rand.Int(rand.Reader, big.NewInt(5000-1))
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
// ReadGenesis parses and returns the genesis file at the given path
func ReadGenesis(genesisPath string) (*core.Genesis, error) {
file, err := os.Open(filepath.Clean(genesisPath))
if err != nil {
return nil, err
}
genesis := new(core.Genesis)
if err := json.NewDecoder(file).Decode(genesis); err != nil {
return nil, errors.Join(err, file.Close())
}
return genesis, file.Close()
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.3.77"
var tag = "v4.3.74"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -83,6 +83,45 @@ Return the batch hash of a committed batch.
|---|---|---|
| _0 | bytes32 | undefined |
### finalizeBatch
```solidity
function finalizeBatch(bytes _batchHeader, bytes32 _prevStateRoot, bytes32 _postStateRoot, bytes32 _withdrawRoot) external nonpayable
```
Finalize a committed batch on layer 1 without providing proof.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _batchHeader | bytes | undefined |
| _prevStateRoot | bytes32 | undefined |
| _postStateRoot | bytes32 | undefined |
| _withdrawRoot | bytes32 | undefined |
### finalizeBatch4844
```solidity
function finalizeBatch4844(bytes _batchHeader, bytes32 _prevStateRoot, bytes32 _postStateRoot, bytes32 _withdrawRoot, bytes _blobDataProof) external nonpayable
```
Finalize a committed batch (with blob) on layer 1 without providing proof.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _batchHeader | bytes | undefined |
| _prevStateRoot | bytes32 | undefined |
| _postStateRoot | bytes32 | undefined |
| _withdrawRoot | bytes32 | undefined |
| _blobDataProof | bytes | undefined |
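
For quick reference when wiring up callers, the 4-byte selectors of the two new entry points follow directly from the signatures shown above. A short Go sketch (Keccak-256 via `golang.org/x/crypto/sha3`; the printed values are simply whatever the hashes evaluate to, not taken from the contract artifacts):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// selector returns the first 4 bytes of the Keccak-256 hash of a canonical
// Solidity function signature, i.e. the function selector used in calldata.
func selector(signature string) []byte {
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte(signature))
	return h.Sum(nil)[:4]
}

func main() {
	fmt.Printf("finalizeBatch:     0x%x\n", selector("finalizeBatch(bytes,bytes32,bytes32,bytes32)"))
	fmt.Printf("finalizeBatch4844: 0x%x\n", selector("finalizeBatch4844(bytes,bytes32,bytes32,bytes32,bytes)"))
}
```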
### finalizeBatchWithProof
```solidity

View File

@@ -99,6 +99,18 @@ interface IScrollChain {
bytes calldata aggrProof
) external;
/// @notice Finalize a committed batch on layer 1 without providing proof.
/// @param batchHeader The header of the current batch, see the encoding in comments of `commitBatch`.
/// @param prevStateRoot The state root of parent batch.
/// @param postStateRoot The state root of current batch.
/// @param withdrawRoot The withdraw trie root of current batch.
function finalizeBatch(
bytes calldata batchHeader,
bytes32 prevStateRoot,
bytes32 postStateRoot,
bytes32 withdrawRoot
) external;
/// @notice Finalize a committed batch (with blob) on layer 1.
///
/// @dev Memory layout of `blobDataProof`:
@@ -120,4 +132,18 @@ interface IScrollChain {
bytes calldata blobDataProof,
bytes calldata aggrProof
) external;
/// @notice Finalize a committed batch (with blob) on layer 1 without providing proof.
/// @param batchHeader The header of current batch, see the encoding in comments of `commitBatch`.
/// @param prevStateRoot The state root of parent batch.
/// @param postStateRoot The state root of current batch.
/// @param withdrawRoot The withdraw trie root of current batch.
/// @param blobDataProof The proof for blob data.
function finalizeBatch4844(
bytes calldata batchHeader,
bytes32 prevStateRoot,
bytes32 postStateRoot,
bytes32 withdrawRoot,
bytes calldata blobDataProof
) external;
}

View File

@@ -402,6 +402,13 @@ contract L1MessageQueue is OwnableUpgradeable, IL1MessageQueue {
emit UpdateMaxGasLimit(_oldMaxGasLimit, _newMaxGasLimit);
}
function appendHashes(uint256 _fromQueueIndex, bytes32[] memory _hashes) external {
require(_fromQueueIndex == messageQueue.length, "messageQueue index mismatch");
for (uint256 i = 0; i < _hashes.length; i++) {
messageQueue.push(_hashes[i]);
}
}
/**********************
* Internal Functions *
**********************/

View File

@@ -392,8 +392,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
bytes32 _dataHash = BatchHeaderV0Codec.getDataHash(memPtr);
// verify previous state root.
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// // verify previous state root.
// if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
@@ -408,8 +408,47 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
// check and update lastFinalizedBatchIndex
unchecked {
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
lastFinalizedBatchIndex = _batchIndex;
// if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
if (_batchIndex > lastFinalizedBatchIndex) lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
finalizedStateRoots[_batchIndex] = _postStateRoot;
withdrawRoots[_batchIndex] = _withdrawRoot;
// // Pop finalized and non-skipped message from L1MessageQueue.
// _popL1Messages(
// BatchHeaderV0Codec.getSkippedBitmapPtr(memPtr),
// BatchHeaderV0Codec.getTotalL1MessagePopped(memPtr),
// BatchHeaderV0Codec.getL1MessagePopped(memPtr)
// );
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
/// @inheritdoc IScrollChain
function finalizeBatch(
bytes calldata _batchHeader,
bytes32 _prevStateRoot,
bytes32 _postStateRoot,
bytes32 _withdrawRoot
) external override OnlyProver whenNotPaused {
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
// compute batch hash and verify
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
// // verify previous state root.
// if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
// check and update lastFinalizedBatchIndex
unchecked {
// if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
if (_batchIndex > lastFinalizedBatchIndex) lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
@@ -417,11 +456,11 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
withdrawRoots[_batchIndex] = _withdrawRoot;
// Pop finalized and non-skipped message from L1MessageQueue.
_popL1Messages(
BatchHeaderV0Codec.getSkippedBitmapPtr(memPtr),
BatchHeaderV0Codec.getTotalL1MessagePopped(memPtr),
BatchHeaderV0Codec.getL1MessagePopped(memPtr)
);
// _popL1Messages(
// BatchHeaderV0Codec.getSkippedBitmapPtr(memPtr),
// BatchHeaderV0Codec.getTotalL1MessagePopped(memPtr),
// BatchHeaderV0Codec.getL1MessagePopped(memPtr)
// );
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
@@ -461,8 +500,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput();
}
// verify previous state root.
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// // verify previous state root.
// if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
@@ -489,20 +528,73 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
// check and update lastFinalizedBatchIndex
unchecked {
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
lastFinalizedBatchIndex = _batchIndex;
// if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
if (_batchIndex > lastFinalizedBatchIndex) lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
finalizedStateRoots[_batchIndex] = _postStateRoot;
withdrawRoots[_batchIndex] = _withdrawRoot;
// Pop finalized and non-skipped message from L1MessageQueue.
_popL1Messages(
BatchHeaderV1Codec.getSkippedBitmapPtr(memPtr),
BatchHeaderV1Codec.getTotalL1MessagePopped(memPtr),
BatchHeaderV1Codec.getL1MessagePopped(memPtr)
);
// // Pop finalized and non-skipped message from L1MessageQueue.
// _popL1Messages(
// BatchHeaderV1Codec.getSkippedBitmapPtr(memPtr),
// BatchHeaderV1Codec.getTotalL1MessagePopped(memPtr),
// BatchHeaderV1Codec.getL1MessagePopped(memPtr)
// );
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
/// @inheritdoc IScrollChain
function finalizeBatch4844(
bytes calldata _batchHeader,
bytes32 _prevStateRoot,
bytes32 _postStateRoot,
bytes32 _withdrawRoot,
bytes calldata _blobDataProof
) external override OnlyProver whenNotPaused {
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
// compute batch hash and verify
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(memPtr);
// Calls the point evaluation precompile and verifies the output
{
(bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(
abi.encodePacked(_blobVersionedHash, _blobDataProof)
);
// We verify that the point evaluation precompile call was successful by testing the latter 32 bytes of the
// response is equal to BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
if (!success) revert ErrorCallPointEvaluationPrecompileFailed();
(, uint256 result) = abi.decode(data, (uint256, uint256));
if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput();
}
// // verify previous state root.
// if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
// check and update lastFinalizedBatchIndex
unchecked {
// if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
if (_batchIndex > lastFinalizedBatchIndex) lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
finalizedStateRoots[_batchIndex] = _postStateRoot;
withdrawRoots[_batchIndex] = _withdrawRoot;
// // Pop finalized and non-skipped message from L1MessageQueue.
// _popL1Messages(
// BatchHeaderV1Codec.getSkippedBitmapPtr(memPtr),
// BatchHeaderV1Codec.getTotalL1MessagePopped(memPtr),
// BatchHeaderV1Codec.getL1MessagePopped(memPtr)
// );
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
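
The `staticcall` in `finalizeBatch4844` concatenates `_blobVersionedHash` with `_blobDataProof` and hands the result to the EIP-4844 point-evaluation precompile, which expects a 192-byte input of `versioned_hash ‖ z ‖ y ‖ commitment ‖ proof` and, on success, returns the field-element count followed by `BLS_MODULUS`. A hedged Go sketch of how such an input would be assembled off-chain (the helper below illustrates the layout only; it is not the repository's own proof builder):

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// buildPointEvaluationInput mimics abi.encodePacked(versionedHash, blobDataProof):
// blobDataProof itself is z (32 bytes) || y (32 bytes) || kzg commitment (48 bytes)
// || kzg proof (48 bytes), giving a 192-byte precompile input in total.
func buildPointEvaluationInput(versionedHash, z, y [32]byte, commitment, proof [48]byte) []byte {
	input := make([]byte, 0, 192)
	input = append(input, versionedHash[:]...)
	input = append(input, z[:]...)
	input = append(input, y[:]...)
	input = append(input, commitment[:]...)
	input = append(input, proof[:]...)
	return input
}

func main() {
	var versionedHash, z, y [32]byte
	var commitment, proof [48]byte
	versionedHash[0] = 0x01 // EIP-4844 versioned hashes start with the version byte 0x01

	input := buildPointEvaluationInput(versionedHash, z, y, commitment, proof)
	fmt.Println("input length:", len(input)) // 192
	fmt.Println("first bytes:", hex.EncodeToString(input[:8]))
}
```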

View File

@@ -12,7 +12,6 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"github.com/urfave/cli/v2"
"gorm.io/gorm"
@@ -50,6 +49,7 @@ func action(ctx *cli.Context) error {
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db connection", "err", err)
@@ -60,16 +60,10 @@ func action(ctx *cli.Context) error {
}
}()
genesisPath := ctx.String(utils.Genesis.Name)
genesis, err := utils.ReadGenesis(genesisPath)
if err != nil {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}
registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
apiSrv := apiServer(ctx, cfg, genesis.Config, db, registry)
apiSrv := apiServer(ctx, cfg, db, registry)
log.Info(
"Start coordinator api successfully.",
@@ -96,9 +90,9 @@ func action(ctx *cli.Context) error {
return nil
}
func apiServer(ctx *cli.Context, cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *http.Server {
func apiServer(ctx *cli.Context, cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *http.Server {
router := gin.New()
api.InitController(cfg, chainCfg, db, reg)
api.InitController(cfg, db, reg)
route.Route(router, cfg, reg)
port := ctx.String(httpPortFlag.Name)
srv := &http.Server{

View File

@@ -10,8 +10,6 @@ import (
"testing"
"time"
"github.com/scroll-tech/go-ethereum/params"
coordinatorConfig "scroll-tech/coordinator/internal/config"
"scroll-tech/common/cmd"
@@ -25,35 +23,29 @@ var (
// CoordinatorApp coordinator-test client manager.
type CoordinatorApp struct {
Config *coordinatorConfig.Config
ChainConfig *params.ChainConfig
Config *coordinatorConfig.Config
base *docker.App
configOriginFile string
chainConfigOriginFile string
coordinatorFile string
genesisFile string
HTTPPort int64
originFile string
coordinatorFile string
HTTPPort int64
args []string
docker.AppAPI
}
// NewCoordinatorApp return a new coordinatorApp manager.
func NewCoordinatorApp(base *docker.App, configFile string, chainConfigFile string) *CoordinatorApp {
func NewCoordinatorApp(base *docker.App, file string) *CoordinatorApp {
coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", base.Timestamp)
genesisFile := fmt.Sprintf("/tmp/%d_genesis.json", base.Timestamp)
port, _ := rand.Int(rand.Reader, big.NewInt(2000))
httpPort := port.Int64() + httpStartPort
coordinatorApp := &CoordinatorApp{
base: base,
configOriginFile: configFile,
chainConfigOriginFile: chainConfigFile,
coordinatorFile: coordinatorFile,
genesisFile: genesisFile,
HTTPPort: httpPort,
args: []string{"--log.debug", "--config", coordinatorFile, "--genesis", genesisFile, "--http", "--http.port", strconv.Itoa(int(httpPort))},
base: base,
originFile: file,
coordinatorFile: coordinatorFile,
HTTPPort: httpPort,
args: []string{"--log.debug", "--config", coordinatorFile, "--http", "--http.port", strconv.Itoa(int(httpPort))},
}
if err := coordinatorApp.MockConfig(true); err != nil {
panic(err)
@@ -83,7 +75,7 @@ func (c *CoordinatorApp) HTTPEndpoint() string {
// MockConfig creates a new coordinator config.
func (c *CoordinatorApp) MockConfig(store bool) error {
base := c.base
cfg, err := coordinatorConfig.NewConfig(c.configOriginFile)
cfg, err := coordinatorConfig.NewConfig(c.originFile)
if err != nil {
return err
}
@@ -103,30 +95,14 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
cfg.Auth.LoginExpireDurationSec = 1
c.Config = cfg
genesis, err := utils.ReadGenesis(c.chainConfigOriginFile)
if err != nil {
return err
}
c.ChainConfig = genesis.Config
if !store {
return nil
}
coordinatorConfigData, err := json.Marshal(c.Config)
if err != nil {
return err
}
genesisConfigData, err := json.Marshal(genesis)
data, err := json.Marshal(c.Config)
if err != nil {
return err
}
if writeErr := os.WriteFile(c.coordinatorFile, coordinatorConfigData, 0600); writeErr != nil {
return writeErr
}
if writeErr := os.WriteFile(c.genesisFile, genesisConfigData, 0600); writeErr != nil {
return writeErr
}
return nil
return os.WriteFile(c.coordinatorFile, data, 0600)
}

View File

@@ -1,8 +1,9 @@
package api
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
@@ -16,16 +17,20 @@ var (
SubmitProof *SubmitProofController
// Auth the auth controller
Auth *AuthController
initControllerOnce sync.Once
)
// InitController inits Controller with database
func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) {
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
if err != nil {
panic("proof receiver new verifier failure")
}
func InitController(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) {
initControllerOnce.Do(func() {
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
if err != nil {
panic("proof receiver new verifier failure")
}
Auth = NewAuthController(db)
GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)
Auth = NewAuthController(db)
GetTask = NewGetTaskController(cfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)
})
}

View File

@@ -6,7 +6,6 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/types"
@@ -24,9 +23,9 @@ type GetTaskController struct {
}
// NewGetTaskController creates a get-prover-task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVK, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVK, reg)
func NewGetTaskController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, db, vf.ChunkVK, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, db, vf.BatchVK, reg)
ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),

View File

@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"math"
"time"
"github.com/gin-gonic/gin"
@@ -12,10 +11,8 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/forks"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
@@ -30,21 +27,16 @@ type BatchProverTask struct {
BaseProverTask
batchAttemptsExceedTotal prometheus.Counter
batchTaskGetTaskTotal *prometheus.CounterVec
batchTaskGetTaskTotal prometheus.Counter
}
// NewBatchProverTask creates a new batch collector
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
func NewBatchProverTask(cfg *config.Config, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
vk: vk,
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
forkHeights: forkHeights,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
@@ -54,10 +46,10 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
Name: "coordinator_batch_attempts_exceed_total",
Help: "Total number of batch attempts exceed.",
}),
batchTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
batchTaskGetTaskTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_get_task_total",
Help: "Total number of batch get task.",
}, []string{"fork_name"}),
}),
}
return bp
}
@@ -69,48 +61,13 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
hardForkNumber, err := bp.getHardForkNumberByName(getTaskParameter.HardForkName)
if err != nil {
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
return nil, err
}
// if the hard fork number set, rollup relayer must generate the chunk from hard fork number,
// so the hard fork chunk's start_block_number must be ForkBlockNumber
var startChunkIndex uint64 = 0
var endChunkIndex uint64 = math.MaxInt64
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", getTaskParameter.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if startChunk == nil {
return nil, nil
}
startChunkIndex = startChunk.Index
}
if toBlockNum != math.MaxInt64 {
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, toBlockNum)
if err != nil {
log.Error("failed to get fork end chunk index", "forkName", getTaskParameter.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if toChunk != nil {
// toChunk being nil only indicates that we haven't reached the fork boundary yet,
// so there is no need to change endChunkIndex from math.MaxInt64
endChunkIndex = toChunk.Index
}
}
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
var batchTask *orm.Batch
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBatchTask *orm.Batch
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx, startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -119,7 +76,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Why query again? To let one task be assigned to multiple provers, batches in `ProvingTaskAssigned`
// status must also be handed out. A single query with `proving_status in (1, 2)` would not use the postgres index, so the lookup is split into two queries.
if tmpBatchTask == nil {
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx, startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -179,7 +136,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure
}
bp.batchTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
bp.batchTaskGetTaskTotal.Inc()
return taskMsg, nil
}
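
The loop above queries in two steps for the reason given in its comment: a single `proving_status IN (1, 2)` filter would not hit the postgres index, so an already-assigned batch is tried first and an unassigned one only as a fallback. A stripped-down GORM sketch of that pattern (the `Batch` model and status codes here are simplified stand-ins for the coordinator's ORM types):

```go
package provertask

import (
	"context"
	"errors"

	"gorm.io/gorm"
)

// Batch is a simplified stand-in for the coordinator's batch record.
type Batch struct {
	Index          uint64
	ProvingStatus  int
	TotalAttempts  int
	ActiveAttempts int
}

// Assumed status codes, mirroring "proving_status in (1, 2)" from the comment above.
const (
	provingTaskUnassigned = 1
	provingTaskAssigned   = 2
)

// getBatchForProver first re-offers an already-assigned batch (so one task can be
// given to several provers) and only then falls back to an unassigned batch.
// Keeping each query on a single proving_status value lets postgres use its index.
func getBatchForProver(ctx context.Context, db *gorm.DB, maxActive, maxTotal uint8) (*Batch, error) {
	query := func(status int) (*Batch, error) {
		var batch Batch
		err := db.WithContext(ctx).
			Where("proving_status = ?", status).
			Where("total_attempts < ?", maxTotal).
			Where("active_attempts < ?", maxActive).
			First(&batch).Error
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		if err != nil {
			return nil, err
		}
		return &batch, nil
	}

	if batch, err := query(provingTaskAssigned); batch != nil || err != nil {
		return batch, err
	}
	return query(provingTaskUnassigned)
}
```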

View File

@@ -10,10 +10,8 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/forks"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
@@ -23,25 +21,24 @@ import (
coordinatorType "scroll-tech/coordinator/internal/types"
)
// ErrCoordinatorInternalFailure coordinator internal db failure
var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ChunkProverTask the chunk prover task
type ChunkProverTask struct {
BaseProverTask
chunkAttemptsExceedTotal prometheus.Counter
chunkTaskGetTaskTotal *prometheus.CounterVec
chunkTaskGetTaskTotal prometheus.Counter
}
// NewChunkProverTask creates a new chunk prover task
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
func NewChunkProverTask(cfg *config.Config, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
vk: vk,
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
forkHeights: forkHeights,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
@@ -51,10 +48,10 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
Name: "coordinator_chunk_attempts_exceed_total",
Help: "Total number of chunk attempts exceed.",
}),
chunkTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
chunkTaskGetTaskTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_chunk_get_task_total",
Help: "Total number of chunk get task.",
}, []string{"fork_name"}),
}),
}
return cp
}
@@ -66,24 +63,13 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
hardForkNumber, err := cp.getHardForkNumberByName(getTaskParameter.HardForkName)
if err != nil {
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
return nil, err
}
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
if toBlockNum > getTaskParameter.ProverHeight {
toBlockNum = getTaskParameter.ProverHeight + 1
}
maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := cp.cfg.ProverManager.SessionAttempts
var chunkTask *orm.Chunk
for i := 0; i < 5; i++ {
var getTaskError error
var tmpChunkTask *orm.Chunk
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx, fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx, getTaskParameter.ProverHeight, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -92,7 +78,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Why query again? To let one task be assigned to multiple provers, chunks in `ProvingTaskAssigned`
// status must also be handed out. A single query with `proving_status in (1, 2)` would not use the postgres index, so the lookup is split into two queries.
if tmpChunkTask == nil {
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx, fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx, getTaskParameter.ProverHeight, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -151,7 +137,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure
}
cp.chunkTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
cp.chunkTaskGetTaskTotal.Inc()
return taskMsg, nil
}

View File

@@ -13,12 +13,6 @@ import (
coordinatorType "scroll-tech/coordinator/internal/types"
)
// ErrCoordinatorInternalFailure coordinator internal db failure
var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ErrHardForkName indicates client request with the wrong hard fork name
var ErrHardForkName = fmt.Errorf("wrong hard fork name")
// ProverTask is the interface of a collector that sends data to provers
type ProverTask interface {
Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
@@ -30,9 +24,6 @@ type BaseProverTask struct {
db *gorm.DB
vk string
nameForkMap map[string]uint64
forkHeights []uint64
batchOrm *orm.Batch
chunkOrm *orm.Chunk
blockOrm *orm.L2Block
@@ -100,18 +91,3 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
}
return &ptc, nil
}
func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error) {
// when the first hard fork upgrade, the prover don't pass the fork_name to coordinator.
// so coordinator need to be compatible.
if forkName == "" {
return 0, nil
}
hardForkNumber, exist := b.nameForkMap[forkName]
if !exist {
return 0, ErrHardForkName
}
return hardForkNumber, nil
}

View File

@@ -203,17 +203,17 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
}
func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, chunkHash string) error {
batch, err := m.chunkOrm.GetChunkByHash(ctx, chunkHash)
batchHash, err := m.chunkOrm.GetChunkBatchHash(ctx, chunkHash)
if err != nil {
return err
}
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batch.BatchHash)
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batchHash)
if err != nil {
return err
}
if allReady {
err := m.batchOrm.UpdateChunkProofsStatusByBatchHash(ctx, batch.BatchHash, types.ChunkProofsStatusReady)
err := m.batchOrm.UpdateChunkProofsStatusByBatchHash(ctx, batchHash, types.ChunkProofsStatusReady)
if err != nil {
return err
}

View File

@@ -72,14 +72,12 @@ func (*Batch) TableName() string {
// GetUnassignedBatch retrieves unassigned batch based on the specified limit.
// The returned batch are sorted in ascending order by their index.
func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
db = db.Where("start_chunk_index >= ?", startChunkIndex)
db = db.Where("end_chunk_index < ?", endChunkIndex)
var batch Batch
err := db.First(&batch).Error
@@ -95,14 +93,12 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChun
// GetAssignedBatch retrieves assigned batch based on the specified limit.
// The returned batch are sorted in ascending order by their index.
func (o *Batch) GetAssignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
func (o *Batch) GetAssignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
db = db.Where("start_chunk_index >= ?", startChunkIndex)
db = db.Where("end_chunk_index < ?", endChunkIndex)
var batch Batch
err := db.First(&batch).Error

View File

@@ -70,14 +70,13 @@ func (*Chunk) TableName() string {
// GetUnassignedChunk retrieves unassigned chunk based on the specified limit.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
func (o *Chunk) GetUnassignedChunk(ctx context.Context, height int, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("start_block_number >= ?", fromBlockNum)
db = db.Where("end_block_number < ?", toBlockNum)
db = db.Where("end_block_number <= ?", height)
var chunk Chunk
err := db.First(&chunk).Error
@@ -93,14 +92,13 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum
// GetAssignedChunk retrieves assigned chunk based on the specified limit.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetAssignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
func (o *Chunk) GetAssignedChunk(ctx context.Context, height int, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("start_block_number >= ?", fromBlockNum)
db = db.Where("end_block_number < ?", toBlockNum)
db = db.Where("end_block_number <= ?", height)
var chunk Chunk
err := db.First(&chunk).Error
@@ -198,33 +196,18 @@ func (o *Chunk) CheckIfBatchChunkProofsAreReady(ctx context.Context, batchHash s
return count == 0, nil
}
// GetChunkByHash retrieves the given chunk.
func (o *Chunk) GetChunkByHash(ctx context.Context, chunkHash string) (*Chunk, error) {
// GetChunkBatchHash retrieves the batchHash of a given chunk.
func (o *Chunk) GetChunkBatchHash(ctx context.Context, chunkHash string) (string, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash = ?", chunkHash)
db = db.Select("batch_hash")
var chunk Chunk
if err := db.First(&chunk).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetChunkByHash error: %w, chunk hash: %v", err, chunkHash)
return "", fmt.Errorf("Chunk.GetChunkBatchHash error: %w, chunk hash: %v", err, chunkHash)
}
return &chunk, nil
}
// GetChunkByStartBlockNumber retrieves the given chunk.
func (o *Chunk) GetChunkByStartBlockNumber(ctx context.Context, startBlockNumber uint64) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("start_block_number = ?", startBlockNumber)
var chunk Chunk
if err := db.First(&chunk).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("Chunk.GetChunkByStartBlockNumber error: %w, chunk start block number: %v", err, startBlockNumber)
}
return &chunk, nil
return chunk.BatchHash, nil
}
// GetAttemptsByHash get chunk attempts by hash. Used by unit test

View File

@@ -2,8 +2,7 @@ package types
// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
ProverHeight int `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
VK string `form:"vk" json:"vk"`
}

View File

@@ -15,7 +15,6 @@ import (
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
@@ -35,13 +34,6 @@ import (
"scroll-tech/coordinator/internal/route"
)
const (
forkNumberFour = 4
forkNumberThree = 3
forkNumberTwo = 2
forkNumberOne = 1
)
var (
dbCfg *database.Config
conf *config.Config
@@ -57,14 +49,8 @@ var (
block1 *encoding.Block
block2 *encoding.Block
chunk *encoding.Chunk
hardForkChunk1 *encoding.Chunk
hardForkChunk2 *encoding.Chunk
batch *encoding.Batch
hardForkBatch1 *encoding.Batch
hardForkBatch2 *encoding.Batch
chunk *encoding.Chunk
batch *encoding.Batch
tokenTimeout int
)
@@ -84,7 +70,7 @@ func randomURL() string {
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, nameForkMap map[string]int64) (*cron.Collector, *http.Server) {
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
var err error
db, err = database.InitDB(dbCfg)
assert.NoError(t, err)
@@ -112,26 +98,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
},
}
var chainConf params.ChainConfig
for forkName, forkNumber := range nameForkMap {
switch forkName {
case "banach":
chainConf.BanachBlock = big.NewInt(forkNumber)
case "london":
chainConf.LondonBlock = big.NewInt(forkNumber)
case "istanbul":
chainConf.IstanbulBlock = big.NewInt(forkNumber)
case "homestead":
chainConf.HomesteadBlock = big.NewInt(forkNumber)
case "eip155":
chainConf.EIP155Block = big.NewInt(forkNumber)
}
}
proofCollector := cron.NewCollector(context.Background(), db, conf, nil)
router := gin.New()
api.InitController(conf, &chainConf, db, nil)
api.InitController(conf, db, nil)
route.Route(router, conf, nil)
srv := &http.Server{
Addr: coordinatorURL,
@@ -187,14 +157,9 @@ func setEnv(t *testing.T) {
assert.NoError(t, err)
chunk = &encoding.Chunk{Blocks: []*encoding.Block{block1, block2}}
hardForkChunk1 = &encoding.Chunk{Blocks: []*encoding.Block{block1}}
hardForkChunk2 = &encoding.Chunk{Blocks: []*encoding.Block{block2}}
assert.NoError(t, err)
batch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk}}
hardForkBatch1 = &encoding.Batch{Index: 1, Chunks: []*encoding.Chunk{hardForkChunk1}}
hardForkBatch2 = &encoding.Batch{Index: 2, Chunks: []*encoding.Chunk{hardForkChunk2}}
}
func TestApis(t *testing.T) {
@@ -210,7 +175,6 @@ func TestApis(t *testing.T) {
t.Run("TestInvalidProof", testInvalidProof)
t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
t.Run("TestTimeoutProof", testTimeoutProof)
t.Run("TestHardFork", testHardForkAssignTask)
// Teardown
t.Cleanup(func() {
@@ -221,7 +185,7 @@ func TestApis(t *testing.T) {
func testHandshake(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, map[string]int64{"homestead": forkNumberOne})
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -234,7 +198,7 @@ func testHandshake(t *testing.T) {
func testFailedHandshake(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, map[string]int64{"homestead": forkNumberOne})
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
}()
@@ -252,7 +216,7 @@ func testFailedHandshake(t *testing.T) {
func testGetTaskBlocked(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"homestead": forkNumberOne})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -262,7 +226,7 @@ func testGetTaskBlocked(t *testing.T) {
assert.True(t, chunkProver.healthCheckSuccess(t))
batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, version.Version)
assert.True(t, batchProver.healthCheckSuccess(t))
assert.True(t, chunkProver.healthCheckSuccess(t))
err := proverBlockListOrm.InsertProverPublicKey(context.Background(), chunkProver.proverName, chunkProver.publicKey())
assert.NoError(t, err)
@@ -296,7 +260,7 @@ func testGetTaskBlocked(t *testing.T) {
func testOutdatedProverVersion(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"homestead": forkNumberOne})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -319,241 +283,9 @@ func testOutdatedProverVersion(t *testing.T) {
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}
func testHardForkAssignTask(t *testing.T) {
tests := []struct {
name string
proofType message.ProofType
forkNumbers map[string]int64
proverForkNames []string
exceptTaskNumber int
exceptGetTaskErrCodes []int
exceptGetTaskErrMsgs []string
}{
{ // hard fork 4, prover 4 block [2-3]
name: "noTaskForkChunkProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"banach": forkNumberFour},
exceptTaskNumber: 0,
proverForkNames: []string{"banach", "banach"},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
},
{
name: "noTaskForkBatchProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"banach": forkNumberFour},
exceptTaskNumber: 0,
proverForkNames: []string{"banach", "banach"},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
},
{ // hard fork 1, prover 1 block [2-3]
name: "noTaskForkChunkProverVersionLessThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "homestead": forkNumberOne},
exceptTaskNumber: 0,
proverForkNames: []string{"homestead", "homestead"},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
},
{
name: "noTaskForkBatchProverVersionLessThanHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "homestead": forkNumberOne},
exceptTaskNumber: 0,
proverForkNames: []string{"homestead", "homestead"},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
},
{
name: "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 0,
proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
},
{ // hard fork 3, prover 3 block [2-3]
name: "oneTaskForkChunkProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"london", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
},
{
name: "oneTaskForkBatchProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"london", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
},
{ // hard fork 2, prover 2 block [2-3]
name: "oneTaskForkChunkProverVersionLessThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"istanbul", "istanbul"},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
},
{
name: "oneTaskForkBatchProverVersionLessThanHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"istanbul", "istanbul"},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
},
{ // hard fork 2, prover 2 block [2-3]
name: "twoTaskForkChunkProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo},
exceptTaskNumber: 2,
proverForkNames: []string{"istanbul", "istanbul"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{
name: "twoTaskForkBatchProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo},
exceptTaskNumber: 2,
proverForkNames: []string{"istanbul", "istanbul"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{ // hard fork 4, prover 3 block [2-3]
name: "twoTaskForkChunkProverVersionLessThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"banach": forkNumberFour, "istanbul": forkNumberTwo},
exceptTaskNumber: 2,
proverForkNames: []string{"istanbul", "istanbul"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{ // hard fork 3, prover1:2 prover2:3 block [2-3]
name: "twoTaskForkChunkProverVersionMiddleHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"istanbul", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{
name: "twoTaskForkBatchProverVersionMiddleHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"istanbul", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{ // hard fork 3, prover1:2 prover2:3 block [2-3]
name: "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{
name: "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{ // hard fork 2, prover 2 block [2-3]
name: "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, tt.forkNumbers)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
chunkProof := &message.ChunkProof{
StorageTrace: []byte("testStorageTrace"),
Protocol: []byte("testProtocol"),
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
}
// the inserted block numbers are 2 and 3
// chunk1 batch1 contains block number 2
// chunk2 batch2 contains block number 3
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
dbHardForkChunk1, err := chunkOrm.InsertChunk(context.Background(), hardForkChunk1)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 2, dbHardForkChunk1.Hash)
assert.NoError(t, err)
err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbHardForkChunk1.Hash, chunkProof, types.ProvingTaskUnassigned, 1)
assert.NoError(t, err)
dbHardForkBatch1, err := batchOrm.InsertBatch(context.Background(), hardForkBatch1)
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, dbHardForkBatch1.Hash)
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbHardForkBatch1.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
dbHardForkChunk2, err := chunkOrm.InsertChunk(context.Background(), hardForkChunk2)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 3, 100, dbHardForkChunk2.Hash)
assert.NoError(t, err)
err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbHardForkChunk2.Hash, chunkProof, types.ProvingTaskUnassigned, 1)
assert.NoError(t, err)
dbHardForkBatch2, err := batchOrm.InsertBatch(context.Background(), hardForkBatch2)
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 1, 1, dbHardForkBatch2.Hash)
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbHardForkBatch2.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
getTaskNumber := 0
for i := 0; i < 2; i++ {
mockProver := newMockProver(t, fmt.Sprintf("mock_prover_%d", i), coordinatorURL, tt.proofType, version.Version)
proverTask, errCode, errMsg := mockProver.getProverTask(t, tt.proofType, tt.proverForkNames[i])
assert.Equal(t, tt.exceptGetTaskErrCodes[i], errCode)
assert.Equal(t, tt.exceptGetTaskErrMsgs[i], errMsg)
if errCode != types.Success {
continue
}
getTaskNumber++
mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success)
}
assert.Equal(t, getTaskNumber, tt.exceptTaskNumber)
})
}
}
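// Note on the matrix above: every case reduces to one rule — a prover only
// receives a task when the hard_fork_name it declares matches the fork that is
// active at the task's block height, with an empty name standing for the range
// before any configured fork activates. The sketch below is a hypothetical
// illustration of that predicate, written for readability only; it is not part
// of this change and not the coordinator's actual implementation, and the
// helper names activeForkAt / assignable are invented here.

// activeForkAt returns the newest fork whose activation height is at or below
// blockNumber, or "" when no configured fork has activated yet.
func activeForkAt(forkHeights map[string]int64, blockNumber int64) string {
	name, best := "", int64(-1)
	for fork, height := range forkHeights {
		if height <= blockNumber && height > best {
			name, best = fork, height
		}
	}
	return name
}

// assignable reports whether a prover declaring proverForkName may take a task
// whose blocks sit at blockNumber. With forkHeights {"istanbul": 2, "london": 3}
// — assuming forkNumberTwo and forkNumberThree stand for heights 2 and 3, as the
// "block [2-3]" comments above suggest — an "istanbul" prover is assignable at
// block 2 but not at block 3, matching the
// "oneTaskForkChunkProverVersionLessThanHardFork" expectation.
func assignable(forkHeights map[string]int64, proverForkName string, blockNumber int64) bool {
	return activeForkAt(forkHeights, blockNumber) == proverForkName
}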
func testValidProof(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -579,13 +311,14 @@ func testValidProof(t *testing.T) {
} else {
proofType = message.ProofTypeBatch
}
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
proofStatus := verifiedSuccess
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
// only provers 0 & 1 submit valid proofs.
proofStatus := generatedFailed
if i <= 1 {
proofStatus = verifiedSuccess
}
proverTask := provers[i].getProverTask(t, proofType)
assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, proofStatus, types.Success)
}
@@ -636,7 +369,7 @@ func testValidProof(t *testing.T) {
func testInvalidProof(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -663,10 +396,8 @@ func testInvalidProof(t *testing.T) {
proofType = message.ProofTypeBatch
}
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
proverTask := provers[i].getProverTask(t, proofType)
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure)
}
@@ -714,7 +445,7 @@ func testInvalidProof(t *testing.T) {
func testProofGeneratedFailed(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -741,10 +472,8 @@ func testProofGeneratedFailed(t *testing.T) {
proofType = message.ProofTypeBatch
}
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
proverTask := provers[i].getProverTask(t, proofType)
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure)
}
@@ -803,7 +532,7 @@ func testProofGeneratedFailed(t *testing.T) {
func testTimeoutProof(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -829,16 +558,12 @@ func testTimeoutProof(t *testing.T) {
// create the first chunk & batch mock provers, which will not send any proof.
chunkProver1 := newMockProver(t, "prover_test"+strconv.Itoa(0), coordinatorURL, message.ProofTypeChunk, version.Version)
proverChunkTask, errChunkCode, errChunkMsg := chunkProver1.getProverTask(t, message.ProofTypeChunk, "istanbul")
proverChunkTask := chunkProver1.getProverTask(t, message.ProofTypeChunk)
assert.NotNil(t, proverChunkTask)
assert.Equal(t, errChunkCode, types.Success)
assert.Equal(t, errChunkMsg, "")
batchProver1 := newMockProver(t, "prover_test"+strconv.Itoa(1), coordinatorURL, message.ProofTypeBatch, version.Version)
proverBatchTask, errBatchCode, errBatchMsg := batchProver1.getProverTask(t, message.ProofTypeBatch, "istanbul")
proverBatchTask := batchProver1.getProverTask(t, message.ProofTypeBatch)
assert.NotNil(t, proverBatchTask)
assert.Equal(t, errBatchCode, types.Success)
assert.Equal(t, errBatchMsg, "")
// verify the proof status; it should be assigned, because the prover didn't send any proof
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
@@ -864,17 +589,13 @@ func testTimeoutProof(t *testing.T) {
// create a second mock prover, which will send a valid proof.
chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), coordinatorURL, message.ProofTypeChunk, version.Version)
proverChunkTask2, chunkTask2ErrCode, chunkTask2ErrMsg := chunkProver2.getProverTask(t, message.ProofTypeChunk, "istanbul")
proverChunkTask2 := chunkProver2.getProverTask(t, message.ProofTypeChunk)
assert.NotNil(t, proverChunkTask2)
assert.Equal(t, chunkTask2ErrCode, types.Success)
assert.Equal(t, chunkTask2ErrMsg, "")
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success)
batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version)
proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul")
proverBatchTask2 := batchProver2.getProverTask(t, message.ProofTypeBatch)
assert.NotNil(t, proverBatchTask2)
assert.Equal(t, batchTask2ErrCode, types.Success)
assert.Equal(t, batchTask2ErrMsg, "")
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success)
// verify the proof status; it should be verified now, because the second prover sent a valid proof

View File

@@ -135,7 +135,7 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {
return true
}
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) {
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) *types.GetTaskSchema {
// get task from coordinator
token := r.connectToCoordinator(t)
assert.NotEmpty(t, token)
@@ -151,12 +151,17 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "hard_fork_name": forkName}).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode())
return &result.Data, result.ErrCode, result.ErrMsg
assert.Equal(t, ctypes.Success, result.ErrCode)
assert.NotEmpty(t, result.Data.TaskID)
assert.NotEmpty(t, result.Data.TaskType)
assert.NotEmpty(t, result.Data.TaskData)
return &result.Data
}
// Testing expected errors returned by coordinator.

View File

@@ -20,37 +20,21 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb
cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw=
cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk=
cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic=
cloud.google.com/go/accessapproval v1.7.4 h1:ZvLvJ952zK8pFHINjpMBY5k7LTAp/6pBf50RDMRgBUI=
cloud.google.com/go/accessapproval v1.7.4/go.mod h1:/aTEh45LzplQgFYdQdwPMR9YdX0UlhBmvB84uAmQKUc=
cloud.google.com/go/accesscontextmanager v1.8.4 h1:Yo4g2XrBETBCqyWIibN3NHNPQKUfQqti0lI+70rubeE=
cloud.google.com/go/accesscontextmanager v1.8.4/go.mod h1:ParU+WbMpD34s5JFEnGAnPBYAgUHozaTmDJU7aCU9+M=
cloud.google.com/go/aiplatform v1.54.0 h1:wH7OYl9Vq/5tupok0BPTFY9xaTLb0GxkReHtB5PF7cI=
cloud.google.com/go/aiplatform v1.54.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU=
cloud.google.com/go/analytics v0.21.6 h1:fnV7B8lqyEYxCU0LKk+vUL7mTlqRAq4uFlIthIdr/iA=
cloud.google.com/go/analytics v0.21.6/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w=
cloud.google.com/go/apigateway v1.6.4 h1:VVIxCtVerchHienSlaGzV6XJGtEM9828Erzyr3miUGs=
cloud.google.com/go/apigateway v1.6.4/go.mod h1:0EpJlVGH5HwAN4VF4Iec8TAzGN1aQgbxAWGJsnPCGGY=
cloud.google.com/go/apigeeconnect v1.6.4 h1:jSoGITWKgAj/ssVogNE9SdsTqcXnryPzsulENSRlusI=
cloud.google.com/go/apigeeconnect v1.6.4/go.mod h1:CapQCWZ8TCjnU0d7PobxhpOdVz/OVJ2Hr/Zcuu1xFx0=
cloud.google.com/go/apigeeregistry v0.8.2 h1:DSaD1iiqvELag+lV4VnnqUUFd8GXELu01tKVdWZrviE=
cloud.google.com/go/apigeeregistry v0.8.2/go.mod h1:h4v11TDGdeXJDJvImtgK2AFVvMIgGWjSb0HRnBSjcX8=
cloud.google.com/go/appengine v1.8.4 h1:Qub3fqR7iA1daJWdzjp/Q0Jz0fUG0JbMc7Ui4E9IX/E=
cloud.google.com/go/appengine v1.8.4/go.mod h1:TZ24v+wXBujtkK77CXCpjZbnuTvsFNT41MUaZ28D6vg=
cloud.google.com/go/area120 v0.8.4 h1:YnSO8m02pOIo6AEOgiOoUDVbw4pf+bg2KLHi4rky320=
cloud.google.com/go/area120 v0.8.4/go.mod h1:jfawXjxf29wyBXr48+W+GyX/f8fflxp642D/bb9v68M=
cloud.google.com/go/artifactregistry v1.14.6 h1:/hQaadYytMdA5zBh+RciIrXZQBWK4vN7EUsrQHG+/t8=
cloud.google.com/go/artifactregistry v1.14.6/go.mod h1:np9LSFotNWHcjnOgh8UVK0RFPCTUGbO0ve3384xyHfE=
cloud.google.com/go/asset v1.15.3 h1:uI8Bdm81s0esVWbWrTHcjFDFKNOa9aB7rI1vud1hO84=
cloud.google.com/go/asset v1.15.3/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU=
cloud.google.com/go/assuredworkloads v1.11.4 h1:FsLSkmYYeNuzDm8L4YPfLWV+lQaUrJmH5OuD37t1k20=
cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U=
cloud.google.com/go/automl v1.13.4 h1:i9tOKXX+1gE7+rHpWKjiuPfGBVIYoWvLNIGpWgPtF58=
cloud.google.com/go/automl v1.13.4/go.mod h1:ULqwX/OLZ4hBVfKQaMtxMSTlPx0GqGbWN8uA/1EqCP8=
cloud.google.com/go/baremetalsolution v1.2.3 h1:oQiFYYCe0vwp7J8ZmF6siVKEumWtiPFJMJcGuyDVRUk=
cloud.google.com/go/baremetalsolution v1.2.3/go.mod h1:/UAQ5xG3faDdy180rCUv47e0jvpp3BFxT+Cl0PFjw5g=
cloud.google.com/go/batch v1.6.3 h1:mPiIH20a5NU02rucbAmLeO4sLPO9hrTK0BLjdHyW8xw=
cloud.google.com/go/batch v1.6.3/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU=
cloud.google.com/go/beyondcorp v1.0.3 h1:VXf9SnrnSmj2BF2cHkoTHvOUp8gjsz1KJFOMW7czdsY=
cloud.google.com/go/beyondcorp v1.0.3/go.mod h1:HcBvnEd7eYr+HGDd5ZbuVmBYX019C6CEXBonXbCVwJo=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE=
@@ -60,151 +44,85 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/bigquery v1.57.1 h1:FiULdbbzUxWD0Y4ZGPSVCDLvqRSyCIO6zKV7E2nf5uA=
cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug=
cloud.google.com/go/bigtable v1.2.0 h1:F4cCmA4nuV84V5zYQ3MKY+M1Cw1avHDuf3S/LcZPA9c=
cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
cloud.google.com/go/billing v1.17.4 h1:77/4kCqzH6Ou5CCDzNmqmboE+WvbwFBJmw1QZQz19AI=
cloud.google.com/go/billing v1.17.4/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk=
cloud.google.com/go/binaryauthorization v1.7.3 h1:3R6WYn1JKIaVicBmo18jXubu7xh4mMkmbIgsTXk0cBA=
cloud.google.com/go/binaryauthorization v1.7.3/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU=
cloud.google.com/go/certificatemanager v1.7.4 h1:5YMQ3Q+dqGpwUZ9X5sipsOQ1fLPsxod9HNq0+nrqc6I=
cloud.google.com/go/certificatemanager v1.7.4/go.mod h1:FHAylPe/6IIKuaRmHbjbdLhGhVQ+CWHSD5Jq0k4+cCE=
cloud.google.com/go/channel v1.17.3 h1:Rd4+fBrjiN6tZ4TR8R/38elkyEkz6oogGDr7jDyjmMY=
cloud.google.com/go/channel v1.17.3/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE=
cloud.google.com/go/cloudbuild v1.15.0 h1:9IHfEMWdCklJ1cwouoiQrnxmP0q3pH7JUt8Hqx4Qbck=
cloud.google.com/go/cloudbuild v1.15.0/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM=
cloud.google.com/go/clouddms v1.7.3 h1:xe/wJKz55VO1+L891a1EG9lVUgfHr9Ju/I3xh1nwF84=
cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc=
cloud.google.com/go/cloudtasks v1.12.4 h1:5xXuFfAjg0Z5Wb81j2GAbB3e0bwroCeSF+5jBn/L650=
cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0=
cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78=
cloud.google.com/go/compute/metadata v0.2.0 h1:nBbNSZyDpkNlo3DepaaLKVuO7ClyifSAmNloSCZrHnQ=
cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/contactcenterinsights v1.12.0 h1:wP41IUA4ucMVooj/TP53jd7vbNjWrDkAPOeulVJGT5U=
cloud.google.com/go/contactcenterinsights v1.12.0/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis=
cloud.google.com/go/container v1.28.0 h1:/o82CFWXIYnT9p/07SnRgybqL3Pmmu86jYIlzlJVUBY=
cloud.google.com/go/container v1.28.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4=
cloud.google.com/go/containeranalysis v0.11.3 h1:5rhYLX+3a01drpREqBZVXR9YmWH45RnML++8NsCtuD8=
cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U=
cloud.google.com/go/datacatalog v1.19.0 h1:rbYNmHwvAOOwnW2FPXYkaK3Mf1MmGqRzK0mMiIEyLdo=
cloud.google.com/go/datacatalog v1.19.0/go.mod h1:5FR6ZIF8RZrtml0VUao22FxhdjkoG+a0866rEnObryM=
cloud.google.com/go/dataflow v0.9.4 h1:7VmCNWcPJBS/srN2QnStTB6nu4Eb5TMcpkmtaPVhRt4=
cloud.google.com/go/dataflow v0.9.4/go.mod h1:4G8vAkHYCSzU8b/kmsoR2lWyHJD85oMJPHMtan40K8w=
cloud.google.com/go/dataform v0.9.1 h1:jV+EsDamGX6cE127+QAcCR/lergVeeZdEQ6DdrxW3sQ=
cloud.google.com/go/dataform v0.9.1/go.mod h1:pWTg+zGQ7i16pyn0bS1ruqIE91SdL2FDMvEYu/8oQxs=
cloud.google.com/go/datafusion v1.7.4 h1:Q90alBEYlMi66zL5gMSGQHfbZLB55mOAg03DhwTTfsk=
cloud.google.com/go/datafusion v1.7.4/go.mod h1:BBs78WTOLYkT4GVZIXQCZT3GFpkpDN4aBY4NDX/jVlM=
cloud.google.com/go/datalabeling v0.8.4 h1:zrq4uMmunf2KFDl/7dS6iCDBBAxBnKVDyw6+ajz3yu0=
cloud.google.com/go/datalabeling v0.8.4/go.mod h1:Z1z3E6LHtffBGrNUkKwbwbDxTiXEApLzIgmymj8A3S8=
cloud.google.com/go/dataplex v1.11.2 h1:AfFFR15Ifh4U+Me1IBztrSd5CrasTODzy3x8KtDyHdc=
cloud.google.com/go/dataplex v1.11.2/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c=
cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
cloud.google.com/go/dataproc/v2 v2.3.0 h1:tTVP9tTxmc8fixxOd/8s6Q6Pz/+yzn7r7XdZHretQH0=
cloud.google.com/go/dataproc/v2 v2.3.0/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY=
cloud.google.com/go/dataqna v0.8.4 h1:NJnu1kAPamZDs/if3bJ3+Wb6tjADHKL83NUWsaIp2zg=
cloud.google.com/go/dataqna v0.8.4/go.mod h1:mySRKjKg5Lz784P6sCov3p1QD+RZQONRMRjzGNcFd0c=
cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/datastore v1.15.0 h1:0P9WcsQeTWjuD1H14JIY7XQscIPQ4Laje8ti96IC5vg=
cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8=
cloud.google.com/go/datastream v1.10.3 h1:Z2sKPIB7bT2kMW5Uhxy44ZgdJzxzE5uKjavoW+EuHEE=
cloud.google.com/go/datastream v1.10.3/go.mod h1:YR0USzgjhqA/Id0Ycu1VvZe8hEWwrkjuXrGbzeDOSEA=
cloud.google.com/go/deploy v1.15.0 h1:ZdmYzRMTGkVyP1nXEUat9FpbJGJemDcNcx82RSSOElc=
cloud.google.com/go/deploy v1.15.0/go.mod h1:e5XOUI5D+YGldyLNZ21wbp9S8otJbBE4i88PtO9x/2g=
cloud.google.com/go/dialogflow v1.44.3 h1:cK/f88KX+YVR4tLH4clMQlvrLWD2qmKJQziusjGPjmc=
cloud.google.com/go/dialogflow v1.44.3/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ=
cloud.google.com/go/dlp v1.11.1 h1:OFlXedmPP/5//X1hBEeq3D9kUVm9fb6ywYANlpv/EsQ=
cloud.google.com/go/dlp v1.11.1/go.mod h1:/PA2EnioBeXTL/0hInwgj0rfsQb3lpE3R8XUJxqUNKI=
cloud.google.com/go/documentai v1.23.5 h1:KAlzT+q8qvRxAmhsJUvLtfFHH0PNvz3M79H6CgVBKL8=
cloud.google.com/go/documentai v1.23.5/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g=
cloud.google.com/go/domains v0.9.4 h1:ua4GvsDztZ5F3xqjeLKVRDeOvJshf5QFgWGg1CKti3A=
cloud.google.com/go/domains v0.9.4/go.mod h1:27jmJGShuXYdUNjyDG0SodTfT5RwLi7xmH334Gvi3fY=
cloud.google.com/go/edgecontainer v1.1.4 h1:Szy3Q/N6bqgQGyxqjI+6xJZbmvPvnFHp3UZr95DKcQ0=
cloud.google.com/go/edgecontainer v1.1.4/go.mod h1:AvFdVuZuVGdgaE5YvlL1faAoa1ndRR/5XhXZvPBHbsE=
cloud.google.com/go/errorreporting v0.3.0 h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0=
cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
cloud.google.com/go/essentialcontacts v1.6.5 h1:S2if6wkjR4JCEAfDtIiYtD+sTz/oXjh2NUG4cgT1y/Q=
cloud.google.com/go/essentialcontacts v1.6.5/go.mod h1:jjYbPzw0x+yglXC890l6ECJWdYeZ5dlYACTFL0U/VuM=
cloud.google.com/go/eventarc v1.13.3 h1:+pFmO4eu4dOVipSaFBLkmqrRYG94Xl/TQZFOeohkuqU=
cloud.google.com/go/eventarc v1.13.3/go.mod h1:RWH10IAZIRcj1s/vClXkBgMHwh59ts7hSWcqD3kaclg=
cloud.google.com/go/filestore v1.8.0 h1:/+wUEGwk3x3Kxomi2cP5dsR8+SIXxo7M0THDjreFSYo=
cloud.google.com/go/filestore v1.8.0/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI=
cloud.google.com/go/firestore v1.14.0 h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw=
cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ=
cloud.google.com/go/functions v1.15.4 h1:ZjdiV3MyumRM6++1Ixu6N0VV9LAGlCX4AhW6Yjr1t+U=
cloud.google.com/go/functions v1.15.4/go.mod h1:CAsTc3VlRMVvx+XqXxKqVevguqJpnVip4DdonFsX28I=
cloud.google.com/go/gkebackup v1.3.4 h1:KhnOrr9A1tXYIYeXKqCKbCI8TL2ZNGiD3dm+d7BDUBg=
cloud.google.com/go/gkebackup v1.3.4/go.mod h1:gLVlbM8h/nHIs09ns1qx3q3eaXcGSELgNu1DWXYz1HI=
cloud.google.com/go/gkeconnect v0.8.4 h1:1JLpZl31YhQDQeJ98tK6QiwTpgHFYRJwpntggpQQWis=
cloud.google.com/go/gkeconnect v0.8.4/go.mod h1:84hZz4UMlDCKl8ifVW8layK4WHlMAFeq8vbzjU0yJkw=
cloud.google.com/go/gkehub v0.14.4 h1:J5tYUtb3r0cl2mM7+YHvV32eL+uZQ7lONyUZnPikCEo=
cloud.google.com/go/gkehub v0.14.4/go.mod h1:Xispfu2MqnnFt8rV/2/3o73SK1snL8s9dYJ9G2oQMfc=
cloud.google.com/go/gkemulticloud v1.0.3 h1:NmJsNX9uQ2CT78957xnjXZb26TDIMvv+d5W2vVUt0Pg=
cloud.google.com/go/gkemulticloud v1.0.3/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0=
cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8=
cloud.google.com/go/gsuiteaddons v1.6.4 h1:uuw2Xd37yHftViSI8J2hUcCS8S7SH3ZWH09sUDLW30Q=
cloud.google.com/go/gsuiteaddons v1.6.4/go.mod h1:rxtstw7Fx22uLOXBpsvb9DUbC+fiXs7rF4U29KHM/pE=
cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
cloud.google.com/go/iap v1.9.3 h1:M4vDbQ4TLXdaljXVZSwW7XtxpwXUUarY2lIs66m0aCM=
cloud.google.com/go/iap v1.9.3/go.mod h1:DTdutSZBqkkOm2HEOTBzhZxh2mwwxshfD/h3yofAiCw=
cloud.google.com/go/ids v1.4.4 h1:VuFqv2ctf/A7AyKlNxVvlHTzjrEvumWaZflUzBPz/M4=
cloud.google.com/go/ids v1.4.4/go.mod h1:z+WUc2eEl6S/1aZWzwtVNWoSZslgzPxAboS0lZX0HjI=
cloud.google.com/go/iot v1.7.4 h1:m1WljtkZnvLTIRYW1YTOv5A6H1yKgLHR6nU7O8yf27w=
cloud.google.com/go/iot v1.7.4/go.mod h1:3TWqDVvsddYBG++nHSZmluoCAVGr1hAcabbWZNKEZLk=
cloud.google.com/go/kms v1.15.5 h1:pj1sRfut2eRbD9pFRjNnPNg/CzJPuQAzUujMIM1vVeM=
cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI=
cloud.google.com/go/language v1.12.2 h1:zg9uq2yS9PGIOdc0Kz/l+zMtOlxKWonZjjo5w5YPG2A=
cloud.google.com/go/language v1.12.2/go.mod h1:9idWapzr/JKXBBQ4lWqVX/hcadxB194ry20m/bTrhWc=
cloud.google.com/go/lifesciences v0.9.4 h1:rZEI/UxcxVKEzyoRS/kdJ1VoolNItRWjNN0Uk9tfexg=
cloud.google.com/go/lifesciences v0.9.4/go.mod h1:bhm64duKhMi7s9jR9WYJYvjAFJwRqNj+Nia7hF0Z7JA=
cloud.google.com/go/logging v1.8.1 h1:26skQWPeYhvIasWKm48+Eq7oUqdcdbwsCVwz5Ys0FvU=
cloud.google.com/go/logging v1.8.1/go.mod h1:TJjR+SimHwuC8MZ9cjByQulAMgni+RkXeI3wwctHJEI=
cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUzqv2YttZiveCs=
cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg=
cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI=
cloud.google.com/go/managedidentities v1.6.4 h1:SF/u1IJduMqQQdJA4MDyivlIQ4SrV5qAawkr/ZEREkY=
cloud.google.com/go/managedidentities v1.6.4/go.mod h1:WgyaECfHmF00t/1Uk8Oun3CQ2PGUtjc3e9Alh79wyiM=
cloud.google.com/go/maps v1.6.1 h1:2+eMp/1MvMPp5qrSOd3vtnLKa/pylt+krVRqET3jWsM=
cloud.google.com/go/maps v1.6.1/go.mod h1:4+buOHhYXFBp58Zj/K+Lc1rCmJssxxF4pJ5CJnhdz18=
cloud.google.com/go/mediatranslation v0.8.4 h1:VRCQfZB4s6jN0CSy7+cO3m4ewNwgVnaePanVCQh/9Z4=
cloud.google.com/go/mediatranslation v0.8.4/go.mod h1:9WstgtNVAdN53m6TQa5GjIjLqKQPXe74hwSCxUP6nj4=
cloud.google.com/go/memcache v1.10.4 h1:cdex/ayDd294XBj2cGeMe6Y+H1JvhN8y78B9UW7pxuQ=
cloud.google.com/go/memcache v1.10.4/go.mod h1:v/d8PuC8d1gD6Yn5+I3INzLR01IDn0N4Ym56RgikSI0=
cloud.google.com/go/metastore v1.13.3 h1:94l/Yxg9oBZjin2bzI79oK05feYefieDq0o5fjLSkC8=
cloud.google.com/go/metastore v1.13.3/go.mod h1:K+wdjXdtkdk7AQg4+sXS8bRrQa9gcOr+foOMF2tqINE=
cloud.google.com/go/monitoring v1.16.3 h1:mf2SN9qSoBtIgiMA4R/y4VADPWZA7VCNJA079qLaZQ8=
cloud.google.com/go/monitoring v1.16.3/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw=
cloud.google.com/go/networkconnectivity v1.14.3 h1:e9lUkCe2BexsqsUc2bjV8+gFBpQa54J+/F3qKVtW+wA=
cloud.google.com/go/networkconnectivity v1.14.3/go.mod h1:4aoeFdrJpYEXNvrnfyD5kIzs8YtHg945Og4koAjHQek=
cloud.google.com/go/networkmanagement v1.9.3 h1:HsQk4FNKJUX04k3OI6gUsoveiHMGvDRqlaFM2xGyvqU=
cloud.google.com/go/networkmanagement v1.9.3/go.mod h1:y7WMO1bRLaP5h3Obm4tey+NquUvB93Co1oh4wpL+XcU=
cloud.google.com/go/networksecurity v0.9.4 h1:947tNIPnj1bMGTIEBo3fc4QrrFKS5hh0bFVsHmFm4Vo=
cloud.google.com/go/networksecurity v0.9.4/go.mod h1:E9CeMZ2zDsNBkr8axKSYm8XyTqNhiCHf1JO/Vb8mD1w=
cloud.google.com/go/notebooks v1.11.2 h1:eTOTfNL1yM6L/PCtquJwjWg7ZZGR0URFaFgbs8kllbM=
cloud.google.com/go/notebooks v1.11.2/go.mod h1:z0tlHI/lREXC8BS2mIsUeR3agM1AkgLiS+Isov3SS70=
cloud.google.com/go/optimization v1.6.2 h1:iFsoexcp13cGT3k/Hv8PA5aK+FP7FnbhwDO9llnruas=
cloud.google.com/go/optimization v1.6.2/go.mod h1:mWNZ7B9/EyMCcwNl1frUGEuY6CPijSkz88Fz2vwKPOY=
cloud.google.com/go/orchestration v1.8.4 h1:kgwZ2f6qMMYIVBtUGGoU8yjYWwMTHDanLwM/CQCFaoQ=
cloud.google.com/go/orchestration v1.8.4/go.mod h1:d0lywZSVYtIoSZXb0iFjv9SaL13PGyVOKDxqGxEf/qI=
cloud.google.com/go/orgpolicy v1.11.4 h1:RWuXQDr9GDYhjmrredQJC7aY7cbyqP9ZuLbq5GJGves=
cloud.google.com/go/orgpolicy v1.11.4/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI=
cloud.google.com/go/osconfig v1.12.4 h1:OrRCIYEAbrbXdhm13/JINn9pQchvTTIzgmOCA7uJw8I=
cloud.google.com/go/osconfig v1.12.4/go.mod h1:B1qEwJ/jzqSRslvdOCI8Kdnp0gSng0xW4LOnIebQomA=
cloud.google.com/go/oslogin v1.12.2 h1:NP/KgsD9+0r9hmHC5wKye0vJXVwdciv219DtYKYjgqE=
cloud.google.com/go/oslogin v1.12.2/go.mod h1:CQ3V8Jvw4Qo4WRhNPF0o+HAM4DiLuE27Ul9CX9g2QdY=
cloud.google.com/go/phishingprotection v0.8.4 h1:sPLUQkHq6b4AL0czSJZ0jd6vL55GSTHz2B3Md+TCZI0=
cloud.google.com/go/phishingprotection v0.8.4/go.mod h1:6b3kNPAc2AQ6jZfFHioZKg9MQNybDg4ixFd4RPZZ2nE=
cloud.google.com/go/policytroubleshooter v1.10.2 h1:sq+ScLP83d7GJy9+wpwYJVnY+q6xNTXwOdRIuYjvHT4=
cloud.google.com/go/policytroubleshooter v1.10.2/go.mod h1:m4uF3f6LseVEnMV6nknlN2vYGRb+75ylQwJdnOXfnv0=
cloud.google.com/go/privatecatalog v0.9.4 h1:Vo10IpWKbNvc/z/QZPVXgCiwfjpWoZ/wbgful4Uh/4E=
cloud.google.com/go/privatecatalog v0.9.4/go.mod h1:SOjm93f+5hp/U3PqMZAHTtBtluqLygrDrVO8X8tYtG0=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ=
@@ -212,41 +130,23 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g=
cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
cloud.google.com/go/pubsublite v1.8.1 h1:pX+idpWMIH30/K7c0epN6V703xpIcMXWRjKJsz0tYGY=
cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0=
cloud.google.com/go/recaptchaenterprise/v2 v2.8.4 h1:KOlLHLv3h3HwcZAkx91ubM3Oztz3JtT3ZacAJhWDorQ=
cloud.google.com/go/recaptchaenterprise/v2 v2.8.4/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w=
cloud.google.com/go/recommendationengine v0.8.4 h1:JRiwe4hvu3auuh2hujiTc2qNgPPfVp+Q8KOpsXlEzKQ=
cloud.google.com/go/recommendationengine v0.8.4/go.mod h1:GEteCf1PATl5v5ZsQ60sTClUE0phbWmo3rQ1Js8louU=
cloud.google.com/go/recommender v1.11.3 h1:VndmgyS/J3+izR8V8BHa7HV/uun8//ivQ3k5eVKKyyM=
cloud.google.com/go/recommender v1.11.3/go.mod h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4=
cloud.google.com/go/redis v1.14.1 h1:J9cEHxG9YLmA9o4jTSvWt/RuVEn6MTrPlYSCRHujxDQ=
cloud.google.com/go/redis v1.14.1/go.mod h1:MbmBxN8bEnQI4doZPC1BzADU4HGocHBk2de3SbgOkqs=
cloud.google.com/go/resourcemanager v1.9.4 h1:JwZ7Ggle54XQ/FVYSBrMLOQIKoIT/uer8mmNvNLK51k=
cloud.google.com/go/resourcemanager v1.9.4/go.mod h1:N1dhP9RFvo3lUfwtfLWVxfUWq8+KUQ+XLlHLH3BoFJ0=
cloud.google.com/go/resourcesettings v1.6.4 h1:yTIL2CsZswmMfFyx2Ic77oLVzfBFoWBYgpkgiSPnC4Y=
cloud.google.com/go/resourcesettings v1.6.4/go.mod h1:pYTTkWdv2lmQcjsthbZLNBP4QW140cs7wqA3DuqErVI=
cloud.google.com/go/retail v1.14.4 h1:geqdX1FNqqL2p0ADXjPpw8lq986iv5GrVcieTYafuJQ=
cloud.google.com/go/retail v1.14.4/go.mod h1:l/N7cMtY78yRnJqp5JW8emy7MB1nz8E4t2yfOmklYfg=
cloud.google.com/go/run v1.3.3 h1:qdfZteAm+vgzN1iXzILo3nJFQbzziudkJrvd9wCf3FQ=
cloud.google.com/go/run v1.3.3/go.mod h1:WSM5pGyJ7cfYyYbONVQBN4buz42zFqwG67Q3ch07iK4=
cloud.google.com/go/scheduler v1.10.5 h1:eMEettHlFhG5pXsoHouIM5nRT+k+zU4+GUvRtnxhuVI=
cloud.google.com/go/scheduler v1.10.5/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI=
cloud.google.com/go/secretmanager v1.11.4 h1:krnX9qpG2kR2fJ+u+uNyNo+ACVhplIAS4Pu7u+4gd+k=
cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w=
cloud.google.com/go/security v1.15.4 h1:sdnh4Islb1ljaNhpIXlIPgb3eYj70QWgPVDKOUYvzJc=
cloud.google.com/go/security v1.15.4/go.mod h1:oN7C2uIZKhxCLiAAijKUCuHLZbIt/ghYEo8MqwD/Ty4=
cloud.google.com/go/securitycenter v1.24.2 h1:qCEyXoJoxNKKA1bDywBjjqCB7ODXazzHnVWnG5Uqd1M=
cloud.google.com/go/securitycenter v1.24.2/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM=
cloud.google.com/go/servicedirectory v1.11.3 h1:5niCMfkw+jifmFtbBrtRedbXkJm3fubSR/KHbxSJZVM=
cloud.google.com/go/servicedirectory v1.11.3/go.mod h1:LV+cHkomRLr67YoQy3Xq2tUXBGOs5z5bPofdq7qtiAw=
cloud.google.com/go/shell v1.7.4 h1:nurhlJcSVFZneoRZgkBEHumTYf/kFJptCK2eBUq/88M=
cloud.google.com/go/shell v1.7.4/go.mod h1:yLeXB8eKLxw0dpEmXQ/FjriYrBijNsONpwnWsdPqlKM=
cloud.google.com/go/spanner v1.53.0 h1:/NzWQJ1MEhdRcffiutRKbW/AIGVKhcTeivWTDjEyCCo=
cloud.google.com/go/spanner v1.53.0/go.mod h1:liG4iCeLqm5L3fFLU5whFITqP0e0orsAW1uUSrd4rws=
cloud.google.com/go/speech v1.21.0 h1:qkxNao58oF8ghAHE1Eghen7XepawYEN5zuZXYWaUTA4=
cloud.google.com/go/speech v1.21.0/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus=
@@ -256,35 +156,20 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
cloud.google.com/go/storagetransfer v1.10.3 h1:YM1dnj5gLjfL6aDldO2s4GeU8JoAvH1xyIwXre63KmI=
cloud.google.com/go/storagetransfer v1.10.3/go.mod h1:Up8LY2p6X68SZ+WToswpQbQHnJpOty/ACcMafuey8gc=
cloud.google.com/go/talent v1.6.5 h1:LnRJhhYkODDBoTwf6BeYkiJHFw9k+1mAFNyArwZUZAs=
cloud.google.com/go/talent v1.6.5/go.mod h1:Mf5cma696HmE+P2BWJ/ZwYqeJXEeU0UqjHFXVLadEDI=
cloud.google.com/go/texttospeech v1.7.4 h1:ahrzTgr7uAbvebuhkBAAVU6kRwVD0HWsmDsvMhtad5Q=
cloud.google.com/go/texttospeech v1.7.4/go.mod h1:vgv0002WvR4liGuSd5BJbWy4nDn5Ozco0uJymY5+U74=
cloud.google.com/go/tpu v1.6.4 h1:XIEH5c0WeYGaVy9H+UueiTaf3NI6XNdB4/v6TFQJxtE=
cloud.google.com/go/tpu v1.6.4/go.mod h1:NAm9q3Rq2wIlGnOhpYICNI7+bpBebMJbh0yyp3aNw1Y=
cloud.google.com/go/trace v1.10.4 h1:2qOAuAzNezwW3QN+t41BtkDJOG42HywL73q8x/f6fnM=
cloud.google.com/go/trace v1.10.4/go.mod h1:Nso99EDIK8Mj5/zmB+iGr9dosS/bzWCJ8wGmE6TXNWY=
cloud.google.com/go/translate v1.9.3 h1:t5WXTqlrk8VVJu/i3WrYQACjzYJiff5szARHiyqqPzI=
cloud.google.com/go/translate v1.9.3/go.mod h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0=
cloud.google.com/go/video v1.20.3 h1:Xrpbm2S9UFQ1pZEeJt9Vqm5t2T/z9y/M3rNXhFoo8Is=
cloud.google.com/go/video v1.20.3/go.mod h1:TnH/mNZKVHeNtpamsSPygSR0iHtvrR/cW1/GDjN5+GU=
cloud.google.com/go/videointelligence v1.11.4 h1:YS4j7lY0zxYyneTFXjBJUj2r4CFe/UoIi/PJG0Zt/Rg=
cloud.google.com/go/videointelligence v1.11.4/go.mod h1:kPBMAYsTPFiQxMLmmjpcZUMklJp3nC9+ipJJtprccD8=
cloud.google.com/go/vision/v2 v2.7.5 h1:T/ujUghvEaTb+YnFY/jiYwVAkMbIC8EieK0CJo6B4vg=
cloud.google.com/go/vision/v2 v2.7.5/go.mod h1:GcviprJLFfK9OLf0z8Gm6lQb6ZFUulvpZws+mm6yPLM=
cloud.google.com/go/vmmigration v1.7.4 h1:qPNdab4aGgtaRX+51jCOtJxlJp6P26qua4o1xxUDjpc=
cloud.google.com/go/vmmigration v1.7.4/go.mod h1:yBXCmiLaB99hEl/G9ZooNx2GyzgsjKnw5fWcINRgD70=
cloud.google.com/go/vmwareengine v1.0.3 h1:WY526PqM6QNmFHSqe2sRfK6gRpzWjmL98UFkql2+JDM=
cloud.google.com/go/vmwareengine v1.0.3/go.mod h1:QSpdZ1stlbfKtyt6Iu19M6XRxjmXO+vb5a/R6Fvy2y4=
cloud.google.com/go/vpcaccess v1.7.4 h1:zbs3V+9ux45KYq8lxxn/wgXole6SlBHHKKyZhNJoS+8=
cloud.google.com/go/vpcaccess v1.7.4/go.mod h1:lA0KTvhtEOb/VOdnH/gwPuOzGgM+CWsmGu6bb4IoMKk=
cloud.google.com/go/webrisk v1.9.4 h1:iceR3k0BCRZgf2D/NiKviVMFfuNC9LmeNLtxUFRB/wI=
cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsdEsfMl7X0=
cloud.google.com/go/websecurityscanner v1.6.4 h1:5Gp7h5j7jywxLUp6NTpjNPkgZb3ngl0tUSw6ICWvtJQ=
cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o=
cloud.google.com/go/workflows v1.12.3 h1:qocsqETmLAl34mSa01hKZjcqAvt699gaoFbooGGMvaM=
cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g=
collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
@@ -298,19 +183,16 @@ github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuI
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 h1:qoVeMsc9/fh/yhxVaA0obYjVH/oI/ihrOoMwsLS9KSA=
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 h1:Ut0ZGdOwJDw0npYEg+TLlPls3Pq6JiZaP2/aGKir7Zw=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.2/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 h1:E+m3SkZCN0Bf5q7YdTs5lSm2CYY3CK4spn5OmUIiQtk=
github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
@@ -318,7 +200,6 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4=
@@ -352,7 +233,6 @@ github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VY
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0 h1:XMEdVDFxgulDDl0lQmAZS6j8gRQ/0pJ+ZpXH2FHVtDc=
github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0=
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY=
@@ -385,10 +265,8 @@ github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKz
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7 h1:mreN1m/5VJ/Zc3b4pzj9qU6D9SRQ6Vm+3KfI328t3S8=
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -406,17 +284,13 @@ github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNu
github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794 h1:xlwdaKcTNVW4PtpQb8aKA4Pjy0CdJHEqvFbAnvR5m2g=
github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794/go.mod h1:7e+I0LQFUI9AXWxOfsQROs9xPhoJtbsyWcjJqDd4KPY=
github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/agnivade/levenshtein v1.0.1 h1:3oJU7J3FGFmyhn8KHjmVaZCN5hxTr7GxgRue+sxIXdQ=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af h1:wVe6/Ea46ZMeNkQjjBW6xcqyQA/j5e0D6GytH95g0gQ=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/akavel/rsrc v0.10.2 h1:Zxm8V5eI1hW4gGaYsJQUhxpjkENuG91ki8B4zCrvEsw=
github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/alecthomas/kingpin/v2 v2.3.1 h1:ANLJcKmQm4nIaog7xdr/id6FM6zm5hHnfZrvtKPxqGg=
github.com/alecthomas/kingpin/v2 v2.3.1/go.mod h1:oYL5vtsvEHZGHxU7DMp32Dvx+qL+ptGn6lWaot2vCNE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
@@ -425,29 +299,22 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZq
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves=
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db h1:nxAtV4VajJDhKysp2kdcJZsq8Ss1xSA0vZTkVHHJd0E=
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg=
github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U=
github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs=
github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/avast/retry-go/v4 v4.1.0 h1:CwudD9anYv6JMVnDuTRlK6kLo4dBamiL+F3U8YDiyfg=
github.com/avast/retry-go/v4 v4.1.0/go.mod h1:HqmLvS2VLdStPCGDFjSuZ9pzlTqVRldCI4w2dO4m1Ms=
@@ -456,7 +323,6 @@ github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJ
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA=
github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
github.com/aws/aws-sdk-go-v2/config v1.1.1 h1:ZAoq32boMzcaTW9bcUacBswAmHTbvlvDJICgHFZuECo=
github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y=
@@ -470,7 +336,6 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2 h1:EtEU7WRaWliitZh2nmuxEXrN
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.56 h1:kFDCPqqVvb9vYcW82L7xYfrBGpuxXQ/8A/zYVayRQK4=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.56/go.mod h1:FoSBuessadgy8Cqp9gQF8U5rzi1XVQhiEJ6su2/kBEE=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 h1:nFBQlGtkbPzp/NjZLuFxRqmT91rLJkgvsEQs68h962Y=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ=
@@ -478,23 +343,18 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.22 h1:lTqBRUuy8oLhBsnnVZf14uRbIHPHCrGqg4Plc8gU/1U=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.22/go.mod h1:YsOa3tFriwWNvBPYHXM5ARiU2yqBNWPWeUiq+4i7Na0=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.25 h1:B/hO3jfWRm7hP00UeieNlI5O2xP5WJ27tyJG5lzc7AM=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.25/go.mod h1:54K1zgxK/lai3a4HosE4IKBwZsP/5YAJ6dzJfwsjJ0U=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2 h1:4AH9fFjUlVktQMznF+YN33aWNXaR4VgDXyP28qokJC0=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.24 h1:i4RH8DLv/BHY0fCrXYQDr+DGnWzaxB3Ee/esxUaSavk=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.24/go.mod h1:N8X45/o2cngvjCYi2ZnvI0P4mU4ZRJfEYC3maCSsPyw=
github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1 h1:cKr6St+CtC3/dl/rEBJvlk7A/IN5D5F02GNkGzfbtVU=
github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4=
github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2 h1:/RPQNjh1sDIezpXaFIkZb7MlXnSyAqjVdAwcJuGYTqg=
github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2/go.mod h1:TQZBt/WaQy+zTHoW++rnl8JBrmZ0VO6EUbVua1+foCA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.6 h1:zzTm99krKsFcF4N7pu2z17yCcAZpQYZ7jnJZPIgEMXE=
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.6/go.mod h1:PudwVKUTApfm0nYaPutOXaKdPKTlZYClGBQpVIRdcbs=
github.com/aws/aws-sdk-go-v2/service/sso v1.1.1 h1:37QubsarExl5ZuCBlnRP+7l1tNwZPBSTqpTBrPH98RU=
github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0=
@@ -516,20 +376,16 @@ github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd3
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible h1:Ppm0npCCsmuR9oQaBtRuZcmILVE74aXE+AmrJj8L2ns=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0=
github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bits-and-blooms/bitset v1.5.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
@@ -549,7 +405,6 @@ github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOC
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -568,7 +423,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWs
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v0.0.0-20210722231415-061457976a23 h1:dZ0/VyGgQdVGAss6Ju0dt5P0QltE0SFY5Woh6hbIfiQ=
github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4=
github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA=
@@ -579,7 +433,6 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrC
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk=
github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
@@ -613,49 +466,33 @@ github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1
github.com/consensys/gnark-crypto v0.9.1-0.20230105202408-1a7a29904a7c h1:llSLg4o9EgH3SrXky+Q5BqEYqV76NGKo07K5Ps2pIKo=
github.com/consensys/gnark-crypto v0.9.1-0.20230105202408-1a7a29904a7c/go.mod h1:CkbdF9hbRidRJYMRzmfX8TMOr95I2pYXRHF18MzRrvA=
github.com/consensys/gnark-crypto v0.10.0/go.mod h1:Iq/P3HHl0ElSjsg2E1gsMwhAyxnxoKK5nVyZKd+/KhU=
github.com/container-orchestrated-devices/container-device-interface v0.6.1 h1:mz77uJoP8im/4Zins+mPqt677ZMaflhoGaYrRAl5jvA=
github.com/container-orchestrated-devices/container-device-interface v0.6.1/go.mod h1:40T6oW59rFrL/ksiSs7q45GzjGlbvxnA4xaK6cyq+kA=
github.com/containerd/aufs v1.0.0 h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY=
github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
github.com/containerd/btrfs/v2 v2.0.0 h1:FN4wsx7KQrYoLXN7uLP0vBV4oVWHOIKDRQ1G2Z0oL5M=
github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k=
github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
github.com/containerd/fuse-overlayfs-snapshotter v1.0.2 h1:Xy9Tkx0tk/SsMfLDFc69wzqSrxQHYEFELHBO/Z8XO3M=
github.com/containerd/fuse-overlayfs-snapshotter v1.0.2/go.mod h1:nRZceC8a7dRm3Ao6cJAwuJWPFiBPaibHiFntRUnzhwU=
github.com/containerd/go-cni v1.1.9 h1:ORi7P1dYzCwVM6XPN4n3CbkuOx/NZ2DOqy+SHRdo9rU=
github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM=
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/go-runc v1.1.0 h1:OX4f+/i2y5sUT7LhmcJH7GYrjjhHa1QI4e8yO0gGleA=
github.com/containerd/go-runc v1.1.0/go.mod h1:xJv2hFF7GvHtTJd9JqTS2UVxMkULUYw4JN5XAUZqH5U=
github.com/containerd/imgcrypt v1.1.7 h1:WSf9o9EQ0KGHiUx2ESFZ+PKf4nxK9BcvV/nJDX8RkB4=
github.com/containerd/imgcrypt v1.1.7/go.mod h1:FD8gqIcX5aTotCtOmjeCsi3A1dHmTZpnMISGKSczt4k=
github.com/containerd/nri v0.4.0 h1:PjgIBm0RtUiFyEO6JqPBQZRQicbsIz41Fz/5VSC0zgw=
github.com/containerd/nri v0.4.0/go.mod h1:Zw9q2lP16sdg0zYybemZ9yTDy8g7fPCIB3KXOGlggXI=
github.com/containerd/stargz-snapshotter v0.14.3/go.mod h1:j2Ya4JeA5gMZJr8BchSkPjlcCEh++auAxp4nidPI6N0=
github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
github.com/containerd/zfs v1.1.0 h1:n7OZ7jZumLIqNJqXrEc/paBM840mORnmGdJDmAmJZHM=
github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE=
github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU=
github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
github.com/containers/ocicrypt v1.1.6 h1:uoG52u2e91RE4UqmBICZY8dNshgfvkdl3BW6jnxiFaI=
github.com/containers/ocicrypt v1.1.6/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
@@ -669,7 +506,6 @@ github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJ
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c h1:/ovYnF02fwL0kvspmy9AuyKg1JhdTRUgPw4nUxd9oZM=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE=
github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec=
github.com/dave/jennifer v1.2.0 h1:S15ZkFMRoJ36mGAQgWL1tnr0NQJh9rZ8qatseX/VbBc=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
@@ -680,13 +516,11 @@ github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6
github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73 h1:OGNva6WhsKst5OZf7eZOklDztV3hwtTHovdrLHV+MsA=
github.com/denisenkom/go-mssqldb v0.12.2 h1:1OcPn5GBIobjWNd+8yjfHNIaFX14B1pWI3F9HZy5KXw=
github.com/denisenkom/go-mssqldb v0.12.2/go.mod h1:lnIw1mZukFRZDJYQ0Pb833QS2IaC3l5HkEfra2LJ+sk=
github.com/dgraph-io/badger v1.6.0 h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Evo=
@@ -702,7 +536,6 @@ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMa
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/djherbis/atime v1.1.0 h1:rgwVbP/5by8BvvjBNrbh64Qz33idKT3pSnMSJsxhi0g=
github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE=
@@ -717,13 +550,11 @@ github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32Paq
github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v23.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli-docs-tool v0.6.0 h1:Z9x10SaZgFaB6jHgz3OWooynhSa40CsWkpe5hEnG/qA=
github.com/docker/cli-docs-tool v0.6.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ=
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
@@ -741,7 +572,6 @@ github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d h1:W1n4DvpzZGOI
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae h1:UTOyRlLeWJrZx+ynml6q6qzZ1uDkJe/0Z5CMZRbEIJg=
github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
@@ -756,10 +586,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:Q
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM=
github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g=
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
@@ -768,7 +596,6 @@ github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqB
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/ethereum/go-ethereum v1.11.1/go.mod h1:DuefStAgaxoaYGLR0FueVcVbehmn5n9QUcVrMCuOvuc=
github.com/ethereum/go-ethereum v1.11.4/go.mod h1:it7x0DWnTDMfVFdXcU6Ti4KEFQynLHVRarcSlPr0HBo=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072 h1:DddqAaWDpywytcG8w/qoQ5sAN8X12d3Z3koB0C3Rxsc=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
@@ -794,10 +621,8 @@ github.com/flosch/pongo2/v4 v4.0.2 h1:gv+5Pe3vaSVmiJvh/BZa82b7/00YUGm0PIyVVLop0H
github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90 h1:WXb3TSNmHp2vHoCroCIB1foO/yQ36swABL8aOVeDpgg=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88=
github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 h1:IZqZOB2fydHte3kUgxrzK5E1fW7RQGeDwE8F/ZZnUYc=
github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8=
@@ -847,6 +672,8 @@ github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
@@ -863,7 +690,6 @@ github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyL
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
@@ -879,7 +705,6 @@ github.com/gobwas/ws v1.1.0 h1:7RFti/xnNkMJnrK7D1yQ/iCIB5OrrY/54/H930kIbHA=
github.com/gobwas/ws v1.1.0/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0=
github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk=
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@@ -914,7 +739,6 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
@@ -928,9 +752,7 @@ github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85q
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M=
github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A=
github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
@@ -941,7 +763,6 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw=
github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk=
github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
@@ -964,7 +785,6 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg=
@@ -1003,24 +823,18 @@ github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKh
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/guptarohit/asciigraph v0.5.5 h1:ccFnUF8xYIOUPPY3tmdvRyHqmn1MYI9iv1pLKX+/ZkQ=
github.com/guptarohit/asciigraph v0.5.5/go.mod h1:dYl5wwK4gNsnFf9Zp+l06rFiDZ5YtXM6x7SRWZ3KGag=
github.com/hanwen/go-fuse/v2 v2.2.0 h1:jo5QZYmBLNcl9ovypWaQ5yXMSSV+Ch68xoC3rtZvvBM=
github.com/hanwen/go-fuse/v2 v2.2.0/go.mod h1:B1nGE/6RBFyBRC1RRnf23UpwCdyJ31eukw34oAKukAc=
github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992 h1:fYOrSfO5C9PmFGtmRWSYGqq52SOoE2dXMtAn2Xzh1LQ=
github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992/go.mod h1:Abjk0jbRkDaNCzsRhOv2iDCofYpX1eVsjozoiK63qLA=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM=
github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
@@ -1031,7 +845,6 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI=
github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE=
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw=
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU=
@@ -1084,7 +897,6 @@ github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9 h1:MHTrDWmQpHq/
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 h1:+TUUmaFa4YD1Q+7bH9o5NCHQGPMqZCYJiNW6lIIS9z4=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/intel/goresctrl v0.3.0 h1:K2D3GOzihV7xSBedGxONSlaw/un1LZgWsc9IfqipN4c=
github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk=
github.com/iris-contrib/blackfriday v2.0.0+incompatible h1:o5sHQHHm0ToHUlAJSTjW9UWicjJSDDauOOQ2AHuIVp4=
github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
@@ -1133,7 +945,6 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89 h1:12K8AlpT0/6QU
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/josephspurrier/goversioninfo v1.4.0 h1:Puhl12NSHUSALHSuzYwPYQkqa2E1+7SrtAPJorKK0C8=
github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -1151,7 +962,6 @@ github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 h1:rhqTjzJlm7EbkELJDKM
github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
github.com/juju/loggo v0.0.0-20180524022052-584905176618 h1:MK144iBQF9hTSwBW/9eJm034bVoG30IshVm688T2hi8=
github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI=
github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073 h1:WQM1NildKThwdP7qWrNAFGzp4ijNLw8RlgENkaI4MJs=
github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
@@ -1226,7 +1036,6 @@ github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTje
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/knz/go-libedit v1.10.1 h1:0pHpWtx9vcvC0xGZqEQlQdfSQs7WRlAjuPvk3fOZDCo=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
@@ -1244,20 +1053,13 @@ github.com/labstack/gommon v0.3.1 h1:OomWaJXm7xR6L1HmEtGyQf26TEn7V6X88mktX9kee9o
github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A=
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
github.com/lestrrat-go/blackmagic v1.0.0 h1:XzdxDbuQTz0RZZEmdU7cnQxUtFUzgCSPq8RCz4BxIi4=
github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
github.com/lestrrat-go/iter v1.0.1 h1:q8faalr2dY6o8bV45uwrxq12bRa1ezKrB6oM9FUgN4A=
github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc=
github.com/lestrrat-go/jwx v1.2.25 h1:tAx93jN2SdPvFn08fHNAhqFJazn5mBBOB8Zli0g0otA=
github.com/lestrrat-go/jwx v1.2.25/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY=
github.com/lestrrat-go/option v1.0.0 h1:WqAWL8kh8VcSoD6xjSH34/1m8yxluXQbDeKNfvFeEO4=
github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 h1:jUp75lepDg0phMUJBCmvaeFDldD2N3S1lBuPwUTszio=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
@@ -1317,18 +1119,14 @@ github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mmcloughlin/profile v0.1.1 h1:jhDmAqPyebOsVDOCICJoINoLb/AnLBaUw58nFzxWS2w=
github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs=
github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5 h1:8Q0qkMVC/MmWkpIdlvZgcv2o2jrlF6zqVOh7W5YHdMA=
@@ -1342,7 +1140,6 @@ github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
@@ -1378,20 +1175,16 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow=
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
github.com/open-policy-agent/opa v0.42.2 h1:qocVAKyjrqMjCqsU02S/gHyLr4AQQ9xMtuV1kKnnyhM=
github.com/open-policy-agent/opa v0.42.2/go.mod h1:MrmoTi/BsKWT58kXlVayBb+rYVeaMwuBm3nYAN3923s=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 h1:DmNGcqH3WDbV5k8OJ+esPWbqUOX5rMLR2PMvziDMJi0=
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY=
github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM=
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 h1:DiLBVp4DAcZlBVBEtJpNWZpZVq0AEeCY7Hqk8URVs4o=
github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170/go.mod h1:uQd4a7Rh3ZsVg5j0lNyAfyxIeGde9yrlhjF78GzeW0c=
github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
@@ -1405,7 +1198,6 @@ github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwb
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/penglongli/gin-metrics v0.1.10/go.mod h1:wxGsGUwpVGv3hmYSxQn2GZgRL3YuCgiRFq2d0X6+EOU=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
@@ -1419,16 +1211,13 @@ github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFu
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/profile v1.5.0 h1:042Buzk+NhDI+DeSAA62RwJL8VAuZUMQZUjCsRz1Mug=
github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5 h1:tFwafIEMf0B7NlcxV/zJ6leBIa81D3hgGSgsE5hCkOQ=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
@@ -1461,12 +1250,10 @@ github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 h1:cZC+
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY=
github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48 h1:cSo6/vk8YpvkLbk9v3FO97cakNmUoxwi2KMP8hd5WIw=
github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48/go.mod h1:4pWaT30XoEx1j8KNJf3TV+E3mQkaufn7mf+jRNb/Fuk=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
@@ -1517,7 +1304,6 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
@@ -1536,23 +1322,17 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spiffe/go-spiffe/v2 v2.1.1 h1:RT9kM8MZLZIsPTH+HKQEP5yaAk3yd/VBzlINaRjXs8k=
github.com/spiffe/go-spiffe/v2 v2.1.1/go.mod h1:5qg6rpqlwIub0JAiF1UK9IMD6BpPTmvG6yfSgDBs5lg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/tdewolff/minify/v2 v2.12.4 h1:kejsHQMM17n6/gwdw53qsi6lg0TGddZADVyQOz1KMdE=
github.com/tdewolff/minify/v2 v2.12.4/go.mod h1:h+SRvSIX3kwgwTFOpSckvSxgax3uy8kZTSF1Ojrr3bk=
@@ -1567,11 +1347,8 @@ github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITn
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 h1:8eY6m1mjgyB8XySUR7WvebTM8D/Vs86jLJzD/Tw7zkc=
github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7/go.mod h1:qqvyZqkfwkoJuPU/bw61bItaoO0SJ8YSW0vSVRRvsRg=
github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/aukdhCdTyb0=
github.com/tonistiigi/go-archvariant v1.0.0/go.mod h1:TxFmO5VS6vMq2kvs3ht04iPXtu2rUT/erOnGFYfk5Ho=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
@@ -1600,13 +1377,9 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
github.com/vektah/gqlparser/v2 v2.4.5 h1:C02NsyEsL4TXJB7ndonqTfuQOL4XPIu0aAWugdmTgmc=
github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
github.com/veraison/go-cose v1.0.0-rc.1 h1:4qA7dbFJGvt7gcqv5MCIyCQvN+NpHFPkW7do3EeDLb8=
github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
@@ -1614,14 +1387,11 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xhit/go-str2duration v1.2.0 h1:BcV5u025cITWxEQKGWr1URRzrcXtu7uk8+luz3Yuhwc=
github.com/xhit/go-str2duration v1.2.0/go.mod h1:3cPSlfZlUHVlneIVfePFWcJZsuwf+P1v2SRTV4cUmp4=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6 h1:YdYsPAZ2pC6Tow/nPZOPQ96O3hm/ToAkGsPLzedXERk=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
@@ -1629,7 +1399,6 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwY
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY=
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
github.com/yashtewari/glob-intersection v0.1.0 h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg=
github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
github.com/yosssi/ace v0.0.5 h1:tUkIP/BLdKqrlrPwcmH0shwEEhTRHoGnc1wFIWmaBUA=
github.com/yosssi/ace v0.0.5/go.mod h1:ALfIzm2vT7t5ZE7uoIZqF3TQ7SAOyupFZnkrF5id+K0=
@@ -1644,31 +1413,21 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA=
github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
github.com/zeebo/errs v1.2.2 h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g=
github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0=
go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6GU8=
go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI=
go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4=
go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI=
go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c=
go.etcd.io/etcd/pkg/v3 v3.5.5 h1:Ablg7T7OkR+AeeeU32kdVhw/AGDsitkKPl7aW73ssjU=
go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyPdaE=
go.etcd.io/etcd/raft/v3 v3.5.5 h1:Ibz6XyZ60OYyRopu73lLM/P+qco3YtlZMOhnXNS051I=
go.etcd.io/etcd/raft/v3 v3.5.5/go.mod h1:76TA48q03g1y1VpTue92jZLr9lIHKUNcYdZOOGyx8rI=
go.etcd.io/etcd/server/v3 v3.5.5 h1:jNjYm/9s+f9A9r6+SC4RvNaz6AqixpOvhrFdT0PvIj0=
go.etcd.io/etcd/server/v3 v3.5.5/go.mod h1:rZ95vDw/jrvsbj9XpTqPrTAB9/kzchVdhRirySPkUBc=
go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg=
go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M=
go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -1679,23 +1438,21 @@ go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw=
go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc=
go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -1747,7 +1504,6 @@ golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMx
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -1831,7 +1587,6 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -2059,12 +1814,10 @@ google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ=
@@ -2078,7 +1831,6 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXL
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -2103,17 +1855,11 @@ k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5
k8s.io/apiserver v0.26.2/go.mod h1:GHcozwXgXsPuOJ28EnQ/jXEM9QeG6HT22YxSNmpYNh8=
k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU=
k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs=
k8s.io/component-base v0.26.7 h1:uqsOyZh0Zqoaup8tmHa491D/CvgFdGUs+X2H/inNUKM=
k8s.io/component-base v0.26.7/go.mod h1:CZe1HTmX/DQdeBrb9XYOXzs96jXth8ZbFvhLMsoJLUg=
k8s.io/cri-api v0.27.1 h1:KWO+U8MfI9drXB/P4oU9VchaWYOlwDglJZVHWMpTT3Q=
k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/kms v0.26.7 h1:vruEJNh2IyFnPHbCH8CpUjekHy1HFJtHd/lE2K0lU78=
k8s.io/kms v0.26.7/go.mod h1:AYuV9ZebRhr6cb1eT9L6kZVxvgIUxmE1Fe6kPhqYvuc=
kernel.org/pub/linux/libs/security/libcap/cap v1.2.67 h1:sPQ9qlSNR26fToTKbxe/HDWJlXvBLqGmt84LGCQkOy0=
kernel.org/pub/linux/libs/security/libcap/cap v1.2.67/go.mod h1:GkntoBuwffz19qtdFVB+k2NtWNN+yCKnC/Ykv/hMiTU=
kernel.org/pub/linux/libs/security/libcap/psx v1.2.67 h1:NxbXJ7pDVq0FKBsqjieT92QDXI2XaqH2HAi4QcCOHt8=
kernel.org/pub/linux/libs/security/libcap/psx v1.2.67/go.mod h1:+l6Ee2F59XiJ2I6WR5ObpC1utCQJZ/VLsEbQCD8RG24=
lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.36.1/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
@@ -2134,7 +1880,5 @@ rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37 h1:fAPTNEpzQMOLMGwOHNbUkR2xXTQwMJOZYNx+/mLlOh0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37/go.mod h1:vfnxT4FXNT8eGvO+xi/DsyC/qHmdujqwrUa1WSspCsk=
tags.cncf.io/container-device-interface/specs-go v0.6.0 h1:V+tJJN6dqu8Vym6p+Ru+K5mJ49WL6Aoc5SJFSY0RLsQ=
tags.cncf.io/container-device-interface/specs-go v0.6.0/go.mod h1:hMAwAbMZyBLdmYqWgYcKH0F/yctNpV3P35f+/088A80=

View File

@@ -41,7 +41,6 @@ type LoginResponse struct {
// GetTaskRequest defines the request structure for GetTask API
type GetTaskRequest struct {
HardForkName string `json:"hard_fork_name"`
TaskType message.ProofType `json:"task_type"`
ProverHeight uint64 `json:"prover_height,omitempty"`
VK string `json:"vk"`
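
With hard_fork_name dropped from GetTaskRequest, the wire format of a task poll is just task type, optional prover height, and the verifying key. A minimal, self-contained sketch of that JSON shape (message.ProofType is simplified to an int here, and all values are illustrative placeholders, not taken from the repo):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of the trimmed GetTaskRequest above; hard_fork_name is no longer sent.
type GetTaskRequest struct {
	TaskType     int    `json:"task_type"`
	ProverHeight uint64 `json:"prover_height,omitempty"`
	VK           string `json:"vk"`
}

func main() {
	req := GetTaskRequest{
		TaskType:     2,                   // illustrative proof type value
		ProverHeight: 1_234_567,           // illustrative block height
		VK:           "base64-encoded-vk", // placeholder; the real value comes from the prover core
	}
	body, _ := json.Marshal(req)
	fmt.Println(string(body))
	// {"task_type":2,"prover_height":1234567,"vk":"base64-encoded-vk"}
}
```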

View File

@@ -1,6 +1,5 @@
{
"prover_name": "prover-1",
"hard_fork_name": "homestead",
"keystore_path": "keystore.json",
"keystore_password": "prover-pwd",
"db_path": "unique-db-path-for-prover-1",

View File

@@ -14,7 +14,6 @@ import (
// Config loads prover configuration items.
type Config struct {
ProverName string `json:"prover_name"`
HardForkName string `json:"hard_fork_name"`
KeystorePath string `json:"keystore_path"`
KeystorePassword string `json:"keystore_password"`
Core *ProverCoreConfig `json:"core"`
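
A minimal sketch of reading the trimmed prover config shown above (the LoadConfig helper and its location are illustrative assumptions, not part of this diff). Because hard_fork_name has been removed from the struct, an old config file that still contains it loads without error; encoding/json simply ignores the unknown field:

```go
package config

import (
	"encoding/json"
	"os"
)

// LoadConfig reads a prover config file such as the prover-1 example above.
// Illustrative helper only; the real loader may differ.
func LoadConfig(path string) (*Config, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	cfg := &Config{}
	if err := json.Unmarshal(data, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
```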

View File

@@ -178,8 +178,7 @@ func (r *Prover) proveAndSubmit() error {
func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
// prepare the request
req := &client.GetTaskRequest{
HardForkName: r.cfg.HardForkName,
TaskType: r.Type(),
TaskType: r.Type(),
// we may not be able to get the vk the first time, so we pass the vk to the coordinator on every getTask call
// instead of sending it only once at login
VK: r.proverCore.VK,

File diff suppressed because one or more lines are too long

View File

@@ -10,7 +10,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
@@ -84,7 +83,7 @@ func action(ctx *cli.Context) error {
if err != nil {
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false /* initGenesis */, relayer.ServiceTypeL2GasOracle, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, relayer.ServiceTypeL2GasOracle, registry)
if err != nil {
log.Crit("failed to create new l2 relayer", "config file", cfgFile, "error", err)
}

View File

@@ -72,18 +72,18 @@ func action(ctx *cli.Context) error {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
genesisPath := ctx.String(utils.Genesis.Name)
genesis, err := utils.ReadGenesis(genesisPath)
if err != nil {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
if err != nil {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}
genesisPath := ctx.String(utils.Genesis.Name)
genesis, err := config.ReadGenesis(genesisPath)
if err != nil {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, genesis.Config, db, registry)
if err != nil {
log.Crit("failed to create chunkProposer", "config file", cfgFile, "error", err)

View File

@@ -10,10 +10,11 @@
"sender_config": {
"endpoint": "https://rpc.scroll.io",
"escalate_blocks": 1,
"confirmations": "0x0",
"confirmations": "0x1",
"escalate_multiple_num": 2,
"escalate_multiple_den": 1,
"max_gas_price": 1000000000000,
"max_blob_gas_price": 10000000000000,
"tx_type": "LegacyTx",
"check_pending_time": 1
},
@@ -34,7 +35,7 @@
"sender_config": {
"endpoint": "https://rpc.ankr.com/eth",
"escalate_blocks": 1,
"confirmations": "0x0",
"confirmations": "0x6",
"escalate_multiple_num": 2,
"escalate_multiple_den": 1,
"max_gas_price": 1000000000000,

View File

@@ -27,7 +27,7 @@ require (
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
@@ -65,7 +65,7 @@ require (
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
@@ -103,7 +103,7 @@ require (
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.17.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sync v0.5.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect

View File

@@ -41,8 +41,8 @@ github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/Yj
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -170,8 +170,8 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -313,8 +313,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

View File

@@ -2,11 +2,14 @@ package config
import (
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"scroll-tech/common/database"
"github.com/scroll-tech/go-ethereum/core"
)
// Config load configuration items.
@@ -41,3 +44,17 @@ func NewConfig(file string) (*Config, error) {
}
return cfg, nil
}
// ReadGenesis parses and returns the genesis file at the given path
func ReadGenesis(genesisPath string) (*core.Genesis, error) {
file, err := os.Open(filepath.Clean(genesisPath))
if err != nil {
return nil, err
}
genesis := new(core.Genesis)
if err := json.NewDecoder(file).Decode(genesis); err != nil {
return nil, errors.Join(err, file.Close())
}
return genesis, file.Close()
}
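
A minimal usage sketch of the helper above, as the rollup-relayer command calls it (the import path and genesis file location are placeholders, not taken from the repository):

package main

import (
	"fmt"
	"log"

	"scroll-tech/rollup/internal/config" // assumed import path for this package
)

func main() {
	genesis, err := config.ReadGenesis("./conf/genesis.json") // placeholder path
	if err != nil {
		log.Fatalf("failed to read genesis: %v", err)
	}
	fmt.Println("genesis chain id:", genesis.Config.ChainID)
}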

View File

@@ -13,17 +13,13 @@ import (
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/common/types/encoding/codecv1"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
bridgeAbi "scroll-tech/rollup/abi"
@@ -65,12 +61,10 @@ type Layer2Relayer struct {
chainMonitorClient *resty.Client
metrics *l2RelayerMetrics
chainCfg *params.ChainConfig
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
var gasOracleSender, commitSender, finalizeSender *sender.Sender
var err error
@@ -139,8 +133,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
chainCfg: chainCfg,
cfg: cfg,
}
// chain_monitor client
@@ -196,7 +189,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
err = r.db.Transaction(func(dbTX *gorm.DB) error {
var dbChunk *orm.Chunk
dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, encoding.CodecV0, dbTX)
dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, dbTX)
if err != nil {
return fmt.Errorf("failed to insert chunk: %v", err)
}
@@ -213,7 +206,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
}
var dbBatch *orm.Batch
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, dbTX)
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
@@ -246,9 +239,9 @@ func (r *Layer2Relayer) initializeGenesis() error {
func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
// encode "importGenesisBatch" transaction calldata
calldata, packErr := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if packErr != nil {
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
calldata, err := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if err != nil {
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, err)
}
// submit genesis batch to L1 rollup contract
@@ -291,8 +284,8 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
func (r *Layer2Relayer) ProcessGasPriceOracle() {
r.metrics.rollupL2RelayerGasPriceOraclerRunTotal.Inc()
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
if err != nil {
log.Error("Failed to GetLatestBatch", "err", err)
if batch == nil || err != nil {
log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
return
}
@@ -337,78 +330,95 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
func (r *Layer2Relayer) ProcessPendingBatches() {
// get pending batches from database in ascending order by their index.
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, 5)
batches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, 5)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
}
for _, dbBatch := range dbBatches {
for _, batch := range batches {
r.metrics.rollupL2RelayerProcessPendingBatchTotal.Inc()
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
// get current header and parent header.
daBatch, err := codecv0.NewDABatchFromBytes(batch.BatchHeader)
if err != nil {
log.Error("failed to get chunks in range", "err", err)
log.Error("Failed to initialize new DA batch from bytes", "index", batch.Index, "hash", batch.Hash, "err", err)
return
}
parentBatch := &orm.Batch{}
if batch.Index > 0 {
parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
if err != nil {
log.Error("Failed to get parent batch header", "index", batch.Index-1, "error", err)
return
}
if types.RollupStatus(parentBatch.RollupStatus) == types.RollupCommitFailed {
log.Error("Previous batch commit failed, halting further committing",
"index", parentBatch.Index, "tx hash", parentBatch.CommitTxHash)
return
}
}
// get the metadata of chunks for the batch
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, batch.StartChunkIndex, batch.EndChunkIndex)
if err != nil {
log.Error("Failed to fetch chunks",
"start index", batch.StartChunkIndex,
"end index", batch.EndChunkIndex, "error", err)
return
}
chunks := make([]*encoding.Chunk, len(dbChunks))
encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks {
blocks, getErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if getErr != nil {
log.Error("failed to get blocks in range", "err", getErr)
var blocks []*encoding.Block
blocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil {
log.Error("Failed to fetch blocks", "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return
}
chunks[i] = &encoding.Chunk{Blocks: blocks}
chunk := &encoding.Chunk{
Blocks: blocks,
}
var daChunk *codecv0.DAChunk
daChunk, err = codecv0.NewDAChunk(chunk, c.TotalL1MessagesPoppedBefore)
if err != nil {
log.Error("Failed to initialize new DA chunk", "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return
}
var daChunkBytes []byte
daChunkBytes, err = daChunk.Encode()
if err != nil {
log.Error("Failed to encode DA chunk", "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return
}
encodedChunks[i] = daChunkBytes
}
if dbBatch.Index == 0 {
log.Error("invalid args: batch index is 0, should only happen in committing genesis batch")
calldata, err := r.l1RollupABI.Pack("commitBatch", daBatch.Version, parentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
if err != nil {
log.Error("Failed to pack commitBatch", "index", batch.Index, "error", err)
return
}
dbParentBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, dbBatch.Index-1)
if getErr != nil {
log.Error("failed to get parent batch header", "err", getErr)
return
}
var calldata []byte
var blob *kzg4844.Blob
if !r.chainCfg.IsBanach(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv0
calldata, err = r.constructCommitBatchPayloadCodecV0(dbBatch, dbParentBatch, dbChunks, chunks)
if err != nil {
log.Error("failed to construct commitBatch payload codecv0", "index", dbBatch.Index, "err", err)
return
}
} else { // codecv1
calldata, blob, err = r.constructCommitBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks)
if err != nil {
log.Error("failed to construct commitBatch payload codecv1", "index", dbBatch.Index, "err", err)
return
}
}
// fallbackGasLimit is non-zero only in sending non-blob transactions.
fallbackGasLimit := uint64(float64(dbBatch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
if types.RollupStatus(dbBatch.RollupStatus) == types.RollupCommitFailed {
// use eth_estimateGas if this batch has been committed and failed at least once.
// send transaction
fallbackGasLimit := uint64(float64(batch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
if types.RollupStatus(batch.RollupStatus) == types.RollupCommitFailed {
// use eth_estimateGas if this batch previously failed to commit.
fallbackGasLimit = 0
log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", dbBatch.Hash)
log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", batch.Hash)
}
txHash, err := r.commitSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, blob, fallbackGasLimit)
txHash, err := r.commitSender.SendTransaction(batch.Hash, &r.cfg.RollupContractAddress, calldata, nil, fallbackGasLimit)
if err != nil {
log.Error(
"Failed to send commitBatch tx to layer1",
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"err", err,
)
log.Debug(
"Failed to send commitBatch tx to layer1",
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(calldata),
"err", err,
@@ -416,13 +426,13 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupCommitting)
err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batch.Hash, txHash.String(), types.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", dbBatch.Hash, "index", dbBatch.Index, "err", err)
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
return
}
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Inc()
log.Info("Sent the commitBatch tx to layer1", "batch index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
}
}
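
A worked example of the fallbackGasLimit computed above (the gas total and multiplier are illustrative values, not taken from the repository's config):

package main

import "fmt"

func main() {
	totalL1CommitGas := uint64(1_000_000) // illustrative
	l1CommitGasLimitMultiplier := 1.5     // illustrative
	fallbackGasLimit := uint64(float64(totalL1CommitGas) * l1CommitGasLimitMultiplier)
	fmt.Println(fallbackGasLimit) // 1500000

	// After a RollupCommitFailed status the relayer resets this to 0, which makes
	// the sender fall back to eth_estimateGas instead of the precomputed limit.
}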
@@ -491,98 +501,103 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
}
func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error {
func (r *Layer2Relayer) finalizeBatch(batch *orm.Batch, withProof bool) error {
// Check batch status before send `finalizeBatch` tx.
if r.cfg.ChainMonitor.Enabled {
var batchStatus bool
batchStatus, err := r.getBatchStatusByIndex(dbBatch)
batchStatus, err := r.getBatchStatusByIndex(batch)
if err != nil {
r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", dbBatch.Index, "err", err)
log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
return err
}
if !batchStatus {
r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", dbBatch.Index)
log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
return err
}
}
if dbBatch.Index == 0 {
return fmt.Errorf("invalid args: batch index is 0, should only happen in finalizing genesis batch")
var parentBatchStateRoot string
if batch.Index > 0 {
var parentBatch *orm.Batch
parentBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
// handle unexpected db error
if err != nil {
log.Error("Failed to get batch", "index", batch.Index-1, "err", err)
return err
}
parentBatchStateRoot = parentBatch.StateRoot
}
dbParentBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, dbBatch.Index-1)
if getErr != nil {
return fmt.Errorf("failed to get batch, index: %d, err: %w", dbBatch.Index-1, getErr)
}
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
if err != nil {
return fmt.Errorf("failed to fetch chunks: %w", err)
}
var aggProof *message.BatchProof
var txCalldata []byte
if withProof {
aggProof, getErr = r.batchOrm.GetVerifiedProofByHash(r.ctx, dbBatch.Hash)
if getErr != nil {
return fmt.Errorf("failed to get verified proof by hash, index: %d, err: %w", dbBatch.Index, getErr)
aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, batch.Hash)
if err != nil {
log.Error("get verified proof by hash failed", "hash", batch.Hash, "err", err)
return err
}
if err = aggProof.SanityCheck(); err != nil {
return fmt.Errorf("failed to check agg_proof sanity, index: %d, err: %w", dbBatch.Index, err)
log.Error("agg_proof sanity check fails", "hash", batch.Hash, "error", err)
return err
}
txCalldata, err = r.l1RollupABI.Pack(
"finalizeBatchWithProof",
batch.BatchHeader,
common.HexToHash(parentBatchStateRoot),
common.HexToHash(batch.StateRoot),
common.HexToHash(batch.WithdrawRoot),
aggProof.Proof,
)
if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err)
return err
}
} else {
var err error
txCalldata, err = r.l1RollupABI.Pack(
"finalizeBatch",
batch.BatchHeader,
common.HexToHash(parentBatchStateRoot),
common.HexToHash(batch.StateRoot),
common.HexToHash(batch.WithdrawRoot),
)
if err != nil {
log.Error("Pack finalizeBatch failed", "err", err)
return err
}
}
var calldata []byte
if !r.chainCfg.IsBanach(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv0
calldata, err = r.constructFinalizeBatchPayloadCodecV0(dbBatch, dbParentBatch, aggProof)
if err != nil {
return fmt.Errorf("failed to construct commitBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err)
}
} else { // codecv1
chunks := make([]*encoding.Chunk, len(dbChunks))
for i, c := range dbChunks {
blocks, dbErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if dbErr != nil {
return fmt.Errorf("failed to fetch blocks: %w", dbErr)
}
chunks[i] = &encoding.Chunk{Blocks: blocks}
}
calldata, err = r.constructFinalizeBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks, aggProof)
if err != nil {
return fmt.Errorf("failed to construct commitBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err)
}
}
txHash, err := r.finalizeSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0)
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.finalizeSender.SendTransaction(batch.Hash, &r.cfg.RollupContractAddress, txCalldata, nil, 0)
finalizeTxHash := &txHash
if err != nil {
log.Error(
"finalizeBatch in layer1 failed",
"with proof", withProof,
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"err", err,
)
log.Debug(
"finalizeBatch in layer1 failed",
"with proof", withProof,
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(calldata),
"calldata", common.Bytes2Hex(txCalldata),
"err", err,
)
return err
}
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash)
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", batch.Index, "batch hash", batch.Hash, "tx hash", batch.Hash)
// record and sync with db, @todo handle db error
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupFinalizing); err != nil {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err)
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch.Hash, finalizeTxHash.String(), types.RollupFinalizing); err != nil {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", batch.Index, "batch hash", batch.Hash, "tx hash", finalizeTxHash.String(), "err", err)
return err
}
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
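
One detail in the branch above: for batch index 0 the parent lookup is skipped, so parentBatchStateRoot stays empty, and common.HexToHash("") decodes to the zero hash, which is what would end up in the finalize calldata in that case. A small sketch of that behavior:

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

func main() {
	parentBatchStateRoot := "" // batch.Index == 0 skips the parent batch lookup above
	fmt.Println(common.HexToHash(parentBatchStateRoot) == common.Hash{}) // true
}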
@@ -714,140 +729,6 @@ func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
}
}
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, error) {
daBatch, err := codecv0.NewDABatchFromBytes(dbBatch.BatchHeader)
if err != nil {
return nil, fmt.Errorf("failed to create DA batch from bytes: %w", err)
}
encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks {
daChunk, createErr := codecv0.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
if createErr != nil {
return nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
}
daChunkBytes, encodeErr := daChunk.Encode()
if encodeErr != nil {
return nil, fmt.Errorf("failed to encode DA chunk: %w", encodeErr)
}
encodedChunks[i] = daChunkBytes
}
calldata, packErr := r.l1RollupABI.Pack("commitBatch", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
if packErr != nil {
return nil, fmt.Errorf("failed to pack commitBatch: %w", packErr)
}
return calldata, nil
}
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV1(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
batch := &encoding.Batch{
Index: dbBatch.Index,
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
Chunks: chunks,
}
daBatch, createErr := codecv1.NewDABatch(batch)
if createErr != nil {
return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr)
}
encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks {
daChunk, createErr := codecv1.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
if createErr != nil {
return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
}
encodedChunks[i] = daChunk.Encode()
}
calldata, packErr := r.l1RollupABI.Pack("commitBatch", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
if packErr != nil {
return nil, nil, fmt.Errorf("failed to pack commitBatch: %w", packErr)
}
return calldata, daBatch.Blob(), nil
}
func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, aggProof *message.BatchProof) ([]byte, error) {
if aggProof != nil { // finalizeBatch with proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBatchWithProof",
dbBatch.BatchHeader,
common.HexToHash(dbParentBatch.StateRoot),
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
aggProof.Proof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBatchWithProof: %w", packErr)
}
return calldata, nil
}
// finalizeBatch without proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBatch",
dbBatch.BatchHeader,
common.HexToHash(dbParentBatch.StateRoot),
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBatch: %w", packErr)
}
return calldata, nil
}
func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV1(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk, aggProof *message.BatchProof) ([]byte, error) {
batch := &encoding.Batch{
Index: dbBatch.Index,
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
Chunks: chunks,
}
daBatch, createErr := codecv1.NewDABatch(batch)
if createErr != nil {
return nil, fmt.Errorf("failed to create DA batch: %w", createErr)
}
blobDataProof, getErr := daBatch.BlobDataProof()
if getErr != nil {
return nil, fmt.Errorf("failed to get blob data proof: %w", getErr)
}
if aggProof != nil { // finalizeBatch4844 with proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBatchWithProof4844",
dbBatch.BatchHeader,
common.HexToHash(dbParentBatch.StateRoot),
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
blobDataProof,
aggProof.Proof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBatchWithProof4844: %w", packErr)
}
return calldata, nil
}
// finalizeBatch4844 without proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBatch4844",
dbBatch.BatchHeader,
common.HexToHash(dbParentBatch.StateRoot),
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
blobDataProof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBatch4844: %w", packErr)
}
return calldata, nil
}
// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
// for unit test
func (r *Layer2Relayer) StopSenders() {

View File

@@ -11,9 +11,7 @@ import (
"github.com/agiledragon/gomonkey/v2"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/params"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
@@ -42,173 +40,128 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
}
func testL2RelayerProcessPendingBatches(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
for _, codecVersion := range codecVersions {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
chainConfig := &params.ChainConfig{}
if codecVersion == encoding.CodecV0 {
chainConfig.BanachBlock = big.NewInt(0)
}
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer relayer.StopSenders()
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
patchGuard := gomonkey.ApplyMethodFunc(l2Cli, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
return nil
})
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)
relayer.ProcessPendingBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitting, statuses[0])
relayer.StopSenders()
patchGuard.Reset()
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
relayer.ProcessPendingBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitting, statuses[0])
}
func testL2RelayerProcessCommittedBatches(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
for _, codecVersion := range codecVersions {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
chainConfig := &params.ChainConfig{}
if codecVersion == encoding.CodecV0 {
chainConfig.BanachBlock = big.NewInt(0)
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer relayer.StopSenders()
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupCommitted, statuses[0])
proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), dbBatch.Hash, proof, 100)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
relayer.StopSenders()
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupCommitted, statuses[0])
proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), dbBatch.Hash, proof, 100)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
}
func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
for _, codecVersion := range codecVersions {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
chainConfig := &params.ChainConfig{}
if codecVersion == encoding.CodecV0 {
chainConfig.BanachBlock = big.NewInt(0)
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
l2Cfg := cfg.L2Config
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer relayer.StopSenders()
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
})
assert.True(t, ok)
relayer.StopSenders()
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
})
assert.True(t, ok)
}
func testL2RelayerCommitConfirm(t *testing.T) {
@@ -219,7 +172,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -229,13 +182,13 @@ func testL2RelayerCommitConfirm(t *testing.T) {
batchHashes := make([]string, len(isSuccessful))
for i := range batchHashes {
batch := &encoding.Batch{
Index: uint64(i + 1),
Index: uint64(i),
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
batchHashes[i] = dbBatch.Hash
}
@@ -275,7 +228,7 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -285,13 +238,13 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
batchHashes := make([]string, len(isSuccessful))
for i := range batchHashes {
batch := &encoding.Batch{
Index: uint64(i + 1),
Index: uint64(i),
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
batchHashes[i] = dbBatch.Hash
}
@@ -335,7 +288,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
}
batchOrm := orm.NewBatch(db)
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV0)
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1)
assert.NoError(t, err)
batch2 := &encoding.Batch{
@@ -345,14 +298,14 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
Chunks: []*encoding.Chunk{chunk2},
}
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV0)
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2)
assert.NoError(t, err)
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -392,7 +345,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
@@ -487,32 +440,32 @@ func testGetBatchStatusByIndex(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV0)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV0)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 1,
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
status, err := relayer.getBatchStatusByIndex(dbBatch)
assert.NoError(t, err)
assert.Equal(t, true, status)

View File

@@ -15,7 +15,6 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/docker"
dockercompose "scroll-tech/common/docker-compose/l1"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
@@ -26,8 +25,7 @@ var (
// config
cfg *config.Config
base *docker.App
posL1TestEnv *dockercompose.PoSL1TestEnv
base *docker.App
// l2geth client
l2Cli *ethclient.Client
@@ -53,10 +51,9 @@ func setupEnv(t *testing.T) {
cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err)
base.RunL2Geth(t)
base.RunDBImage(t)
base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = &database.Config{
DSN: base.DBConfig.DSN,
@@ -98,19 +95,10 @@ func setupEnv(t *testing.T) {
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
base.Free()
var err error
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
if err != nil {
log.Crit("failed to create PoS L1 test environment", "err", err)
}
if err := posL1TestEnv.Start(); err != nil {
log.Crit("failed to start PoS L1 test environment", "err", err)
}
defer posL1TestEnv.Stop()
m.Run()
base.Free()
}
func TestFunctions(t *testing.T) {

View File

@@ -37,6 +37,9 @@ const (
// DynamicFeeTxType type for DynamicFeeTx
DynamicFeeTxType = "DynamicFeeTx"
// BlobTxType type for BlobTx
BlobTxType = "BlobTx"
)
// Confirmation struct used to indicate transaction confirmation details
@@ -160,9 +163,8 @@ func (s *Sender) getFeeData(target *common.Address, data []byte, sidecar *gethTy
case LegacyTxType:
return s.estimateLegacyGas(target, data, fallbackGasLimit)
case DynamicFeeTxType:
if sidecar == nil {
return s.estimateDynamicGas(target, data, baseFee, fallbackGasLimit)
}
return s.estimateDynamicGas(target, data, baseFee, fallbackGasLimit)
case BlobTxType:
return s.estimateBlobGas(target, data, sidecar, baseFee, blobBaseFee, fallbackGasLimit)
default:
return nil, fmt.Errorf("unsupported transaction type: %s", s.config.TxType)
@@ -179,7 +181,7 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
err error
)
if blob != nil {
if s.config.TxType == BlobTxType {
sidecar, err = makeSidecar(blob)
if err != nil {
log.Error("failed to make sidecar for blob transaction", "error", err)
@@ -233,36 +235,39 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, data
Data: data,
}
case DynamicFeeTxType:
if sidecar == nil {
txData = &gethTypes.DynamicFeeTx{
Nonce: nonce,
To: target,
Data: data,
Gas: feeData.gasLimit,
AccessList: feeData.accessList,
ChainID: s.chainID,
GasTipCap: feeData.gasTipCap,
GasFeeCap: feeData.gasFeeCap,
}
} else {
if target == nil {
log.Error("blob transaction to address cannot be nil", "address", s.auth.From.String(), "chainID", s.chainID.Uint64(), "nonce", s.auth.Nonce.Uint64())
return nil, errors.New("blob transaction to address cannot be nil")
}
txData = &gethTypes.DynamicFeeTx{
Nonce: nonce,
To: target,
Data: data,
Gas: feeData.gasLimit,
AccessList: feeData.accessList,
ChainID: s.chainID,
GasTipCap: feeData.gasTipCap,
GasFeeCap: feeData.gasFeeCap,
}
case BlobTxType:
if target == nil {
log.Error("blob transaction to address cannot be nil", "address", s.auth.From.String(), "chainID", s.chainID.Uint64(), "nonce", s.auth.Nonce.Uint64())
return nil, errors.New("blob transaction to address cannot be nil")
}
txData = &gethTypes.BlobTx{
ChainID: uint256.MustFromBig(s.chainID),
Nonce: nonce,
GasTipCap: uint256.MustFromBig(feeData.gasTipCap),
GasFeeCap: uint256.MustFromBig(feeData.gasFeeCap),
Gas: feeData.gasLimit,
To: *target,
Data: data,
AccessList: feeData.accessList,
BlobFeeCap: uint256.MustFromBig(feeData.blobGasFeeCap),
BlobHashes: sidecar.BlobHashes(),
Sidecar: sidecar,
}
if sidecar == nil {
log.Error("blob transaction sidecar cannot be nil", "address", s.auth.From.String(), "chainID", s.chainID.Uint64(), "nonce", s.auth.Nonce.Uint64())
return nil, errors.New("blob transaction sidecar cannot be nil")
}
txData = &gethTypes.BlobTx{
ChainID: uint256.MustFromBig(s.chainID),
Nonce: nonce,
GasTipCap: uint256.MustFromBig(feeData.gasTipCap),
GasFeeCap: uint256.MustFromBig(feeData.gasFeeCap),
Gas: feeData.gasLimit,
To: *target,
Data: data,
AccessList: feeData.accessList,
BlobFeeCap: uint256.MustFromBig(feeData.blobGasFeeCap),
BlobHashes: sidecar.BlobHashes(),
Sidecar: sidecar,
}
}
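
A hedged usage sketch of how a caller drives the resulting sender, mirroring the test calls further down (the submit helper and its argument names are illustrative, assuming the sender package's existing imports of common and kzg4844):

// submit sends a transaction through the sender: with LegacyTx or DynamicFeeTx
// configs the blob argument is nil; with a BlobTx config the sender wraps the
// blob into a sidecar via makeSidecar before signing.
func submit(s *Sender, target common.Address, calldata []byte, blob *kzg4844.Blob) error {
	contextID := "example-commit" // illustrative context ID
	_, err := s.SendTransaction(contextID, &target, calldata, blob, 0)
	return err
}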
@@ -352,95 +357,94 @@ func (s *Sender) resubmitTransaction(tx *gethTypes.Transaction, baseFee, blobBas
txInfo["adjusted_gas_price"] = gasPrice.Uint64()
case DynamicFeeTxType:
if tx.BlobTxSidecar() == nil {
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
gasTipCap := new(big.Int).Mul(originalGasTipCap, escalateMultipleNum)
gasTipCap = new(big.Int).Div(gasTipCap, escalateMultipleDen)
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, escalateMultipleNum)
gasFeeCap = new(big.Int).Div(gasFeeCap, escalateMultipleDen)
gasTipCap := new(big.Int).Mul(originalGasTipCap, escalateMultipleNum)
gasTipCap = new(big.Int).Div(gasTipCap, escalateMultipleDen)
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, escalateMultipleNum)
gasFeeCap = new(big.Int).Div(gasFeeCap, escalateMultipleDen)
// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}
// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}
// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}
if originalGasTipCap.Cmp(gasTipCap) == 0 {
log.Warn("gas tip cap bump corner case, add 1 wei", "original", originalGasTipCap.Uint64(), "adjusted", gasTipCap.Uint64())
gasTipCap = new(big.Int).Add(gasTipCap, big.NewInt(1))
}
if originalGasFeeCap.Cmp(gasFeeCap) == 0 {
log.Warn("gas fee cap bump corner case, add 1 wei", "original", originalGasFeeCap.Uint64(), "adjusted", gasFeeCap.Uint64())
gasFeeCap = new(big.Int).Add(gasFeeCap, big.NewInt(1))
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
} else {
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
originalBlobGasFeeCap := tx.BlobGasFeeCap()
// bumping at least 100%
gasTipCap := new(big.Int).Mul(originalGasTipCap, big.NewInt(2))
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, big.NewInt(2))
blobGasFeeCap := new(big.Int).Mul(originalBlobGasFeeCap, big.NewInt(2))
// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}
// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}
// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}
// adjust for rising blobbasefee
currentBlobGasFeeCap := getBlobGasFeeCap(new(big.Int).SetUint64(blobBaseFee))
if blobGasFeeCap.Cmp(currentBlobGasFeeCap) < 0 {
blobGasFeeCap = currentBlobGasFeeCap
}
// but don't exceed maxBlobGasPrice
if blobGasFeeCap.Cmp(maxBlobGasPrice) > 0 {
blobGasFeeCap = maxBlobGasPrice
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
feeData.blobGasFeeCap = blobGasFeeCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
txInfo["original_blob_gas_fee_cap"] = originalBlobGasFeeCap.Uint64()
txInfo["adjusted_blob_gas_fee_cap"] = blobGasFeeCap.Uint64()
// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}
// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}
// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}
if originalGasTipCap.Cmp(gasTipCap) == 0 {
log.Warn("gas tip cap bump corner case, add 1 wei", "original", originalGasTipCap.Uint64(), "adjusted", gasTipCap.Uint64())
gasTipCap = new(big.Int).Add(gasTipCap, big.NewInt(1))
}
if originalGasFeeCap.Cmp(gasFeeCap) == 0 {
log.Warn("gas fee cap bump corner case, add 1 wei", "original", originalGasFeeCap.Uint64(), "adjusted", gasFeeCap.Uint64())
gasFeeCap = new(big.Int).Add(gasFeeCap, big.NewInt(1))
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
case BlobTxType:
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
originalBlobGasFeeCap := tx.BlobGasFeeCap()
// bumping at least 100%
gasTipCap := new(big.Int).Mul(originalGasTipCap, big.NewInt(2))
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, big.NewInt(2))
blobGasFeeCap := new(big.Int).Mul(originalBlobGasFeeCap, big.NewInt(2))
// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}
// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}
// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}
// adjust for rising blobbasefee
currentBlobGasFeeCap := getBlobGasFeeCap(new(big.Int).SetUint64(blobBaseFee))
if blobGasFeeCap.Cmp(currentBlobGasFeeCap) < 0 {
blobGasFeeCap = currentBlobGasFeeCap
}
// but don't exceed maxBlobGasPrice
if blobGasFeeCap.Cmp(maxBlobGasPrice) > 0 {
blobGasFeeCap = maxBlobGasPrice
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
feeData.blobGasFeeCap = blobGasFeeCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
txInfo["original_blob_gas_fee_cap"] = originalBlobGasFeeCap.Uint64()
txInfo["adjusted_blob_gas_fee_cap"] = blobGasFeeCap.Uint64()
default:
return nil, fmt.Errorf("unsupported transaction type: %s", s.config.TxType)
}
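
A worked example of the escalation arithmetic in the DynamicFeeTx branch above, using escalate_multiple_num = 2 and escalate_multiple_den = 1 together with the max_gas_price from the sample sender_config (the original tip and fee caps are illustrative wei values):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	escalateMultipleNum := big.NewInt(2)
	escalateMultipleDen := big.NewInt(1)
	maxGasPrice := big.NewInt(1_000_000_000_000) // 1000 gwei, from the sample config

	originalGasTipCap := big.NewInt(1_000_000_000)  // 1 gwei, illustrative
	originalGasFeeCap := big.NewInt(20_000_000_000) // 20 gwei, illustrative

	gasTipCap := new(big.Int).Div(new(big.Int).Mul(originalGasTipCap, escalateMultipleNum), escalateMultipleDen)
	gasFeeCap := new(big.Int).Div(new(big.Int).Mul(originalGasFeeCap, escalateMultipleNum), escalateMultipleDen)

	// cap at maxGasPrice and keep the tip below the fee cap, as in resubmitTransaction
	if gasFeeCap.Cmp(maxGasPrice) > 0 {
		gasFeeCap = maxGasPrice
	}
	if gasTipCap.Cmp(gasFeeCap) > 0 {
		gasTipCap = gasFeeCap
	}
	fmt.Println(gasTipCap, gasFeeCap) // 2000000000 40000000000
}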

View File

@@ -45,8 +45,7 @@ var (
cfg *config.Config
base *docker.App
posL1TestEnv *dockercompose.PoSL1TestEnv
txTypes = []string{"LegacyTx", "DynamicFeeTx", "DynamicFeeTx"}
txBlob = []*kzg4844.Blob{nil, nil, randBlob()}
txTypes = []string{"LegacyTx", "DynamicFeeTx", "BlobTx"}
txUint8Types = []uint8{0, 2, 3}
db *gorm.DB
testContractsAddress common.Address
@@ -81,7 +80,7 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err)
privateKey = priv
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
base.RunDBImage(t)
db, err = database.InitDB(
@@ -111,7 +110,7 @@ func setupEnv(t *testing.T) {
testContractsAddress = crypto.CreateAddress(auth.From, nonce)
tx := gethTypes.NewContractCreation(nonce, big.NewInt(0), 10000000, big.NewInt(10000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
tx := gethTypes.NewContractCreation(nonce, big.NewInt(0), 1000000, big.NewInt(10000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
signedTx, err := auth.Signer(auth.From, tx)
assert.NoError(t, err)
err = l1Client.SendTransaction(context.Background(), signedTx)
@@ -159,14 +158,14 @@ func testNewSender(t *testing.T) {
assert.NoError(t, migrate.ResetDB(sqlDB))
// exit by Stop()
cfgCopy1 := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy1.TxType = txType
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
newSender1.Stop()
// exit by ctx.Done()
cfgCopy2 := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy2.TxType = txType
subCtx, cancel := context.WithCancel(context.Background())
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
@@ -181,12 +180,12 @@ func testSendAndRetrieveTransaction(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
hash, err := s.SendTransaction("0", &common.Address{}, nil, txBlob[i], 0)
hash, err := s.SendTransaction("0", &common.Address{}, nil, randBlob(), 0)
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
assert.NoError(t, err)
@@ -211,12 +210,12 @@ func testSendAndRetrieveTransaction(t *testing.T) {
}
func testFallbackGasLimit(t *testing.T) {
for i, txType := range txTypes {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
@@ -226,7 +225,7 @@ func testFallbackGasLimit(t *testing.T) {
assert.NoError(t, err)
// FallbackGasLimit = 0
txHash0, err := s.SendTransaction("0", &common.Address{}, nil, txBlob[i], 0)
txHash0, err := s.SendTransaction("0", &common.Address{}, nil, randBlob(), 0)
assert.NoError(t, err)
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
assert.NoError(t, err)
@@ -246,7 +245,7 @@ func testFallbackGasLimit(t *testing.T) {
},
)
txHash1, err := s.SendTransaction("1", &common.Address{}, nil, txBlob[i], 100000)
txHash1, err := s.SendTransaction("1", &common.Address{}, nil, randBlob(), 100000)
assert.NoError(t, err)
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
assert.NoError(t, err)
@@ -264,8 +263,8 @@ func testFallbackGasLimit(t *testing.T) {
}
func testResubmitZeroGasPriceTransaction(t *testing.T) {
for i, txType := range txTypes {
if txBlob[i] != nil {
for _, txType := range txTypes {
if txType == BlobTxType {
continue
}
@@ -273,7 +272,7 @@ func testResubmitZeroGasPriceTransaction(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
@@ -313,7 +312,7 @@ func testAccessListTransactionGasLimit(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
@@ -324,20 +323,16 @@ func testAccessListTransactionGasLimit(t *testing.T) {
data, err := l2GasOracleABI.Pack("setL2BaseFee", big.NewInt(int64(i+1)))
assert.NoError(t, err)
var sidecar *gethTypes.BlobTxSidecar
if txBlob[i] != nil {
sidecar, err = makeSidecar(txBlob[i])
assert.NoError(t, err)
}
sidecar, err := makeSidecar(randBlob())
assert.NoError(t, err)
gasLimit, accessList, err := s.estimateGasLimit(&testContractsAddress, data, sidecar, nil, big.NewInt(1000000000), big.NewInt(1000000000), big.NewInt(1000000000))
assert.NoError(t, err)
if txType == LegacyTxType { // Legacy transactions can not have an access list.
assert.Equal(t, uint64(43956), gasLimit)
assert.Equal(t, uint64(43935), gasLimit)
assert.Nil(t, accessList)
} else { // Dynamic fee and blob transactions can have an access list.
assert.Equal(t, uint64(43479), gasLimit)
assert.Equal(t, uint64(43458), gasLimit)
assert.NotNil(t, accessList)
}
@@ -346,12 +341,12 @@ func testAccessListTransactionGasLimit(t *testing.T) {
}
func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
for i, txType := range txTypes {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
// Bump gas price, gas tip cap and gas fee cap just touch the minimum threshold of 10% (default config of geth).
cfgCopy.EscalateMultipleNum = 110
cfgCopy.EscalateMultipleDen = 100
@@ -365,11 +360,8 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
blobGasFeeCap: big.NewInt(1000000000),
gasLimit: 50000,
}
var sidecar *gethTypes.BlobTxSidecar
if txBlob[i] != nil {
sidecar, err = makeSidecar(txBlob[i])
assert.NoError(t, err)
}
sidecar, err := makeSidecar(randBlob())
assert.NoError(t, err)
tx, err := s.createAndSendTx(feeData, &common.Address{}, nil, sidecar, nil)
assert.NoError(t, err)
assert.NotNil(t, tx)
@@ -391,8 +383,8 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
}
func testResubmitUnderpricedTransaction(t *testing.T) {
for i, txType := range txTypes {
if txBlob[i] != nil {
for _, txType := range txTypes {
if txType == BlobTxType {
continue
}
@@ -400,7 +392,7 @@ func testResubmitUnderpricedTransaction(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
// Bump gas price, gas tip cap and gas fee cap less than 10% (default config of geth).
cfgCopy.EscalateMultipleNum = 109
cfgCopy.EscalateMultipleDen = 100
@@ -439,7 +431,7 @@ func testResubmitDynamicFeeTransactionWithRisingBaseFee(t *testing.T) {
assert.NoError(t, migrate.ResetDB(sqlDB))
txType := "DynamicFeeTx"
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
@@ -481,8 +473,9 @@ func testResubmitBlobTransactionWithRisingBaseFeeAndBlobBaseFee(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = DynamicFeeTxType
txType := "BlobTx"
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
@@ -538,7 +531,7 @@ func testCheckPendingTransactionTxConfirmed(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeCommitBatch, db, nil)
assert.NoError(t, err)
@@ -579,7 +572,7 @@ func testCheckPendingTransactionResubmitTxConfirmed(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.EscalateBlocks = 0
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeFinalizeBatch, db, nil)
@@ -639,7 +632,7 @@ func testCheckPendingTransactionReplacedTxConfirmed(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.EscalateBlocks = 0
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeL1GasOracle, db, nil)
@@ -709,7 +702,7 @@ func testCheckPendingTransactionTxMultipleTimesWithOnlyOneTxPending(t *testing.T
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.EscalateBlocks = 0
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeCommitBatch, db, nil)
@@ -784,8 +777,8 @@ func testBlobTransactionWithBlobhashOpContractCall(t *testing.T) {
)
assert.NoError(t, err)
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = DynamicFeeTxType
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = BlobTxType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeL1GasOracle, db, nil)
assert.NoError(t, err)
defer s.Stop()
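
The sender tests above switch from the per-type txBlob fixtures to generating a fresh blob per call with randBlob(), wrapped into a sidecar via makeSidecar(). Neither helper appears in this hunk; the following is only a rough sketch of what such a random-blob helper could look like, assuming geth's kzg4844.Blob type (a 131072-byte array) and the crypto/kzg4844 package path in the scroll-tech go-ethereum fork.

package sketch // illustrative only; the real helper in the repository may differ

import (
	"crypto/rand"

	"github.com/scroll-tech/go-ethereum/crypto/kzg4844" // assumed package path
)

// randBlobSketch fills a blob with random bytes and zeroes the first (most
// significant) byte of every 32-byte field element so each element stays
// below the BLS12-381 scalar modulus, as KZG libraries require.
func randBlobSketch() *kzg4844.Blob {
	var blob kzg4844.Blob
	if _, err := rand.Read(blob[:]); err != nil {
		panic(err)
	}
	for i := 0; i < len(blob); i += 32 {
		blob[i] = 0
	}
	return &blob
}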


@@ -3,7 +3,6 @@ package watcher
import (
"context"
"fmt"
"math/big"
"time"
"github.com/prometheus/client_golang/prometheus"
@@ -15,10 +14,10 @@ import (
"scroll-tech/common/forks"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)
// BatchProposer proposes batches based on available unbatched chunks.
@@ -37,15 +36,12 @@ type BatchProposer struct {
gasCostIncreaseMultiplier float64
forkMap map[uint64]bool
chainCfg *params.ChainConfig
batchProposerCircleTotal prometheus.Counter
proposeBatchFailureTotal prometheus.Counter
proposeBatchUpdateInfoTotal prometheus.Counter
proposeBatchUpdateInfoFailureTotal prometheus.Counter
totalL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalL1CommitBlobSize prometheus.Gauge
batchChunksNum prometheus.Gauge
batchFirstBlockTimeoutReached prometheus.Counter
batchChunksProposeNotEnoughTotal prometheus.Counter
@@ -53,7 +49,7 @@ type BatchProposer struct {
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
forkHeights, forkMap, _ := forks.CollectSortedForkHeights(chainCfg)
forkHeights, forkMap := forks.CollectSortedForkHeights(chainCfg)
log.Debug("new batch proposer",
"maxChunkNumPerBatch", cfg.MaxChunkNumPerBatch,
"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
@@ -62,7 +58,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"forkHeights", forkHeights)
p := &BatchProposer{
return &BatchProposer{
ctx: ctx,
db: db,
batchOrm: orm.NewBatch(db),
@@ -74,7 +70,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
forkMap: forkMap,
chainCfg: chainCfg,
batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_batch_circle_total",
@@ -100,10 +95,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
Name: "rollup_propose_batch_total_l1_call_data_size",
Help: "The total l1 call data size",
}),
totalL1CommitBlobSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_total_l1_commit_blob_size",
Help: "The total l1 commit blob size",
}),
batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_chunks_number",
Help: "The number of chunks in the batch",
@@ -117,23 +108,22 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
Help: "Total number of batch chunk propose not enough",
}),
}
return p
}
// TryProposeBatch tries to propose a new batches.
func (p *BatchProposer) TryProposeBatch() {
p.batchProposerCircleTotal.Inc()
if err := p.proposeBatch(); err != nil {
batch, err := p.proposeBatch()
if err != nil {
p.proposeBatchFailureTotal.Inc()
log.Error("proposeBatchChunks failed", "err", err)
return
}
}
func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion encoding.CodecVersion) error {
err := p.db.Transaction(func(dbTX *gorm.DB) error {
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, codecVersion, dbTX)
if batch == nil {
return
}
err = p.db.Transaction(func(dbTX *gorm.DB) error {
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure",
"start chunk index", batch.StartChunkIndex, "end chunk index", batch.EndChunkIndex, "error", dbErr)
@@ -150,23 +140,22 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
p.proposeBatchUpdateInfoFailureTotal.Inc()
log.Error("update batch info in db failed", "err", err)
}
return nil
}
func (p *BatchProposer) proposeBatch() error {
func (p *BatchProposer) proposeBatch() (*encoding.Batch, error) {
unbatchedChunkIndex, err := p.batchOrm.GetFirstUnbatchedChunkIndex(p.ctx)
if err != nil {
return err
return nil, err
}
// select at most p.maxChunkNumPerBatch chunks
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, unbatchedChunkIndex, int(p.maxChunkNumPerBatch))
if err != nil {
return err
return nil, err
}
if len(dbChunks) == 0 {
return nil
return nil, nil
}
maxChunksThisBatch := p.maxChunkNumPerBatch
@@ -181,89 +170,124 @@ func (p *BatchProposer) proposeBatch() error {
}
}
codecVersion := encoding.CodecV0
if p.chainCfg.IsBanach(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) {
codecVersion = encoding.CodecV1
}
daChunks, err := p.getDAChunks(dbChunks)
if err != nil {
return err
return nil, err
}
dbParentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
parentDBBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
if err != nil {
return err
return nil, err
}
var batch encoding.Batch
batch.Index = dbParentBatch.Index + 1
batch.ParentBatchHash = common.HexToHash(dbParentBatch.Hash)
parentBatchEndBlockNumber := daChunks[0].Blocks[0].Header.Number.Uint64() - 1
parentBatchCodecVersion := encoding.CodecV0
// Genesis batch uses codecv0 encoding, otherwise using banach fork to choose codec version.
if dbParentBatch.Index > 0 && p.chainCfg.IsBanach(new(big.Int).SetUint64(parentBatchEndBlockNumber)) {
parentBatchCodecVersion = encoding.CodecV1
}
batch.TotalL1MessagePoppedBefore, err = utils.GetTotalL1MessagePoppedBeforeBatch(dbParentBatch.BatchHeader, parentBatchCodecVersion)
if err != nil {
return err
if parentDBBatch != nil {
batch.Index = parentDBBatch.Index + 1
parentDABatch, err := codecv0.NewDABatchFromBytes(parentDBBatch.BatchHeader)
if err != nil {
return nil, err
}
batch.TotalL1MessagePoppedBefore = parentDABatch.TotalL1MessagePopped
batch.ParentBatchHash = common.HexToHash(parentDBBatch.Hash)
}
for i, chunk := range daChunks {
batch.Chunks = append(batch.Chunks, chunk)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(&batch)
if err != nil {
return nil, err
}
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
if metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
metrics.L1CommitBlobSize > maxBlobSize {
totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(&batch)
if err != nil {
return nil, err
}
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if i == 0 {
// The first chunk exceeds hard limits, which indicates a bug in the chunk-proposer, manual fix is needed.
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v",
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, p.maxChunkNumPerBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize)
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
}
log.Debug("breaking limit condition in batching",
"currentL1CommitCalldataSize", metrics.L1CommitCalldataSize,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerBatch", p.maxL1CommitCalldataSizePerBatch,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)
batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]
metrics, err := utils.CalculateBatchMetrics(&batch, codecVersion)
totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(&batch)
if err != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", err)
return nil, err
}
p.recordBatchMetrics(metrics)
return p.updateDBBatchInfo(&batch, codecVersion)
totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(&batch)
if err != nil {
return nil, err
}
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.batchChunksNum.Set(float64(batch.NumChunks()))
return &batch, nil
}
}
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}
currentTimeSec := uint64(time.Now().Unix())
if metrics.FirstBlockTimestamp+p.batchTimeoutSec < currentTimeSec || metrics.NumChunks == maxChunksThisBatch {
log.Info("reached maximum number of chunks in batch or first block timeout",
"chunk count", metrics.NumChunks,
"start block number", dbChunks[0].StartBlockNumber,
"start block timestamp", dbChunks[0].StartBlockTime,
"current time", currentTimeSec)
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec ||
batch.NumChunks() == maxChunksThisBatch {
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"start block number", dbChunks[0].StartBlockNumber,
"start block timestamp", dbChunks[0].StartBlockTime,
"current time", currentTimeSec,
)
} else {
log.Info("reached maximum number of chunks in batch",
"chunk count", batch.NumChunks(),
)
}
totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(&batch)
if err != nil {
return nil, err
}
totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(&batch)
if err != nil {
return nil, err
}
p.batchFirstBlockTimeoutReached.Inc()
p.recordBatchMetrics(metrics)
return p.updateDBBatchInfo(&batch, codecVersion)
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.batchChunksNum.Set(float64(batch.NumChunks()))
return &batch, nil
}
log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
p.batchChunksProposeNotEnoughTotal.Inc()
return nil
return nil, nil
}
func (p *BatchProposer) getDAChunks(dbChunks []*orm.Chunk) ([]*encoding.Chunk, error) {
@@ -280,10 +304,3 @@ func (p *BatchProposer) getDAChunks(dbChunks []*orm.Chunk) ([]*encoding.Chunk, e
}
return chunks, nil
}
func (p *BatchProposer) recordBatchMetrics(metrics *utils.BatchMetrics) {
p.totalL1CommitGas.Set(float64(metrics.L1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(metrics.L1CommitCalldataSize))
p.batchChunksNum.Set(float64(metrics.NumChunks))
p.totalL1CommitBlobSize.Set(float64(metrics.L1CommitBlobSize))
}
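
The batch proposer hunks above gate the DA codec on the Banach fork: when the batch's first block is at or past the fork, codecv1 (EIP-4844 blob encoding) is used, otherwise codecv0 calldata encoding. Condensed into a standalone helper, using only the types and the IsBanach check that appear in the diff (a sketch, not code from the repository):

package sketch

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/params"

	"scroll-tech/common/types/encoding"
)

// chooseCodecVersion mirrors the fork check in the proposer: blocks at or past
// the Banach fork select codecv1, earlier blocks keep codecv0.
func chooseCodecVersion(chainCfg *params.ChainConfig, firstBlockNumber uint64) encoding.CodecVersion {
	if chainCfg.IsBanach(new(big.Int).SetUint64(firstBlockNumber)) {
		return encoding.CodecV1
	}
	return encoding.CodecV0
}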


@@ -2,12 +2,9 @@ package watcher
import (
"context"
"math"
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/assert"
@@ -19,7 +16,7 @@ import (
"scroll-tech/rollup/internal/orm"
)
func testBatchProposerCodecv0Limits(t *testing.T) {
func testBatchProposerLimits(t *testing.T) {
tests := []struct {
name string
maxChunkNum uint64
@@ -107,31 +104,8 @@ func testBatchProposerCodecv0Limits(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -148,7 +122,8 @@ func testBatchProposerCodecv0Limits(t *testing.T) {
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1)
assert.NoError(t, err)
assert.Equal(t, uint64(6042), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(298), chunks[0].TotalL1CommitCalldataSize)
@@ -166,142 +141,17 @@ func testBatchProposerCodecv0Limits(t *testing.T) {
}, db, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, tt.expectedBatchesLen+1)
batches = batches[1:]
if tt.expectedBatchesLen > 0 {
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
assert.NoError(t, err)
assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
for _, chunk := range dbChunks {
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}
}
})
}
}
func testBatchProposerCodecv1Limits(t *testing.T) {
tests := []struct {
name string
maxChunkNum uint64
batchTimeoutSec uint64
forkBlock *big.Int
expectedBatchesLen int
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
}{
{
name: "NoLimitReached",
maxChunkNum: 10,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "Timeout",
maxChunkNum: 10,
batchTimeoutSec: 0,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 2,
},
{
name: "MaxChunkNumPerBatchIs1",
maxChunkNum: 1,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "ForkBlockReached",
maxChunkNum: 10,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
forkBlock: big.NewInt(3),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV1)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV1)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BanachBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(0), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(0), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(0), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(0), chunks[1].TotalL1CommitCalldataSize)
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: tt.maxChunkNum,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
BatchTimeoutSec: tt.batchTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BanachBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
}, db, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, tt.expectedBatchesLen+1)
batches = batches[1:]
assert.Len(t, batches, tt.expectedBatchesLen)
if tt.expectedBatchesLen > 0 {
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
assert.Equal(t, tt.expectedChunksInFirstBatch-1, batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, tt.expectedChunksInFirstBatch-1)
assert.NoError(t, err)
assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
for _, chunk := range dbChunks {
@@ -317,31 +167,8 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -356,7 +183,8 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1)
assert.NoError(t, err)
assert.Equal(t, uint64(6042), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(298), chunks[0].TotalL1CommitCalldataSize)
@@ -372,16 +200,16 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
}, &params.ChainConfig{}, db, nil)
bp.TryProposeBatch()
batchOrm := orm.NewBatch(db)
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 2)
batches = batches[1:]
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
assert.Len(t, batches, 1)
assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
assert.Equal(t, uint64(1), batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1)
assert.NoError(t, err)
assert.Len(t, dbChunks, 2)
for _, chunk := range dbChunks {
@@ -392,76 +220,3 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
assert.Equal(t, uint64(258383), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(6035), batches[0].TotalL1CommitCalldataSize)
}
func testBatchProposerBlobSizeLimit(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV1)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV1)
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
}, &params.ChainConfig{BanachBlock: big.NewInt(0)}, db, nil)
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for total := int64(0); total < 7; total++ {
for i := int64(0); i < 10; i++ {
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = big.NewInt(total*10 + i + 1)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}
cp.TryProposeChunk()
}
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: math.MaxUint64,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
BatchTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
}, &params.ChainConfig{BanachBlock: big.NewInt(0)}, db, nil)
for i := 0; i < 10; i++ {
bp.TryProposeBatch()
}
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
batches = batches[1:]
assert.NoError(t, err)
assert.Len(t, batches, 4)
for i, batch := range batches {
expected := uint64(2 * (i + 1))
if expected > 7 {
expected = 7
}
assert.Equal(t, expected, batch.EndChunkIndex)
}
}
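
One side of the test changes above seeds the database with a genesis fixture before any chunk or batch is proposed: a block-0 chunk and a batch with index 0 that wraps it. Condensed from the fixture code in the diff (using the codec-versioned InsertChunk/InsertBatch signatures that code relies on, with assertions replaced by returned errors):

package sketch

import (
	"context"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
	"gorm.io/gorm"

	"scroll-tech/common/types/encoding"
	"scroll-tech/rollup/internal/orm"
)

// insertGenesisFixture condenses the test setup: insert an empty block-0 chunk
// and a genesis batch that wraps it, both encoded with codecv0.
func insertGenesisFixture(db *gorm.DB) error {
	block := &encoding.Block{
		Header:         &gethTypes.Header{Number: big.NewInt(0)},
		RowConsumption: &gethTypes.RowConsumption{},
	}
	chunk := &encoding.Chunk{Blocks: []*encoding.Block{block}}
	if _, err := orm.NewChunk(db).InsertChunk(context.Background(), chunk, encoding.CodecV0); err != nil {
		return err
	}
	batch := &encoding.Batch{
		Index:                      0,
		TotalL1MessagePoppedBefore: 0,
		ParentBatchHash:            common.Hash{},
		Chunks:                     []*encoding.Chunk{chunk},
	}
	_, err := orm.NewBatch(db).InsertBatch(context.Background(), batch, encoding.CodecV0)
	return err
}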


@@ -13,14 +13,12 @@ import (
"scroll-tech/common/forks"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)
const maxBlobSize = uint64(131072)
// ChunkProposer proposes chunks based on available unchunked blocks.
type ChunkProposer struct {
ctx context.Context
@@ -38,8 +36,6 @@ type ChunkProposer struct {
gasCostIncreaseMultiplier float64
forkHeights []uint64
chainCfg *params.ChainConfig
chunkProposerCircleTotal prometheus.Counter
proposeChunkFailureTotal prometheus.Counter
proposeChunkUpdateInfoTotal prometheus.Counter
@@ -47,7 +43,6 @@ type ChunkProposer struct {
chunkTxNum prometheus.Gauge
chunkEstimateL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalL1CommitBlobSize prometheus.Gauge
maxTxConsumption prometheus.Gauge
chunkBlocksNum prometheus.Gauge
chunkFirstBlockTimeoutReached prometheus.Counter
@@ -56,7 +51,7 @@ type ChunkProposer struct {
// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
forkHeights, _, _ := forks.CollectSortedForkHeights(chainCfg)
forkHeights, _ := forks.CollectSortedForkHeights(chainCfg)
log.Debug("new chunk proposer",
"maxTxNumPerChunk", cfg.MaxTxNumPerChunk,
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
@@ -66,7 +61,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"forkHeights", forkHeights)
p := &ChunkProposer{
return &ChunkProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
@@ -79,7 +74,6 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
forkHeights: forkHeights,
chainCfg: chainCfg,
chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_chunk_circle_total",
@@ -109,10 +103,6 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
Name: "rollup_propose_chunk_total_l1_commit_call_data_size",
Help: "The total l1 commit call data size",
}),
totalL1CommitBlobSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_total_l1_commit_blob_size",
Help: "The total l1 commit blob size",
}),
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_max_tx_consumption",
Help: "The max tx consumption",
@@ -130,28 +120,32 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
Help: "Total number of chunk block propose not enough",
}),
}
return p
}
// TryProposeChunk tries to propose a new chunk.
func (p *ChunkProposer) TryProposeChunk() {
p.chunkProposerCircleTotal.Inc()
if err := p.proposeChunk(); err != nil {
proposedChunk, err := p.proposeChunk()
if err != nil {
p.proposeChunkFailureTotal.Inc()
log.Error("propose new chunk failed", "err", err)
return
}
if err := p.updateChunkInfoInDB(proposedChunk); err != nil {
p.proposeChunkUpdateInfoFailureTotal.Inc()
log.Error("update chunk info in orm failed", "err", err)
}
}
func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion encoding.CodecVersion) error {
func (p *ChunkProposer) updateChunkInfoInDB(chunk *encoding.Chunk) error {
if chunk == nil {
return nil
}
p.proposeChunkUpdateInfoTotal.Inc()
err := p.db.Transaction(func(dbTX *gorm.DB) error {
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, codecVersion, dbTX)
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX)
if err != nil {
log.Warn("ChunkProposer.InsertChunk failed", "err", err)
return err
@@ -162,18 +156,13 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
}
return nil
})
if err != nil {
p.proposeChunkUpdateInfoFailureTotal.Inc()
log.Error("update chunk info in orm failed", "err", err)
return err
}
return nil
return err
}
func (p *ChunkProposer) proposeChunk() error {
func (p *ChunkProposer) proposeChunk() (*encoding.Chunk, error) {
unchunkedBlockHeight, err := p.chunkOrm.GetUnchunkedBlockHeight(p.ctx)
if err != nil {
return err
return nil, err
}
maxBlocksThisChunk := p.maxBlockNumPerChunk
@@ -185,91 +174,157 @@ func (p *ChunkProposer) proposeChunk() error {
// select at most maxBlocksThisChunk blocks
blocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, unchunkedBlockHeight, int(maxBlocksThisChunk))
if err != nil {
return err
return nil, err
}
if len(blocks) == 0 {
return nil
}
codecVersion := encoding.CodecV0
if p.chainCfg.IsBanach(blocks[0].Header.Number) {
codecVersion = encoding.CodecV1
return nil, nil
}
var chunk encoding.Chunk
for i, block := range blocks {
chunk.Blocks = append(chunk.Blocks, block)
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
crcMax, err := chunk.CrcMax()
if err != nil {
return nil, fmt.Errorf("failed to get crc max: %w", err)
}
overEstimatedL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
if metrics.TxNum > p.maxTxNumPerChunk ||
metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
overEstimatedL1CommitGas > p.maxL1CommitGasPerChunk ||
metrics.CrcMax > p.maxRowConsumptionPerChunk ||
metrics.L1CommitBlobSize > maxBlobSize {
totalTxNum := chunk.NumTransactions()
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(&chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size: %w", err)
}
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(&chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas: %w", err)
}
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if totalTxNum > p.maxTxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
crcMax > p.maxRowConsumptionPerChunk {
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if i == 0 {
// The first block exceeds hard limits, which indicates a bug in the sequencer, manual fix is needed.
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxTxNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxRowConsumption: %v, maxBlobSize: %v",
block.Header.Number, metrics, p.maxTxNumPerChunk, p.maxL1CommitCalldataSizePerChunk, p.maxL1CommitGasPerChunk, p.maxRowConsumptionPerChunk, maxBlobSize)
if totalTxNum > p.maxTxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
block.Header.Number,
totalTxNum,
p.maxTxNumPerChunk,
)
}
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
block.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
block.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
if crcMax > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, crc max: %v, limit: %v",
block.Header.Number,
crcMax,
p.maxRowConsumptionPerChunk,
)
}
}
log.Debug("breaking limit condition in chunking",
"txNum", metrics.TxNum,
"maxTxNum", p.maxTxNumPerChunk,
"l1CommitCalldataSize", metrics.L1CommitCalldataSize,
"maxL1CommitCalldataSize", p.maxL1CommitCalldataSizePerChunk,
"overEstimatedL1CommitGas", overEstimatedL1CommitGas,
"maxL1CommitGas", p.maxL1CommitGasPerChunk,
"rowConsumption", metrics.CrcMax,
"maxRowConsumption", p.maxRowConsumptionPerChunk,
"maxBlobSize", maxBlobSize)
"totalTxNum", totalTxNum,
"maxTxNumPerChunk", p.maxTxNumPerChunk,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
"chunkRowConsumptionMax", crcMax,
"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1]
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
crcMax, err := chunk.CrcMax()
if err != nil {
return nil, fmt.Errorf("failed to get crc max: %w", err)
}
p.recordChunkMetrics(metrics)
return p.updateDBChunkInfo(&chunk, codecVersion)
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(&chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size: %w", err)
}
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(&chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas: %w", err)
}
p.chunkTxNum.Set(float64(chunk.NumTransactions()))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.maxTxConsumption.Set(float64(crcMax))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
}
}
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
}
currentTimeSec := uint64(time.Now().Unix())
if metrics.FirstBlockTimestamp+p.chunkTimeoutSec < currentTimeSec || metrics.NumBlocks == maxBlocksThisChunk {
log.Info("reached maximum number of blocks in chunk or first block timeout",
"start block number", chunk.Blocks[0].Header.Number,
"block count", len(chunk.Blocks),
"block number", chunk.Blocks[0].Header.Number,
"block timestamp", metrics.FirstBlockTimestamp,
"current time", currentTimeSec)
if chunk.Blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec ||
uint64(len(chunk.Blocks)) == maxBlocksThisChunk {
if chunk.Blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"block number", chunk.Blocks[0].Header.Number,
"block timestamp", chunk.Blocks[0].Header.Time,
"current time", currentTimeSec,
)
} else {
log.Info("reached maximum number of blocks in chunk",
"start block number", chunk.Blocks[0].Header.Number,
"block count", len(chunk.Blocks),
)
}
crcMax, err := chunk.CrcMax()
if err != nil {
return nil, fmt.Errorf("failed to get crc max: %w", err)
}
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(&chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size: %w", err)
}
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(&chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas: %w", err)
}
p.chunkFirstBlockTimeoutReached.Inc()
p.recordChunkMetrics(metrics)
return p.updateDBChunkInfo(&chunk, codecVersion)
p.chunkTxNum.Set(float64(chunk.NumTransactions()))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.maxTxConsumption.Set(float64(crcMax))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
}
log.Debug("pending blocks do not reach one of the constraints or contain a timeout block")
p.chunkBlocksProposeNotEnoughTotal.Inc()
return nil
}
func (p *ChunkProposer) recordChunkMetrics(metrics *utils.ChunkMetrics) {
p.chunkTxNum.Set(float64(metrics.TxNum))
p.maxTxConsumption.Set(float64(metrics.CrcMax))
p.chunkBlocksNum.Set(float64(metrics.NumBlocks))
p.totalL1CommitCalldataSize.Set(float64(metrics.L1CommitCalldataSize))
p.chunkEstimateL1CommitGas.Set(float64(metrics.L1CommitGas))
p.totalL1CommitBlobSize.Set(float64(metrics.L1CommitBlobSize))
return nil, nil
}
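
Both sides of the chunk proposer diff above follow the same greedy pattern: append one block at a time, re-estimate the commit costs, and drop the last block as soon as any limit is exceeded, erroring out if even the first block breaks a hard limit. A stripped-down sketch of that loop, keeping only the calldata-size and over-estimated gas checks and reusing the codecv0 estimators that appear in the diff (the real proposer also checks tx count, row consumption, blob size where applicable, and timeouts):

package sketch

import (
	"fmt"

	"scroll-tech/common/types/encoding"
	"scroll-tech/common/types/encoding/codecv0"
)

// proposeChunkGreedy appends blocks until a limit would be exceeded, then
// drops the offending block. gasMultiplier is the safety margin applied to the
// estimated commit gas before comparing against the cap.
func proposeChunkGreedy(blocks []*encoding.Block, maxCalldataSize, maxCommitGas uint64, gasMultiplier float64) (*encoding.Chunk, error) {
	var chunk encoding.Chunk
	for i, block := range blocks {
		chunk.Blocks = append(chunk.Blocks, block)
		calldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(&chunk)
		if err != nil {
			return nil, err
		}
		commitGas, err := codecv0.EstimateChunkL1CommitGas(&chunk)
		if err != nil {
			return nil, err
		}
		if calldataSize > maxCalldataSize || uint64(gasMultiplier*float64(commitGas)) > maxCommitGas {
			if i == 0 {
				// The first block already exceeds a hard limit: a sequencer bug, manual fix needed.
				return nil, fmt.Errorf("first block %v exceeds limits", block.Header.Number)
			}
			chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // drop the offending block
			break
		}
	}
	if len(chunk.Blocks) == 0 {
		return nil, nil
	}
	return &chunk, nil
}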


@@ -5,7 +5,6 @@ import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common/math"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/assert"
@@ -16,7 +15,7 @@ import (
"scroll-tech/rollup/internal/orm"
)
func testChunkProposerCodecv0Limits(t *testing.T) {
func testChunkProposerLimits(t *testing.T) {
tests := []struct {
name string
maxBlockNum uint64
@@ -199,164 +198,3 @@ func testChunkProposerCodecv0Limits(t *testing.T) {
})
}
}
func testChunkProposerCodecv1Limits(t *testing.T) {
tests := []struct {
name string
maxBlockNum uint64
maxTxNum uint64
maxRowConsumption uint64
chunkTimeoutSec uint64
forkBlock *big.Int
expectedChunksLen int
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
}{
{
name: "NoLimitReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "Timeout",
maxBlockNum: 100,
maxTxNum: 10000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 0,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 2,
},
{
name: "MaxTxNumPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 0,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxRowConsumptionPerChunkIs0",
maxBlockNum: 100,
maxTxNum: 10000,
maxRowConsumption: 0,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxBlockNumPerChunkIs1",
maxBlockNum: 1,
maxTxNum: 10000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxTxNumPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 2,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxRowConsumptionPerChunkIs1",
maxBlockNum: 10,
maxTxNum: 10000,
maxRowConsumption: 1,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "ForkBlockReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
forkBlock: big.NewInt(2),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: tt.maxBlockNum,
MaxTxNumPerChunk: tt.maxTxNum,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
ChunkTimeoutSec: tt.chunkTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{BanachBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock}, db, nil)
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, tt.expectedChunksLen)
if len(chunks) > 0 {
blockOrm := orm.NewL2Block(db)
chunkHashes, err := blockOrm.GetChunkHashes(context.Background(), tt.expectedBlocksInFirstChunk)
assert.NoError(t, err)
assert.Len(t, chunkHashes, tt.expectedBlocksInFirstChunk)
firstChunkHash := chunks[0].Hash
for _, chunkHash := range chunkHashes {
assert.Equal(t, firstChunkHash, chunkHash)
}
}
})
}
}
func testChunkProposerCodecv1BlobSizeLimit(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
block := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
for i := int64(0); i < 2000; i++ {
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = big.NewInt(i + 1)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
}, &params.ChainConfig{BanachBlock: big.NewInt(0)}, db, nil)
for i := 0; i < 10; i++ {
cp.TryProposeChunk()
}
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 4)
for i, chunk := range chunks {
expected := uint64(551 * (i + 1))
if expected > 2000 {
expected = 2000
}
assert.Equal(t, expected, chunk.EndBlockNumber)
}
}


@@ -59,7 +59,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
}
}
const blocksFetchLimit = uint64(10)
const blockTracesFetchLimit = uint64(10)
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
@@ -71,8 +71,8 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
}
// Fetch and store block traces for missing blocks
for from := heightInDB + 1; from <= blockHeight; from += blocksFetchLimit {
to := from + blocksFetchLimit - 1
for from := heightInDB + 1; from <= blockHeight; from += blockTracesFetchLimit {
to := from + blockTracesFetchLimit - 1
if to > blockHeight {
to = blockHeight
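
As a worked example of the fetch window above: with heightInDB = 15, blockHeight = 33, and a fetch limit of 10, the loop walks the windows [16, 25] and [26, 33], clamping the final window to blockHeight.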


@@ -57,9 +57,25 @@ func setupEnv(t *testing.T) (err error) {
l2Cli, err = base.L2Client()
assert.NoError(t, err)
block1 = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
block2 = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
if err != nil {
return err
}
// unmarshal blockTrace
block1 = &encoding.Block{}
if err = json.Unmarshal(templateBlockTrace1, block1); err != nil {
return err
}
templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
if err != nil {
return err
}
// unmarshal blockTrace
block2 = &encoding.Block{}
if err = json.Unmarshal(templateBlockTrace2, block2); err != nil {
return err
}
return err
}
@@ -97,22 +113,9 @@ func TestFunction(t *testing.T) {
t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)
// Run chunk proposer test cases.
t.Run("TestChunkProposerCodecv0Limits", testChunkProposerCodecv0Limits)
t.Run("TestChunkProposerCodecv1Limits", testChunkProposerCodecv1Limits)
t.Run("TestChunkProposerCodecv1BlobSizeLimit", testChunkProposerCodecv1BlobSizeLimit)
t.Run("TestChunkProposerLimits", testChunkProposerLimits)
// Run chunk proposer test cases.
t.Run("TestBatchProposerCodecv0Limits", testBatchProposerCodecv0Limits)
t.Run("TestBatchProposerCodecv1Limits", testBatchProposerCodecv1Limits)
t.Run("TestBatchProposerLimits", testBatchProposerLimits)
t.Run("TestBatchCommitGasAndCalldataSizeEstimation", testBatchCommitGasAndCalldataSizeEstimation)
t.Run("TestBatchProposerBlobSizeLimit", testBatchProposerBlobSizeLimit)
}
func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
data, err := os.ReadFile(filename)
assert.NoError(t, err)
block := &encoding.Block{}
assert.NoError(t, json.Unmarshal(data, block))
return block
}


@@ -7,15 +7,14 @@ import (
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
rutils "scroll-tech/rollup/internal/utils"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
)
// Batch represents a batch of chunks.
@@ -137,6 +136,9 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
var latestBatch Batch
if err := db.First(&latestBatch).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
}
return &latestBatch, nil
@@ -147,7 +149,12 @@ func (o *Batch) GetFirstUnbatchedChunkIndex(ctx context.Context) (uint64, error)
// Get the latest batch
latestBatch, err := o.GetLatestBatch(ctx)
if err != nil {
return 0, fmt.Errorf("Batch.GetFirstUnbatchedChunkIndex error: %w", err)
return 0, fmt.Errorf("Chunk.GetChunkedBlockHeight error: %w", err)
}
// if parentBatch==nil then err==gorm.ErrRecordNotFound,
// which means there is not batched chunk yet, thus returns 0
if latestBatch == nil {
return 0, nil
}
return latestBatch.EndChunkIndex + 1, nil
}
@@ -219,7 +226,7 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
}
// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Batch, error) {
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...*gorm.DB) (*Batch, error) {
if batch == nil {
return nil, errors.New("invalid args: batch is nil")
}
@@ -229,48 +236,94 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
return nil, errors.New("invalid args: batch contains 0 chunk")
}
metrics, err := rutils.CalculateBatchMetrics(batch, codecVersion)
daBatch, err := codecv0.NewDABatch(batch)
if err != nil {
log.Error("failed to calculate batch metrics",
log.Error("failed to create new DA batch",
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, err
}
totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(batch)
if err != nil {
log.Error("failed to estimate batch L1 commit gas",
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", len(batch.Chunks), "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(batch)
if err != nil {
log.Error("failed to estimate batch L1 commit calldata size",
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", len(batch.Chunks), "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
var startChunkIndex uint64
if batch.Index > 0 {
parentBatch, getErr := o.GetBatchByIndex(ctx, batch.Index-1)
if getErr != nil {
log.Error("failed to get batch by index", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", getErr)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", getErr)
}
parentBatch, err := o.GetLatestBatch(ctx)
if err != nil {
log.Error("failed to get latest batch", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
// if parentBatch==nil then err==gorm.ErrRecordNotFound, which means there's
// no batch record in the db, we then use default empty values for the creating batch;
// if parentBatch!=nil then err==nil, then we fill the parentBatch-related data into the creating batch
if parentBatch != nil {
startChunkIndex = parentBatch.EndChunkIndex + 1
}
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion)
startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to get batch metadata", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
log.Error("failed to create start DA chunk", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
startDAChunkHash, err := startDAChunk.Hash()
if err != nil {
log.Error("failed to get start DA chunk hash", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
totalL1MessagePoppedBeforeEndDAChunk := batch.TotalL1MessagePoppedBefore
for i := uint64(0); i < numChunks-1; i++ {
totalL1MessagePoppedBeforeEndDAChunk += batch.Chunks[i].NumL1Messages(totalL1MessagePoppedBeforeEndDAChunk)
}
endDAChunk, err := codecv0.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
log.Error("failed to create end DA chunk", "index", batch.Index, "total l1 message popped before", totalL1MessagePoppedBeforeEndDAChunk,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
endDAChunkHash, err := endDAChunk.Hash()
if err != nil {
log.Error("failed to get end DA chunk hash", "index", batch.Index, "total l1 message popped before", totalL1MessagePoppedBeforeEndDAChunk,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
newBatch := Batch{
Index: batch.Index,
Hash: batchMeta.BatchHash.Hex(),
StartChunkHash: batchMeta.StartChunkHash.Hex(),
Hash: daBatch.Hash().Hex(),
StartChunkHash: startDAChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: batchMeta.EndChunkHash.Hex(),
EndChunkHash: endDAChunkHash.Hex(),
EndChunkIndex: startChunkIndex + numChunks - 1,
StateRoot: batch.StateRoot().Hex(),
WithdrawRoot: batch.WithdrawRoot().Hex(),
ParentBatchHash: batch.ParentBatchHash.Hex(),
BatchHeader: batchMeta.BatchBytes,
BatchHeader: daBatch.Encode(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
TotalL1CommitGas: metrics.L1CommitGas,
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
TotalL1CommitGas: totalL1CommitGas,
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
}
db := o.db
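
The InsertBatch hunk above recomputes how many L1 messages were popped before the batch's final chunk by walking every chunk except the last and feeding the running total back into NumL1Messages. Pulled out into a helper (a sketch built on the same accessors the diff uses):

package sketch

import "scroll-tech/common/types/encoding"

// poppedBeforeLastChunk starts from the batch's own TotalL1MessagePoppedBefore
// and adds the L1 messages consumed by every chunk except the final one, so the
// end DA chunk can be constructed with the correct counter.
func poppedBeforeLastChunk(batch *encoding.Batch) uint64 {
	popped := batch.TotalL1MessagePoppedBefore
	for i := 0; i < len(batch.Chunks)-1; i++ {
		popped += batch.Chunks[i].NumL1Messages(popped)
	}
	return popped
}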


@@ -8,8 +8,7 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/rollup/internal/utils"
"scroll-tech/common/types/encoding/codecv0"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
@@ -111,7 +110,7 @@ func (o *Chunk) GetUnchunkedBlockHeight(ctx context.Context) (uint64, error) {
// Get the latest chunk
latestChunk, err := o.getLatestChunk(ctx)
if err != nil {
return 0, fmt.Errorf("Chunk.GetUnchunkedBlockHeight error: %w", err)
return 0, fmt.Errorf("Chunk.GetChunkedBlockHeight error: %w", err)
}
if latestChunk == nil {
// if there is no chunk, return block number 1,
@@ -141,7 +140,7 @@ func (o *Chunk) GetChunksGEIndex(ctx context.Context, index uint64, limit int) (
}
// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Chunk, error) {
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
return nil, errors.New("invalid args")
}
@@ -166,30 +165,42 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
parentChunkStateRoot = parentChunk.StateRoot
}
metrics, err := utils.CalculateChunkMetrics(chunk, codecVersion)
daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to calculate chunk metrics", "err", err)
log.Error("failed to initialize new DA chunk", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
chunkHash, err := utils.GetChunkHash(chunk, totalL1MessagePoppedBefore, codecVersion)
daChunkHash, err := daChunk.Hash()
if err != nil {
log.Error("failed to get chunk hash", "err", err)
log.Error("failed to get DA chunk hash", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(chunk)
if err != nil {
log.Error("failed to estimate chunk L1 commit calldata size", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(chunk)
if err != nil {
log.Error("failed to estimate chunk L1 commit gas", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
numBlocks := len(chunk.Blocks)
newChunk := Chunk{
Index: chunkIndex,
Hash: chunkHash.Hex(),
Hash: daChunkHash.Hex(),
StartBlockNumber: chunk.Blocks[0].Header.Number.Uint64(),
StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: chunk.L2GasUsed(),
TotalL2TxNum: chunk.NumL2Transactions(),
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
TotalL1CommitGas: metrics.L1CommitGas,
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
TotalL1CommitGas: totalL1CommitGas,
StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),

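The per-chunk derivation follows the same codecv0-only pattern after this change; a minimal sketch of the hash and estimate calls InsertChunk now performs (hypothetical helper name, same assumed imports as the batch sketch above, plus go-ethereum's common package).

// Sketch only; assumes imports "fmt", "scroll-tech/common/types/encoding",
// "scroll-tech/common/types/encoding/codecv0", and "github.com/scroll-tech/go-ethereum/common".
func chunkRowFieldsV0(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64) (common.Hash, uint64, uint64, error) {
	daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
	if err != nil {
		return common.Hash{}, 0, 0, fmt.Errorf("new DA chunk: %w", err)
	}
	hash, err := daChunk.Hash()
	if err != nil {
		return common.Hash{}, 0, 0, fmt.Errorf("DA chunk hash: %w", err)
	}
	calldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(chunk)
	if err != nil {
		return common.Hash{}, 0, 0, fmt.Errorf("estimate chunk L1 commit calldata size: %w", err)
	}
	gas, err := codecv0.EstimateChunkL1CommitGas(chunk)
	if err != nil {
		return common.Hash{}, 0, 0, fmt.Errorf("estimate chunk L1 commit gas: %w", err)
	}
	return hash, calldataSize, gas, nil
}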
View File

@@ -17,7 +17,6 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/common/types/encoding/codecv1"
"scroll-tech/database/migrate"
)
@@ -30,8 +29,12 @@ var (
batchOrm *Batch
pendingTransactionOrm *PendingTransaction
block1 *encoding.Block
block2 *encoding.Block
block1 *encoding.Block
block2 *encoding.Block
chunk1 *encoding.Chunk
chunk2 *encoding.Chunk
chunkHash1 common.Hash
chunkHash2 common.Hash
)
func TestMain(m *testing.M) {
@@ -74,6 +77,18 @@ func setupEnv(t *testing.T) {
block2 = &encoding.Block{}
err = json.Unmarshal(templateBlockTrace, block2)
assert.NoError(t, err)
chunk1 = &encoding.Chunk{Blocks: []*encoding.Block{block1}}
daChunk1, err := codecv0.NewDAChunk(chunk1, 0)
assert.NoError(t, err)
chunkHash1, err = daChunk1.Hash()
assert.NoError(t, err)
chunk2 = &encoding.Chunk{Blocks: []*encoding.Block{block2}}
daChunk2, err := codecv0.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
assert.NoError(t, err)
chunkHash2, err = daChunk2.Hash()
assert.NoError(t, err)
}
func tearDownEnv(t *testing.T) {
@@ -165,196 +180,147 @@ func TestL2BlockOrm(t *testing.T) {
}
func TestChunkOrm(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
for _, codecVersion := range codecVersions {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
var chunkHash1 common.Hash
var chunkHash2 common.Hash
if codecVersion == encoding.CodecV0 {
daChunk1, createErr := codecv0.NewDAChunk(chunk1, 0)
assert.NoError(t, createErr)
chunkHash1, err = daChunk1.Hash()
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
daChunk2, createErr := codecv0.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
assert.NoError(t, createErr)
chunkHash2, err = daChunk2.Hash()
assert.NoError(t, err)
} else {
daChunk1, createErr := codecv1.NewDAChunk(chunk1, 0)
assert.NoError(t, createErr)
chunkHash1, err = daChunk1.Hash()
assert.NoError(t, err)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
daChunk2, createErr := codecv1.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
assert.NoError(t, createErr)
chunkHash2, err = daChunk2.Hash()
assert.NoError(t, err)
}
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, "", chunks[0].BatchHash)
assert.Equal(t, "", chunks[1].BatchHash)
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash1.Hex(), types.ProvingTaskVerified)
assert.NoError(t, err)
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash2.Hex(), types.ProvingTaskAssigned)
assert.NoError(t, err)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, "", chunks[0].BatchHash)
assert.Equal(t, "", chunks[1].BatchHash)
chunks, err = chunkOrm.GetChunksInRange(context.Background(), 0, 1)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(chunks[0].ProvingStatus))
assert.Equal(t, types.ProvingTaskAssigned, types.ProvingStatus(chunks[1].ProvingStatus))
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash1.Hex(), types.ProvingTaskVerified)
assert.NoError(t, err)
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash2.Hex(), types.ProvingTaskAssigned)
assert.NoError(t, err)
chunks, err = chunkOrm.GetChunksInRange(context.Background(), 0, 1)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(chunks[0].ProvingStatus))
assert.Equal(t, types.ProvingTaskAssigned, types.ProvingStatus(chunks[1].ProvingStatus))
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash")
assert.NoError(t, err)
chunks, err = chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, "test hash", chunks[0].BatchHash)
assert.Equal(t, "", chunks[1].BatchHash)
}
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash")
assert.NoError(t, err)
chunks, err = chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, "test hash", chunks[0].BatchHash)
assert.Equal(t, "", chunks[1].BatchHash)
}
func TestBatchOrm(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
for _, codecVersion := range codecVersions {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1},
}
batch1, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)
hash1 := batch1.Hash
batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
assert.NoError(t, err)
var batchHash1 string
if codecVersion == encoding.CodecV0 {
daBatch1, createErr := codecv0.NewDABatchFromBytes(batch1.BatchHeader)
assert.NoError(t, createErr)
batchHash1 = daBatch1.Hash().Hex()
} else {
daBatch1, createErr := codecv1.NewDABatchFromBytes(batch1.BatchHeader)
assert.NoError(t, createErr)
batchHash1 = daBatch1.Hash().Hex()
}
assert.Equal(t, hash1, batchHash1)
batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk2},
}
batch2, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)
hash2 := batch2.Hash
batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err)
var batchHash2 string
if codecVersion == encoding.CodecV0 {
daBatch2, createErr := codecv0.NewDABatchFromBytes(batch2.BatchHeader)
assert.NoError(t, createErr)
batchHash2 = daBatch2.Hash().Hex()
} else {
daBatch2, createErr := codecv1.NewDABatchFromBytes(batch2.BatchHeader)
assert.NoError(t, createErr)
batchHash2 = daBatch2.Hash().Hex()
}
assert.Equal(t, hash2, batchHash2)
count, err := batchOrm.GetBatchCount(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(2), count)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitFailed)
assert.NoError(t, err)
pendingBatches, err := batchOrm.GetFailedAndPendingBatches(context.Background(), 100)
assert.NoError(t, err)
assert.Equal(t, 2, len(pendingBatches))
rollupStatus, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash1, batchHash2})
assert.NoError(t, err)
assert.Equal(t, 2, len(rollupStatus))
assert.Equal(t, types.RollupCommitFailed, rollupStatus[0])
assert.Equal(t, types.RollupPending, rollupStatus[1])
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
assert.Error(t, err)
assert.Nil(t, dbProof)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupFinalized)
assert.NoError(t, err)
err = batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(context.Background(), batchHash2, types.GasOracleImported, "oracleTxHash")
assert.NoError(t, err)
updatedBatch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus))
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus))
assert.Equal(t, "oracleTxHash", updatedBatch.OracleTxHash)
err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash2, "commitTxHash", types.RollupCommitted)
assert.NoError(t, err)
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
assert.Equal(t, types.RollupCommitted, types.RollupStatus(updatedBatch.RollupStatus))
err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed)
assert.NoError(t, err)
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash)
assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus))
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1},
}
batch1, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
hash1 := batch1.Hash
batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
assert.NoError(t, err)
daBatch1, err := codecv0.NewDABatchFromBytes(batch1.BatchHeader)
assert.NoError(t, err)
batchHash1 := daBatch1.Hash().Hex()
assert.Equal(t, hash1, batchHash1)
batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1},
}
batch2, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
hash2 := batch2.Hash
batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err)
daBatch2, err := codecv0.NewDABatchFromBytes(batch2.BatchHeader)
assert.NoError(t, err)
batchHash2 := daBatch2.Hash().Hex()
assert.Equal(t, hash2, batchHash2)
count, err := batchOrm.GetBatchCount(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(2), count)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitFailed)
assert.NoError(t, err)
pendingBatches, err := batchOrm.GetFailedAndPendingBatches(context.Background(), 100)
assert.NoError(t, err)
assert.Equal(t, 2, len(pendingBatches))
rollupStatus, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash1, batchHash2})
assert.NoError(t, err)
assert.Equal(t, 2, len(rollupStatus))
assert.Equal(t, types.RollupCommitFailed, rollupStatus[0])
assert.Equal(t, types.RollupPending, rollupStatus[1])
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
assert.Error(t, err)
assert.Nil(t, dbProof)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupFinalized)
assert.NoError(t, err)
err = batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(context.Background(), batchHash2, types.GasOracleImported, "oracleTxHash")
assert.NoError(t, err)
updatedBatch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus))
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus))
assert.Equal(t, "oracleTxHash", updatedBatch.OracleTxHash)
err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash2, "commitTxHash", types.RollupCommitted)
assert.NoError(t, err)
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
assert.Equal(t, types.RollupCommitted, types.RollupStatus(updatedBatch.RollupStatus))
err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed)
assert.NoError(t, err)
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash)
assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus))
}
func TestPendingTransactionOrm(t *testing.T) {
func TestTransactionOrm(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

View File

@@ -9,10 +9,6 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/common/types/encoding/codecv1"
bridgeAbi "scroll-tech/rollup/abi"
)
@@ -67,228 +63,3 @@ func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
// ChunkMetrics indicates the metrics for proposing a chunk.
type ChunkMetrics struct {
// common metrics
NumBlocks uint64
TxNum uint64
CrcMax uint64
FirstBlockTimestamp uint64
// codecv0 metrics, default 0 for codecv1
L1CommitCalldataSize uint64
L1CommitGas uint64
// codecv1 metrics, default 0 for codecv0
L1CommitBlobSize uint64
}
// CalculateChunkMetrics calculates chunk metrics.
func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVersion) (*ChunkMetrics, error) {
var err error
metrics := &ChunkMetrics{
TxNum: chunk.NumTransactions(),
NumBlocks: uint64(len(chunk.Blocks)),
FirstBlockTimestamp: chunk.Blocks[0].Header.Time,
}
metrics.CrcMax, err = chunk.CrcMax()
if err != nil {
return nil, fmt.Errorf("failed to get crc max: %w", err)
}
switch codecVersion {
case encoding.CodecV0:
metrics.L1CommitCalldataSize, err = codecv0.EstimateChunkL1CommitCalldataSize(chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size: %w", err)
}
metrics.L1CommitGas, err = codecv0.EstimateChunkL1CommitGas(chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas: %w", err)
}
return metrics, nil
case encoding.CodecV1:
metrics.L1CommitBlobSize, err = codecv1.EstimateChunkL1CommitBlobSize(chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit blob size: %w", err)
}
return metrics, nil
default:
return nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
}
}
// BatchMetrics indicates the metrics for proposing a batch.
type BatchMetrics struct {
// common metrics
NumChunks uint64
FirstBlockTimestamp uint64
// codecv0 metrics, default 0 for codecv1
L1CommitCalldataSize uint64
L1CommitGas uint64
// codecv1 metrics, default 0 for codecv0
L1CommitBlobSize uint64
}
// CalculateBatchMetrics calculates batch metrics.
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetrics, error) {
var err error
metrics := &BatchMetrics{
NumChunks: uint64(len(batch.Chunks)),
FirstBlockTimestamp: batch.Chunks[0].Blocks[0].Header.Time,
}
switch codecVersion {
case encoding.CodecV0:
metrics.L1CommitGas, err = codecv0.EstimateBatchL1CommitGas(batch)
if err != nil {
return nil, fmt.Errorf("failed to estimate batch L1 commit gas: %w", err)
}
metrics.L1CommitCalldataSize, err = codecv0.EstimateBatchL1CommitCalldataSize(batch)
if err != nil {
return nil, fmt.Errorf("failed to estimate batch L1 commit calldata size: %w", err)
}
return metrics, nil
case encoding.CodecV1:
metrics.L1CommitBlobSize, err = codecv1.EstimateBatchL1CommitBlobSize(batch)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit blob size: %w", err)
}
return metrics, nil
default:
return nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
}
}
// GetChunkHash retrieves the hash of a chunk.
func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, codecVersion encoding.CodecVersion) (common.Hash, error) {
switch codecVersion {
case encoding.CodecV0:
daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to create codecv0 DA chunk: %w", err)
}
chunkHash, err := daChunk.Hash()
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get codecv0 DA chunk hash: %w", err)
}
return chunkHash, nil
case encoding.CodecV1:
daChunk, err := codecv1.NewDAChunk(chunk, totalL1MessagePoppedBefore)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to create codecv1 DA chunk: %w", err)
}
chunkHash, err := daChunk.Hash()
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get codecv1 DA chunk hash: %w", err)
}
return chunkHash, nil
default:
return common.Hash{}, fmt.Errorf("unsupported codec version: %v", codecVersion)
}
}
// BatchMetadata represents the metadata of a batch.
type BatchMetadata struct {
BatchHash common.Hash
BatchBytes []byte
StartChunkHash common.Hash
EndChunkHash common.Hash
}
// GetBatchMetadata retrieves the metadata of a batch.
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetadata, error) {
numChunks := len(batch.Chunks)
totalL1MessagePoppedBeforeEndDAChunk := batch.TotalL1MessagePoppedBefore
for i := 0; i < numChunks-1; i++ {
totalL1MessagePoppedBeforeEndDAChunk += batch.Chunks[i].NumL1Messages(totalL1MessagePoppedBeforeEndDAChunk)
}
switch codecVersion {
case encoding.CodecV0:
daBatch, err := codecv0.NewDABatch(batch)
if err != nil {
return nil, fmt.Errorf("failed to create codecv0 DA batch: %w", err)
}
batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchBytes: daBatch.Encode(),
}
startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, fmt.Errorf("failed to create codecv0 start DA chunk: %w", err)
}
batchMeta.StartChunkHash, err = startDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv0 start DA chunk hash: %w", err)
}
endDAChunk, err := codecv0.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
return nil, fmt.Errorf("failed to create codecv0 end DA chunk: %w", err)
}
batchMeta.EndChunkHash, err = endDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv0 end DA chunk hash: %w", err)
}
return batchMeta, nil
case encoding.CodecV1:
daBatch, err := codecv1.NewDABatch(batch)
if err != nil {
return nil, fmt.Errorf("failed to create codecv1 DA batch: %w", err)
}
batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchBytes: daBatch.Encode(),
}
startDAChunk, err := codecv1.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, fmt.Errorf("failed to create codecv1 start DA chunk: %w", err)
}
batchMeta.StartChunkHash, err = startDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv1 start DA chunk hash: %w", err)
}
endDAChunk, err := codecv1.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
return nil, fmt.Errorf("failed to create codecv1 end DA chunk: %w", err)
}
batchMeta.EndChunkHash, err = endDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv1 end DA chunk hash: %w", err)
}
return batchMeta, nil
default:
return nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
}
}
// GetTotalL1MessagePoppedBeforeBatch retrieves the total L1 messages popped before the batch.
func GetTotalL1MessagePoppedBeforeBatch(parentBatchBytes []byte, codecVersion encoding.CodecVersion) (uint64, error) {
switch codecVersion {
case encoding.CodecV0:
parentDABatch, err := codecv0.NewDABatchFromBytes(parentBatchBytes)
if err != nil {
return 0, fmt.Errorf("failed to create parent DA batch from bytes using codecv0, err: %w", err)
}
return parentDABatch.TotalL1MessagePopped, nil
case encoding.CodecV1:
parentDABatch, err := codecv1.NewDABatchFromBytes(parentBatchBytes)
if err != nil {
return 0, fmt.Errorf("failed to create parent DA batch from bytes using codecv1, err: %w", err)
}
return parentDABatch.TotalL1MessagePopped, nil
default:
return 0, fmt.Errorf("unsupported codec version: %v", codecVersion)
}
}

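With the version-dispatch helpers above removed, reading the popped-message count from a stored parent batch header presumably falls back to the codecv0 API directly. A minimal sketch under that assumption (hypothetical helper name; NewDABatchFromBytes and TotalL1MessagePopped are the calls shown in the removed code):

// Sketch only; assumes imports "fmt" and "scroll-tech/common/types/encoding/codecv0".
func totalL1MessagePoppedBefore(parentBatchHeader []byte) (uint64, error) {
	parentDABatch, err := codecv0.NewDABatchFromBytes(parentBatchHeader)
	if err != nil {
		return 0, fmt.Errorf("decode parent batch header: %w", err)
	}
	return parentDABatch.TotalL1MessagePopped, nil
}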
View File

@@ -1,607 +1,346 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity =0.8.24;
import {BatchHeaderV0Codec} from "../../../contracts/src/libraries/codec/BatchHeaderV0Codec.sol";
import {BatchHeaderV1Codec} from "../../../contracts/src/libraries/codec/BatchHeaderV1Codec.sol";
import {ChunkCodecV0} from "../../../contracts/src/libraries/codec/ChunkCodecV0.sol";
import {ChunkCodecV1} from "../../../contracts/src/libraries/codec/ChunkCodecV1.sol";
import {BatchHeaderV0Codec} from "../../contracts/src/libraries/codec/BatchHeaderV0Codec.sol";
import {ChunkCodecV0} from "../../contracts/src/libraries/codec/ChunkCodecV0.sol";
import {IL1MessageQueue} from "../../contracts/src/L1/rollup/IL1MessageQueue.sol";
contract MockBridge {
/// @dev Thrown when committing a committed batch.
error ErrorBatchIsAlreadyCommitted();
/******************************
* Events from L1MessageQueue *
******************************/
/// @dev Thrown when finalizing a verified batch.
error ErrorBatchIsAlreadyVerified();
/// @notice Emitted when a new L1 => L2 transaction is appended to the queue.
/// @param sender The address of account who initiates the transaction.
    /// @param target The address of the account that will receive the transaction.
/// @param value The value passed with the transaction.
/// @param queueIndex The index of this transaction in the queue.
/// @param gasLimit Gas limit required to complete the message relay on L2.
/// @param data The calldata of the transaction.
event QueueTransaction(
address indexed sender,
address indexed target,
uint256 value,
uint64 queueIndex,
uint256 gasLimit,
bytes data
);
    /// @dev Thrown when committing an empty batch (batch without chunks).
error ErrorBatchIsEmpty();
/*********************************
* Events from L1ScrollMessenger *
*********************************/
    /// @dev Thrown when the call to the point evaluation precompile fails.
error ErrorCallPointEvaluationPrecompileFailed();
/// @notice Emitted when a cross domain message is sent.
/// @param sender The address of the sender who initiates the message.
/// @param target The address of target contract to call.
/// @param value The amount of value passed to the target contract.
/// @param messageNonce The nonce of the message.
/// @param gasLimit The optional gas limit passed to L1 or L2.
/// @param message The calldata passed to the target contract.
event SentMessage(
address indexed sender,
address indexed target,
uint256 value,
uint256 messageNonce,
uint256 gasLimit,
bytes message
);
/// @dev Thrown when the transaction has multiple blobs.
error ErrorFoundMultipleBlob();
/// @notice Emitted when a cross domain message is relayed successfully.
/// @param messageHash The hash of the message.
event RelayedMessage(bytes32 indexed messageHash);
/// @dev Thrown when some fields are not zero in genesis batch.
error ErrorGenesisBatchHasNonZeroField();
/***************************
* Events from ScrollChain *
***************************/
/// @dev Thrown when importing genesis batch twice.
error ErrorGenesisBatchImported();
/// @notice Emitted when a new batch is committed.
/// @param batchHash The hash of the batch.
event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash);
/// @dev Thrown when data hash in genesis batch is zero.
error ErrorGenesisDataHashIsZero();
/// @notice Emitted when a batch is finalized.
    /// @param batchHash The hash of the batch.
    /// @param stateRoot The state root on layer 2 after this batch.
    /// @param withdrawRoot The Merkle root of the withdraw trie on layer 2 after this batch.
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
/// @dev Thrown when the parent batch hash in genesis batch is zero.
error ErrorGenesisParentBatchHashIsNonZero();
/***********
* Structs *
***********/
/// @dev Thrown when the l2 transaction is incomplete.
error ErrorIncompleteL2TransactionData();
struct L2MessageProof {
// The index of the batch where the message belongs to.
uint256 batchIndex;
// Concatenation of merkle proof for withdraw merkle trie.
bytes merkleProof;
}
/// @dev Thrown when the batch hash is incorrect.
error ErrorIncorrectBatchHash();
/*************
* Variables *
*************/
/// @dev Thrown when the batch index is incorrect.
error ErrorIncorrectBatchIndex();
uint256 public messageNonce;
mapping(uint256 => bytes32) public committedBatches;
uint256 public l2BaseFee;
/// @dev Thrown when the previous state root doesn't match stored one.
error ErrorIncorrectPreviousStateRoot();
/***********************************
* Functions from L2GasPriceOracle *
***********************************/
/// @dev Thrown when the batch header version is invalid.
error ErrorInvalidBatchHeaderVersion();
function setL2BaseFee(uint256 _newL2BaseFee) external {
l2BaseFee = _newL2BaseFee;
}
    /// @dev Thrown when no blob is found in the transaction.
error ErrorNoBlobFound();
/************************************
* Functions from L1ScrollMessenger *
************************************/
    /// @dev Thrown when the number of transactions is less than the number of L1 messages in one block.
error ErrorNumTxsLessThanNumL1Msgs();
    /// @dev Thrown when the given previous state root is zero.
error ErrorPreviousStateRootIsZero();
/// @dev Thrown when the given state root is zero.
error ErrorStateRootIsZero();
/// @dev Thrown when a chunk contains too many transactions.
error ErrorTooManyTxsInOneChunk();
/// @dev Thrown when the precompile output is incorrect.
error ErrorUnexpectedPointEvaluationPrecompileOutput();
event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash);
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
struct L2MessageProof {
uint256 batchIndex;
bytes merkleProof;
}
/// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
/// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
/// point evaluation precompile
uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
uint256 public l2BaseFee;
uint256 public lastFinalizedBatchIndex;
mapping(uint256 => bytes32) public committedBatches;
mapping(uint256 => bytes32) public finalizedStateRoots;
mapping(uint256 => bytes32) public withdrawRoots;
function setL2BaseFee(uint256 _newL2BaseFee) external {
l2BaseFee = _newL2BaseFee;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Import layer 2 genesis block
function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
// check genesis batch header length
if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero();
// check whether the genesis batch is imported
if (finalizedStateRoots[0] != bytes32(0)) revert ErrorGenesisBatchImported();
(uint256 memPtr, bytes32 _batchHash, , ) = _loadBatchHeader(_batchHeader);
// check all fields except `dataHash` and `lastBlockHash` are zero
unchecked {
uint256 sum = BatchHeaderV0Codec.getVersion(memPtr) +
BatchHeaderV0Codec.getBatchIndex(memPtr) +
BatchHeaderV0Codec.getL1MessagePopped(memPtr) +
BatchHeaderV0Codec.getTotalL1MessagePopped(memPtr);
if (sum != 0) revert ErrorGenesisBatchHasNonZeroField();
}
if (BatchHeaderV0Codec.getDataHash(memPtr) == bytes32(0)) revert ErrorGenesisDataHashIsZero();
if (BatchHeaderV0Codec.getParentBatchHash(memPtr) != bytes32(0)) revert ErrorGenesisParentBatchHashIsNonZero();
committedBatches[0] = _batchHash;
finalizedStateRoots[0] = _stateRoot;
emit CommitBatch(0, _batchHash);
emit FinalizeBatch(0, _batchHash, _stateRoot, bytes32(0));
}
function commitBatch(
uint8 _version,
bytes calldata _parentBatchHeader,
bytes[] memory _chunks,
bytes calldata
) external {
// check whether the batch is empty
if (_chunks.length == 0) revert ErrorBatchIsEmpty();
(, bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _loadBatchHeader(
_parentBatchHeader
);
unchecked {
_batchIndex += 1;
}
if (committedBatches[_batchIndex] != 0) revert ErrorBatchIsAlreadyCommitted();
bytes32 _batchHash;
uint256 batchPtr;
bytes32 _dataHash;
uint256 _totalL1MessagesPoppedInBatch;
if (_version == 0) {
(_dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV0(
_totalL1MessagesPoppedOverall,
_chunks
);
assembly {
batchPtr := mload(0x40)
_totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
}
// store entries, the order matters
BatchHeaderV0Codec.storeVersion(batchPtr, 0);
BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex);
BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash);
BatchHeaderV0Codec.storeParentBatchHash(batchPtr, _parentBatchHash);
// compute batch hash
_batchHash = BatchHeaderV0Codec.computeBatchHash(
batchPtr,
BatchHeaderV0Codec.BATCH_HEADER_FIXED_LENGTH
);
} else if (_version == 1) {
bytes32 blobVersionedHash;
(blobVersionedHash, _dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1(
_totalL1MessagesPoppedOverall,
_chunks
);
assembly {
batchPtr := mload(0x40)
_totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
}
// store entries, the order matters
BatchHeaderV1Codec.storeVersion(batchPtr, 1);
BatchHeaderV1Codec.storeBatchIndex(batchPtr, _batchIndex);
BatchHeaderV1Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
BatchHeaderV1Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
BatchHeaderV1Codec.storeDataHash(batchPtr, _dataHash);
BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, blobVersionedHash);
BatchHeaderV1Codec.storeParentBatchHash(batchPtr, _parentBatchHash);
// compute batch hash
_batchHash = BatchHeaderV1Codec.computeBatchHash(
batchPtr,
BatchHeaderV1Codec.BATCH_HEADER_FIXED_LENGTH
);
} else {
revert ErrorInvalidBatchHeaderVersion();
}
committedBatches[_batchIndex] = _batchHash;
emit CommitBatch(_batchIndex, _batchHash);
}
/// @dev We keep this function to upgrade to 4844 more smoothly.
function finalizeBatchWithProof(
bytes calldata _batchHeader,
bytes32 _prevStateRoot,
bytes32 _postStateRoot,
bytes32 _withdrawRoot,
bytes calldata
) external {
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
// compute batch hash and verify
(, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
// verify previous state root.
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
// check and update lastFinalizedBatchIndex
unchecked {
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
finalizedStateRoots[_batchIndex] = _postStateRoot;
withdrawRoots[_batchIndex] = _withdrawRoot;
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
/// @dev Memory layout of `_blobDataProof`:
/// ```text
/// | z | y | kzg_commitment | kzg_proof |
/// |---------|---------|----------------|-----------|
/// | bytes32 | bytes32 | bytes48 | bytes48 |
/// ```
function finalizeBatchWithProof4844(
bytes calldata _batchHeader,
bytes32 _prevStateRoot,
bytes32 _postStateRoot,
bytes32 _withdrawRoot,
bytes calldata _blobDataProof,
bytes calldata
) external {
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
// compute batch hash and verify
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(memPtr);
// Calls the point evaluation precompile and verifies the output
{
(bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(
abi.encodePacked(_blobVersionedHash, _blobDataProof)
);
            // We verify that the point evaluation precompile call succeeded by checking that the latter 32 bytes of the
            // response are equal to BLS_MODULUS, as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
if (!success) revert ErrorCallPointEvaluationPrecompileFailed();
(, uint256 result) = abi.decode(data, (uint256, uint256));
if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput();
}
// verify previous state root.
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
// check and update lastFinalizedBatchIndex
unchecked {
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
finalizedStateRoots[_batchIndex] = _postStateRoot;
withdrawRoots[_batchIndex] = _withdrawRoot;
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
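For clarity, the precompile input assembled above is the 192-byte concatenation versionedHash || z || y || kzg_commitment || kzg_proof, and a successful call returns 64 bytes whose second word equals BLS_MODULUS, which is exactly what the contract checks. A small off-chain Go illustration of the packing (hypothetical helper, not repository code):

// Assumes import "fmt". blobDataProof is the 160-byte z || y || commitment || proof
// payload described in the memory-layout comment above.
func pointEvaluationInput(versionedHash [32]byte, blobDataProof []byte) ([]byte, error) {
	if len(blobDataProof) != 160 {
		return nil, fmt.Errorf("expected 160-byte blob data proof, got %d bytes", len(blobDataProof))
	}
	input := make([]byte, 0, 192)
	input = append(input, versionedHash[:]...)
	input = append(input, blobDataProof...)
	return input, nil
}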
/**********************
* Internal Functions *
**********************/
/// @dev Internal function to commit chunks with version 0
/// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks.
/// @param _chunks The list of chunks to commit.
/// @return _batchDataHash The computed data hash for the list of chunks.
    /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped ones.
function _commitChunksV0(
uint256 _totalL1MessagesPoppedOverall,
bytes[] memory _chunks
) internal pure returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) {
uint256 _chunksLength = _chunks.length;
// load `batchDataHashPtr` and reserve the memory region for chunk data hashes
uint256 batchDataHashPtr;
assembly {
batchDataHashPtr := mload(0x40)
mstore(0x40, add(batchDataHashPtr, mul(_chunksLength, 32)))
}
// compute the data hash for each chunk
for (uint256 i = 0; i < _chunksLength; i++) {
uint256 _totalNumL1MessagesInChunk;
bytes32 _chunkDataHash;
(_chunkDataHash, _totalNumL1MessagesInChunk) = _commitChunkV0(
_chunks[i],
_totalL1MessagesPoppedInBatch,
_totalL1MessagesPoppedOverall
);
unchecked {
_totalL1MessagesPoppedInBatch += _totalNumL1MessagesInChunk;
_totalL1MessagesPoppedOverall += _totalNumL1MessagesInChunk;
}
assembly {
mstore(batchDataHashPtr, _chunkDataHash)
batchDataHashPtr := add(batchDataHashPtr, 0x20)
}
}
assembly {
let dataLen := mul(_chunksLength, 0x20)
_batchDataHash := keccak256(sub(batchDataHashPtr, dataLen), dataLen)
}
}
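The batch data hash computed in the assembly block above is keccak256 over the concatenated per-chunk data hashes; an equivalent off-chain sketch in Go (hypothetical helper):

// Assumes imports "github.com/scroll-tech/go-ethereum/common" and
// "github.com/scroll-tech/go-ethereum/crypto".
func batchDataHash(chunkDataHashes []common.Hash) common.Hash {
	data := make([]byte, 0, len(chunkDataHashes)*32)
	for _, h := range chunkDataHashes {
		data = append(data, h[:]...)
	}
	return crypto.Keccak256Hash(data)
}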
/// @dev Internal function to commit chunks with version 1
/// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks.
/// @param _chunks The list of chunks to commit.
/// @return _blobVersionedHash The blob versioned hash for the blob carried in this transaction.
/// @return _batchDataHash The computed data hash for the list of chunks.
    /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped ones.
function _commitChunksV1(
uint256 _totalL1MessagesPoppedOverall,
bytes[] memory _chunks
)
internal
view
returns (
bytes32 _blobVersionedHash,
bytes32 _batchDataHash,
uint256 _totalL1MessagesPoppedInBatch
)
function sendMessage(
address target,
uint256 value,
bytes calldata message,
uint256 gasLimit
) external payable {
bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, target, value, messageNonce, message);
{
{
bytes32 _secondBlob;
// Get blob's versioned hash
assembly {
_blobVersionedHash := blobhash(0)
_secondBlob := blobhash(1)
}
if (_blobVersionedHash == bytes32(0)) revert ErrorNoBlobFound();
if (_secondBlob != bytes32(0)) revert ErrorFoundMultipleBlob();
}
uint256 _chunksLength = _chunks.length;
// load `batchDataHashPtr` and reserve the memory region for chunk data hashes
uint256 batchDataHashPtr;
assembly {
batchDataHashPtr := mload(0x40)
mstore(0x40, add(batchDataHashPtr, mul(_chunksLength, 32)))
}
// compute the data hash for each chunk
for (uint256 i = 0; i < _chunksLength; i++) {
uint256 _totalNumL1MessagesInChunk;
bytes32 _chunkDataHash;
(_chunkDataHash, _totalNumL1MessagesInChunk) = _commitChunkV1(
_chunks[i],
_totalL1MessagesPoppedInBatch,
_totalL1MessagesPoppedOverall
);
unchecked {
_totalL1MessagesPoppedInBatch += _totalNumL1MessagesInChunk;
_totalL1MessagesPoppedOverall += _totalNumL1MessagesInChunk;
}
assembly {
mstore(batchDataHashPtr, _chunkDataHash)
batchDataHashPtr := add(batchDataHashPtr, 0x20)
}
}
// compute the data hash for current batch
assembly {
let dataLen := mul(_chunksLength, 0x20)
_batchDataHash := keccak256(sub(batchDataHashPtr, dataLen), dataLen)
}
address _sender = applyL1ToL2Alias(address(this));
emit QueueTransaction(_sender, target, 0, uint64(messageNonce), gasLimit, _xDomainCalldata);
}
/// @dev Internal function to load batch header from calldata to memory.
/// @param _batchHeader The batch header in calldata.
/// @return batchPtr The start memory offset of loaded batch header.
/// @return _batchHash The hash of the loaded batch header.
/// @return _batchIndex The index of this batch.
    /// @return _totalL1MessagesPoppedOverall The number of L1 messages popped after this batch.
function _loadBatchHeader(bytes calldata _batchHeader)
internal
view
returns (
uint256 batchPtr,
bytes32 _batchHash,
uint256 _batchIndex,
uint256 _totalL1MessagesPoppedOverall
)
{
// load version from batch header, it is always the first byte.
uint256 version;
assembly {
version := shr(248, calldataload(_batchHeader.offset))
}
emit SentMessage(msg.sender, target, value, messageNonce, gasLimit, message);
messageNonce += 1;
}
        // version should always be 0 or 1 in the current code
uint256 _length;
if (version == 0) {
(batchPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader);
_batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length);
_batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr);
} else if (version == 1) {
(batchPtr, _length) = BatchHeaderV1Codec.loadAndValidate(_batchHeader);
_batchHash = BatchHeaderV1Codec.computeBatchHash(batchPtr, _length);
_batchIndex = BatchHeaderV1Codec.getBatchIndex(batchPtr);
} else {
revert ErrorInvalidBatchHeaderVersion();
}
// only check when genesis is imported
if (committedBatches[_batchIndex] != _batchHash && finalizedStateRoots[0] != bytes32(0)) {
revert ErrorIncorrectBatchHash();
}
_totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr);
function relayMessageWithProof(
address _from,
address _to,
uint256 _value,
uint256 _nonce,
bytes memory _message,
L2MessageProof memory
) external {
bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _nonce, _message);
bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);
emit RelayedMessage(_xDomainCalldataHash);
}
/******************************
* Functions from ScrollChain *
******************************/
/// @notice Import layer 2 genesis block
function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
}
function commitBatch(
uint8 /*version*/,
bytes calldata _parentBatchHeader,
bytes[] memory chunks,
bytes calldata /*skippedL1MessageBitmap*/
) external {
// check whether the batch is empty
uint256 _chunksLength = chunks.length;
require(_chunksLength > 0, "batch is empty");
// decode batch index
uint256 headerLength = _parentBatchHeader.length;
uint256 parentBatchPtr;
uint256 parentBatchIndex;
assembly {
parentBatchPtr := mload(0x40)
calldatacopy(parentBatchPtr, _parentBatchHeader.offset, headerLength)
mstore(0x40, add(parentBatchPtr, headerLength))
parentBatchIndex := shr(192, mload(add(parentBatchPtr, 1)))
}
/// @dev Internal function to commit a chunk with version 0.
/// @param _chunk The encoded chunk to commit.
/// @param _totalL1MessagesPoppedInBatch The total number of L1 messages popped in the current batch before this chunk.
/// @param _totalL1MessagesPoppedOverall The total number of L1 messages popped in all batches including the current batch, before this chunk.
/// @return _dataHash The computed data hash for this chunk.
    /// @return _totalNumL1MessagesInChunk The total number of L1 messages popped in the current chunk.
function _commitChunkV0(
bytes memory _chunk,
uint256 _totalL1MessagesPoppedInBatch,
uint256 _totalL1MessagesPoppedOverall
) internal pure returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) {
uint256 chunkPtr;
uint256 startDataPtr;
uint256 dataPtr;
assembly {
dataPtr := mload(0x40)
startDataPtr := dataPtr
chunkPtr := add(_chunk, 0x20) // skip chunkLength
}
uint256 _numBlocks = ChunkCodecV0.validateChunkLength(chunkPtr, _chunk.length);
// concatenate block contexts, use scope to avoid stack too deep
{
uint256 _totalTransactionsInChunk;
for (uint256 i = 0; i < _numBlocks; i++) {
dataPtr = ChunkCodecV0.copyBlockContext(chunkPtr, dataPtr, i);
uint256 blockPtr = chunkPtr + 1 + i * ChunkCodecV0.BLOCK_CONTEXT_LENGTH;
uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(blockPtr);
unchecked {
_totalTransactionsInChunk += _numTransactionsInBlock;
}
}
assembly {
mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes
}
}
// concatenate tx hashes
uint256 l2TxPtr = ChunkCodecV0.getL2TxPtr(chunkPtr, _numBlocks);
chunkPtr += 1;
while (_numBlocks > 0) {
// concatenate l1 message hashes
uint256 _numL1MessagesInBlock = ChunkCodecV0.getNumL1Messages(chunkPtr);
// concatenate l2 transaction hashes
uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(chunkPtr);
if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
for (uint256 j = _numL1MessagesInBlock; j < _numTransactionsInBlock; j++) {
bytes32 txHash;
(txHash, l2TxPtr) = ChunkCodecV0.loadL2TxHash(l2TxPtr);
assembly {
mstore(dataPtr, txHash)
dataPtr := add(dataPtr, 0x20)
}
}
unchecked {
_totalNumL1MessagesInChunk += _numL1MessagesInBlock;
_totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
_totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
_numBlocks -= 1;
chunkPtr += ChunkCodecV0.BLOCK_CONTEXT_LENGTH;
}
}
assembly {
chunkPtr := add(_chunk, 0x20)
}
// check chunk has correct length
if (l2TxPtr - chunkPtr != _chunk.length) revert ErrorIncompleteL2TransactionData();
// compute data hash and store to memory
assembly {
_dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
}
uint256 dataPtr;
assembly {
dataPtr := mload(0x40)
mstore(0x40, add(dataPtr, mul(_chunksLength, 32)))
}
/// @dev Internal function to commit a chunk with version 1.
/// @param _chunk The encoded chunk to commit.
    /// @param _totalL1MessagesPoppedInBatch The total number of L1 messages popped in the current batch.
    /// @param _totalL1MessagesPoppedOverall The total number of L1 messages popped in all batches, including the current batch.
    /// @return _dataHash The computed data hash for this chunk.
    /// @return _totalNumL1MessagesInChunk The total number of L1 messages popped in the current chunk.
function _commitChunkV1(
bytes memory _chunk,
uint256 _totalL1MessagesPoppedInBatch,
uint256 _totalL1MessagesPoppedOverall
) internal pure returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) {
uint256 chunkPtr;
uint256 startDataPtr;
uint256 dataPtr;
for (uint256 i = 0; i < _chunksLength; i++) {
_commitChunk(dataPtr, chunks[i]);
assembly {
dataPtr := mload(0x40)
startDataPtr := dataPtr
chunkPtr := add(_chunk, 0x20) // skip chunkLength
}
uint256 _numBlocks = ChunkCodecV1.validateChunkLength(chunkPtr, _chunk.length);
// concatenate block contexts, use scope to avoid stack too deep
for (uint256 i = 0; i < _numBlocks; i++) {
dataPtr = ChunkCodecV1.copyBlockContext(chunkPtr, dataPtr, i);
uint256 blockPtr = chunkPtr + 1 + i * ChunkCodecV1.BLOCK_CONTEXT_LENGTH;
uint256 _numL1MessagesInBlock = ChunkCodecV1.getNumL1Messages(blockPtr);
unchecked {
_totalNumL1MessagesInChunk += _numL1MessagesInBlock;
}
}
assembly {
mstore(0x40, add(dataPtr, mul(_totalNumL1MessagesInChunk, 0x20))) // reserve memory for l1 message hashes
chunkPtr := add(chunkPtr, 1)
}
// the number of actual transactions in one chunk: non-skipped l1 messages + l2 txs
uint256 _totalTransactionsInChunk;
// concatenate tx hashes
while (_numBlocks > 0) {
// concatenate l1 message hashes
uint256 _numL1MessagesInBlock = ChunkCodecV1.getNumL1Messages(chunkPtr);
uint256 startPtr = dataPtr;
uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr);
if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
unchecked {
_totalTransactionsInChunk += dataPtr - startPtr; // number of non-skipped l1 messages
_totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs
_totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
_totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
_numBlocks -= 1;
chunkPtr += ChunkCodecV1.BLOCK_CONTEXT_LENGTH;
}
}
// compute data hash and store to memory
assembly {
_dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
}
unchecked {
dataPtr += 32;
}
}
function verifyProof(
bytes32 claim,
bytes memory commitment,
bytes memory proof
) external view {
require(commitment.length == 48, "Commitment must be 48 bytes");
require(proof.length == 48, "Proof must be 48 bytes");
bytes32 versionedHash = blobhash(0);
// Compute random challenge point.
uint256 point = uint256(keccak256(abi.encodePacked(versionedHash))) % BLS_MODULUS;
bytes memory pointEvaluationCalldata = abi.encodePacked(
versionedHash,
point,
claim,
commitment,
proof
);
(bool success,) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(pointEvaluationCalldata);
if (!success) {
revert("Proof verification failed");
}
bytes32 _dataHash;
assembly {
let dataLen := mul(_chunksLength, 0x20)
_dataHash := keccak256(sub(dataPtr, dataLen), dataLen)
}
bytes memory paddedData = new bytes(89);
assembly {
mstore(add(paddedData, 57), _dataHash)
}
uint256 batchPtr;
assembly {
batchPtr := add(paddedData, 32)
}
bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, 89);
committedBatches[0] = _batchHash;
emit CommitBatch(parentBatchIndex + 1, _batchHash);
}
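The 89-byte buffer hashed above corresponds to the fixed-length batch header V0 layout implied by the codec calls in this file: version (1 byte), batchIndex (8), l1MessagePopped (8), totalL1MessagePopped (8), dataHash (32), parentBatchHash (32). A sketch of that encoding as an off-chain illustration (assumed layout, hypothetical helper):

// Assumes imports "encoding/binary" and "github.com/scroll-tech/go-ethereum/common".
func encodeBatchHeaderV0(version uint8, batchIndex, l1MessagePopped, totalL1MessagePopped uint64, dataHash, parentBatchHash common.Hash) []byte {
	header := make([]byte, 89)
	header[0] = version
	binary.BigEndian.PutUint64(header[1:9], batchIndex)
	binary.BigEndian.PutUint64(header[9:17], l1MessagePopped)
	binary.BigEndian.PutUint64(header[17:25], totalL1MessagePopped)
	copy(header[25:57], dataHash[:])
	copy(header[57:89], parentBatchHash[:])
	return header
}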
function finalizeBatchWithProof(
bytes calldata batchHeader,
bytes32 /*prevStateRoot*/,
bytes32 postStateRoot,
bytes32 withdrawRoot,
bytes calldata /*aggrProof*/
) external {
// decode batch index
uint256 headerLength = batchHeader.length;
uint256 batchPtr;
uint256 batchIndex;
assembly {
batchPtr := mload(0x40)
calldatacopy(batchPtr, batchHeader.offset, headerLength)
mstore(0x40, add(batchPtr, headerLength))
batchIndex := shr(192, mload(add(batchPtr, 1)))
}
bytes32 _batchHash = committedBatches[0];
emit FinalizeBatch(batchIndex, _batchHash, postStateRoot, withdrawRoot);
}
/**********************
* Internal Functions *
**********************/
/// @dev Internal function to generate the correct cross domain calldata for a message.
/// @param _sender Message sender address.
/// @param _target Target contract address.
/// @param _value The amount of ETH pass to the target.
/// @param _messageNonce Nonce for the provided message.
/// @param _message Message to send to the target.
/// @return ABI encoded cross domain calldata.
function _encodeXDomainCalldata(
address _sender,
address _target,
uint256 _value,
uint256 _messageNonce,
bytes memory _message
) internal pure returns (bytes memory) {
return
abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
_sender,
_target,
_value,
_messageNonce,
_message
);
}
    /// @notice Utility function that converts the address that submitted a transaction on L1
    /// into the msg.sender viewed on L2.
    /// @param l1Address The L1 address that triggered the transaction to L2.
    /// @return l2Address The L2 address as viewed in msg.sender.
function applyL1ToL2Alias(address l1Address) internal pure returns (address l2Address) {
uint160 offset = uint160(0x1111000000000000000000000000000000001111);
unchecked {
l2Address = address(uint160(l1Address) + offset);
}
}
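The same aliasing can be reproduced off-chain: add the fixed offset to the 160-bit L1 address and wrap modulo 2^160, mirroring the unchecked uint160 addition above. A Go sketch (hypothetical helper):

// Assumes imports "math/big" and "github.com/scroll-tech/go-ethereum/common".
func applyL1ToL2AliasOffChain(l1Address common.Address) common.Address {
	offset, _ := new(big.Int).SetString("1111000000000000000000000000000000001111", 16)
	aliased := new(big.Int).Add(new(big.Int).SetBytes(l1Address.Bytes()), offset)
	aliased.Mod(aliased, new(big.Int).Lsh(big.NewInt(1), 160)) // wrap to 160 bits
	return common.BytesToAddress(aliased.Bytes())
}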
function _commitChunk(
uint256 memPtr,
bytes memory _chunk
) internal pure {
uint256 chunkPtr;
uint256 startDataPtr;
uint256 dataPtr;
uint256 blockPtr;
assembly {
dataPtr := mload(0x40)
startDataPtr := dataPtr
chunkPtr := add(_chunk, 0x20) // skip chunkLength
blockPtr := add(chunkPtr, 1) // skip numBlocks
}
uint256 _numBlocks = ChunkCodecV0.validateChunkLength(chunkPtr, _chunk.length);
// concatenate block contexts
uint256 _totalTransactionsInChunk;
for (uint256 i = 0; i < _numBlocks; i++) {
dataPtr = ChunkCodecV0.copyBlockContext(chunkPtr, dataPtr, i);
uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(blockPtr);
unchecked {
_totalTransactionsInChunk += _numTransactionsInBlock;
blockPtr += ChunkCodecV0.BLOCK_CONTEXT_LENGTH;
}
}
assembly {
mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes
blockPtr := add(chunkPtr, 1) // reset block ptr
}
// concatenate tx hashes
uint256 l2TxPtr = ChunkCodecV0.getL2TxPtr(chunkPtr, _numBlocks);
while (_numBlocks > 0) {
// concatenate l2 transaction hashes
uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(blockPtr);
for (uint256 j = 0; j < _numTransactionsInBlock; j++) {
bytes32 txHash;
(txHash, l2TxPtr) = ChunkCodecV0.loadL2TxHash(l2TxPtr);
assembly {
mstore(dataPtr, txHash)
dataPtr := add(dataPtr, 0x20)
}
}
unchecked {
_numBlocks -= 1;
blockPtr += ChunkCodecV0.BLOCK_CONTEXT_LENGTH;
}
}
// check chunk has correct length
require(l2TxPtr - chunkPtr == _chunk.length, "incomplete l2 transaction data");
// compute data hash and store to memory
assembly {
let dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
mstore(memPtr, dataHash)
}
}
address private constant POINT_EVALUATION_PRECOMPILE_ADDRESS = 0x000000000000000000000000000000000000000A;
uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
function verifyProof(
bytes32 claim,
bytes memory commitment,
bytes memory proof
) external view {
require(commitment.length == 48, "Commitment must be 48 bytes");
require(proof.length == 48, "Proof must be 48 bytes");
bytes32 versionedHash = blobhash(0);
// Compute random challenge point.
uint256 point = uint256(keccak256(abi.encodePacked(versionedHash))) % BLS_MODULUS;
bytes memory pointEvaluationCalldata = abi.encodePacked(
versionedHash,
point,
claim,
commitment,
proof
);
(bool success,) = POINT_EVALUATION_PRECOMPILE_ADDRESS.staticcall(pointEvaluationCalldata);
if (!success) {
revert("Proof verification failed");
}
}
}

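verifyProof above derives its challenge point deterministically from the blob's versioned hash, as point = keccak256(versionedHash) mod BLS_MODULUS; an equivalent off-chain computation in Go (hypothetical helper):

// Assumes imports "math/big", "github.com/scroll-tech/go-ethereum/common",
// and "github.com/scroll-tech/go-ethereum/crypto".
func challengePoint(versionedHash common.Hash) *big.Int {
	blsModulus, _ := new(big.Int).SetString(
		"52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)
	return new(big.Int).Mod(new(big.Int).SetBytes(crypto.Keccak256(versionedHash[:])), blsModulus)
}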
View File

@@ -135,7 +135,7 @@ func prepareContracts(t *testing.T) {
nonce, err := l1Client.PendingNonceAt(context.Background(), l1Auth.From)
assert.NoError(t, err)
scrollChainAddress := crypto.CreateAddress(l1Auth.From, nonce)
tx := types.NewContractCreation(nonce, big.NewInt(0), 10000000, big.NewInt(1000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
tx := types.NewContractCreation(nonce, big.NewInt(0), 1000000, big.NewInt(1000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
signedTx, err := l1Auth.Signer(l1Auth.From, tx)
assert.NoError(t, err)
err = l1Client.SendTransaction(context.Background(), signedTx)
@@ -174,8 +174,6 @@ func TestFunction(t *testing.T) {
// l1 rollup and watch rollup events
t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch)
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
t.Run("TestCommitBatchAndFinalizeBatch4844", testCommitBatchAndFinalizeBatch4844)
t.Run("TestCommitBatchAndFinalizeBatchBeforeAndPost4844", testCommitBatchAndFinalizeBatchBeforeAndPost4844)
// l1/l2 gas oracle
t.Run("TestImportL1GasPrice", testImportL1GasPrice)

View File

@@ -7,7 +7,6 @@ import (
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
@@ -70,7 +69,7 @@ func testImportL2GasPrice(t *testing.T) {
prepareContracts(t)
l2Cfg := rollupApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, false, relayer.ServiceTypeL2GasOracle, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, relayer.ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -98,7 +97,7 @@ func testImportL2GasPrice(t *testing.T) {
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
_, err = batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)
// check db status

View File

@@ -29,7 +29,7 @@ func testCommitAndFinalizeGenesisBatch(t *testing.T) {
prepareContracts(t)
l2Cfg := rollupApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, relayer.ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, l2Relayer)
defer l2Relayer.StopSenders()
@@ -59,7 +59,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create L2Relayer
l2Cfg := rollupApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, relayer.ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, relayer.ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -69,18 +69,17 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// add some blocks to db
var blocks []*encoding.Block
for i := int64(0); i < 10; i++ {
for i := 0; i < 10; i++ {
header := gethTypes.Header{
Number: big.NewInt(i + 1),
Number: big.NewInt(int64(i)),
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
Root: common.HexToHash("0x1"),
}
blocks = append(blocks, &encoding.Block{
Header: &header,
Transactions: nil,
WithdrawRoot: common.HexToHash("0x2"),
WithdrawRoot: common.Hash{},
RowConsumption: &gethTypes.RowConsumption{},
})
}
@@ -97,14 +96,6 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, &params.ChainConfig{}, db, nil)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
}, &params.ChainConfig{}, db, nil)
cp.TryProposeChunk()
batchOrm := orm.NewBatch(db)
@@ -116,6 +107,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NoError(t, err)
assert.Len(t, chunks, 1)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
}, &params.ChainConfig{}, db, nil)
bp.TryProposeBatch()
l2Relayer.ProcessPendingBatches()
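The reordering above constructs the batch proposer only after the chunk has been proposed and read back from the database. A commented sketch of the same batch-proposal step, with the likely role of each limit spelled out; the value comments are interpretations, not documentation from the source:

// Sketch: batch proposer limits as configured in this test.
bpCfg := &config.BatchProposerConfig{
	MaxChunkNumPerBatch:             10,          // at most 10 chunks per proposed batch
	MaxL1CommitGasPerBatch:          50000000000, // cap on estimated L1 commit gas
	MaxL1CommitCalldataSizePerBatch: 1000000,     // cap on L1 commit calldata bytes
	BatchTimeoutSec:                 300,         // propose anyway after 300s, even under the caps
}
bp := watcher.NewBatchProposer(context.Background(), bpCfg, &params.ChainConfig{}, db, nil)
bp.TryProposeBatch()              // group pending chunks into a batch
l2Relayer.ProcessPendingBatches() // submit the batch commitment transaction to L1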
@@ -178,256 +175,3 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
}, 30*time.Second, time.Second)
}
func testCommitBatchAndFinalizeBatch4844(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
prepareContracts(t)
// Create L2Relayer
l2Cfg := rollupApp.Config.L2Config
chainConfig := &params.ChainConfig{BanachBlock: big.NewInt(0)}
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
// Create L1Watcher
l1Cfg := rollupApp.Config.L1Config
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
// add some blocks to db
var blocks []*encoding.Block
for i := int64(0); i < 10; i++ {
header := gethTypes.Header{
Number: big.NewInt(i + 1),
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
Root: common.HexToHash("0x1"),
}
blocks = append(blocks, &encoding.Block{
Header: &header,
Transactions: nil,
WithdrawRoot: common.HexToHash("0x2"),
RowConsumption: &gethTypes.RowConsumption{},
})
}
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks)
assert.NoError(t, err)
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, chainConfig, db, nil)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
BatchTimeoutSec: 300,
}, chainConfig, db, nil)
cp.TryProposeChunk()
batchOrm := orm.NewBatch(db)
unbatchedChunkIndex, err := batchOrm.GetFirstUnbatchedChunkIndex(context.Background())
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), unbatchedChunkIndex, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 1)
bp.TryProposeBatch()
l2Relayer.ProcessPendingBatches()
batch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)
// fetch rollup events
assert.Eventually(t, func() bool {
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
var statuses []types.RollupStatus
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
}, 30*time.Second, time.Second)
assert.Eventually(t, func() bool {
batch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.NotEmpty(t, batch.CommitTxHash)
var receipt *gethTypes.Receipt
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
}, 30*time.Second, time.Second)
// add dummy proof
proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
// fetch rollup events
assert.Eventually(t, func() bool {
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
var statuses []types.RollupStatus
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
}, 30*time.Second, time.Second)
assert.Eventually(t, func() bool {
batch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.NotEmpty(t, batch.FinalizeTxHash)
var receipt *gethTypes.Receipt
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
}, 30*time.Second, time.Second)
}
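Finalization in the removed test does not involve a real prover: a 32-byte dummy proof is written straight to the batch row and the proving status is forced to verified, after which ProcessCommittedBatches can move the batch to finalizing. The same stub pattern, condensed; the meaning of the trailing 100 argument is an assumption:

// Sketch: stub out proving so the relayer proceeds to finalization.
proof := &message.BatchProof{Proof: make([]byte, 32)} // placeholder proof bytes
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100) // 100: proving time, presumably seconds
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
l2Relayer.ProcessCommittedBatches() // sends the finalize transaction; status becomes RollupFinalizing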
func testCommitBatchAndFinalizeBatchBeforeAndPost4844(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
prepareContracts(t)
// Create L2Relayer
l2Cfg := rollupApp.Config.L2Config
chainConfig := &params.ChainConfig{BanachBlock: big.NewInt(5)}
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
// Create L1Watcher
l1Cfg := rollupApp.Config.L1Config
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
// add some blocks to db
var blocks []*encoding.Block
for i := int64(0); i < 10; i++ {
header := gethTypes.Header{
Number: big.NewInt(i + 1),
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
Root: common.HexToHash("0x1"),
}
blocks = append(blocks, &encoding.Block{
Header: &header,
Transactions: nil,
WithdrawRoot: common.HexToHash("0x2"),
RowConsumption: &gethTypes.RowConsumption{},
})
}
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks)
assert.NoError(t, err)
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, chainConfig, db, nil)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
}, chainConfig, db, nil)
cp.TryProposeChunk()
cp.TryProposeChunk()
bp.TryProposeBatch()
bp.TryProposeBatch()
for i := uint64(0); i < 2; i++ {
l2Relayer.ProcessPendingBatches()
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.GetBatchByIndex(context.Background(), i+1)
assert.NoError(t, err)
assert.NotNil(t, batch)
// fetch rollup events
assert.Eventually(t, func() bool {
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
var statuses []types.RollupStatus
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
}, 30*time.Second, time.Second)
assert.Eventually(t, func() bool {
batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1)
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.NotEmpty(t, batch.CommitTxHash)
var receipt *gethTypes.Receipt
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
}, 30*time.Second, time.Second)
// add dummy proof
proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
// fetch rollup events
assert.Eventually(t, func() bool {
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
var statuses []types.RollupStatus
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
}, 30*time.Second, time.Second)
assert.Eventually(t, func() bool {
batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1)
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.NotEmpty(t, batch.FinalizeTxHash)
var receipt *gethTypes.Receipt
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
}, 30*time.Second, time.Second)
}
}
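Both removed tests rely on the same polling idiom: drive the relayer, then repeatedly fetch L1 contract events and read the batch status back from the ORM until it reaches the expected state (committed, then finalizing, then finalized) or the 30-second window runs out. The idiom, condensed to the calls used above:

// Sketch: poll until the batch reaches the expected rollup status.
assert.Eventually(t, func() bool {
	if err := l1Watcher.FetchContractEvent(); err != nil {
		return false
	}
	statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
	return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalized
}, 30*time.Second, time.Second)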

File diff suppressed because one or more lines are too long

View File

@@ -112,7 +112,7 @@ func TestCoordinatorProverInteraction(t *testing.T) {
t.Log(version.Version)
base.Timestamp = time.Now().Nanosecond()
coordinatorApp := capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json", "./genesis.json")
coordinatorApp := capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json")
chunkProverApp := rapp.NewProverApp(base, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
batchProverApp := rapp.NewProverApp(base, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
defer coordinatorApp.Free()
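This hunk and the one that follows add a genesis file path to the coordinator app constructor, presumably so the coordinator can derive its chain configuration from it; that reading is an assumption, not stated in the diff. The resulting setup, restated with the calls already shown:

// Sketch: coordinator app plus chunk/batch prover mock apps, wired as above.
coordinatorApp := capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json", "./genesis.json")
chunkProverApp := rapp.NewProverApp(base, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
batchProverApp := rapp.NewProverApp(base, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
defer coordinatorApp.Free()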
@@ -147,7 +147,7 @@ func TestProverReLogin(t *testing.T) {
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))
base.Timestamp = time.Now().Nanosecond()
coordinatorApp := capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json", "./genesis.json")
coordinatorApp := capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json")
chunkProverApp := rapp.NewProverApp(base, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
batchProverApp := rapp.NewProverApp(base, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
defer coordinatorApp.Free()