Compare commits

..

5 Commits

Author         SHA1        Message                                                                   Date
HAOYUatHZ      20e13445f8  refactor: rename roller to prover (#684)                                  2023-07-28 22:59:27 +08:00
ChuhanJin      e780994146  fix(bridge-history-api): fix error not return 0 (#685)                    2023-07-28 22:18:44 +08:00
ChuhanJin      f9a0de0f16  feat(bridge-history-api): switch to v2 decoding (#683)                    2023-07-28 13:55:55 +02:00
                           Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
colin          3eb62880fe  feat(orm): add more info for agg proving (#678)                           2023-07-28 19:27:15 +08:00
Haichen Shen   e2612a3d88  fix(contracts): Fix typos in the contract function name and docs (#679)  2023-07-27 09:58:07 -07:00
121 changed files with 1337 additions and 1376 deletions

View File

@@ -1,4 +1,4 @@
name: Roller
name: Prover
on:
push:
@@ -8,8 +8,8 @@ on:
- develop
- alpha
paths:
- 'roller/**'
- '.github/workflows/roller.yml'
- 'prover/**'
- '.github/workflows/prover.yml'
pull_request:
types:
- opened
@@ -17,12 +17,12 @@ on:
- synchronize
- ready_for_review
paths:
- 'roller/**'
- '.github/workflows/roller.yml'
- 'prover/**'
- '.github/workflows/prover.yml'
defaults:
run:
working-directory: 'roller'
working-directory: 'prover'
jobs:
test:
@@ -43,7 +43,7 @@ jobs:
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: roller
flags: prover
compile:
if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
runs-on: ubuntu-latest
@@ -65,7 +65,7 @@ jobs:
workspaces: "common/libzkp/impl -> target"
- name: Test
run: |
make roller
make prover
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
@@ -92,7 +92,7 @@ jobs:
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/roller/ -w .
- run: goimports -local scroll-tech/prover/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy

View File

@@ -10,7 +10,7 @@ lint: ## The code's format and security checks.
make -C common lint
make -C coordinator lint
make -C database lint
make -C roller lint
make -C prover lint
make -C bridge-history-api lint
update: ## update dependencies
@@ -20,13 +20,13 @@ update: ## update dependencies
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/roller/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
goimports -local $(PWD)/database/ -w .
goimports -local $(PWD)/roller/ -w .
goimports -local $(PWD)/prover/ -w .
dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/

View File

@@ -39,6 +39,9 @@ func (r *RollupBatch) GetLatestRollupBatchProcessedHeight(ctx context.Context) (
var result RollupBatch
err := r.db.WithContext(ctx).Unscoped().Select("commit_height").Order("id desc").First(&result).Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("RollupBatch.GetLatestRollupBatchProcessedHeight error: %w", err)
}
return result.CommitHeight, nil
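
Editor's note: a minimal standalone sketch of the pattern this fix relies on (hypothetical table name, simplified model). Querying an empty table with `First` returns `gorm.ErrRecordNotFound`, which is now mapped to height 0 instead of being surfaced as an error.

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"gorm.io/gorm"
)

// latestProcessedHeight mirrors the handling in the diff above: an empty table
// is not an error condition, it simply means no batch has been processed yet.
func latestProcessedHeight(ctx context.Context, db *gorm.DB) (uint64, error) {
	var result struct {
		CommitHeight uint64
	}
	err := db.WithContext(ctx).
		Table("rollup_batch"). // hypothetical table name, for illustration only
		Select("commit_height").
		Order("id desc").
		First(&result).Error
	if errors.Is(err, gorm.ErrRecordNotFound) {
		return 0, nil // no rows yet: report height 0 rather than an error
	}
	if err != nil {
		return 0, fmt.Errorf("query latest commit_height: %w", err)
	}
	return result.CommitHeight, nil
}
```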

View File

@@ -346,7 +346,6 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
// ParseBatchInfoFromScrollChain parses ScrollChain events
func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
var rollupBatches []*orm.RollupBatch
cache := make(map[string]CachedParsedTxCalldata)
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1CommitBatchEventSignature:
@@ -356,42 +355,22 @@ func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client
log.Warn("Failed to unpack CommitBatch event", "err", err)
return rollupBatches, err
}
if _, ok := cache[vlog.TxHash.Hex()]; ok {
c := cache[vlog.TxHash.Hex()]
c.CallDataIndex++
rollupBatches = append(rollupBatches, &orm.RollupBatch{
CommitHeight: vlog.BlockNumber,
BatchIndex: c.BatchIndices[c.CallDataIndex],
BatchHash: event.BatchHash.Hex(),
StartBlockNumber: c.StartBlocks[c.CallDataIndex],
EndBlockNumber: c.EndBlocks[c.CallDataIndex],
})
cache[vlog.TxHash.Hex()] = c
continue
}
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
if err != nil || isPending {
log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
return rollupBatches, err
}
indices, startBlocks, endBlocks, err := GetBatchRangeFromCalldataV1(commitTx.Data())
index, startBlock, endBlock, err := GetBatchRangeFromCalldataV2(commitTx.Data())
if err != nil {
log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().Hex(), "height", vlog.BlockNumber)
return rollupBatches, err
}
cache[vlog.TxHash.Hex()] = CachedParsedTxCalldata{
CallDataIndex: 0,
BatchIndices: indices,
StartBlocks: startBlocks,
EndBlocks: endBlocks,
}
rollupBatches = append(rollupBatches, &orm.RollupBatch{
CommitHeight: vlog.BlockNumber,
BatchIndex: indices[0],
BatchIndex: index,
BatchHash: event.BatchHash.Hex(),
StartBlockNumber: startBlocks[0],
EndBlockNumber: endBlocks[0],
StartBlockNumber: startBlock,
EndBlockNumber: endBlock,
})
default:

View File

@@ -109,7 +109,7 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
for _, privStr := range jsonConfig.RollupSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
return fmt.Errorf("incorrect roller_private_key format, err: %v", err)
return fmt.Errorf("incorrect prover_private_key format, err: %v", err)
}
r.RollupSenderPrivateKeys = append(r.RollupSenderPrivateKeys, priv)
}
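
Editor's note: for context on the decoding step whose error message changes here, a small standalone sketch (dummy key, for illustration only) of the same `crypto.ToECDSA(common.FromHex(...))` call and the sender address it yields.

```go
package main

import (
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// Dummy 32-byte hex private key for illustration only; never hard-code real keys.
	privHex := "0x0000000000000000000000000000000000000000000000000000000000000001"

	priv, err := crypto.ToECDSA(common.FromHex(privHex))
	if err != nil {
		log.Fatalf("incorrect prover_private_key format, err: %v", err)
	}
	// The corresponding sender address the relayer would sign rollup txs with.
	fmt.Println("sender:", crypto.PubkeyToAddress(priv.PublicKey).Hex())
}
```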

View File

@@ -171,9 +171,9 @@ func (r *Layer2Relayer) initializeGenesis() error {
chunk := &types.Chunk{
Blocks: []*types.WrappedBlock{{
Header: genesis,
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
Header: genesis,
Transactions: nil,
WithdrawRoot: common.Hash{},
}},
}
@@ -417,7 +417,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// The proof for this block is not ready yet.
return
case types.ProvingTaskProved:
// It's an intermediate state. The roller manager received the proof but has not verified
// It's an intermediate state. The prover manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case types.ProvingTaskVerified:

View File

@@ -169,15 +169,15 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String())
withdrawTrieRoot, err3 := w.StorageAt(ctx, w.messageQueueAddress, w.withdrawTrieRootSlot, big.NewInt(int64(number)))
withdrawRoot, err3 := w.StorageAt(ctx, w.messageQueueAddress, w.withdrawTrieRootSlot, big.NewInt(int64(number)))
if err3 != nil {
return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number)
return fmt.Errorf("failed to get withdrawRoot: %v. number: %v", err3, number)
}
blocks = append(blocks, &types.WrappedBlock{
Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()),
WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot),
Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()),
WithdrawRoot: common.BytesToHash(withdrawRoot),
})
}
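
Editor's note: the `withdrawRoot` here is read directly from contract storage via `eth_getStorageAt`. Below is a minimal standalone sketch (hypothetical RPC endpoint, contract address, slot, and block number) of the same call using the go-ethereum client already used in this repository.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	// Hypothetical values, for illustration only.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	messageQueueAddr := common.HexToAddress("0x0000000000000000000000000000000000000001")
	withdrawTrieRootSlot := common.BigToHash(big.NewInt(0))

	// Read the raw 32-byte storage word at the given slot and block height,
	// then interpret it as a hash, as getAndStoreBlockTraces does above.
	raw, err := client.StorageAt(context.Background(), messageQueueAddr, withdrawTrieRootSlot, big.NewInt(100))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("withdrawRoot:", common.BytesToHash(raw).Hex())
}
```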

View File

@@ -30,6 +30,7 @@ type Batch struct {
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
// proof
@@ -258,7 +259,8 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
EndChunkHash: endChunkHash,
EndChunkIndex: endChunkIndex,
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawRoot.Hex(),
ParentBatchHash: parentBatchHash.Hex(),
BatchHeader: batchHeader.Encode(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),

View File

@@ -26,6 +26,10 @@ type Chunk struct {
StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"`
TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
TotalL1MessagesPoppedInChunk uint32 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
ParentChunkHash string `json:"parent_chunk_hash" gorm:"column:parent_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
ParentChunkStateRoot string `json:"parent_chunk_state_root" gorm:"column:parent_chunk_state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
// proof
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
@@ -118,6 +122,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
var chunkIndex uint64
var totalL1MessagePoppedBefore uint64
var parentChunkHash string
var parentChunkStateRoot string
parentChunk, err := o.GetLatestChunk(ctx)
if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
log.Error("failed to get latest chunk", "err", err)
@@ -130,6 +136,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
if parentChunk != nil {
chunkIndex = parentChunk.Index + 1
totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + uint64(parentChunk.TotalL1MessagesPoppedInChunk)
parentChunkHash = parentChunk.Hash
parentChunkStateRoot = parentChunk.StateRoot
}
hash, err := chunk.Hash(totalL1MessagePoppedBefore)
@@ -164,6 +172,10 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: uint32(chunk.NumL1Messages(totalL1MessagePoppedBefore)),
ParentChunkHash: parentChunkHash,
StateRoot: chunk.Blocks[numBlocks-1].Header.Root.Hex(),
ParentChunkStateRoot: parentChunkStateRoot,
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
ProvingStatus: int16(types.ProvingTaskUnassigned),
}

View File

@@ -19,15 +19,16 @@ type L2Block struct {
db *gorm.DB `gorm:"column:-"`
// block
Number uint64 `json:"number" gorm:"number"`
Hash string `json:"hash" gorm:"hash"`
ParentHash string `json:"parent_hash" gorm:"parent_hash"`
Header string `json:"header" gorm:"header"`
Transactions string `json:"transactions" gorm:"transactions"`
WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"`
TxNum uint32 `json:"tx_num" gorm:"tx_num"`
GasUsed uint64 `json:"gas_used" gorm:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
Number uint64 `json:"number" gorm:"number"`
Hash string `json:"hash" gorm:"hash"`
ParentHash string `json:"parent_hash" gorm:"parent_hash"`
Header string `json:"header" gorm:"header"`
Transactions string `json:"transactions" gorm:"transactions"`
WithdrawRoot string `json:"withdraw_root" gorm:"withdraw_root"`
StateRoot string `json:"state_root" gorm:"state_root"`
TxNum uint32 `json:"tx_num" gorm:"tx_num"`
GasUsed uint64 `json:"gas_used" gorm:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
// chunk
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
@@ -67,7 +68,7 @@ func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) {
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Select("header, transactions, withdraw_root")
db = db.Where("chunk_hash IS NULL")
db = db.Order("number ASC")
@@ -89,7 +90,7 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
}
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
}
@@ -133,7 +134,7 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Select("header, transactions, withdraw_root")
db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
db = db.Order("number ASC")
@@ -160,7 +161,7 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
}
@@ -184,15 +185,16 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
}
l2Block := L2Block{
Number: block.Header.Number.Uint64(),
Hash: block.Header.Hash().String(),
ParentHash: block.Header.ParentHash.String(),
Transactions: string(txs),
WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(),
TxNum: uint32(len(block.Transactions)),
GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time,
Header: string(header),
Number: block.Header.Number.Uint64(),
Hash: block.Header.Hash().String(),
ParentHash: block.Header.ParentHash.String(),
Transactions: string(txs),
WithdrawRoot: block.WithdrawRoot.Hex(),
StateRoot: block.Header.Root.Hex(),
TxNum: uint32(len(block.Transactions)),
GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time,
Header: string(header),
}
l2Blocks = append(l2Blocks, l2Block)
}

View File

@@ -60,8 +60,8 @@ contract MockBridgeL1 {
/// @notice Emitted when a batch is finalized.
/// @param batchHash The hash of the batch
/// @param stateRoot The state root in layer 2 after this batch.
/// @param withdrawRoot The merkle root in layer2 after this batch.
/// @param stateRoot The state root on layer 2 after this batch.
/// @param withdrawRoot The merkle root on layer2 after this batch.
event FinalizeBatch(bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
/***********

View File

@@ -80,8 +80,8 @@ func testImportL2GasPrice(t *testing.T) {
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
Transactions: nil,
WithdrawRoot: common.Hash{},
},
},
}

View File

@@ -45,9 +45,9 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
BaseFee: big.NewInt(0),
}
wrappedBlocks = append(wrappedBlocks, &types.WrappedBlock{
Header: &header,
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
Header: &header,
Transactions: nil,
WithdrawRoot: common.Hash{},
})
}

View File

@@ -25,7 +25,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./roller/go.* ./roller/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

View File

@@ -8,7 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./roller/go.* ./roller/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

View File

@@ -8,7 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./roller/go.* ./roller/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

View File

@@ -8,7 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./roller/go.* ./roller/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

View File

@@ -8,7 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./roller/go.* ./roller/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

View File

@@ -8,7 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./roller/go.* ./roller/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

View File

@@ -31,7 +31,7 @@ flag_management:
- type: project
target: auto
threshold: 1%
- name: roller
- name: prover
statuses:
- type: project
target: auto

View File

@@ -13,12 +13,12 @@ import (
const nonZeroByteGas uint64 = 16
const zeroByteGas uint64 = 4
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
// WrappedBlock contains the block's Header, Transactions and WithdrawRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used for recover types.Transactions, the from of types.TransactionData field is missing.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
Transactions []*types.TransactionData `json:"transactions"`
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}
// NumL1Messages returns the number of L1 messages in this block.
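
Editor's note: only the Go field is renamed here; the JSON tag remains `withdraw_trie_root`, so serialized blocks keep their existing key. A small standalone sketch demonstrating that the wire format is unchanged:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

// Standalone copy of the relevant field, for illustration only.
type wrappedBlock struct {
	WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}

func main() {
	out, err := json.Marshal(wrappedBlock{WithdrawRoot: common.HexToHash("0x01")})
	if err != nil {
		panic(err)
	}
	// The key is still "withdraw_trie_root" even though the Go field is WithdrawRoot.
	fmt.Println(string(out))
}
```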

View File

@@ -68,28 +68,28 @@ const (
MsgRelayFailed
)
// RollerProveStatus is the roller prove status of a block batch (session)
type RollerProveStatus int32
// ProverProveStatus is the prover prove status of a block batch (session)
type ProverProveStatus int32
const (
// RollerProveStatusUndefined indicates an unknown roller proving status
RollerProveStatusUndefined RollerProveStatus = iota
// RollerAssigned indicates roller assigned but has not submitted proof
RollerAssigned
// RollerProofValid indicates roller has submitted valid proof
RollerProofValid
// RollerProofInvalid indicates roller has submitted invalid proof
RollerProofInvalid
// ProverProveStatusUndefined indicates an unknown prover proving status
ProverProveStatusUndefined ProverProveStatus = iota
// ProverAssigned indicates prover assigned but has not submitted proof
ProverAssigned
// ProverProofValid indicates prover has submitted valid proof
ProverProofValid
// ProverProofInvalid indicates prover has submitted invalid proof
ProverProofInvalid
)
func (s RollerProveStatus) String() string {
func (s ProverProveStatus) String() string {
switch s {
case RollerAssigned:
return "RollerAssigned"
case RollerProofValid:
return "RollerProofValid"
case RollerProofInvalid:
return "RollerProofInvalid"
case ProverAssigned:
return "ProverAssigned"
case ProverProofValid:
return "ProverProofValid"
case ProverProofInvalid:
return "ProverProofInvalid"
default:
return fmt.Sprintf("Bad Value: %d", int32(s))
}
@@ -99,7 +99,7 @@ func (s RollerProveStatus) String() string {
type ProverTaskFailureType int
const (
// ProverTaskFailureTypeUndefined indicates an unknown roller failure type
// ProverTaskFailureTypeUndefined indicates an unknown prover failure type
ProverTaskFailureTypeUndefined ProverTaskFailureType = iota
// ProverTaskFailureTypeTimeout prover task failure of timeout
ProverTaskFailureTypeTimeout

View File

@@ -6,30 +6,30 @@ import (
"github.com/stretchr/testify/assert"
)
func TestRollerProveStatus(t *testing.T) {
func TestProverProveStatus(t *testing.T) {
tests := []struct {
name string
s RollerProveStatus
s ProverProveStatus
want string
}{
{
"RollerAssigned",
RollerAssigned,
"RollerAssigned",
"ProverAssigned",
ProverAssigned,
"ProverAssigned",
},
{
"RollerProofValid",
RollerProofValid,
"RollerProofValid",
"ProverProofValid",
ProverProofValid,
"ProverProofValid",
},
{
"RollerProofInvalid",
RollerProofInvalid,
"RollerProofInvalid",
"ProverProofInvalid",
ProverProofInvalid,
"ProverProofInvalid",
},
{
"Bad Value",
RollerProveStatus(999), // Invalid value.
ProverProveStatus(999), // Invalid value.
"Bad Value: 999",
},
}

View File

@@ -13,7 +13,7 @@ import (
"github.com/scroll-tech/go-ethereum/rlp"
)
// RespStatus represents status code from roller to scroll
// RespStatus represents status code from prover to scroll
type RespStatus uint32
const (
@@ -23,7 +23,7 @@ const (
StatusProofError
)
// ProofType represents the type of roller.
// ProofType represents the type of prover.
type ProofType uint8
func (r ProofType) String() string {
@@ -40,28 +40,28 @@ func (r ProofType) String() string {
const (
// ProofTypeUndefined is an unknown proof type
ProofTypeUndefined ProofType = iota
// ProofTypeChunk is default roller, it only generates zk proof from traces.
// ProofTypeChunk is default prover, it only generates zk proof from traces.
ProofTypeChunk
// ProofTypeBatch generates zk proof from other zk proofs and aggregate them into one proof.
ProofTypeBatch
)
// AuthMsg is the first message exchanged from the Roller to the Sequencer.
// It effectively acts as a registration, and makes the Roller identification
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
// Message fields
Identity *Identity `json:"message"`
// Roller signature
// Prover signature
Signature string `json:"signature"`
}
// Identity contains all the fields to be signed by the roller.
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// Roller name
// Prover name
Name string `json:"name"`
// Roller RollerType
RollerType ProofType `json:"roller_type,omitempty"`
// Prover ProverType
ProverType ProofType `json:"prover_type,omitempty"`
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version.
// curl -sL https://api.github.com/repos/scroll-tech/scroll-prover/commits | jq -r ".[0].sha"
Version string `json:"version"`
@@ -140,10 +140,10 @@ func (i *Identity) Hash() ([]byte, error) {
// ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct {
*ProofDetail `json:"zkProof"`
// Roller signature
// Prover signature
Signature string `json:"signature"`
// Roller public key
// Prover public key
publicKey string
}
@@ -204,13 +204,13 @@ func (a *ProofMsg) PublicKey() (string, error) {
type TaskMsg struct {
ID string `json:"id"`
Type ProofType `json:"type,omitempty"`
// For decentralization, basic rollers will get block hashes from the coordinator. So that they can refer to the block hashes and fetch traces locally. Only applicable for basic rollers.
// For decentralization, basic provers will get block hashes from the coordinator. So that they can refer to the block hashes and fetch traces locally. Only applicable for basic provers.
BlockHashes []common.Hash `json:"block_hashes,omitempty"`
// Only applicable for aggregator rollers.
// Only applicable for aggregator provers.
SubProofs []*AggProof `json:"sub_proofs,omitempty"`
}
// ProofDetail is the message received from rollers that contains zk proof, the status of
// ProofDetail is the message received from provers that contains zk proof, the status of
// the proof generation succeeded, and an error message if proof generation failed.
type ProofDetail struct {
ID string `json:"id"`

View File

@@ -47,7 +47,7 @@ func TestGenerateToken(t *testing.T) {
func TestIdentityHash(t *testing.T) {
identity := &Identity{
Name: "testName",
RollerType: ProofTypeChunk,
ProverType: ProofTypeChunk,
Version: "testVersion",
Token: "testToken",
}

View File

@@ -24,8 +24,8 @@ var (
CoordinatorApp MockAppName = "coordinator-test"
// DBCliApp the name of mock database app.
DBCliApp MockAppName = "db_cli-test"
// RollerApp the name of mock roller app.
RollerApp MockAppName = "roller-test"
// ProverApp the name of mock prover app.
ProverApp MockAppName = "prover-test"
)
// RegisterSimulation register initializer function for integration-test.

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.0.28"
var tag = "v4.0.32"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -25,5 +25,5 @@ var commit = func() string {
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover
var ZkVersion string
// Version denote the version of scroll protocol, including the l2geth, relayer, coordinator, roller, contracts and etc.
// Version denote the version of scroll protocol, including the l2geth, relayer, coordinator, prover, contracts and etc.
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)

View File

@@ -24,7 +24,7 @@ The execution in layer 2 may be failed due to out of gas problem. In such case,
### Send Message from L2 to L1
Similar to sending message from L1 to L2, you should call `L2ScrollMessenger.sendMessage` first in layer 2. The `L2ScrollMessenger` contract will emit a `SentMessage` event, which will be notified by the Sequencer. Unlike above, the Sequencer will first batch submit layer 2 transactions (or block) to `ZKRollup` contract in layer 1. Then the Sequencer will wait the proof generated by roller and submit the proof to `ZKRollup` contract in layer 1 again. Finally, anyone can call `L1ScrollMessenger.relayMessageWithProof` with correct proof to execute the message in layer 1.
Similar to sending message from L1 to L2, you should call `L2ScrollMessenger.sendMessage` first in layer 2. The `L2ScrollMessenger` contract will emit a `SentMessage` event, which will be notified by the Sequencer. Unlike above, the Sequencer will first batch submit layer 2 transactions (or block) to `ZKRollup` contract in layer 1. Then the Sequencer will wait the proof generated by prover and submit the proof to `ZKRollup` contract in layer 1 again. Finally, anyone can call `L1ScrollMessenger.relayMessageWithProof` with correct proof to execute the message in layer 1.
Currently, for the safety reason, we only allow privileged contracts to send cross domain messages. And only privileged accounts can call `L2ScrollMessenger.relayMessage`.
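
Editor's note: to make the flow just described concrete, a hedged sketch of building the layer-2 `sendMessage` calldata with the go-ethereum ABI encoder. The `sendMessage(address,uint256,bytes,uint256)` fragment below is an assumption for illustration, not taken from this diff; consult the deployed `L2ScrollMessenger` contract for the authoritative interface.

```go
package main

import (
	"fmt"
	"log"
	"math/big"
	"strings"

	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
)

// Assumed ABI fragment for illustration only; the real interface may differ.
const messengerABI = `[{"type":"function","name":"sendMessage","stateMutability":"payable",
  "inputs":[{"name":"_to","type":"address"},{"name":"_value","type":"uint256"},
            {"name":"_message","type":"bytes"},{"name":"_gasLimit","type":"uint256"}],
  "outputs":[]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(messengerABI))
	if err != nil {
		log.Fatal(err)
	}
	// Pack a cross-domain call: target on L1, value, inner message calldata, gas limit.
	data, err := parsed.Pack("sendMessage",
		common.HexToAddress("0x1111111111111111111111111111111111111111"), // hypothetical L1 target
		big.NewInt(0),
		[]byte{},
		big.NewInt(200000),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("calldata: 0x%x\n", data)
}
```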

View File

@@ -4,7 +4,7 @@
> L1ERC1155Gateway
The `L1ERC1155Gateway` is used to deposit ERC1155 compatible NFT in layer 1 and finalize withdraw the NFTs from layer 2.
The `L1ERC1155Gateway` is used to deposit ERC1155 compatible NFT on layer 1 and finalize withdraw the NFTs from layer 2.
*The deposited NFTs are held in this gateway. On finalizing withdraw, the corresponding NFT will be transfer to the recipient directly. This will be changed if we have more specific scenarios.*
@@ -24,7 +24,7 @@ Deposit a list of some ERC1155 NFT to caller&#39;s account on layer 2.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of ERC1155 NFT in layer 1. |
| _token | address | The address of ERC1155 NFT on layer 1. |
| _tokenIds | uint256[] | The list of token ids to deposit. |
| _amounts | uint256[] | The list of corresponding number of token to deposit. |
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
@@ -43,8 +43,8 @@ Deposit a list of some ERC1155 NFT to a recipient&#39;s account on layer 2.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of ERC1155 NFT in layer 1. |
| _to | address | The address of recipient in layer 2. |
| _token | address | The address of ERC1155 NFT on layer 1. |
| _to | address | The address of recipient on layer 2. |
| _tokenIds | uint256[] | The list of token ids to deposit. |
| _amounts | uint256[] | The list of corresponding number of token to deposit. |
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
@@ -80,8 +80,8 @@ Deposit some ERC1155 NFT to a recipient&#39;s account on layer 2.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of ERC1155 NFT in layer 1. |
| _to | address | The address of recipient in layer 2. |
| _token | address | The address of ERC1155 NFT on layer 1. |
| _to | address | The address of recipient on layer 2. |
| _tokenId | uint256 | The token id to deposit. |
| _amount | uint256 | The amount of token to deposit. |
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
@@ -100,7 +100,7 @@ Deposit some ERC1155 NFT to caller&#39;s account on layer 2.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of ERC1155 NFT in layer 1. |
| _token | address | The address of ERC1155 NFT on layer 1. |
| _tokenId | uint256 | The token id to deposit. |
| _amount | uint256 | The amount of token to deposit. |
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
@@ -111,7 +111,7 @@ Deposit some ERC1155 NFT to caller&#39;s account on layer 2.
function finalizeBatchWithdrawERC1155(address _l1Token, address _l2Token, address _from, address _to, uint256[] _tokenIds, uint256[] _amounts) external nonpayable
```
Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipient&#39;s account in layer 1. The function should only be called by L1ScrollMessenger. The function should also only be called by L2ERC1155Gateway in layer 2.
Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipient&#39;s account on layer 1. The function should only be called by L1ScrollMessenger. The function should also only be called by L2ERC1155Gateway on layer 2.
@@ -121,8 +121,8 @@ Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipie
|---|---|---|
| _l1Token | address | The address of corresponding layer 1 token. |
| _l2Token | address | The address of corresponding layer 2 token. |
| _from | address | The address of account who withdraw the token in layer 2. |
| _to | address | The address of recipient in layer 1 to receive the token. |
| _from | address | The address of account who withdraw the token on layer 2. |
| _to | address | The address of recipient on layer 1 to receive the token. |
| _tokenIds | uint256[] | The list of token ids to withdraw. |
| _amounts | uint256[] | The list of corresponding number of token to withdraw. |
@@ -132,7 +132,7 @@ Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipie
function finalizeWithdrawERC1155(address _l1Token, address _l2Token, address _from, address _to, uint256 _tokenId, uint256 _amount) external nonpayable
```
Complete ERC1155 withdraw from layer 2 to layer 1 and send fund to recipient&#39;s account in layer 1. The function should only be called by L1ScrollMessenger. The function should also only be called by L2ERC1155Gateway in layer 2.
Complete ERC1155 withdraw from layer 2 to layer 1 and send fund to recipient&#39;s account on layer 1. The function should only be called by L1ScrollMessenger. The function should also only be called by L2ERC1155Gateway on layer 2.
@@ -142,8 +142,8 @@ Complete ERC1155 withdraw from layer 2 to layer 1 and send fund to recipient&#39
|---|---|---|
| _l1Token | address | The address of corresponding layer 1 token. |
| _l2Token | address | The address of corresponding layer 2 token. |
| _from | address | The address of account who withdraw the token in layer 2. |
| _to | address | The address of recipient in layer 1 to receive the token. |
| _from | address | The address of account who withdraw the token on layer 2. |
| _to | address | The address of recipient on layer 1 to receive the token. |
| _tokenId | uint256 | The token id to withdraw. |
| _amount | uint256 | The amount of token to withdraw. |
@@ -368,8 +368,8 @@ Update layer 2 to layer 2 token mapping.
| Name | Type | Description |
|---|---|---|
| _l1Token | address | The address of corresponding ERC1155 token in layer 2. |
| _l2Token | address | undefined |
| _l1Token | address | The address of ERC1155 token on layer 1. |
| _l2Token | address | The address of corresponding ERC1155 token on layer 2. |
@@ -381,7 +381,7 @@ Update layer 2 to layer 2 token mapping.
event BatchDepositERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds, uint256[] _amounts)
```
Emitted when the ERC1155 NFT is batch deposited to gateway in layer 1.
Emitted when the ERC1155 NFT is batch deposited to gateway on layer 1.
@@ -421,7 +421,7 @@ Emitted when some ERC1155 token is refunded.
event DepositERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId, uint256 _amount)
```
Emitted when the ERC1155 NFT is deposited to gateway in layer 1.
Emitted when the ERC1155 NFT is deposited to gateway on layer 1.
@@ -442,7 +442,7 @@ Emitted when the ERC1155 NFT is deposited to gateway in layer 1.
event FinalizeBatchWithdrawERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds, uint256[] _amounts)
```
Emitted when the ERC1155 NFT is batch transfered to recipient in layer 1.
Emitted when the ERC1155 NFT is batch transfered to recipient on layer 1.
@@ -463,7 +463,7 @@ Emitted when the ERC1155 NFT is batch transfered to recipient in layer 1.
event FinalizeWithdrawERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId, uint256 _amount)
```
Emitted when the ERC1155 NFT is transfered to recipient in layer 1.
Emitted when the ERC1155 NFT is transfered to recipient on layer 1.
@@ -544,8 +544,8 @@ Emitted when token mapping for ERC1155 token is updated.
| Name | Type | Description |
|---|---|---|
| _l1Token | address | The address of corresponding ERC1155 token in layer 2. |
| _l2Token | address | undefined |
| _l1Token | address | The address of ERC1155 token on layer 1. |
| _l2Token | address | The address of corresponding ERC1155 token on layer 2. |

View File

@@ -4,7 +4,7 @@
> L1ERC721Gateway
The `L1ERC721Gateway` is used to deposit ERC721 compatible NFT in layer 1 and finalize withdraw the NFTs from layer 2.
The `L1ERC721Gateway` is used to deposit ERC721 compatible NFT on layer 1 and finalize withdraw the NFTs from layer 2.
*The deposited NFTs are held in this gateway. On finalizing withdraw, the corresponding NFT will be transfer to the recipient directly. This will be changed if we have more specific scenarios.*
@@ -24,8 +24,8 @@ Deposit a list of some ERC721 NFT to a recipient&#39;s account on layer 2.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of ERC721 NFT in layer 1. |
| _to | address | The address of recipient in layer 2. |
| _token | address | The address of ERC721 NFT on layer 1. |
| _to | address | The address of recipient on layer 2. |
| _tokenIds | uint256[] | The list of token ids to deposit. |
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
@@ -43,7 +43,7 @@ Deposit a list of some ERC721 NFT to caller&#39;s account on layer 2.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of ERC721 NFT in layer 1. |
| _token | address | The address of ERC721 NFT on layer 1. |
| _tokenIds | uint256[] | The list of token ids to deposit. |
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
@@ -78,8 +78,8 @@ Deposit some ERC721 NFT to a recipient&#39;s account on layer 2.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of ERC721 NFT in layer 1. |
| _to | address | The address of recipient in layer 2. |
| _token | address | The address of ERC721 NFT on layer 1. |
| _to | address | The address of recipient on layer 2. |
| _tokenId | uint256 | The token id to deposit. |
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
@@ -97,7 +97,7 @@ Deposit some ERC721 NFT to caller&#39;s account on layer 2.
| Name | Type | Description |
|---|---|---|
| _token | address | The address of ERC721 NFT in layer 1. |
| _token | address | The address of ERC721 NFT on layer 1. |
| _tokenId | uint256 | The token id to deposit. |
| _gasLimit | uint256 | Estimated gas limit required to complete the deposit on layer 2. |
@@ -107,9 +107,9 @@ Deposit some ERC721 NFT to caller&#39;s account on layer 2.
function finalizeBatchWithdrawERC721(address _l1Token, address _l2Token, address _from, address _to, uint256[] _tokenIds) external nonpayable
```
Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient&#39;s account in layer 1.
Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient&#39;s account on layer 1.
*Requirements: - The function should only be called by L1ScrollMessenger. - The function should also only be called by L2ERC721Gateway in layer 2.*
*Requirements: - The function should only be called by L1ScrollMessenger. - The function should also only be called by L2ERC721Gateway on layer 2.*
#### Parameters
@@ -117,8 +117,8 @@ Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient
|---|---|---|
| _l1Token | address | The address of corresponding layer 1 token. |
| _l2Token | address | The address of corresponding layer 2 token. |
| _from | address | The address of account who withdraw the token in layer 2. |
| _to | address | The address of recipient in layer 1 to receive the token. |
| _from | address | The address of account who withdraw the token on layer 2. |
| _to | address | The address of recipient on layer 1 to receive the token. |
| _tokenIds | uint256[] | The list of token ids to withdraw. |
### finalizeWithdrawERC721
@@ -127,9 +127,9 @@ Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient
function finalizeWithdrawERC721(address _l1Token, address _l2Token, address _from, address _to, uint256 _tokenId) external nonpayable
```
Complete ERC721 withdraw from layer 2 to layer 1 and send NFT to recipient&#39;s account in layer 1.
Complete ERC721 withdraw from layer 2 to layer 1 and send NFT to recipient&#39;s account on layer 1.
*Requirements: - The function should only be called by L1ScrollMessenger. - The function should also only be called by L2ERC721Gateway in layer 2.*
*Requirements: - The function should only be called by L1ScrollMessenger. - The function should also only be called by L2ERC721Gateway on layer 2.*
#### Parameters
@@ -137,8 +137,8 @@ Complete ERC721 withdraw from layer 2 to layer 1 and send NFT to recipient&#39;s
|---|---|---|
| _l1Token | address | The address of corresponding layer 1 token. |
| _l2Token | address | The address of corresponding layer 2 token. |
| _from | address | The address of account who withdraw the token in layer 2. |
| _to | address | The address of recipient in layer 1 to receive the token. |
| _from | address | The address of account who withdraw the token on layer 2. |
| _to | address | The address of recipient on layer 1 to receive the token. |
| _tokenId | uint256 | The token id to withdraw. |
### initialize
@@ -313,8 +313,8 @@ Update layer 2 to layer 2 token mapping.
| Name | Type | Description |
|---|---|---|
| _l1Token | address | The address of corresponding ERC721 token in layer 2. |
| _l2Token | address | undefined |
| _l1Token | address | The address of ERC721 token on layer 1. |
| _l2Token | address | The address of corresponding ERC721 token on layer 2. |
@@ -326,7 +326,7 @@ Update layer 2 to layer 2 token mapping.
event BatchDepositERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds)
```
Emitted when the ERC721 NFT is batch deposited to gateway in layer 1.
Emitted when the ERC721 NFT is batch deposited to gateway on layer 1.
@@ -364,7 +364,7 @@ Emitted when a batch of ERC721 tokens are refunded.
event DepositERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId)
```
Emitted when the ERC721 NFT is deposited to gateway in layer 1.
Emitted when the ERC721 NFT is deposited to gateway on layer 1.
@@ -384,7 +384,7 @@ Emitted when the ERC721 NFT is deposited to gateway in layer 1.
event FinalizeBatchWithdrawERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds)
```
Emitted when the ERC721 NFT is batch transfered to recipient in layer 1.
Emitted when the ERC721 NFT is batch transfered to recipient on layer 1.
@@ -404,7 +404,7 @@ Emitted when the ERC721 NFT is batch transfered to recipient in layer 1.
event FinalizeWithdrawERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId)
```
Emitted when the ERC721 NFT is transfered to recipient in layer 1.
Emitted when the ERC721 NFT is transfered to recipient on layer 1.
@@ -483,8 +483,8 @@ Emitted when token mapping for ERC721 token is updated.
| Name | Type | Description |
|---|---|---|
| _l1Token | address | The address of corresponding ERC721 token in layer 2. |
| _l2Token | address | undefined |
| _l1Token | address | The address of ERC721 token on layer 1. |
| _l2Token | address | The address of corresponding ERC721 token on layer 2. |

View File

@@ -4,7 +4,7 @@
> L1StandardERC20Gateway
The `L1StandardERC20Gateway` is used to deposit standard ERC20 tokens in layer 1 and finalize withdraw the tokens from layer 2.
The `L1StandardERC20Gateway` is used to deposit standard ERC20 tokens on layer 1 and finalize withdraw the tokens from layer 2.
*The deposited ERC20 tokens are held in this gateway. On finalizing withdraw, the corresponding token will be transfer to the recipient directly. Any ERC20 that requires non-standard functionality should use a separate gateway.*

View File

@@ -4,7 +4,7 @@
> L1WETHGateway
The `L1WETHGateway` contract is used to deposit `WETH` token in layer 1 and finalize withdraw `WETH` from layer 2.
The `L1WETHGateway` contract is used to deposit `WETH` token on layer 1 and finalize withdraw `WETH` from layer 2.
*The deposited WETH tokens are not held in the gateway. It will first be unwrapped as Ether and then the Ether will be sent to the `L1ScrollMessenger` contract. On finalizing withdraw, the Ether will be transfered from `L1ScrollMessenger`, then wrapped as WETH and finally transfer to recipient.*

View File

@@ -4,7 +4,7 @@
> L2ERC1155Gateway
The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs in layer 2 and finalize deposit the NFTs from layer 1.
The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs on layer 2 and finalize deposit the NFTs from layer 1.
*The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding NFT will be minted and transfered to the recipient. This will be changed if we have more specific scenarios.*
@@ -72,9 +72,9 @@ The address of corresponding L1/L2 Gateway contract.
function finalizeBatchDepositERC1155(address _l1Token, address _l2Token, address _from, address _to, uint256[] _tokenIds, uint256[] _amounts) external nonpayable
```
Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient&#39;s account in layer 2.
Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient&#39;s account on layer 2.
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC1155Gateway in layer 1.*
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC1155Gateway on layer 1.*
#### Parameters
@@ -93,9 +93,9 @@ Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient&#39;s
function finalizeDepositERC1155(address _l1Token, address _l2Token, address _from, address _to, uint256 _tokenId, uint256 _amount) external nonpayable
```
Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient&#39;s account in layer 2.
Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient&#39;s account on layer 2.
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC1155Gateway in layer 1.*
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC1155Gateway on layer 1.*
#### Parameters
@@ -313,8 +313,8 @@ Update layer 2 to layer 1 token mapping.
| Name | Type | Description |
|---|---|---|
| _l2Token | address | undefined |
| _l1Token | address | The address of ERC1155 token in layer 1. |
| _l2Token | address | The address of corresponding ERC1155 token on layer 2. |
| _l1Token | address | The address of ERC1155 token on layer 1. |
### withdrawERC1155
@@ -365,7 +365,7 @@ Withdraw some ERC1155 NFT to caller&#39;s account on layer 1.
event BatchWithdrawERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds, uint256[] amounts)
```
Emitted when the ERC1155 NFT is batch transfered to gateway in layer 2.
Emitted when the ERC1155 NFT is batch transfered to gateway on layer 2.
@@ -386,7 +386,7 @@ Emitted when the ERC1155 NFT is batch transfered to gateway in layer 2.
event FinalizeBatchDepositERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds, uint256[] amounts)
```
Emitted when the ERC1155 NFT is batch transfered to recipient in layer 2.
Emitted when the ERC1155 NFT is batch transfered to recipient on layer 2.
@@ -407,7 +407,7 @@ Emitted when the ERC1155 NFT is batch transfered to recipient in layer 2.
event FinalizeDepositERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId, uint256 amount)
```
Emitted when the ERC1155 NFT is transfered to recipient in layer 2.
Emitted when the ERC1155 NFT is transfered to recipient on layer 2.
@@ -469,8 +469,8 @@ Emitted when token mapping for ERC1155 token is updated.
| Name | Type | Description |
|---|---|---|
| _l2Token | address | undefined |
| _l1Token | address | The address of ERC1155 token in layer 1. |
| _l2Token | address | The address of corresponding ERC1155 token on layer 2. |
| _l1Token | address | The address of ERC1155 token on layer 1. |
### WithdrawERC1155
@@ -478,7 +478,7 @@ Emitted when token mapping for ERC1155 token is updated.
event WithdrawERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId, uint256 amount)
```
Emitted when the ERC1155 NFT is transfered to gateway in layer 2.
Emitted when the ERC1155 NFT is transfered to gateway on layer 2.

View File

@@ -4,7 +4,7 @@
> L2ERC721Gateway
The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs in layer 2 and finalize deposit the NFTs from layer 1.
The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs on layer 2 and finalize deposit the NFTs from layer 1.
*The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding NFT will be minted and transfered to the recipient. This will be changed if we have more specific scenarios.*
@@ -70,9 +70,9 @@ The address of corresponding L1/L2 Gateway contract.
function finalizeBatchDepositERC721(address _l1Token, address _l2Token, address _from, address _to, uint256[] _tokenIds) external nonpayable
```
Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient&#39;s account in layer 2.
Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient&#39;s account on layer 2.
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC721Gateway in layer 1.*
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC721Gateway on layer 1.*
#### Parameters
@@ -90,9 +90,9 @@ Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient&#39;s
function finalizeDepositERC721(address _l1Token, address _l2Token, address _from, address _to, uint256 _tokenId) external nonpayable
```
Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient&#39;s account in layer 2.
Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient&#39;s account on layer 2.
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC721Gateway in layer 1.*
*Requirements: - The function should only be called by L2ScrollMessenger. - The function should also only be called by L1ERC721Gateway on layer 1.*
#### Parameters
@@ -260,8 +260,8 @@ Update layer 2 to layer 1 token mapping.
| Name | Type | Description |
|---|---|---|
| _l2Token | address | undefined |
| _l1Token | address | The address of ERC721 token in layer 1. |
| _l2Token | address | The address of corresponding ERC721 token on layer 2. |
| _l1Token | address | The address of ERC721 token on layer 1. |
### withdrawERC721
@@ -310,7 +310,7 @@ Withdraw some ERC721 NFT to caller&#39;s account on layer 1.
event BatchWithdrawERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds)
```
Emitted when the ERC721 NFT is batch transfered to gateway in layer 2.
Emitted when the ERC721 NFT is batch transfered to gateway on layer 2.
@@ -330,7 +330,7 @@ Emitted when the ERC721 NFT is batch transfered to gateway in layer 2.
event FinalizeBatchDepositERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds)
```
Emitted when the ERC721 NFT is batch transfered to recipient in layer 2.
Emitted when the ERC721 NFT is batch transfered to recipient on layer 2.
@@ -350,7 +350,7 @@ Emitted when the ERC721 NFT is batch transfered to recipient in layer 2.
event FinalizeDepositERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId)
```
Emitted when the ERC721 NFT is transfered to recipient in layer 2.
Emitted when the ERC721 NFT is transfered to recipient on layer 2.
@@ -411,8 +411,8 @@ Emitted when token mapping for ERC721 token is updated.
| Name | Type | Description |
|---|---|---|
| _l2Token | address | undefined |
| _l1Token | address | The address of ERC721 token in layer 1. |
| _l2Token | address | The address of corresponding ERC721 token on layer 2. |
| _l1Token | address | The address of ERC721 token on layer 1. |
### WithdrawERC721
@@ -420,7 +420,7 @@ Emitted when token mapping for ERC721 token is updated.
event WithdrawERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId)
```
Emitted when the ERC721 NFT is transfered to gateway in layer 2.
Emitted when the ERC721 NFT is transfered to gateway on layer 2.

View File

@@ -6,7 +6,7 @@
The `L2ScrollMessenger` contract can: 1. send messages from layer 2 to layer 1; 2. relay messages from layer 1 layer 2; 3. drop expired message due to sequencer problems.
*It should be a predeployed contract in layer 2 and should hold infinite amount of Ether (Specifically, `uint256(-1)`), which can be initialized in Genesis Block.*
*It should be a predeployed contract on layer 2 and should hold infinite amount of Ether (Specifically, `uint256(-1)`), which can be initialized in Genesis Block.*
## Methods

View File

@@ -4,7 +4,7 @@
> L2StandardERC20Gateway
The `L2StandardERC20Gateway` is used to withdraw standard ERC20 tokens in layer 2 and finalize deposit the tokens from layer 1.
The `L2StandardERC20Gateway` is used to withdraw standard ERC20 tokens on layer 2 and finalize deposit the tokens from layer 1.
*The withdrawn ERC20 tokens will be burned directly. On finalizing deposit, the corresponding token will be minted and transfered to the recipient. Any ERC20 that requires non-standard functionality should use a separate gateway.*

View File

@@ -4,7 +4,7 @@
> L2WETHGateway
The `L2WETHGateway` contract is used to withdraw `WETH` token in layer 2 and finalize deposit `WETH` from layer 1.
The `L2WETHGateway` contract is used to withdraw `WETH` token on layer 2 and finalize deposit `WETH` from layer 1.
*The WETH tokens are not held in the gateway. It will first be unwrapped as Ether and then the Ether will be sent to the `L2ScrollMessenger` contract. On finalizing deposit, the Ether will be transfered from `L2ScrollMessenger`, then wrapped as WETH and finally transfer to recipient.*

View File

@@ -95,16 +95,16 @@ describe("EnforcedTxGateway.spec", async () => {
});
});
context("#setPaused", async () => {
context("#setPause", async () => {
it("should revert, when non-owner call", async () => {
await expect(gateway.connect(signer).setPaused(false)).to.revertedWith("Ownable: caller is not the owner");
await expect(gateway.connect(signer).setPause(false)).to.revertedWith("Ownable: caller is not the owner");
});
it("should succeed", async () => {
expect(await gateway.paused()).to.eq(false);
await expect(gateway.setPaused(true)).to.emit(gateway, "Paused").withArgs(deployer.address);
await expect(gateway.setPause(true)).to.emit(gateway, "Paused").withArgs(deployer.address);
expect(await gateway.paused()).to.eq(true);
await expect(gateway.setPaused(false)).to.emit(gateway, "Unpaused").withArgs(deployer.address);
await expect(gateway.setPause(false)).to.emit(gateway, "Unpaused").withArgs(deployer.address);
expect(await gateway.paused()).to.eq(false);
});
});
@@ -112,7 +112,7 @@ describe("EnforcedTxGateway.spec", async () => {
context("#sendTransaction, by EOA", async () => {
it("should revert, when contract is paused", async () => {
await gateway.setPaused(true);
await gateway.setPause(true);
await expect(
gateway.connect(signer)["sendTransaction(address,uint256,uint256,bytes)"](signer.address, 0, 0, "0x")
).to.revertedWith("Pausable: paused");
@@ -246,7 +246,7 @@ describe("EnforcedTxGateway.spec", async () => {
};
it("should revert, when contract is paused", async () => {
await gateway.setPaused(true);
await gateway.setPause(true);
await expect(
gateway
.connect(deployer)

View File

@@ -182,8 +182,8 @@ contract L1ScrollMessenger is PausableUpgradeable, ScrollMessengerBase, IL1Scrol
address _refundAddress
) external payable override whenNotPaused notInExecution {
// We will use a different `queueIndex` for the replaced message. However, the original `queueIndex` or `nonce`
// is encoded in the `_message`. We will check the `xDomainCalldata` in layer 2 to avoid duplicated execution.
// So, only one message will succeed in layer 2. If one of the message is executed successfully, the other one
// is encoded in the `_message`. We will check the `xDomainCalldata` on layer 2 to avoid duplicated execution.
// So, only one message will succeed on layer 2. If one of the message is executed successfully, the other one
// will revert with "Message was already successfully executed".
address _messageQueue = messageQueue;
address _counterpart = counterpart;

View File

@@ -152,7 +152,7 @@ contract EnforcedTxGateway is OwnableUpgradeable, ReentrancyGuardUpgradeable, Pa
/// @notice Pause or unpause this contract.
/// @param _status Pause this contract if it is true, otherwise unpause this contract.
function setPaused(bool _status) external onlyOwner {
function setPause(bool _status) external onlyOwner {
if (_status) {
_pause();
} else {

View File

@@ -2,17 +2,17 @@
pragma solidity ^0.8.16;
/// @title The interface for the ERC1155 cross chain gateway in layer 1.
/// @title The interface for the ERC1155 cross chain gateway on layer 1.
interface IL1ERC1155Gateway {
/**********
* Events *
**********/
/// @notice Emitted when the ERC1155 NFT is transfered to recipient in layer 1.
/// @param _l1Token The address of ERC1155 NFT in layer 1.
/// @param _l2Token The address of ERC1155 NFT in layer 2.
/// @param _from The address of sender in layer 2.
/// @param _to The address of recipient in layer 1.
/// @notice Emitted when the ERC1155 NFT is transfered to recipient on layer 1.
/// @param _l1Token The address of ERC1155 NFT on layer 1.
/// @param _l2Token The address of ERC1155 NFT on layer 2.
/// @param _from The address of sender on layer 2.
/// @param _to The address of recipient on layer 1.
/// @param _tokenId The token id of the ERC1155 NFT to withdraw from layer 2.
/// @param _amount The number of token to withdraw from layer 2.
event FinalizeWithdrawERC1155(
@@ -24,11 +24,11 @@ interface IL1ERC1155Gateway {
uint256 _amount
);
/// @notice Emitted when the ERC1155 NFT is batch transfered to recipient in layer 1.
/// @param _l1Token The address of ERC1155 NFT in layer 1.
/// @param _l2Token The address of ERC1155 NFT in layer 2.
/// @param _from The address of sender in layer 2.
/// @param _to The address of recipient in layer 1.
/// @notice Emitted when the ERC1155 NFT is batch transfered to recipient on layer 1.
/// @param _l1Token The address of ERC1155 NFT on layer 1.
/// @param _l2Token The address of ERC1155 NFT on layer 2.
/// @param _from The address of sender on layer 2.
/// @param _to The address of recipient on layer 1.
/// @param _tokenIds The list of token ids of the ERC1155 NFT to withdraw from layer 2.
/// @param _amounts The list of corresponding number of token to withdraw from layer 2.
event FinalizeBatchWithdrawERC1155(
@@ -40,13 +40,13 @@ interface IL1ERC1155Gateway {
uint256[] _amounts
);
/// @notice Emitted when the ERC1155 NFT is deposited to gateway in layer 1.
/// @param _l1Token The address of ERC1155 NFT in layer 1.
/// @param _l2Token The address of ERC1155 NFT in layer 2.
/// @param _from The address of sender in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _tokenId The token id of the ERC1155 NFT to deposit in layer 1.
/// @param _amount The number of token to deposit in layer 1.
/// @notice Emitted when the ERC1155 NFT is deposited to gateway on layer 1.
/// @param _l1Token The address of ERC1155 NFT on layer 1.
/// @param _l2Token The address of ERC1155 NFT on layer 2.
/// @param _from The address of sender on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenId The token id of the ERC1155 NFT to deposit on layer 1.
/// @param _amount The number of token to deposit on layer 1.
event DepositERC1155(
address indexed _l1Token,
address indexed _l2Token,
@@ -56,13 +56,13 @@ interface IL1ERC1155Gateway {
uint256 _amount
);
/// @notice Emitted when the ERC1155 NFT is batch deposited to gateway in layer 1.
/// @param _l1Token The address of ERC1155 NFT in layer 1.
/// @param _l2Token The address of ERC1155 NFT in layer 2.
/// @param _from The address of sender in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _tokenIds The list of token ids of the ERC1155 NFT to deposit in layer 1.
/// @param _amounts The list of corresponding number of token to deposit in layer 1.
/// @notice Emitted when the ERC1155 NFT is batch deposited to gateway on layer 1.
/// @param _l1Token The address of ERC1155 NFT on layer 1.
/// @param _l2Token The address of ERC1155 NFT on layer 2.
/// @param _from The address of sender on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenIds The list of token ids of the ERC1155 NFT to deposit on layer 1.
/// @param _amounts The list of corresponding number of token to deposit on layer 1.
event BatchDepositERC1155(
address indexed _l1Token,
address indexed _l2Token,
@@ -91,7 +91,7 @@ interface IL1ERC1155Gateway {
*************************/
/// @notice Deposit some ERC1155 NFT to caller's account on layer 2.
/// @param _token The address of ERC1155 NFT in layer 1.
/// @param _token The address of ERC1155 NFT on layer 1.
/// @param _tokenId The token id to deposit.
/// @param _amount The amount of token to deposit.
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
@@ -103,8 +103,8 @@ interface IL1ERC1155Gateway {
) external payable;
/// @notice Deposit some ERC1155 NFT to a recipient's account on layer 2.
/// @param _token The address of ERC1155 NFT in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _token The address of ERC1155 NFT on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenId The token id to deposit.
/// @param _amount The amount of token to deposit.
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
@@ -117,7 +117,7 @@ interface IL1ERC1155Gateway {
) external payable;
/// @notice Deposit a list of some ERC1155 NFT to caller's account on layer 2.
/// @param _token The address of ERC1155 NFT in layer 1.
/// @param _token The address of ERC1155 NFT on layer 1.
/// @param _tokenIds The list of token ids to deposit.
/// @param _amounts The list of corresponding number of token to deposit.
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
@@ -129,8 +129,8 @@ interface IL1ERC1155Gateway {
) external payable;
/// @notice Deposit a list of some ERC1155 NFT to a recipient's account on layer 2.
/// @param _token The address of ERC1155 NFT in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _token The address of ERC1155 NFT on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenIds The list of token ids to deposit.
/// @param _amounts The list of corresponding number of token to deposit.
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
@@ -142,13 +142,13 @@ interface IL1ERC1155Gateway {
uint256 _gasLimit
) external payable;
/// @notice Complete ERC1155 withdraw from layer 2 to layer 1 and send fund to recipient's account in layer 1.
/// @notice Complete ERC1155 withdraw from layer 2 to layer 1 and send fund to recipient's account on layer 1.
/// The function should only be called by L1ScrollMessenger.
/// The function should also only be called by L2ERC1155Gateway in layer 2.
/// The function should also only be called by L2ERC1155Gateway on layer 2.
/// @param _l1Token The address of corresponding layer 1 token.
/// @param _l2Token The address of corresponding layer 2 token.
/// @param _from The address of account who withdraw the token in layer 2.
/// @param _to The address of recipient in layer 1 to receive the token.
/// @param _from The address of account who withdraw the token on layer 2.
/// @param _to The address of recipient on layer 1 to receive the token.
/// @param _tokenId The token id to withdraw.
/// @param _amount The amount of token to withdraw.
function finalizeWithdrawERC1155(
@@ -160,13 +160,13 @@ interface IL1ERC1155Gateway {
uint256 _amount
) external;
/// @notice Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipient's account in layer 1.
/// @notice Complete ERC1155 batch withdraw from layer 2 to layer 1 and send fund to recipient's account on layer 1.
/// The function should only be called by L1ScrollMessenger.
/// The function should also only be called by L2ERC1155Gateway in layer 2.
/// The function should also only be called by L2ERC1155Gateway on layer 2.
/// @param _l1Token The address of corresponding layer 1 token.
/// @param _l2Token The address of corresponding layer 2 token.
/// @param _from The address of account who withdraw the token in layer 2.
/// @param _to The address of recipient in layer 1 to receive the token.
/// @param _from The address of account who withdraw the token on layer 2.
/// @param _to The address of recipient on layer 1 to receive the token.
/// @param _tokenIds The list of token ids to withdraw.
/// @param _amounts The list of corresponding number of token to withdraw.
function finalizeBatchWithdrawERC1155(

View File

@@ -2,17 +2,17 @@
pragma solidity ^0.8.16;
/// @title The interface for the ERC721 cross chain gateway in layer 1.
/// @title The interface for the ERC721 cross chain gateway on layer 1.
interface IL1ERC721Gateway {
/**********
* Events *
**********/
/// @notice Emitted when the ERC721 NFT is transfered to recipient in layer 1.
/// @param _l1Token The address of ERC721 NFT in layer 1.
/// @param _l2Token The address of ERC721 NFT in layer 2.
/// @param _from The address of sender in layer 2.
/// @param _to The address of recipient in layer 1.
/// @notice Emitted when the ERC721 NFT is transfered to recipient on layer 1.
/// @param _l1Token The address of ERC721 NFT on layer 1.
/// @param _l2Token The address of ERC721 NFT on layer 2.
/// @param _from The address of sender on layer 2.
/// @param _to The address of recipient on layer 1.
/// @param _tokenId The token id of the ERC721 NFT to withdraw from layer 2.
event FinalizeWithdrawERC721(
address indexed _l1Token,
@@ -22,11 +22,11 @@ interface IL1ERC721Gateway {
uint256 _tokenId
);
/// @notice Emitted when the ERC721 NFT is batch transfered to recipient in layer 1.
/// @param _l1Token The address of ERC721 NFT in layer 1.
/// @param _l2Token The address of ERC721 NFT in layer 2.
/// @param _from The address of sender in layer 2.
/// @param _to The address of recipient in layer 1.
/// @notice Emitted when the ERC721 NFT is batch transfered to recipient on layer 1.
/// @param _l1Token The address of ERC721 NFT on layer 1.
/// @param _l2Token The address of ERC721 NFT on layer 2.
/// @param _from The address of sender on layer 2.
/// @param _to The address of recipient on layer 1.
/// @param _tokenIds The list of token ids of the ERC721 NFT to withdraw from layer 2.
event FinalizeBatchWithdrawERC721(
address indexed _l1Token,
@@ -36,12 +36,12 @@ interface IL1ERC721Gateway {
uint256[] _tokenIds
);
/// @notice Emitted when the ERC721 NFT is deposited to gateway in layer 1.
/// @param _l1Token The address of ERC721 NFT in layer 1.
/// @param _l2Token The address of ERC721 NFT in layer 2.
/// @param _from The address of sender in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _tokenId The token id of the ERC721 NFT to deposit in layer 1.
/// @notice Emitted when the ERC721 NFT is deposited to gateway on layer 1.
/// @param _l1Token The address of ERC721 NFT on layer 1.
/// @param _l2Token The address of ERC721 NFT on layer 2.
/// @param _from The address of sender on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenId The token id of the ERC721 NFT to deposit on layer 1.
event DepositERC721(
address indexed _l1Token,
address indexed _l2Token,
@@ -50,12 +50,12 @@ interface IL1ERC721Gateway {
uint256 _tokenId
);
/// @notice Emitted when the ERC721 NFT is batch deposited to gateway in layer 1.
/// @param _l1Token The address of ERC721 NFT in layer 1.
/// @param _l2Token The address of ERC721 NFT in layer 2.
/// @param _from The address of sender in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _tokenIds The list of token ids of the ERC721 NFT to deposit in layer 1.
/// @notice Emitted when the ERC721 NFT is batch deposited to gateway on layer 1.
/// @param _l1Token The address of ERC721 NFT on layer 1.
/// @param _l2Token The address of ERC721 NFT on layer 2.
/// @param _from The address of sender on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenIds The list of token ids of the ERC721 NFT to deposit on layer 1.
event BatchDepositERC721(
address indexed _l1Token,
address indexed _l2Token,
@@ -81,7 +81,7 @@ interface IL1ERC721Gateway {
*****************************/
/// @notice Deposit some ERC721 NFT to caller's account on layer 2.
/// @param _token The address of ERC721 NFT in layer 1.
/// @param _token The address of ERC721 NFT on layer 1.
/// @param _tokenId The token id to deposit.
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
function depositERC721(
@@ -91,8 +91,8 @@ interface IL1ERC721Gateway {
) external payable;
/// @notice Deposit some ERC721 NFT to a recipient's account on layer 2.
/// @param _token The address of ERC721 NFT in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _token The address of ERC721 NFT on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenId The token id to deposit.
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
function depositERC721(
@@ -103,7 +103,7 @@ interface IL1ERC721Gateway {
) external payable;
/// @notice Deposit a list of some ERC721 NFT to caller's account on layer 2.
/// @param _token The address of ERC721 NFT in layer 1.
/// @param _token The address of ERC721 NFT on layer 1.
/// @param _tokenIds The list of token ids to deposit.
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
function batchDepositERC721(
@@ -113,8 +113,8 @@ interface IL1ERC721Gateway {
) external payable;
/// @notice Deposit a list of some ERC721 NFT to a recipient's account on layer 2.
/// @param _token The address of ERC721 NFT in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _token The address of ERC721 NFT on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenIds The list of token ids to deposit.
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
function batchDepositERC721(
@@ -124,14 +124,14 @@ interface IL1ERC721Gateway {
uint256 _gasLimit
) external payable;
/// @notice Complete ERC721 withdraw from layer 2 to layer 1 and send NFT to recipient's account in layer 1.
/// @notice Complete ERC721 withdraw from layer 2 to layer 1 and send NFT to recipient's account on layer 1.
/// @dev Requirements:
/// - The function should only be called by L1ScrollMessenger.
/// - The function should also only be called by L2ERC721Gateway in layer 2.
/// - The function should also only be called by L2ERC721Gateway on layer 2.
/// @param _l1Token The address of corresponding layer 1 token.
/// @param _l2Token The address of corresponding layer 2 token.
/// @param _from The address of account who withdraw the token in layer 2.
/// @param _to The address of recipient in layer 1 to receive the token.
/// @param _from The address of account who withdraw the token on layer 2.
/// @param _to The address of recipient on layer 1 to receive the token.
/// @param _tokenId The token id to withdraw.
function finalizeWithdrawERC721(
address _l1Token,
@@ -141,14 +141,14 @@ interface IL1ERC721Gateway {
uint256 _tokenId
) external;
/// @notice Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient's account in layer 1.
/// @notice Complete ERC721 batch withdraw from layer 2 to layer 1 and send NFT to recipient's account on layer 1.
/// @dev Requirements:
/// - The function should only be called by L1ScrollMessenger.
/// - The function should also only be called by L2ERC721Gateway in layer 2.
/// - The function should also only be called by L2ERC721Gateway on layer 2.
/// @param _l1Token The address of corresponding layer 1 token.
/// @param _l2Token The address of corresponding layer 2 token.
/// @param _from The address of account who withdraw the token in layer 2.
/// @param _to The address of recipient in layer 1 to receive the token.
/// @param _from The address of account who withdraw the token on layer 2.
/// @param _to The address of recipient on layer 1 to receive the token.
/// @param _tokenIds The list of token ids to withdraw.
function finalizeBatchWithdrawERC721(
address _l1Token,

View File

@@ -14,7 +14,7 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
import {L1ERC20Gateway} from "./L1ERC20Gateway.sol";
/// @title L1CustomERC20Gateway
/// @notice The `L1CustomERC20Gateway` is used to deposit custom ERC20 compatible tokens in layer 1 and
/// @notice The `L1CustomERC20Gateway` is used to deposit custom ERC20 compatible tokens on layer 1 and
/// finalize withdraw the tokens from layer 2.
/// @dev The deposited tokens are held in this gateway. On finalizing withdraw, the corresponding
/// tokens will be transfer to the recipient directly.
@@ -26,8 +26,8 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
**********/
/// @notice Emitted when token mapping for ERC20 token is updated.
/// @param _l1Token The address of ERC20 token in layer 1.
/// @param _l2Token The address of corresponding ERC20 token in layer 2.
/// @param _l1Token The address of ERC20 token on layer 1.
/// @param _l2Token The address of corresponding ERC20 token on layer 2.
event UpdateTokenMapping(address _l1Token, address _l2Token);
/*************
@@ -74,8 +74,8 @@ contract L1CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L1ERC20G
************************/
/// @notice Update layer 1 to layer 2 token mapping.
/// @param _l1Token The address of ERC20 token in layer 1.
/// @param _l2Token The address of corresponding ERC20 token in layer 2.
/// @param _l1Token The address of ERC20 token on layer 1.
/// @param _l2Token The address of corresponding ERC20 token on layer 2.
function updateTokenMapping(address _l1Token, address _l2Token) external onlyOwner {
require(_l2Token != address(0), "token address cannot be 0");

View File

@@ -14,7 +14,7 @@ import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallba
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// @title L1ERC1155Gateway
/// @notice The `L1ERC1155Gateway` is used to deposit ERC1155 compatible NFT in layer 1 and
/// @notice The `L1ERC1155Gateway` is used to deposit ERC1155 compatible NFT on layer 1 and
/// finalize withdraw the NFTs from layer 2.
/// @dev The deposited NFTs are held in this gateway. On finalizing withdraw, the corresponding
/// NFT will be transfer to the recipient directly.
@@ -32,8 +32,8 @@ contract L1ERC1155Gateway is
**********/
/// @notice Emitted when token mapping for ERC1155 token is updated.
/// @param _l1Token The address of ERC1155 token in layer 1.
/// @param _l1Token The address of corresponding ERC1155 token in layer 2.
/// @param _l1Token The address of ERC1155 token on layer 1.
/// @param _l2Token The address of corresponding ERC1155 token on layer 2.
event UpdateTokenMapping(address _l1Token, address _l2Token);
/*************
@@ -172,8 +172,8 @@ contract L1ERC1155Gateway is
************************/
/// @notice Update layer 2 to layer 2 token mapping.
/// @param _l1Token The address of ERC1155 token in layer 1.
/// @param _l1Token The address of corresponding ERC1155 token in layer 2.
/// @param _l1Token The address of ERC1155 token on layer 1.
/// @param _l2Token The address of corresponding ERC1155 token on layer 2.
function updateTokenMapping(address _l1Token, address _l2Token) external onlyOwner {
require(_l2Token != address(0), "token address cannot be 0");
@@ -187,8 +187,8 @@ contract L1ERC1155Gateway is
**********************/
/// @dev Internal function to deposit ERC1155 NFT to layer 2.
/// @param _token The address of ERC1155 NFT in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _token The address of ERC1155 NFT on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenId The token id to deposit.
/// @param _amount The amount of token to deposit.
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
@@ -220,8 +220,8 @@ contract L1ERC1155Gateway is
}
/// @dev Internal function to batch deposit ERC1155 NFT to layer 2.
/// @param _token The address of ERC1155 NFT in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _token The address of ERC1155 NFT on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenIds The list of token ids to deposit.
/// @param _amounts The list of corresponding number of token to deposit.
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.

View File

@@ -14,7 +14,7 @@ import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallba
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// @title L1ERC721Gateway
/// @notice The `L1ERC721Gateway` is used to deposit ERC721 compatible NFT in layer 1 and
/// @notice The `L1ERC721Gateway` is used to deposit ERC721 compatible NFT on layer 1 and
/// finalize withdraw the NFTs from layer 2.
/// @dev The deposited NFTs are held in this gateway. On finalizing withdraw, the corresponding
/// NFT will be transfer to the recipient directly.
@@ -32,8 +32,8 @@ contract L1ERC721Gateway is
**********/
/// @notice Emitted when token mapping for ERC721 token is updated.
/// @param _l1Token The address of ERC721 token in layer 1.
/// @param _l1Token The address of corresponding ERC721 token in layer 2.
/// @param _l1Token The address of ERC721 token on layer 1.
/// @param _l2Token The address of corresponding ERC721 token on layer 2.
event UpdateTokenMapping(address _l1Token, address _l2Token);
/*************
@@ -168,8 +168,8 @@ contract L1ERC721Gateway is
************************/
/// @notice Update layer 2 to layer 2 token mapping.
/// @param _l1Token The address of ERC721 token in layer 1.
/// @param _l1Token The address of corresponding ERC721 token in layer 2.
/// @param _l1Token The address of ERC721 token on layer 1.
/// @param _l2Token The address of corresponding ERC721 token on layer 2.
function updateTokenMapping(address _l1Token, address _l2Token) external onlyOwner {
require(_l2Token != address(0), "token address cannot be 0");
@@ -183,8 +183,8 @@ contract L1ERC721Gateway is
**********************/
/// @dev Internal function to deposit ERC721 NFT to layer 2.
/// @param _token The address of ERC721 NFT in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _token The address of ERC721 NFT on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenId The token id to deposit.
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
function _depositERC721(
@@ -212,8 +212,8 @@ contract L1ERC721Gateway is
}
/// @dev Internal function to batch deposit ERC721 NFT to layer 2.
/// @param _token The address of ERC721 NFT in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _token The address of ERC721 NFT on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenIds The list of token ids to deposit.
/// @param _gasLimit Estimated gas limit required to complete the deposit on layer 2.
function _batchDepositERC721(

View File

@@ -14,7 +14,7 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
// solhint-disable avoid-low-level-calls
/// @title L1ETHGateway
/// @notice The `L1ETHGateway` is used to deposit ETH in layer 1 and
/// @notice The `L1ETHGateway` is used to deposit ETH on layer 1 and
/// finalize withdraw ETH from layer 2.
/// @dev The deposited ETH tokens are held in this gateway. On finalizing withdraw, the corresponding
/// ETH will be transfer to the recipient directly.

View File

@@ -16,7 +16,7 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
import {L1ERC20Gateway} from "./L1ERC20Gateway.sol";
/// @title L1StandardERC20Gateway
/// @notice The `L1StandardERC20Gateway` is used to deposit standard ERC20 tokens in layer 1 and
/// @notice The `L1StandardERC20Gateway` is used to deposit standard ERC20 tokens on layer 1 and
/// finalize withdraw the tokens from layer 2.
/// @dev The deposited ERC20 tokens are held in this gateway. On finalizing withdraw, the corresponding
/// token will be transfer to the recipient directly. Any ERC20 that requires non-standard functionality

View File

@@ -15,7 +15,7 @@ import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
import {L1ERC20Gateway} from "./L1ERC20Gateway.sol";
/// @title L1WETHGateway
/// @notice The `L1WETHGateway` contract is used to deposit `WETH` token in layer 1 and
/// @notice The `L1WETHGateway` contract is used to deposit `WETH` token on layer 1 and
/// finalize withdraw `WETH` from layer 2.
/// @dev The deposited WETH tokens are not held in the gateway. It will first be unwrapped
/// as Ether and then the Ether will be sent to the `L1ScrollMessenger` contract.

View File

@@ -20,8 +20,8 @@ interface IScrollChain {
/// @notice Emitted when a batch is finalized.
/// @param batchIndex The index of the batch.
/// @param batchHash The hash of the batch
/// @param stateRoot The state root in layer 2 after this batch.
/// @param withdrawRoot The merkle root in layer2 after this batch.
/// @param stateRoot The state root on layer 2 after this batch.
/// @param withdrawRoot The merkle root on layer2 after this batch.
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
/*************************

View File

@@ -24,7 +24,7 @@ import {ScrollMessengerBase} from "../libraries/ScrollMessengerBase.sol";
/// 2. relay messages from layer 1 layer 2;
/// 3. drop expired message due to sequencer problems.
///
/// @dev It should be a predeployed contract in layer 2 and should hold infinite amount
/// @dev It should be a predeployed contract on layer 2 and should hold infinite amount
/// of Ether (Specifically, `uint256(-1)`), which can be initialized in Genesis Block.
contract L2ScrollMessenger is ScrollMessengerBase, PausableUpgradeable, IL2ScrollMessenger {
/**********

View File

@@ -2,18 +2,18 @@
pragma solidity ^0.8.16;
/// @title The interface for the ERC1155 cross chain gateway in layer 2.
/// @title The interface for the ERC1155 cross chain gateway on layer 2.
interface IL2ERC1155Gateway {
/**********
* Events *
**********/
/// @notice Emitted when the ERC1155 NFT is transfered to recipient in layer 2.
/// @param l1Token The address of ERC1155 NFT in layer 1.
/// @param l2Token The address of ERC1155 NFT in layer 2.
/// @param from The address of sender in layer 1.
/// @param to The address of recipient in layer 2.
/// @param tokenId The token id of the ERC1155 NFT deposited in layer 1.
/// @notice Emitted when the ERC1155 NFT is transfered to recipient on layer 2.
/// @param l1Token The address of ERC1155 NFT on layer 1.
/// @param l2Token The address of ERC1155 NFT on layer 2.
/// @param from The address of sender on layer 1.
/// @param to The address of recipient on layer 2.
/// @param tokenId The token id of the ERC1155 NFT deposited on layer 1.
/// @param amount The amount of token deposited.
event FinalizeDepositERC1155(
address indexed l1Token,
@@ -24,12 +24,12 @@ interface IL2ERC1155Gateway {
uint256 amount
);
/// @notice Emitted when the ERC1155 NFT is batch transfered to recipient in layer 2.
/// @param l1Token The address of ERC1155 NFT in layer 1.
/// @param l2Token The address of ERC1155 NFT in layer 2.
/// @param from The address of sender in layer 1.
/// @param to The address of recipient in layer 2.
/// @param tokenIds The list of token ids of the ERC1155 NFT deposited in layer 1.
/// @notice Emitted when the ERC1155 NFT is batch transfered to recipient on layer 2.
/// @param l1Token The address of ERC1155 NFT on layer 1.
/// @param l2Token The address of ERC1155 NFT on layer 2.
/// @param from The address of sender on layer 1.
/// @param to The address of recipient on layer 2.
/// @param tokenIds The list of token ids of the ERC1155 NFT deposited on layer 1.
/// @param amounts The list of corresponding amounts deposited.
event FinalizeBatchDepositERC1155(
address indexed l1Token,
@@ -40,12 +40,12 @@ interface IL2ERC1155Gateway {
uint256[] amounts
);
/// @notice Emitted when the ERC1155 NFT is transfered to gateway in layer 2.
/// @param l1Token The address of ERC1155 NFT in layer 1.
/// @param l2Token The address of ERC1155 NFT in layer 2.
/// @param from The address of sender in layer 2.
/// @param to The address of recipient in layer 1.
/// @param tokenId The token id of the ERC1155 NFT to withdraw in layer 2.
/// @notice Emitted when the ERC1155 NFT is transfered to gateway on layer 2.
/// @param l1Token The address of ERC1155 NFT on layer 1.
/// @param l2Token The address of ERC1155 NFT on layer 2.
/// @param from The address of sender on layer 2.
/// @param to The address of recipient on layer 1.
/// @param tokenId The token id of the ERC1155 NFT to withdraw on layer 2.
/// @param amount The amount of token to withdraw.
event WithdrawERC1155(
address indexed l1Token,
@@ -56,12 +56,12 @@ interface IL2ERC1155Gateway {
uint256 amount
);
/// @notice Emitted when the ERC1155 NFT is batch transfered to gateway in layer 2.
/// @param l1Token The address of ERC1155 NFT in layer 1.
/// @param l2Token The address of ERC1155 NFT in layer 2.
/// @param from The address of sender in layer 2.
/// @param to The address of recipient in layer 1.
/// @param tokenIds The list of token ids of the ERC1155 NFT to withdraw in layer 2.
/// @notice Emitted when the ERC1155 NFT is batch transfered to gateway on layer 2.
/// @param l1Token The address of ERC1155 NFT on layer 1.
/// @param l2Token The address of ERC1155 NFT on layer 2.
/// @param from The address of sender on layer 2.
/// @param to The address of recipient on layer 1.
/// @param tokenIds The list of token ids of the ERC1155 NFT to withdraw on layer 2.
/// @param amounts The list of corresponding amounts to withdraw.
event BatchWithdrawERC1155(
address indexed l1Token,
@@ -77,7 +77,7 @@ interface IL2ERC1155Gateway {
*****************************/
/// @notice Withdraw some ERC1155 NFT to caller's account on layer 1.
/// @param token The address of ERC1155 NFT in layer 2.
/// @param token The address of ERC1155 NFT on layer 2.
/// @param tokenId The token id to withdraw.
/// @param amount The amount of token to withdraw.
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
@@ -89,8 +89,8 @@ interface IL2ERC1155Gateway {
) external payable;
/// @notice Withdraw some ERC1155 NFT to caller's account on layer 1.
/// @param token The address of ERC1155 NFT in layer 2.
/// @param to The address of recipient in layer 1.
/// @param token The address of ERC1155 NFT on layer 2.
/// @param to The address of recipient on layer 1.
/// @param tokenId The token id to withdraw.
/// @param amount The amount of token to withdraw.
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
@@ -103,7 +103,7 @@ interface IL2ERC1155Gateway {
) external payable;
/// @notice Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
/// @param token The address of ERC1155 NFT in layer 2.
/// @param token The address of ERC1155 NFT on layer 2.
/// @param tokenIds The list of token ids to withdraw.
/// @param amounts The list of corresponding amounts to withdraw.
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
@@ -115,8 +115,8 @@ interface IL2ERC1155Gateway {
) external payable;
/// @notice Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
/// @param token The address of ERC1155 NFT in layer 2.
/// @param to The address of recipient in layer 1.
/// @param token The address of ERC1155 NFT on layer 2.
/// @param to The address of recipient on layer 1.
/// @param tokenIds The list of token ids to withdraw.
/// @param amounts The list of corresponding amounts to withdraw.
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
@@ -128,14 +128,14 @@ interface IL2ERC1155Gateway {
uint256 gasLimit
) external payable;
/// @notice Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
/// @notice Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account on layer 2.
/// @dev Requirements:
/// - The function should only be called by L2ScrollMessenger.
/// - The function should also only be called by L1ERC1155Gateway in layer 1.
/// - The function should also only be called by L1ERC1155Gateway on layer 1.
/// @param l1Token The address of corresponding layer 1 token.
/// @param l2Token The address of corresponding layer 2 token.
/// @param from The address of account who deposits the token in layer 1.
/// @param to The address of recipient in layer 2 to receive the token.
/// @param from The address of account who deposits the token on layer 1.
/// @param to The address of recipient on layer 2 to receive the token.
/// @param tokenId The token id to deposit.
/// @param amount The amount of token to deposit.
function finalizeDepositERC1155(
@@ -147,14 +147,14 @@ interface IL2ERC1155Gateway {
uint256 amount
) external;
/// @notice Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
/// @notice Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account on layer 2.
/// @dev Requirements:
/// - The function should only be called by L2ScrollMessenger.
/// - The function should also only be called by L1ERC1155Gateway in layer 1.
/// - The function should also only be called by L1ERC1155Gateway on layer 1.
/// @param l1Token The address of corresponding layer 1 token.
/// @param l2Token The address of corresponding layer 2 token.
/// @param from The address of account who deposits the token in layer 1.
/// @param to The address of recipient in layer 2 to receive the token.
/// @param from The address of account who deposits the token on layer 1.
/// @param to The address of recipient on layer 2 to receive the token.
/// @param tokenIds The list of token ids to deposit.
/// @param amounts The list of corresponding amounts to deposit.
function finalizeBatchDepositERC1155(

View File

@@ -2,18 +2,18 @@
pragma solidity ^0.8.16;
/// @title The interface for the ERC721 cross chain gateway in layer 2.
/// @title The interface for the ERC721 cross chain gateway on layer 2.
interface IL2ERC721Gateway {
/**********
* Events *
**********/
/// @notice Emitted when the ERC721 NFT is transfered to recipient in layer 2.
/// @param l1Token The address of ERC721 NFT in layer 1.
/// @param l2Token The address of ERC721 NFT in layer 2.
/// @param from The address of sender in layer 1.
/// @param to The address of recipient in layer 2.
/// @param tokenId The token id of the ERC721 NFT deposited in layer 1.
/// @notice Emitted when the ERC721 NFT is transfered to recipient on layer 2.
/// @param l1Token The address of ERC721 NFT on layer 1.
/// @param l2Token The address of ERC721 NFT on layer 2.
/// @param from The address of sender on layer 1.
/// @param to The address of recipient on layer 2.
/// @param tokenId The token id of the ERC721 NFT deposited on layer 1.
event FinalizeDepositERC721(
address indexed l1Token,
address indexed l2Token,
@@ -22,12 +22,12 @@ interface IL2ERC721Gateway {
uint256 tokenId
);
/// @notice Emitted when the ERC721 NFT is batch transfered to recipient in layer 2.
/// @param l1Token The address of ERC721 NFT in layer 1.
/// @param l2Token The address of ERC721 NFT in layer 2.
/// @param from The address of sender in layer 1.
/// @param to The address of recipient in layer 2.
/// @param tokenIds The list of token ids of the ERC721 NFT deposited in layer 1.
/// @notice Emitted when the ERC721 NFT is batch transfered to recipient on layer 2.
/// @param l1Token The address of ERC721 NFT on layer 1.
/// @param l2Token The address of ERC721 NFT on layer 2.
/// @param from The address of sender on layer 1.
/// @param to The address of recipient on layer 2.
/// @param tokenIds The list of token ids of the ERC721 NFT deposited on layer 1.
event FinalizeBatchDepositERC721(
address indexed l1Token,
address indexed l2Token,
@@ -36,12 +36,12 @@ interface IL2ERC721Gateway {
uint256[] tokenIds
);
/// @notice Emitted when the ERC721 NFT is transfered to gateway in layer 2.
/// @param l1Token The address of ERC721 NFT in layer 1.
/// @param l2Token The address of ERC721 NFT in layer 2.
/// @param from The address of sender in layer 2.
/// @param to The address of recipient in layer 1.
/// @param tokenId The token id of the ERC721 NFT to withdraw in layer 2.
/// @notice Emitted when the ERC721 NFT is transfered to gateway on layer 2.
/// @param l1Token The address of ERC721 NFT on layer 1.
/// @param l2Token The address of ERC721 NFT on layer 2.
/// @param from The address of sender on layer 2.
/// @param to The address of recipient on layer 1.
/// @param tokenId The token id of the ERC721 NFT to withdraw on layer 2.
event WithdrawERC721(
address indexed l1Token,
address indexed l2Token,
@@ -50,12 +50,12 @@ interface IL2ERC721Gateway {
uint256 tokenId
);
/// @notice Emitted when the ERC721 NFT is batch transfered to gateway in layer 2.
/// @param l1Token The address of ERC721 NFT in layer 1.
/// @param l2Token The address of ERC721 NFT in layer 2.
/// @param from The address of sender in layer 2.
/// @param to The address of recipient in layer 1.
/// @param tokenIds The list of token ids of the ERC721 NFT to withdraw in layer 2.
/// @notice Emitted when the ERC721 NFT is batch transfered to gateway on layer 2.
/// @param l1Token The address of ERC721 NFT on layer 1.
/// @param l2Token The address of ERC721 NFT on layer 2.
/// @param from The address of sender on layer 2.
/// @param to The address of recipient on layer 1.
/// @param tokenIds The list of token ids of the ERC721 NFT to withdraw on layer 2.
event BatchWithdrawERC721(
address indexed l1Token,
address indexed l2Token,
@@ -69,7 +69,7 @@ interface IL2ERC721Gateway {
*****************************/
/// @notice Withdraw some ERC721 NFT to caller's account on layer 1.
/// @param token The address of ERC721 NFT in layer 2.
/// @param token The address of ERC721 NFT on layer 2.
/// @param tokenId The token id to withdraw.
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
function withdrawERC721(
@@ -79,8 +79,8 @@ interface IL2ERC721Gateway {
) external payable;
/// @notice Withdraw some ERC721 NFT to caller's account on layer 1.
/// @param token The address of ERC721 NFT in layer 2.
/// @param to The address of recipient in layer 1.
/// @param token The address of ERC721 NFT on layer 2.
/// @param to The address of recipient on layer 1.
/// @param tokenId The token id to withdraw.
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
function withdrawERC721(
@@ -91,7 +91,7 @@ interface IL2ERC721Gateway {
) external payable;
/// @notice Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
/// @param token The address of ERC721 NFT in layer 2.
/// @param token The address of ERC721 NFT on layer 2.
/// @param tokenIds The list of token ids to withdraw.
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
function batchWithdrawERC721(
@@ -101,8 +101,8 @@ interface IL2ERC721Gateway {
) external payable;
/// @notice Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
/// @param token The address of ERC721 NFT in layer 2.
/// @param to The address of recipient in layer 1.
/// @param token The address of ERC721 NFT on layer 2.
/// @param to The address of recipient on layer 1.
/// @param tokenIds The list of token ids to withdraw.
/// @param gasLimit Unused, but included for potential forward compatibility considerations.
function batchWithdrawERC721(
@@ -112,14 +112,14 @@ interface IL2ERC721Gateway {
uint256 gasLimit
) external payable;
/// @notice Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
/// @notice Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account on layer 2.
/// @dev Requirements:
/// - The function should only be called by L2ScrollMessenger.
/// - The function should also only be called by L1ERC721Gateway in layer 1.
/// - The function should also only be called by L1ERC721Gateway on layer 1.
/// @param l1Token The address of corresponding layer 1 token.
/// @param l2Token The address of corresponding layer 2 token.
/// @param from The address of account who withdraw the token in layer 1.
/// @param to The address of recipient in layer 2 to receive the token.
/// @param from The address of account who withdraw the token on layer 1.
/// @param to The address of recipient on layer 2 to receive the token.
/// @param tokenId The token id to withdraw.
function finalizeDepositERC721(
address l1Token,
@@ -129,14 +129,14 @@ interface IL2ERC721Gateway {
uint256 tokenId
) external;
/// @notice Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
/// @notice Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account on layer 2.
/// @dev Requirements:
/// - The function should only be called by L2ScrollMessenger.
/// - The function should also only be called by L1ERC721Gateway in layer 1.
/// - The function should also only be called by L1ERC721Gateway on layer 1.
/// @param l1Token The address of corresponding layer 1 token.
/// @param l2Token The address of corresponding layer 2 token.
/// @param from The address of account who withdraw the token in layer 1.
/// @param to The address of recipient in layer 2 to receive the token.
/// @param from The address of account who withdraw the token on layer 1.
/// @param to The address of recipient on layer 2 to receive the token.
/// @param tokenIds The list of token ids to withdraw.
function finalizeBatchDepositERC721(
address l1Token,

View File

@@ -12,7 +12,7 @@ import {ScrollGatewayBase, IScrollGateway} from "../../libraries/gateway/ScrollG
import {IScrollERC20} from "../../libraries/token/IScrollERC20.sol";
/// @title L2ERC20Gateway
/// @notice The `L2ERC20Gateway` is used to withdraw custom ERC20 compatible tokens in layer 2 and
/// @notice The `L2ERC20Gateway` is used to withdraw custom ERC20 compatible tokens on layer 2 and
/// finalize deposit the tokens from layer 1.
/// @dev The withdrawn tokens tokens will be burned directly. On finalizing deposit, the corresponding
/// tokens will be minted and transfered to the recipient.
@@ -22,8 +22,8 @@ contract L2CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L2ERC20G
**********/
/// @notice Emitted when token mapping for ERC20 token is updated.
/// @param _l2Token The address of corresponding ERC20 token in layer 2.
/// @param _l1Token The address of ERC20 token in layer 1.
/// @param _l2Token The address of corresponding ERC20 token on layer 2.
/// @param _l1Token The address of ERC20 token on layer 1.
event UpdateTokenMapping(address _l2Token, address _l1Token);
/*************
@@ -95,8 +95,8 @@ contract L2CustomERC20Gateway is OwnableUpgradeable, ScrollGatewayBase, L2ERC20G
************************/
/// @notice Update layer 2 to layer 1 token mapping.
/// @param _l2Token The address of corresponding ERC20 token in layer 2.
/// @param _l1Token The address of ERC20 token in layer 1.
/// @param _l2Token The address of corresponding ERC20 token on layer 2.
/// @param _l1Token The address of ERC20 token on layer 1.
function updateTokenMapping(address _l2Token, address _l1Token) external onlyOwner {
require(_l1Token != address(0), "token address cannot be 0");

View File

@@ -13,7 +13,7 @@ import {ScrollGatewayBase, IScrollGateway} from "../../libraries/gateway/ScrollG
import {IScrollERC1155} from "../../libraries/token/IScrollERC1155.sol";
/// @title L2ERC1155Gateway
/// @notice The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs in layer 2 and
/// @notice The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs on layer 2 and
/// finalize deposit the NFTs from layer 1.
/// @dev The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding
/// NFT will be minted and transfered to the recipient.
@@ -25,8 +25,8 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
**********/
/// @notice Emitted when token mapping for ERC1155 token is updated.
/// @param _l1Token The address of corresponding ERC1155 token in layer 2.
/// @param _l1Token The address of ERC1155 token in layer 1.
/// @param _l2Token The address of corresponding ERC1155 token on layer 2.
/// @param _l1Token The address of ERC1155 token on layer 1.
event UpdateTokenMapping(address _l2Token, address _l1Token);
/*************
@@ -137,8 +137,8 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
************************/
/// @notice Update layer 2 to layer 1 token mapping.
/// @param _l1Token The address of corresponding ERC1155 token in layer 2.
/// @param _l1Token The address of ERC1155 token in layer 1.
/// @param _l2Token The address of corresponding ERC1155 token on layer 2.
/// @param _l1Token The address of ERC1155 token on layer 1.
function updateTokenMapping(address _l2Token, address _l1Token) external onlyOwner {
require(_l1Token != address(0), "token address cannot be 0");
@@ -152,8 +152,8 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
**********************/
/// @dev Internal function to withdraw ERC1155 NFT to layer 2.
/// @param _token The address of ERC1155 NFT in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _token The address of ERC1155 NFT on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenId The token id to withdraw.
/// @param _amount The amount of token to withdraw.
/// @param _gasLimit Estimated gas limit required to complete the withdraw on layer 2.
@@ -185,8 +185,8 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
}
/// @dev Internal function to batch withdraw ERC1155 NFT to layer 2.
/// @param _token The address of ERC1155 NFT in layer 1.
/// @param _to The address of recipient in layer 2.
/// @param _token The address of ERC1155 NFT on layer 1.
/// @param _to The address of recipient on layer 2.
/// @param _tokenIds The list of token ids to withdraw.
/// @param _amounts The list of corresponding number of token to withdraw.
/// @param _gasLimit Estimated gas limit required to complete the withdraw on layer 1.

View File

@@ -13,7 +13,7 @@ import {ScrollGatewayBase, IScrollGateway} from "../../libraries/gateway/ScrollG
import {IScrollERC721} from "../../libraries/token/IScrollERC721.sol";
/// @title L2ERC721Gateway
/// @notice The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs in layer 2 and
/// @notice The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs on layer 2 and
/// finalize deposit the NFTs from layer 1.
/// @dev The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding
/// NFT will be minted and transfered to the recipient.
@@ -25,8 +25,8 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
**********/
/// @notice Emitted when token mapping for ERC721 token is updated.
/// @param _l1Token The address of corresponding ERC721 token in layer 2.
/// @param _l1Token The address of ERC721 token in layer 1.
/// @param _l2Token The address of corresponding ERC721 token on layer 2.
/// @param _l1Token The address of ERC721 token on layer 1.
event UpdateTokenMapping(address _l2Token, address _l1Token);
/*************
@@ -132,8 +132,8 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
************************/
/// @notice Update layer 2 to layer 1 token mapping.
/// @param _l1Token The address of corresponding ERC721 token in layer 2.
/// @param _l1Token The address of ERC721 token in layer 1.
/// @param _l2Token The address of corresponding ERC721 token on layer 2.
/// @param _l1Token The address of ERC721 token on layer 1.
function updateTokenMapping(address _l2Token, address _l1Token) external onlyOwner {
require(_l1Token != address(0), "token address cannot be 0");
@@ -147,8 +147,8 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
**********************/
/// @dev Internal function to withdraw ERC721 NFT to layer 1.
/// @param _token The address of ERC721 NFT in layer 2.
/// @param _to The address of recipient in layer 1.
/// @param _token The address of ERC721 NFT on layer 2.
/// @param _to The address of recipient on layer 1.
/// @param _tokenId The token id to withdraw.
/// @param _gasLimit Estimated gas limit required to complete the withdraw on layer 1.
function _withdrawERC721(
@@ -178,8 +178,8 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
}
/// @dev Internal function to batch withdraw ERC721 NFT to layer 1.
/// @param _token The address of ERC721 NFT in layer 2.
/// @param _to The address of recipient in layer 1.
/// @param _token The address of ERC721 NFT on layer 2.
/// @param _to The address of recipient on layer 1.
/// @param _tokenIds The list of token ids to withdraw.
/// @param _gasLimit Estimated gas limit required to complete the withdraw on layer 1.
function _batchWithdrawERC721(

View File

@@ -11,7 +11,7 @@ import {IL2ETHGateway} from "./IL2ETHGateway.sol";
import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// @title L2ETHGateway
/// @notice The `L2ETHGateway` contract is used to withdraw ETH token in layer 2 and
/// @notice The `L2ETHGateway` contract is used to withdraw ETH token on layer 2 and
/// finalize deposit ETH from layer 1.
/// @dev The ETH are not held in the gateway. The ETH will be sent to the `L2ScrollMessenger` contract.
/// On finalizing deposit, the Ether will be transfered from `L2ScrollMessenger`, then transfer to recipient.

View File

@@ -16,7 +16,7 @@ import {IScrollStandardERC20Factory} from "../../libraries/token/IScrollStandard
import {ScrollGatewayBase, IScrollGateway} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// @title L2StandardERC20Gateway
/// @notice The `L2StandardERC20Gateway` is used to withdraw standard ERC20 tokens in layer 2 and
/// @notice The `L2StandardERC20Gateway` is used to withdraw standard ERC20 tokens on layer 2 and
/// finalize deposit the tokens from layer 1.
/// @dev The withdrawn ERC20 tokens will be burned directly. On finalizing deposit, the corresponding
/// token will be minted and transfered to the recipient. Any ERC20 that requires non-standard functionality

View File

@@ -13,7 +13,7 @@ import {IL1ERC20Gateway} from "../../L1/gateways/IL1ERC20Gateway.sol";
import {ScrollGatewayBase, IScrollGateway} from "../../libraries/gateway/ScrollGatewayBase.sol";
/// @title L2WETHGateway
/// @notice The `L2WETHGateway` contract is used to withdraw `WETH` token in layer 2 and
/// @notice The `L2WETHGateway` contract is used to withdraw `WETH` token on layer 2 and
/// finalize deposit `WETH` from layer 1.
/// @dev The WETH tokens are not held in the gateway. It will first be unwrapped as Ether and
/// then the Ether will be sent to the `L2ScrollMessenger` contract.

View File

@@ -15,7 +15,7 @@ import {ScrollGatewayBase, IScrollGateway} from "../../../libraries/gateway/Scro
import {L2ERC20Gateway} from "../L2ERC20Gateway.sol";
/// @title L2USDCGateway
/// @notice The `L2USDCGateway` contract is used to withdraw `USDC` token in layer 2 and
/// @notice The `L2USDCGateway` contract is used to withdraw `USDC` token on layer 2 and
/// finalize deposit `USDC` from layer 1.
contract L2USDCGateway is OwnableUpgradeable, ScrollGatewayBase, L2ERC20Gateway {
using SafeERC20Upgradeable for IERC20Upgradeable;

View File

@@ -37,7 +37,7 @@ make lint
## Configure
The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `RollerManagerConfig` in [`config/config.go`](config/config.go) for more details.
The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManagerConfig` in [`config/config.go`](config/config.go) for more details.
## Start
@@ -54,27 +54,3 @@ The coordinator behavior can be configured using [`config.json`](config.json). C
* For other flags, refer to [`cmd/app/flags.go`](cmd/app/flags.go).
## Codeflow
### cmd/app/app.go
This file defines the main entry point for the coordinator application, setting up the necessary modules, and handling graceful shutdowns. Upon loading config.json file, the coordinator (`cmd/app/app.go`) sets up and starts the HTTP and WebSocket servers using the configured ports and addresses. `flags.go` is used to parse the flags. Then, it creates a new `RollerManager` (`manager.go`) and starts listening.
### manager.go
`manager.go` calls `rollers.go` for prover (aka "roller") management functions. In the process, `rollers.go` calls `client.go`, initializing a prover client. For communication between prover clients and the coordinator manager, `api.go` is used.
`manager.go` uses either `verifier.go` or `mock.go` (for development/testing purposes) to verify the proofs submitted by provers. After verification, `manager.go` will call `roller.go` to update the state of the prover, and then return the result (whether the proof verification process was successful) to the prover.
### api.go
This file contains the implementation of the RPC API for the coordinator manager. The API allows prover clients to interact with the coordinator manager through functions such as `requestToken`, `register`, and `submitProof`.
### rollers.go
This file contains the logic for handling prover-specific tasks, such as assigning tasks to provers, handling completed tasks, and managing prover metrics.
### client/client.go
This file contains the `Client` struct that is callable on the prover side and responsible for communicating with the coordinator through RPC. `RequestToken`, `RegisterAndSubscribe`, and `SubmitProof` are used by `rollers.go`.

View File

@@ -33,19 +33,19 @@ func NewClient(c *rpc.Client) *Client {
return &Client{client: c}
}
// RequestToken generates token for roller
// RequestToken generates token for prover
func (c *Client) RequestToken(ctx context.Context, authMsg *message.AuthMsg) (string, error) {
var token string
err := c.client.CallContext(ctx, &token, "roller_requestToken", authMsg)
err := c.client.CallContext(ctx, &token, "prover_requestToken", authMsg)
return token, err
}
// RegisterAndSubscribe subscribe roller and register, verified by sign data.
// RegisterAndSubscribe subscribe prover and register, verified by sign data.
func (c *Client) RegisterAndSubscribe(ctx context.Context, taskCh chan *message.TaskMsg, authMsg *message.AuthMsg) (ethereum.Subscription, error) {
return c.client.Subscribe(ctx, "roller", taskCh, "register", authMsg)
return c.client.Subscribe(ctx, "prover", taskCh, "register", authMsg)
}
// SubmitProof get proof from roller.
// SubmitProof get proof from prover.
func (c *Client) SubmitProof(ctx context.Context, proof *message.ProofMsg) error {
return c.client.CallContext(ctx, nil, "roller_submitProof", proof)
return c.client.CallContext(ctx, nil, "prover_submitProof", proof)
}
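
For orientation, here is a minimal, hypothetical sketch of how a prover-side caller might use the renamed client methods shown above (`RequestToken`, `RegisterAndSubscribe`, `SubmitProof`). The import paths, message types, and endpoint address are assumptions based on package names that appear elsewhere in this diff, not a definitive implementation; the real prover wires these calls into its proving loop.

package main

import (
    "context"
    "log"

    "github.com/scroll-tech/go-ethereum/rpc" // assumed import path for the rpc client

    "scroll-tech/common/types/message"                  // assumed location of AuthMsg/TaskMsg/ProofMsg
    coordinatorclient "scroll-tech/coordinator/client"  // assumed package for the Client shown above
)

func main() {
    ctx := context.Background()

    // Dial the coordinator's WebSocket endpoint (address is illustrative).
    rawClient, err := rpc.Dial("ws://localhost:8391")
    if err != nil {
        log.Fatalf("dial coordinator: %v", err)
    }
    cli := coordinatorclient.NewClient(rawClient)

    // 1. Request a login token ("prover_requestToken" after the rename).
    authMsg := &message.AuthMsg{} // populated with identity and signature in the real prover
    token, err := cli.RequestToken(ctx, authMsg)
    if err != nil {
        log.Fatalf("request token: %v", err)
    }
    _ = token

    // 2. Register and subscribe to proving tasks ("prover" namespace, "register" method).
    taskCh := make(chan *message.TaskMsg, 10)
    sub, err := cli.RegisterAndSubscribe(ctx, taskCh, authMsg)
    if err != nil {
        log.Fatalf("register: %v", err)
    }
    defer sub.Unsubscribe()

    // 3. For each task, generate a proof (elided) and submit it ("prover_submitProof").
    for task := range taskCh {
        _ = task
        proof := &message.ProofMsg{} // filled in by the actual proving step
        if err := cli.SubmitProof(ctx, proof); err != nil {
            log.Printf("submit proof: %v", err)
        }
    }
}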

View File

@@ -20,7 +20,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/cron"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/logic/provermanager"
)
var app *cli.App
@@ -56,7 +56,7 @@ func action(ctx *cli.Context) error {
proofCollector := cron.NewCollector(subCtx, db, cfg)
rollermanager.InitRollerManager(db)
provermanager.InitProverManager(db)
defer func() {
proofCollector.Stop()
@@ -84,7 +84,7 @@ func action(ctx *cli.Context) error {
}
// Register api and start ws service.
if ctx.Bool(wsEnabledFlag.Name) {
handler, addr, err := utils.StartWSEndpoint(fmt.Sprintf("%s:%d", ctx.String(wsListenAddrFlag.Name), ctx.Int(wsPortFlag.Name)), apis, cfg.RollerManagerConfig.CompressionLevel)
handler, addr, err := utils.StartWSEndpoint(fmt.Sprintf("%s:%d", ctx.String(wsListenAddrFlag.Name), ctx.Int(wsPortFlag.Name)), apis, cfg.ProverManagerConfig.CompressionLevel)
if err != nil {
log.Crit("Could not start WS api", "error", err)
}

View File

@@ -79,9 +79,9 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
if err != nil {
return err
}
// Reset roller manager config for manager test cases.
cfg.RollerManagerConfig = &coordinatorConfig.RollerManagerConfig{
RollersPerSession: 1,
// Reset prover manager config for manager test cases.
cfg.ProverManagerConfig = &coordinatorConfig.ProverManagerConfig{
ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
CollectionTime: 1,
TokenTimeToLive: 1,

View File

@@ -1,7 +1,7 @@
{
"roller_manager_config": {
"prover_manager_config": {
"compression_level": 9,
"rollers_per_session": 1,
"provers_per_session": 1,
"session_attempts": 2,
"collection_time": 180,
"token_time_to_live": 60,

View File

@@ -15,13 +15,13 @@ const (
defaultNumberOfSessionRetryAttempts = 2
)
// RollerManagerConfig loads sequencer configuration items.
type RollerManagerConfig struct {
// ProverManagerConfig loads sequencer configuration items.
type ProverManagerConfig struct {
CompressionLevel int `json:"compression_level,omitempty"`
// asc or desc (default: asc)
OrderSession string `json:"order_session,omitempty"`
// The amount of rollers to pick per proof generation session.
RollersPerSession uint8 `json:"rollers_per_session"`
// The amount of provers to pick per proof generation session.
ProversPerSession uint8 `json:"provers_per_session"`
// Number of attempts that a session can be retried if previous attempts failed.
// Currently we only consider proving timeout as failure here.
SessionAttempts uint8 `json:"session_attempts,omitempty"`
@@ -43,7 +43,7 @@ type L2Config struct {
// Config load configuration items.
type Config struct {
RollerManagerConfig *RollerManagerConfig `json:"roller_manager_config"`
ProverManagerConfig *ProverManagerConfig `json:"prover_manager_config"`
DBConfig *database.Config `json:"db_config"`
L2Config *L2Config `json:"l2_config"`
}
@@ -68,18 +68,18 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}
// Check roller's order session
order := strings.ToUpper(cfg.RollerManagerConfig.OrderSession)
// Check prover's order session
order := strings.ToUpper(cfg.ProverManagerConfig.OrderSession)
if len(order) > 0 && !(order == "ASC" || order == "DESC") {
return nil, errors.New("roller config's order session is invalid")
return nil, errors.New("prover config's order session is invalid")
}
cfg.RollerManagerConfig.OrderSession = order
cfg.ProverManagerConfig.OrderSession = order
if cfg.RollerManagerConfig.MaxVerifierWorkers == 0 {
cfg.RollerManagerConfig.MaxVerifierWorkers = defaultNumberOfVerifierWorkers
if cfg.ProverManagerConfig.MaxVerifierWorkers == 0 {
cfg.ProverManagerConfig.MaxVerifierWorkers = defaultNumberOfVerifierWorkers
}
if cfg.RollerManagerConfig.SessionAttempts == 0 {
cfg.RollerManagerConfig.SessionAttempts = defaultNumberOfSessionRetryAttempts
if cfg.ProverManagerConfig.SessionAttempts == 0 {
cfg.ProverManagerConfig.SessionAttempts = defaultNumberOfSessionRetryAttempts
}
return cfg, nil
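
For reference, a minimal sketch, not part of the diff, showing that the renamed JSON keys map onto the renamed struct fields. It only exercises the `json` tags visible above (so it skips `NewConfig`'s default-filling and validation), and the trimmed config content is purely illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"

	"scroll-tech/coordinator/internal/config"
)

func main() {
	raw := []byte(`{
	  "prover_manager_config": {
	    "compression_level": 9,
	    "provers_per_session": 1,
	    "session_attempts": 2,
	    "collection_time": 180,
	    "token_time_to_live": 60
	  }
	}`)

	var cfg config.Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// The renamed keys land on the renamed fields.
	fmt.Println(cfg.ProverManagerConfig.ProversPerSession, cfg.ProverManagerConfig.SessionAttempts) // 1 2
}
```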

View File

@@ -12,9 +12,9 @@ import (
func TestConfig(t *testing.T) {
configTemplate := `{
"roller_manager_config": {
"prover_manager_config": {
"compression_level": 9,
"rollers_per_session": 1,
"provers_per_session": 1,
"session_attempts": %d,
"collection_time": 180,
"token_time_to_live": 60,
@@ -100,7 +100,7 @@ func TestConfig(t *testing.T) {
_, err = NewConfig(tmpFile.Name())
assert.Error(t, err)
assert.Contains(t, err.Error(), "roller config's order session is invalid")
assert.Contains(t, err.Error(), "prover config's order session is invalid")
})
t.Run("Default MaxVerifierWorkers", func(t *testing.T) {
@@ -116,7 +116,7 @@ func TestConfig(t *testing.T) {
cfg, err := NewConfig(tmpFile.Name())
assert.NoError(t, err)
assert.Equal(t, defaultNumberOfVerifierWorkers, cfg.RollerManagerConfig.MaxVerifierWorkers)
assert.Equal(t, defaultNumberOfVerifierWorkers, cfg.ProverManagerConfig.MaxVerifierWorkers)
})
t.Run("Default SessionAttempts", func(t *testing.T) {
@@ -132,6 +132,6 @@ func TestConfig(t *testing.T) {
cfg, err := NewConfig(tmpFile.Name())
assert.NoError(t, err)
assert.Equal(t, uint8(defaultNumberOfSessionRetryAttempts), cfg.RollerManagerConfig.SessionAttempts)
assert.Equal(t, uint8(defaultNumberOfSessionRetryAttempts), cfg.ProverManagerConfig.SessionAttempts)
})
}

View File

@@ -17,16 +17,16 @@ import (
"scroll-tech/coordinator/internal/logic/proof"
)
// RollerController the roller api controller
type RollerController struct {
// ProverController the prover api controller
type ProverController struct {
tokenCache *cache.Cache
proofReceiver *proof.ZKProofReceiver
taskWorker *proof.TaskWorker
}
// NewRollerController create a roller controller
func NewRollerController(cfg *config.RollerManagerConfig, db *gorm.DB) *RollerController {
return &RollerController{
// NewProverController create a prover controller
func NewProverController(cfg *config.ProverManagerConfig, db *gorm.DB) *ProverController {
return &ProverController{
proofReceiver: proof.NewZKProofReceiver(cfg, db),
taskWorker: proof.NewTaskWorker(),
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
@@ -34,7 +34,7 @@ func NewRollerController(cfg *config.RollerManagerConfig, db *gorm.DB) *RollerCo
}
// RequestToken get request token of authMsg
func (r *RollerController) RequestToken(authMsg *message.AuthMsg) (string, error) {
func (r *ProverController) RequestToken(authMsg *message.AuthMsg) (string, error) {
if ok, err := authMsg.Verify(); !ok {
if err != nil {
log.Error("failed to verify auth message", "error", err)
@@ -57,20 +57,20 @@ func (r *RollerController) RequestToken(authMsg *message.AuthMsg) (string, error
}
// VerifyToken verifies pubkey for token and expiration time
func (r *RollerController) verifyToken(authMsg *message.AuthMsg) (bool, error) {
func (r *ProverController) verifyToken(authMsg *message.AuthMsg) (bool, error) {
pubkey, err := authMsg.PublicKey()
if err != nil {
return false, fmt.Errorf("verify token auth msg public key error:%w", err)
}
// GetValue returns nil if value is expired
if token, ok := r.tokenCache.Get(pubkey); !ok || token != authMsg.Identity.Token {
return false, fmt.Errorf("failed to find corresponding token. roller name: %s roller pk: %s", authMsg.Identity.Name, pubkey)
return false, fmt.Errorf("failed to find corresponding token. prover name: %s prover pk: %s", authMsg.Identity.Name, pubkey)
}
return true, nil
}
// Register register api for roller
func (r *RollerController) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
// Register register api for prover
func (r *ProverController) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
// Verify register message.
if ok, err := authMsg.Verify(); !ok {
if err != nil {
@@ -78,7 +78,7 @@ func (r *RollerController) Register(ctx context.Context, authMsg *message.AuthMs
}
return nil, errors.New("signature verification failed")
}
// Lock here to avoid malicious roller message replay before cleanup of token
// Lock here to avoid malicious prover message replay before cleanup of token
if ok, err := r.verifyToken(authMsg); !ok {
return nil, err
}
@@ -86,7 +86,7 @@ func (r *RollerController) Register(ctx context.Context, authMsg *message.AuthMs
if err != nil {
return nil, fmt.Errorf("register auth msg public key error:%w", err)
}
// roller successfully registered, remove token associated with this roller
// prover successfully registered, remove token associated with this prover
r.tokenCache.Delete(pubkey)
rpcSub, err := r.taskWorker.AllocTaskWorker(ctx, authMsg)
@@ -96,8 +96,8 @@ func (r *RollerController) Register(ctx context.Context, authMsg *message.AuthMs
return rpcSub, nil
}
// SubmitProof roller pull proof
func (r *RollerController) SubmitProof(proof *message.ProofMsg) error {
// SubmitProof prover pull proof
func (r *ProverController) SubmitProof(proof *message.ProofMsg) error {
// Verify the signature
if ok, err := proof.Verify(); !ok {
if err != nil {

View File

@@ -21,7 +21,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/proof"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
@@ -30,7 +30,7 @@ import (
func geneAuthMsg(t *testing.T) (*message.AuthMsg, *ecdsa.PrivateKey) {
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test1",
Name: "prover_test1",
},
}
privKey, err := crypto.GenerateKey()
@@ -39,24 +39,24 @@ func geneAuthMsg(t *testing.T) (*message.AuthMsg, *ecdsa.PrivateKey) {
return authMsg, privKey
}
var rollerController *RollerController
var proverController *ProverController
func init() {
conf := &config.RollerManagerConfig{
conf := &config.ProverManagerConfig{
TokenTimeToLive: 120,
}
conf.Verifier = &config.VerifierConfig{MockMode: true}
rollerController = NewRollerController(conf, nil)
proverController = NewProverController(conf, nil)
}
func TestRoller_RequestToken(t *testing.T) {
func TestProver_RequestToken(t *testing.T) {
convey.Convey("auth msg verify failure", t, func() {
tmpAuthMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test_request_token",
Name: "prover_test_request_token",
},
}
token, err := rollerController.RequestToken(tmpAuthMsg)
token, err := proverController.RequestToken(tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, token)
})
@@ -66,8 +66,8 @@ func TestRoller_RequestToken(t *testing.T) {
key, err := tmpAuthMsg.PublicKey()
assert.NoError(t, err)
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
rollerController.tokenCache.Set(key, tokenCacheStored, time.Hour)
token, err := rollerController.RequestToken(tmpAuthMsg)
proverController.tokenCache.Set(key, tokenCacheStored, time.Hour)
token, err := proverController.RequestToken(tmpAuthMsg)
assert.NoError(t, err)
assert.Equal(t, token, tokenCacheStored)
})
@@ -78,7 +78,7 @@ func TestRoller_RequestToken(t *testing.T) {
return "", errors.New("token generation failed")
})
defer patchGuard.Reset()
token, err := rollerController.RequestToken(tmpAuthMsg)
token, err := proverController.RequestToken(tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, token)
})
@@ -90,45 +90,45 @@ func TestRoller_RequestToken(t *testing.T) {
return tokenCacheStored, nil
})
defer patchGuard.Reset()
token, err := rollerController.RequestToken(tmpAuthMsg)
token, err := proverController.RequestToken(tmpAuthMsg)
assert.NoError(t, err)
assert.Equal(t, tokenCacheStored, token)
})
}
func TestRoller_Register(t *testing.T) {
func TestProver_Register(t *testing.T) {
convey.Convey("auth msg verify failure", t, func() {
tmpAuthMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test_register",
Name: "prover_test_register",
},
}
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
subscription, err := proverController.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("verify token failure", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
patchGuard := gomonkey.ApplyPrivateMethod(proverController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return false, errors.New("verify token failure")
})
defer patchGuard.Reset()
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
subscription, err := proverController.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("notifier failure", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
patchGuard := gomonkey.ApplyPrivateMethod(proverController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
patchGuard.ApplyFunc(rpc.NotifierFromContext, func(ctx context.Context) (*rpc.Notifier, bool) {
return nil, false
})
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
subscription, err := proverController.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Equal(t, err, rpc.ErrNotificationsUnsupported)
assert.Equal(t, *subscription, rpc.Subscription{})
@@ -136,7 +136,7 @@ func TestRoller_Register(t *testing.T) {
convey.Convey("register failure", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
patchGuard := gomonkey.ApplyPrivateMethod(proverController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
@@ -145,14 +145,14 @@ func TestRoller_Register(t *testing.T) {
patchGuard.ApplyPrivateMethod(taskWorker, "AllocTaskWorker", func(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
return nil, errors.New("register error")
})
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
subscription, err := proverController.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("register success", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
patchGuard := gomonkey.ApplyPrivateMethod(proverController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
@@ -161,17 +161,17 @@ func TestRoller_Register(t *testing.T) {
patchGuard.ApplyPrivateMethod(taskWorker, "AllocTaskWorker", func(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
return nil, nil
})
_, err := rollerController.Register(context.Background(), tmpAuthMsg)
_, err := proverController.Register(context.Background(), tmpAuthMsg)
assert.NoError(t, err)
})
}
func TestRoller_SubmitProof(t *testing.T) {
func TestProver_SubmitProof(t *testing.T) {
tmpAuthMsg, prvKey := geneAuthMsg(t)
pubKey, err := tmpAuthMsg.PublicKey()
assert.NoError(t, err)
id := "rollers_info_test"
id := "provers_info_test"
tmpProof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
Type: message.ProofTypeChunk,
@@ -191,9 +191,9 @@ func TestRoller_SubmitProof(t *testing.T) {
})
defer patchGuard.Reset()
rollermanager.InitRollerManager(nil)
provermanager.InitProverManager(nil)
taskChan, err := rollermanager.Manager.Register(context.Background(), pubKey, tmpAuthMsg.Identity)
taskChan, err := provermanager.Manager.Register(context.Background(), pubKey, tmpAuthMsg.Identity)
assert.NotNil(t, taskChan)
assert.NoError(t, err)
@@ -202,7 +202,7 @@ func TestRoller_SubmitProof(t *testing.T) {
patchGuard.ApplyMethodFunc(s, "Verify", func() (bool, error) {
return false, errors.New("proof verify error")
})
err = rollerController.SubmitProof(tmpProof)
err = proverController.SubmitProof(tmpProof)
assert.Error(t, err)
})
@@ -227,7 +227,7 @@ func TestRoller_SubmitProof(t *testing.T) {
return nil
})
convey.Convey("get none rollers of prover task", t, func() {
convey.Convey("get none provers of prover task", t, func() {
patchGuard.ApplyMethodFunc(proverTaskOrm, "GetProverTaskByTaskIDAndPubKey", func(ctx context.Context, hash, pubKey string) (*orm.ProverTask, error) {
return nil, nil
})
@@ -243,7 +243,7 @@ func TestRoller_SubmitProof(t *testing.T) {
tmpProof1.Sign(privKey)
_, err1 := tmpProof1.PublicKey()
assert.NoError(t, err1)
err2 := rollerController.SubmitProof(tmpProof1)
err2 := proverController.SubmitProof(tmpProof1)
fmt.Println(err2)
targetErr := fmt.Errorf("validator failure get none prover task for the proof")
assert.Equal(t, err2.Error(), targetErr.Error())
@@ -255,23 +255,23 @@ func TestRoller_SubmitProof(t *testing.T) {
TaskID: id,
ProverPublicKey: proofPubKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: "rollers_info_test",
ProvingStatus: int16(types.RollerAssigned),
ProverName: "provers_info_test",
ProvingStatus: int16(types.ProverAssigned),
CreatedAt: now,
}
return s, nil
})
patchGuard.ApplyMethodFunc(proverTaskOrm, "UpdateProverTaskProvingStatus", func(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus, dbTX ...*gorm.DB) error {
patchGuard.ApplyMethodFunc(proverTaskOrm, "UpdateProverTaskProvingStatus", func(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
return nil
})
patchGuard.ApplyPrivateMethod(rollerController.proofReceiver, "proofFailure", func(hash string, pubKey string, proofMsgType message.ProofType) {
patchGuard.ApplyPrivateMethod(proverController.proofReceiver, "proofFailure", func(hash string, pubKey string, proofMsgType message.ProofType) {
})
convey.Convey("proof msg status is not ok", t, func() {
tmpProof.Status = message.StatusProofError
err1 := rollerController.SubmitProof(tmpProof)
err1 := proverController.SubmitProof(tmpProof)
assert.NoError(t, err1)
})
tmpProof.Status = message.StatusOk
@@ -287,7 +287,7 @@ func TestRoller_SubmitProof(t *testing.T) {
patchGuard.ApplyMethodFunc(tmpVerifier, "VerifyProof", func(proof *message.AggProof) (bool, error) {
return false, targetErr
})
err1 := rollerController.SubmitProof(tmpProof)
err1 := proverController.SubmitProof(tmpProof)
assert.Nil(t, err1)
})
@@ -295,10 +295,10 @@ func TestRoller_SubmitProof(t *testing.T) {
return true, nil
})
patchGuard.ApplyPrivateMethod(rollerController.proofReceiver, "closeProofTask", func(hash string, pubKey string, proofMsg *message.ProofMsg, rollersInfo *coordinatorType.RollersInfo) error {
patchGuard.ApplyPrivateMethod(proverController.proofReceiver, "closeProofTask", func(hash string, pubKey string, proofMsg *message.ProofMsg, proversInfo *coordinatorType.ProversInfo) error {
return nil
})
err1 := rollerController.SubmitProof(tmpProof)
err1 := proverController.SubmitProof(tmpProof)
assert.Nil(t, err1)
}

View File

@@ -11,8 +11,8 @@ import (
"scroll-tech/coordinator/internal/config"
)
// RollerAPI for rollers inorder to register and submit proof
type RollerAPI interface {
// ProverAPI for provers inorder to register and submit proof
type ProverAPI interface {
RequestToken(authMsg *message.AuthMsg) (string, error)
Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error)
SubmitProof(proof *message.ProofMsg) error
@@ -22,8 +22,8 @@ type RollerAPI interface {
func RegisterAPIs(cfg *config.Config, db *gorm.DB) []rpc.API {
return []rpc.API{
{
Namespace: "roller",
Service: RollerAPI(NewRollerController(cfg.RollerManagerConfig, db)),
Namespace: "prover",
Service: ProverAPI(NewProverController(cfg.ProverManagerConfig, db)),
Public: true,
},
}

View File

@@ -114,16 +114,16 @@ func (c *Collector) timeoutProofTask() {
}
for _, assignedProverTask := range assignedProverTasks {
timeoutDuration := time.Duration(c.cfg.RollerManagerConfig.CollectionTime) * time.Minute
timeoutDuration := time.Duration(c.cfg.ProverManagerConfig.CollectionTime) * time.Minute
// here not update the block batch proving status failed, because the collector loop will check
// the attempt times. if reach the times, the collector will set the block batch proving status.
if time.Since(assignedProverTask.AssignedAt) >= timeoutDuration {
log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
err = c.db.Transaction(func(tx *gorm.DB) error {
// update prover task proving status as RollerProofInvalid
// update prover task proving status as ProverProofInvalid
if err = c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.RollerProofInvalid, tx); err != nil {
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}

View File

@@ -12,7 +12,7 @@ import (
"scroll-tech/common/utils"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -59,15 +59,15 @@ func (bp *BatchProofCollector) Collect(ctx context.Context) error {
batchTask := batchTasks[0]
log.Info("start batch proof generation session", "id", batchTask.Hash)
if rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch) == 0 {
return fmt.Errorf("no idle common roller when starting proof generation session, id:%s", batchTask.Hash)
if provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch) == 0 {
return fmt.Errorf("no idle common prover when starting proof generation session, id:%s", batchTask.Hash)
}
if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
return fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
}
rollerStatusList, err := bp.sendTask(ctx, batchTask.Hash)
proverStatusList, err := bp.sendTask(ctx, batchTask.Hash)
if err != nil {
return fmt.Errorf("send batch task id:%s err:%w", batchTask.Hash, err)
}
@@ -78,13 +78,13 @@ func (bp *BatchProofCollector) Collect(ctx context.Context) error {
return fmt.Errorf("failed to update task status, id:%s, error:%w", batchTask.Hash, err)
}
for _, rollerStatus := range rollerStatusList {
for _, proverStatus := range proverStatusList {
proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: rollerStatus.PublicKey,
ProverPublicKey: proverStatus.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: rollerStatus.Name,
ProvingStatus: int16(types.RollerAssigned),
ProverName: proverStatus.Name,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/databased/db.go
AssignedAt: utils.NowUTC(),
@@ -100,7 +100,7 @@ func (bp *BatchProofCollector) Collect(ctx context.Context) error {
return transErr
}
func (bp *BatchProofCollector) sendTask(ctx context.Context, taskID string) ([]*coordinatorType.RollerStatus, error) {
func (bp *BatchProofCollector) sendTask(ctx context.Context, taskID string) ([]*coordinatorType.ProverStatus, error) {
// get chunk proofs from db
chunkProofs, err := bp.chunkOrm.GetProofsByBatchHash(ctx, taskID)
if err != nil {

View File

@@ -13,7 +13,7 @@ import (
"scroll-tech/common/utils"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -66,11 +66,11 @@ func (cp *ChunkProofCollector) Collect(ctx context.Context) error {
return fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
}
if rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
return fmt.Errorf("no idle chunk roller when starting proof generation session, id:%s", chunkTask.Hash)
if provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk) == 0 {
return fmt.Errorf("no idle chunk prover when starting proof generation session, id:%s", chunkTask.Hash)
}
rollerStatusList, err := cp.sendTask(ctx, chunkTask.Hash)
proverStatusList, err := cp.sendTask(ctx, chunkTask.Hash)
if err != nil {
return fmt.Errorf("send task failure, id:%s error:%w", chunkTask.Hash, err)
}
@@ -82,19 +82,19 @@ func (cp *ChunkProofCollector) Collect(ctx context.Context) error {
return err
}
for _, rollerStatus := range rollerStatusList {
for _, proverStatus := range proverStatusList {
proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: rollerStatus.PublicKey,
ProverPublicKey: proverStatus.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: rollerStatus.Name,
ProvingStatus: int16(types.RollerAssigned),
ProverName: proverStatus.Name,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/databased/db.go
AssignedAt: utils.NowUTC(),
}
if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask, tx); err != nil {
return fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, rollerStatus.PublicKey, err)
return fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, proverStatus.PublicKey, err)
}
}
return nil
@@ -102,7 +102,7 @@ func (cp *ChunkProofCollector) Collect(ctx context.Context) error {
return transErr
}
func (cp *ChunkProofCollector) sendTask(ctx context.Context, hash string) ([]*coordinatorType.RollerStatus, error) {
func (cp *ChunkProofCollector) sendTask(ctx context.Context, hash string) ([]*coordinatorType.ProverStatus, error) {
// Get block hashes.
wrappedBlocks, err := cp.blockOrm.GetL2BlocksByChunkHash(ctx, hash)
if err != nil {

View File

@@ -13,7 +13,7 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -56,14 +56,14 @@ func (b *BaseCollector) checkAttemptsExceeded(hash string, taskType message.Proo
return true
}
if len(proverTasks) >= int(b.cfg.RollerManagerConfig.SessionAttempts) {
if len(proverTasks) >= int(b.cfg.ProverManagerConfig.SessionAttempts) {
coordinatorSessionsTimeoutTotalCounter.Inc(1)
log.Warn("proof generation prover task %s ended because reach the max attempts", hash)
for _, proverTask := range proverTasks {
if types.ProvingStatus(proverTask.ProvingStatus) == types.ProvingTaskFailed {
rollermanager.Manager.FreeTaskIDForRoller(proverTask.ProverPublicKey, hash)
provermanager.Manager.FreeTaskIDForProver(proverTask.ProverPublicKey, hash)
}
}
@@ -79,7 +79,7 @@ func (b *BaseCollector) checkAttemptsExceeded(hash string, taskType message.Proo
}
}
// update the prover task status to let timeout checker don't check it.
if err := b.proverTaskOrm.UpdateAllProverTaskProvingStatusOfTaskID(b.ctx, message.ProofType(proverTasks[0].TaskType), hash, types.RollerProofInvalid, tx); err != nil {
if err := b.proverTaskOrm.UpdateAllProverTaskProvingStatusOfTaskID(b.ctx, message.ProofType(proverTasks[0].TaskType), hash, types.ProverProofInvalid, tx); err != nil {
log.Error("failed to update prover task proving_status as failed", "msg.ID", hash, "error", err)
}
return nil
@@ -91,7 +91,7 @@ func (b *BaseCollector) checkAttemptsExceeded(hash string, taskType message.Proo
return true
}
func (b *BaseCollector) sendTask(proveType message.ProofType, hash string, blockHashes []common.Hash, subProofs []*message.AggProof) ([]*coordinatorType.RollerStatus, error) {
func (b *BaseCollector) sendTask(proveType message.ProofType, hash string, blockHashes []common.Hash, subProofs []*message.AggProof) ([]*coordinatorType.ProverStatus, error) {
sendMsg := &message.TaskMsg{
ID: hash,
Type: proveType,
@@ -100,26 +100,26 @@ func (b *BaseCollector) sendTask(proveType message.ProofType, hash string, block
}
var err error
var rollerStatusList []*coordinatorType.RollerStatus
for i := uint8(0); i < b.cfg.RollerManagerConfig.RollersPerSession; i++ {
rollerPubKey, rollerName, sendErr := rollermanager.Manager.SendTask(proveType, sendMsg)
var proverStatusList []*coordinatorType.ProverStatus
for i := uint8(0); i < b.cfg.ProverManagerConfig.ProversPerSession; i++ {
proverPubKey, proverName, sendErr := provermanager.Manager.SendTask(proveType, sendMsg)
if sendErr != nil {
err = sendErr
continue
}
rollermanager.Manager.UpdateMetricRollerProofsLastAssignedTimestampGauge(rollerPubKey)
provermanager.Manager.UpdateMetricProverProofsLastAssignedTimestampGauge(proverPubKey)
rollerStatus := &coordinatorType.RollerStatus{
PublicKey: rollerPubKey,
Name: rollerName,
Status: types.RollerAssigned,
proverStatus := &coordinatorType.ProverStatus{
PublicKey: proverPubKey,
Name: proverName,
Status: types.ProverAssigned,
}
rollerStatusList = append(rollerStatusList, rollerStatus)
proverStatusList = append(proverStatusList, proverStatus)
}
if err != nil {
return nil, err
}
return rollerStatusList, nil
return proverStatusList, nil
}

View File

@@ -15,7 +15,7 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
)
@@ -33,8 +33,8 @@ var (
ErrValidatorFailureProofMsgStatusNotOk = errors.New("validator failure proof msg status not ok")
// ErrValidatorFailureProverTaskEmpty get none prover task
ErrValidatorFailureProverTaskEmpty = errors.New("validator failure get none prover task for the proof")
// ErrValidatorFailureRollerInfoHasProofValid proof is vaild
ErrValidatorFailureRollerInfoHasProofValid = errors.New("validator failure prover task info has proof valid")
// ErrValidatorFailureProverInfoHasProofValid proof is vaild
ErrValidatorFailureProverInfoHasProofValid = errors.New("validator failure prover task info has proof valid")
)
// ZKProofReceiver the proof receiver
@@ -44,13 +44,13 @@ type ZKProofReceiver struct {
proverTaskOrm *orm.ProverTask
db *gorm.DB
cfg *config.RollerManagerConfig
cfg *config.ProverManagerConfig
verifier *verifier.Verifier
}
// NewZKProofReceiver create a proof receiver
func NewZKProofReceiver(cfg *config.RollerManagerConfig, db *gorm.DB) *ZKProofReceiver {
func NewZKProofReceiver(cfg *config.ProverManagerConfig, db *gorm.DB) *ZKProofReceiver {
vf, err := verifier.NewVerifier(cfg.Verifier)
if err != nil {
panic("proof receiver new verifier failure")
@@ -67,12 +67,12 @@ func NewZKProofReceiver(cfg *config.RollerManagerConfig, db *gorm.DB) *ZKProofRe
}
}
// HandleZkProof handle a ZkProof submitted from a roller.
// HandleZkProof handle a ZkProof submitted from a prover.
// For now only proving/verifying error will lead to setting status as skipped.
// db/unmarshal errors will not because they are errors on the business logic side.
func (m *ZKProofReceiver) HandleZkProof(ctx context.Context, proofMsg *message.ProofMsg) error {
pk, _ := proofMsg.PublicKey()
rollermanager.Manager.UpdateMetricRollerProofsLastFinishedTimestampGauge(pk)
provermanager.Manager.UpdateMetricProverProofsLastFinishedTimestampGauge(pk)
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, proofMsg.ID, pk)
if proverTask == nil || err != nil {
@@ -126,18 +126,18 @@ func (m *ZKProofReceiver) HandleZkProof(ctx context.Context, proofMsg *message.P
if verifyErr != nil || !success {
if verifyErr != nil {
// TODO: this is only a temp workaround for testnet, we should return err in real cases
log.Error("failed to verify zk proof", "proof id", proofMsg.ID, "roller pk", pk, "prove type",
log.Error("failed to verify zk proof", "proof id", proofMsg.ID, "prover pk", pk, "prove type",
proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
}
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
// TODO: Roller needs to be slashed if proof is invalid.
// TODO: Prover needs to be slashed if proof is invalid.
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
rollermanager.Manager.UpdateMetricRollerProofsVerifiedFailedTimeTimer(pk, proofTime)
provermanager.Manager.UpdateMetricProverProofsVerifiedFailedTimeTimer(pk, proofTime)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "roller name", proverTask.ProverName,
"roller pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
return nil
}
@@ -146,7 +146,7 @@ func (m *ZKProofReceiver) HandleZkProof(ctx context.Context, proofMsg *message.P
}
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
rollermanager.Manager.UpdateMetricRollerProofsVerifiedSuccessTimeTimer(pk, proofTime)
provermanager.Manager.UpdateMetricProverProofsVerifiedSuccessTimeTimer(pk, proofTime)
return nil
}
@@ -171,30 +171,30 @@ func (m *ZKProofReceiver) checkAreAllChunkProofsReady(ctx context.Context, chunk
}
func (m *ZKProofReceiver) validator(proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
// Ensure this roller is eligible to participate in the prover task.
if types.RollerProveStatus(proverTask.ProvingStatus) == types.RollerProofValid {
// Ensure this prover is eligible to participate in the prover task.
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the roller for each submission of invalid proof
// (i) slash the prover for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn("roller has already submitted valid proof in proof session", "roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey, "proof type", proverTask.TaskType, "proof id", proofMsg.ProofDetail.ID)
return ErrValidatorFailureRollerInfoHasProofValid
log.Warn("prover has already submitted valid proof in proof session", "prover name", proverTask.ProverName,
"prover pk", proverTask.ProverPublicKey, "proof type", proverTask.TaskType, "proof id", proofMsg.ProofDetail.ID)
return ErrValidatorFailureProverInfoHasProofValid
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
log.Info("handling zk proof", "proof id", proofMsg.ID, "roller name", proverTask.ProverName,
"roller pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)
log.Info("handling zk proof", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)
if proofMsg.Status != message.StatusOk {
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
rollermanager.Manager.UpdateMetricRollerProofsGeneratedFailedTimeTimer(pk, proofTime)
provermanager.Manager.UpdateMetricProverProofsGeneratedFailedTimeTimer(pk, proofTime)
log.Info("proof generated by roller failed", "proof id", proofMsg.ID, "roller name", proverTask.ProverName,
"roller pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", proofMsg.Error)
log.Info("proof generated by prover failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", proofMsg.Error)
return ErrValidatorFailureProofMsgStatusNotOk
}
return nil
@@ -219,7 +219,7 @@ func (m *ZKProofReceiver) closeProofTask(ctx context.Context, hash string, pubKe
return err
}
rollermanager.Manager.FreeTaskIDForRoller(pubKey, hash)
provermanager.Manager.FreeTaskIDForProver(pubKey, hash)
return nil
}
@@ -231,12 +231,12 @@ func (m *ZKProofReceiver) updateProofStatus(ctx context.Context, hash string, pr
return nil
}
var proverTaskStatus types.RollerProveStatus
var proverTaskStatus types.ProverProveStatus
switch status {
case types.ProvingTaskFailed, types.ProvingTaskUnassigned:
proverTaskStatus = types.RollerProofInvalid
proverTaskStatus = types.ProverProofInvalid
case types.ProvingTaskVerified:
proverTaskStatus = types.RollerProofValid
proverTaskStatus = types.ProverProofValid
}
err := m.db.Transaction(func(tx *gorm.DB) error {

View File

@@ -11,12 +11,12 @@ import (
"scroll-tech/common/metrics"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/logic/provermanager"
)
var coordinatorRollersDisconnectsTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/rollers/disconnects/total", metrics.ScrollRegistry)
var coordinatorProversDisconnectsTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/provers/disconnects/total", metrics.ScrollRegistry)
// TaskWorker held the roller task connection
// TaskWorker held the prover task connection
type TaskWorker struct{}
// NewTaskWorker create a task worker
@@ -38,8 +38,8 @@ func (t *TaskWorker) AllocTaskWorker(ctx context.Context, authMsg *message.AuthM
identity := authMsg.Identity
// create or get the roller message channel
taskCh, err := rollermanager.Manager.Register(ctx, pubKey, identity)
// create or get the prover message channel
taskCh, err := provermanager.Manager.Register(ctx, pubKey, identity)
if err != nil {
return &rpc.Subscription{}, err
}
@@ -48,7 +48,7 @@ func (t *TaskWorker) AllocTaskWorker(ctx context.Context, authMsg *message.AuthM
go t.worker(rpcSub, notifier, pubKey, identity, taskCh)
log.Info("roller register", "name", identity.Name, "pubKey", pubKey, "version", identity.Version)
log.Info("prover register", "name", identity.Name, "pubKey", pubKey, "version", identity.Version)
return rpcSub, nil
}
@@ -60,8 +60,8 @@ func (t *TaskWorker) worker(rpcSub *rpc.Subscription, notifier *rpc.Notifier, pu
log.Error("task worker subId:%d panic for:%v", err)
}
rollermanager.Manager.FreeRoller(pubKey)
log.Info("roller unregister", "name", identity.Name, "pubKey", pubKey)
provermanager.Manager.FreeProver(pubKey)
log.Info("prover unregister", "name", identity.Name, "pubKey", pubKey)
}()
for {
@@ -69,7 +69,7 @@ func (t *TaskWorker) worker(rpcSub *rpc.Subscription, notifier *rpc.Notifier, pu
case task := <-taskCh:
notifier.Notify(rpcSub.ID, task) //nolint
case err := <-rpcSub.Err():
coordinatorRollersDisconnectsTotalCounter.Inc(1)
coordinatorProversDisconnectsTotalCounter.Inc(1)
log.Warn("client stopped the ws connection", "name", identity.Name, "pubkey", pubKey, "err", err)
return
case <-notifier.Closed():

View File

@@ -0,0 +1,60 @@
package provermanager
import (
"time"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
)
type proverMetrics struct {
proverProofsVerifiedSuccessTimeTimer gethMetrics.Timer
proverProofsVerifiedFailedTimeTimer gethMetrics.Timer
proverProofsGeneratedFailedTimeTimer gethMetrics.Timer
proverProofsLastAssignedTimestampGauge gethMetrics.Gauge
proverProofsLastFinishedTimestampGauge gethMetrics.Gauge
}
func (r *proverManager) UpdateMetricProverProofsLastFinishedTimestampGauge(pk string) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (r *proverManager) UpdateMetricProverProofsLastAssignedTimestampGauge(pk string) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (r *proverManager) UpdateMetricProverProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsVerifiedSuccessTimeTimer.Update(d)
}
}
}
func (r *proverManager) UpdateMetricProverProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsVerifiedFailedTimeTimer.Update(d)
}
}
}
func (r *proverManager) UpdateMetricProverProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsGeneratedFailedTimeTimer.Update(d)
}
}
}

View File

@@ -0,0 +1,203 @@
package provermanager
import (
"context"
"crypto/rand"
"errors"
"fmt"
"math/big"
"sync"
"time"
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/orm"
)
var (
once sync.Once
// Manager the global prover manager
Manager *proverManager
)
// ProverNode is the interface that controls the provers
type proverNode struct {
// Prover name
Name string
// Prover type
Type message.ProofType
// Prover public key
PublicKey string
// Prover version
Version string
// task channel
taskChan chan *message.TaskMsg
// session id list which delivered to prover.
TaskIDs cmap.ConcurrentMap
// Time of message creation
registerTime time.Time
metrics *proverMetrics
}
type proverManager struct {
proverPool cmap.ConcurrentMap
proverTaskOrm *orm.ProverTask
}
// InitProverManager init a prover manager
func InitProverManager(db *gorm.DB) {
once.Do(func() {
Manager = &proverManager{
proverPool: cmap.New(),
proverTaskOrm: orm.NewProverTask(db),
}
})
}
// Register the identity message to prover manager with the public key
func (r *proverManager) Register(ctx context.Context, proverPublicKey string, identity *message.Identity) (<-chan *message.TaskMsg, error) {
node, ok := r.proverPool.Get(proverPublicKey)
if !ok {
taskIDs, err := r.reloadProverAssignedTasks(ctx, proverPublicKey)
if err != nil {
return nil, fmt.Errorf("register error:%w", err)
}
rMs := &proverMetrics{
proverProofsVerifiedSuccessTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("prover/proofs/verified/success/time/%s", proverPublicKey), metrics.ScrollRegistry),
proverProofsVerifiedFailedTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("prover/proofs/verified/failed/time/%s", proverPublicKey), metrics.ScrollRegistry),
proverProofsGeneratedFailedTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("prover/proofs/generated/failed/time/%s", proverPublicKey), metrics.ScrollRegistry),
proverProofsLastAssignedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("prover/proofs/last/assigned/timestamp/%s", proverPublicKey), metrics.ScrollRegistry),
proverProofsLastFinishedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("prover/proofs/last/finished/timestamp/%s", proverPublicKey), metrics.ScrollRegistry),
}
node = &proverNode{
Name: identity.Name,
Type: identity.ProverType,
Version: identity.Version,
PublicKey: proverPublicKey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
metrics: rMs,
}
r.proverPool.Set(proverPublicKey, node)
}
prover := node.(*proverNode)
// avoid reconnection too frequently.
if time.Since(prover.registerTime) < 60 {
log.Warn("prover reconnect too frequently", "prover_name", identity.Name, "prover_type", identity.ProverType, "public key", proverPublicKey)
return nil, fmt.Errorf("prover reconnect too frequently")
}
// update register time and status
prover.registerTime = time.Now()
return prover.taskChan, nil
}
func (r *proverManager) reloadProverAssignedTasks(ctx context.Context, proverPublicKey string) (*cmap.ConcurrentMap, error) {
var assignedProverTasks []orm.ProverTask
page := 0
limit := 100
for {
page++
whereFields := make(map[string]interface{})
whereFields["proving_status"] = int16(types.ProverAssigned)
orderBy := []string{"id asc"}
offset := (page - 1) * limit
batchAssignedProverTasks, err := r.proverTaskOrm.GetProverTasks(ctx, whereFields, orderBy, offset, limit)
if err != nil {
log.Warn("reloadProverAssignedTasks get all assigned failure", "error", err)
return nil, fmt.Errorf("reloadProverAssignedTasks error:%w", err)
}
if len(batchAssignedProverTasks) < limit {
break
}
assignedProverTasks = append(assignedProverTasks, batchAssignedProverTasks...)
}
taskIDs := cmap.New()
for _, assignedProverTask := range assignedProverTasks {
if assignedProverTask.ProverPublicKey == proverPublicKey && assignedProverTask.ProvingStatus == int16(types.ProverAssigned) {
taskIDs.Set(assignedProverTask.TaskID, struct{}{})
}
}
return &taskIDs, nil
}
// SendTask send the need proved message to prover
func (r *proverManager) SendTask(proverType message.ProofType, msg *message.TaskMsg) (string, string, error) {
tmpProver := r.selectProver(proverType)
if tmpProver == nil {
return "", "", errors.New("selectProver returns nil")
}
select {
case tmpProver.taskChan <- msg:
tmpProver.TaskIDs.Set(msg.ID, struct{}{})
default:
err := fmt.Errorf("prover channel is full, proverName:%s, publicKey:%s", tmpProver.Name, tmpProver.PublicKey)
return "", "", err
}
r.UpdateMetricProverProofsLastAssignedTimestampGauge(tmpProver.PublicKey)
return tmpProver.PublicKey, tmpProver.Name, nil
}
// ExistTaskIDForProver check the task exist
func (r *proverManager) ExistTaskIDForProver(pk string, id string) bool {
node, ok := r.proverPool.Get(pk)
if !ok {
return false
}
prover := node.(*proverNode)
return prover.TaskIDs.Has(id)
}
// FreeProver free the prover with the pk key
func (r *proverManager) FreeProver(pk string) {
r.proverPool.Pop(pk)
}
// FreeTaskIDForProver free a task of the pk prover
func (r *proverManager) FreeTaskIDForProver(pk string, id string) {
if node, ok := r.proverPool.Get(pk); ok {
prover := node.(*proverNode)
prover.TaskIDs.Pop(id)
}
}
// GetNumberOfIdleProvers return the count of idle provers.
func (r *proverManager) GetNumberOfIdleProvers(proverType message.ProofType) (count int) {
for item := range r.proverPool.IterBuffered() {
prover := item.Val.(*proverNode)
if prover.TaskIDs.Count() == 0 && prover.Type == proverType {
count++
}
}
return count
}
func (r *proverManager) selectProver(proverType message.ProofType) *proverNode {
pubkeys := r.proverPool.Keys()
for len(pubkeys) > 0 {
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
if val, ok := r.proverPool.Get(pubkeys[idx.Int64()]); ok {
rn := val.(*proverNode)
if rn.TaskIDs.Count() == 0 && rn.Type == proverType {
return rn
}
}
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
}
return nil
}
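
A minimal sketch, not part of the diff, of how the coordinator side is expected to use the renamed manager, based on the calls visible in the collectors above. The wrapper name `assignChunkTask` is an assumption for illustration, and a real task message also carries block hashes or sub-proofs.

```go
package example

import (
	"context"

	"gorm.io/gorm"

	"scroll-tech/common/types/message"
	"scroll-tech/coordinator/internal/logic/provermanager"
)

// assignChunkTask registers a prover and hands it one chunk proving task.
func assignChunkTask(ctx context.Context, db *gorm.DB, pubKey string, identity *message.Identity, chunkHash string) error {
	// Initialise the global manager once; later calls are no-ops via sync.Once.
	provermanager.InitProverManager(db)

	// Register the prover and obtain its task channel (normally done by the
	// coordinator's TaskWorker when the prover connects over websocket).
	taskCh, err := provermanager.Manager.Register(ctx, pubKey, identity)
	if err != nil {
		return err
	}
	_ = taskCh // the TaskWorker forwards messages from this channel to the prover

	// Only dispatch when an idle prover of the matching type is available.
	if provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk) == 0 {
		return nil
	}

	// Send the task to a randomly selected idle chunk prover.
	_, _, err = provermanager.Manager.SendTask(message.ProofTypeChunk, &message.TaskMsg{
		ID:   chunkHash,
		Type: message.ProofTypeChunk,
	})
	return err
}
```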

View File

@@ -1,60 +0,0 @@
package rollermanager
import (
"time"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
)
type rollerMetrics struct {
rollerProofsVerifiedSuccessTimeTimer gethMetrics.Timer
rollerProofsVerifiedFailedTimeTimer gethMetrics.Timer
rollerProofsGeneratedFailedTimeTimer gethMetrics.Timer
rollerProofsLastAssignedTimestampGauge gethMetrics.Gauge
rollerProofsLastFinishedTimestampGauge gethMetrics.Gauge
}
func (r *rollerManager) UpdateMetricRollerProofsLastFinishedTimestampGauge(pk string) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (r *rollerManager) UpdateMetricRollerProofsLastAssignedTimestampGauge(pk string) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (r *rollerManager) UpdateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedSuccessTimeTimer.Update(d)
}
}
}
func (r *rollerManager) UpdateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedFailedTimeTimer.Update(d)
}
}
}
func (r *rollerManager) UpdateMetricRollerProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsGeneratedFailedTimeTimer.Update(d)
}
}
}

View File

@@ -1,203 +0,0 @@
package rollermanager
import (
"context"
"crypto/rand"
"errors"
"fmt"
"math/big"
"sync"
"time"
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/orm"
)
var (
once sync.Once
// Manager the global roller manager
Manager *rollerManager
)
// RollerNode is the interface that controls the rollers
type rollerNode struct {
// Roller name
Name string
// Roller type
Type message.ProofType
// Roller public key
PublicKey string
// Roller version
Version string
// task channel
taskChan chan *message.TaskMsg
// session id list which delivered to roller.
TaskIDs cmap.ConcurrentMap
// Time of message creation
registerTime time.Time
metrics *rollerMetrics
}
type rollerManager struct {
rollerPool cmap.ConcurrentMap
proverTaskOrm *orm.ProverTask
}
// InitRollerManager init a roller manager
func InitRollerManager(db *gorm.DB) {
once.Do(func() {
Manager = &rollerManager{
rollerPool: cmap.New(),
proverTaskOrm: orm.NewProverTask(db),
}
})
}
// Register the identity message to roller manager with the public key
func (r *rollerManager) Register(ctx context.Context, proverPublicKey string, identity *message.Identity) (<-chan *message.TaskMsg, error) {
node, ok := r.rollerPool.Get(proverPublicKey)
if !ok {
taskIDs, err := r.reloadRollerAssignedTasks(ctx, proverPublicKey)
if err != nil {
return nil, fmt.Errorf("register error:%w", err)
}
rMs := &rollerMetrics{
rollerProofsVerifiedSuccessTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/success/time/%s", proverPublicKey), metrics.ScrollRegistry),
rollerProofsVerifiedFailedTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/failed/time/%s", proverPublicKey), metrics.ScrollRegistry),
rollerProofsGeneratedFailedTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/generated/failed/time/%s", proverPublicKey), metrics.ScrollRegistry),
rollerProofsLastAssignedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/assigned/timestamp/%s", proverPublicKey), metrics.ScrollRegistry),
rollerProofsLastFinishedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/finished/timestamp/%s", proverPublicKey), metrics.ScrollRegistry),
}
node = &rollerNode{
Name: identity.Name,
Type: identity.RollerType,
Version: identity.Version,
PublicKey: proverPublicKey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
metrics: rMs,
}
r.rollerPool.Set(proverPublicKey, node)
}
roller := node.(*rollerNode)
// avoid reconnection too frequently.
if time.Since(roller.registerTime) < 60 {
log.Warn("roller reconnect too frequently", "prover_name", identity.Name, "roller_type", identity.RollerType, "public key", proverPublicKey)
return nil, fmt.Errorf("roller reconnect too frequently")
}
// update register time and status
roller.registerTime = time.Now()
return roller.taskChan, nil
}
func (r *rollerManager) reloadRollerAssignedTasks(ctx context.Context, proverPublicKey string) (*cmap.ConcurrentMap, error) {
var assignedProverTasks []orm.ProverTask
page := 0
limit := 100
for {
page++
whereFields := make(map[string]interface{})
whereFields["proving_status"] = int16(types.RollerAssigned)
orderBy := []string{"id asc"}
offset := (page - 1) * limit
batchAssignedProverTasks, err := r.proverTaskOrm.GetProverTasks(ctx, whereFields, orderBy, offset, limit)
if err != nil {
log.Warn("reloadRollerAssignedTasks get all assigned failure", "error", err)
return nil, fmt.Errorf("reloadRollerAssignedTasks error:%w", err)
}
if len(batchAssignedProverTasks) < limit {
break
}
assignedProverTasks = append(assignedProverTasks, batchAssignedProverTasks...)
}
taskIDs := cmap.New()
for _, assignedProverTask := range assignedProverTasks {
if assignedProverTask.ProverPublicKey == proverPublicKey && assignedProverTask.ProvingStatus == int16(types.RollerAssigned) {
taskIDs.Set(assignedProverTask.TaskID, struct{}{})
}
}
return &taskIDs, nil
}
// SendTask send the need proved message to roller
func (r *rollerManager) SendTask(rollerType message.ProofType, msg *message.TaskMsg) (string, string, error) {
tmpRoller := r.selectRoller(rollerType)
if tmpRoller == nil {
return "", "", errors.New("selectRoller returns nil")
}
select {
case tmpRoller.taskChan <- msg:
tmpRoller.TaskIDs.Set(msg.ID, struct{}{})
default:
err := fmt.Errorf("roller channel is full, rollerName:%s, publicKey:%s", tmpRoller.Name, tmpRoller.PublicKey)
return "", "", err
}
r.UpdateMetricRollerProofsLastAssignedTimestampGauge(tmpRoller.PublicKey)
return tmpRoller.PublicKey, tmpRoller.Name, nil
}
// ExistTaskIDForRoller check the task exist
func (r *rollerManager) ExistTaskIDForRoller(pk string, id string) bool {
node, ok := r.rollerPool.Get(pk)
if !ok {
return false
}
roller := node.(*rollerNode)
return roller.TaskIDs.Has(id)
}
// FreeRoller free the roller with the pk key
func (r *rollerManager) FreeRoller(pk string) {
r.rollerPool.Pop(pk)
}
// FreeTaskIDForRoller free a task of the pk roller
func (r *rollerManager) FreeTaskIDForRoller(pk string, id string) {
if node, ok := r.rollerPool.Get(pk); ok {
roller := node.(*rollerNode)
roller.TaskIDs.Pop(id)
}
}
// GetNumberOfIdleRollers return the count of idle rollers.
func (r *rollerManager) GetNumberOfIdleRollers(rollerType message.ProofType) (count int) {
for item := range r.rollerPool.IterBuffered() {
roller := item.Val.(*rollerNode)
if roller.TaskIDs.Count() == 0 && roller.Type == rollerType {
count++
}
}
return count
}
func (r *rollerManager) selectRoller(rollerType message.ProofType) *rollerNode {
pubkeys := r.rollerPool.Keys()
for len(pubkeys) > 0 {
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
if val, ok := r.rollerPool.Get(pubkeys[idx.Int64()]); ok {
rn := val.(*rollerNode)
if rn.TaskIDs.Count() == 0 && rn.Type == rollerType {
return rn
}
}
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
}
return nil
}

View File

@@ -30,6 +30,7 @@ type Batch struct {
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
// proof
@@ -48,7 +49,7 @@ type Batch struct {
FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`
// gas oracle
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1;default:1"`
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// metadata
@@ -131,7 +132,7 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
// InsertBatch inserts a new batch into the database.
// for unit test
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk) (*Batch, error) {
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
if len(chunks) == 0 {
return nil, errors.New("invalid args")
}
@@ -184,14 +185,20 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
EndChunkHash: endChunkHash,
EndChunkIndex: endChunkIndex,
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawRoot.Hex(),
ParentBatchHash: parentBatchHash.Hex(),
BatchHeader: batchHeader.Encode(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
}
db := o.db.WithContext(ctx)
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db.WithContext(ctx)
db = db.Model(&Batch{})
if err := db.Create(&newBatch).Error; err != nil {

View File

@@ -27,7 +27,11 @@ type Chunk struct {
EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"`
StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"`
TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
TotalL1MessagesPoppedInChunk uint64 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
TotalL1MessagesPoppedInChunk uint32 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
ParentChunkHash string `json:"parent_chunk_hash" gorm:"column:parent_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
ParentChunkStateRoot string `json:"parent_chunk_state_root" gorm:"column:parent_chunk_state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
// proof
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
@@ -177,17 +181,19 @@ func (o *Chunk) GetChunkBatchHash(ctx context.Context, chunkHash string) (string
// InsertChunk inserts a new chunk into the database.
// for unit test
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk) (*Chunk, error) {
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
return nil, errors.New("invalid args")
}
var chunkIndex uint64
var totalL1MessagePoppedBefore uint64
var parentChunkHash string
var parentChunkStateRoot string
parentChunk, err := o.GetLatestChunk(ctx)
if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
log.Error("failed to get latest chunk", "err", err)
return nil, err
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
// if parentChunk==nil then err==gorm.ErrRecordNotFound, which means there's
@@ -195,13 +201,15 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk) (*Chunk, er
// if parentChunk!=nil then err=nil, then we fill the parentChunk-related data into the creating chunk
if parentChunk != nil {
chunkIndex = parentChunk.Index + 1
totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + parentChunk.TotalL1MessagesPoppedInChunk
totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + uint64(parentChunk.TotalL1MessagesPoppedInChunk)
parentChunkHash = parentChunk.Hash
parentChunkStateRoot = parentChunk.StateRoot
}
hash, err := chunk.Hash(totalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to get chunk hash", "err", err)
return nil, err
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
var totalL2TxGas uint64
@@ -229,16 +237,23 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk) (*Chunk, er
TotalL1CommitGas: totalL1CommitGas,
StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),
TotalL1MessagesPoppedInChunk: uint32(chunk.NumL1Messages(totalL1MessagePoppedBefore)),
ParentChunkHash: parentChunkHash,
StateRoot: chunk.Blocks[numBlocks-1].Header.Root.Hex(),
ParentChunkStateRoot: parentChunkStateRoot,
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
ProvingStatus: int16(types.ProvingTaskUnassigned),
}
db := o.db.WithContext(ctx)
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
if err := db.Create(&newChunk).Error; err != nil {
log.Error("failed to insert chunk", "hash", hash, "err", err)
return nil, err
return nil, fmt.Errorf("Chunk.InsertChunk error: %w, chunk hash: %v", err, newChunk.Hash)
}
return &newChunk, nil
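Note: the error handling above is standardized on fmt.Errorf with the %w verb, which keeps the wrapped gorm sentinel reachable. A small self-contained sketch (the findLatest name is illustrative) showing that errors.Is still matches through the wrapper, so the explicit errors.Unwrap used in this hunk is a belt-and-braces check rather than a requirement:

package main

import (
	"errors"
	"fmt"

	"gorm.io/gorm"
)

// findLatest stands in for a lookup such as GetLatestChunk: the driver error is
// wrapped with %w, so the original sentinel stays in the error chain.
func findLatest() error {
	return fmt.Errorf("Chunk.GetLatestChunk error: %w", gorm.ErrRecordNotFound)
}

func main() {
	err := findLatest()
	fmt.Println(errors.Is(err, gorm.ErrRecordNotFound))                // true: Is walks the wrap chain
	fmt.Println(errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound)) // also true after one manual unwrap
}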

View File

@@ -19,15 +19,16 @@ type L2Block struct {
db *gorm.DB `gorm:"column:-"`
// block
Number uint64 `json:"number" gorm:"number"`
Hash string `json:"hash" gorm:"hash"`
ParentHash string `json:"parent_hash" gorm:"parent_hash"`
Header string `json:"header" gorm:"header"`
Transactions string `json:"transactions" gorm:"transactions"`
WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"`
TxNum uint32 `json:"tx_num" gorm:"tx_num"`
GasUsed uint64 `json:"gas_used" gorm:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
Number uint64 `json:"number" gorm:"number"`
Hash string `json:"hash" gorm:"hash"`
ParentHash string `json:"parent_hash" gorm:"parent_hash"`
Header string `json:"header" gorm:"header"`
Transactions string `json:"transactions" gorm:"transactions"`
WithdrawRoot string `json:"withdraw_root" gorm:"withdraw_root"`
StateRoot string `json:"state_root" gorm:"state_root"`
TxNum uint32 `json:"tx_num" gorm:"tx_num"`
GasUsed uint64 `json:"gas_used" gorm:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
// chunk
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
@@ -53,7 +54,7 @@ func (*L2Block) TableName() string {
func (o *L2Block) GetL2BlocksByChunkHash(ctx context.Context, chunkHash string) ([]*types.WrappedBlock, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Select("header, transactions, withdraw_root")
db = db.Where("chunk_hash = ?", chunkHash)
db = db.Order("number ASC")
@@ -75,7 +76,7 @@ func (o *L2Block) GetL2BlocksByChunkHash(ctx context.Context, chunkHash string)
return nil, fmt.Errorf("L2Block.GetL2BlocksByChunkHash error: %w, chunk hash: %v", err, chunkHash)
}
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
}
@@ -83,31 +84,33 @@ func (o *L2Block) GetL2BlocksByChunkHash(ctx context.Context, chunkHash string)
}
// InsertL2Blocks inserts l2 blocks into the "l2_block" table.
// for unit test
func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlock) error {
var l2Blocks []L2Block
for _, block := range blocks {
header, err := json.Marshal(block.Header)
if err != nil {
log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err)
return fmt.Errorf("L2Block.InsertL2Blocks error: %w, block hash: %v", err, block.Header.Hash().String())
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}
txs, err := json.Marshal(block.Transactions)
if err != nil {
log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err)
return fmt.Errorf("L2Block.InsertL2Blocks error: %w, block hash: %v", err, block.Header.Hash().String())
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}
l2Block := L2Block{
Number: block.Header.Number.Uint64(),
Hash: block.Header.Hash().String(),
ParentHash: block.Header.ParentHash.String(),
Transactions: string(txs),
WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(),
TxNum: uint32(len(block.Transactions)),
GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time,
Header: string(header),
Number: block.Header.Number.Uint64(),
Hash: block.Header.Hash().String(),
ParentHash: block.Header.ParentHash.String(),
Transactions: string(txs),
WithdrawRoot: block.WithdrawRoot.Hex(),
StateRoot: block.Header.Root.Hex(),
TxNum: uint32(len(block.Transactions)),
GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time,
Header: string(header),
}
l2Blocks = append(l2Blocks, l2Block)
}
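Note: the l2_block row now carries withdraw_root plus a state_root that simply denormalizes Header.Root, while the header itself stays JSON-encoded. A sketch of reading such a row back; the storedL2Block projection and the go-ethereum fork import paths are assumptions, not part of this diff:

package sketch // illustrative placement

import (
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"     // assumed fork import path
	"github.com/scroll-tech/go-ethereum/core/types" // assumed fork import path
)

// storedL2Block is an illustrative projection of the columns touched above.
type storedL2Block struct {
	Header       string
	StateRoot    string
	WithdrawRoot string
}

// decodeHeader rebuilds the header from its JSON column and checks the
// denormalized state_root against Header.Root; withdraw_root has no in-header
// counterpart, so it can only come from its own column.
func decodeHeader(row storedL2Block) (*types.Header, common.Hash, error) {
	var header types.Header
	if err := json.Unmarshal([]byte(row.Header), &header); err != nil {
		return nil, common.Hash{}, fmt.Errorf("decode header: %w", err)
	}
	if header.Root != common.HexToHash(row.StateRoot) {
		return nil, common.Hash{}, fmt.Errorf("state_root mismatch: %s vs %s", header.Root.Hex(), row.StateRoot)
	}
	return &header, common.HexToHash(row.WithdrawRoot), nil
}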

View File

@@ -68,9 +68,9 @@ func TestProverTaskOrm(t *testing.T) {
proverTask := ProverTask{
TaskID: "test-hash",
ProverName: "roller-0",
ProverName: "prover-0",
ProverPublicKey: "0",
ProvingStatus: int16(types.RollerAssigned),
ProvingStatus: int16(types.ProverAssigned),
Reward: decimal.NewFromBigInt(reward, 0),
AssignedAt: utils.NowUTC(),
}
@@ -87,7 +87,7 @@ func TestProverTaskOrm(t *testing.T) {
assert.Equal(t, resultReward, reward)
assert.Equal(t, resultReward.String(), "18446744073709551616")
proverTask.ProvingStatus = int16(types.RollerProofValid)
proverTask.ProvingStatus = int16(types.ProverProofValid)
proverTask.AssignedAt = utils.NowUTC()
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
@@ -107,9 +107,9 @@ func TestProverTaskOrmUint256(t *testing.T) {
rewardUint256.SetString("115792089237316195423570985008687907853269984665640564039457584007913129639935", 10)
proverTask := ProverTask{
TaskID: "test-hash",
ProverName: "roller-0",
ProverName: "prover-0",
ProverPublicKey: "0",
ProvingStatus: int16(types.RollerAssigned),
ProvingStatus: int16(types.ProverAssigned),
Reward: decimal.NewFromBigInt(rewardUint256, 0),
AssignedAt: utils.NowUTC(),
}
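Note: the reward column holds full 256-bit values, which these tests exercise via decimal.NewFromBigInt. Assuming the decimal package is github.com/shopspring/decimal (not stated in this hunk), the uint256 round trip looks like this:

package main

import (
	"fmt"
	"math/big"

	"github.com/shopspring/decimal"
)

func main() {
	// 2^256 - 1: far beyond uint64, so the reward is stored as an exact decimal.
	reward := new(big.Int)
	reward.SetString("115792089237316195423570985008687907853269984665640564039457584007913129639935", 10)

	d := decimal.NewFromBigInt(reward, 0) // exponent 0: the value is reward * 10^0
	back := d.BigInt()                    // convert back without losing precision

	fmt.Println(back.Cmp(reward) == 0) // true
	fmt.Println(d.String())
}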

View File

@@ -13,7 +13,7 @@ import (
"scroll-tech/common/types/message"
)
// ProverTask is assigned rollers info of chunk/batch proof prover task
// ProverTask is assigned provers info of chunk/batch proof prover task
type ProverTask struct {
db *gorm.DB `gorm:"column:-"`
@@ -115,7 +115,7 @@ func (o *ProverTask) GetProverTaskByTaskIDAndPubKey(ctx context.Context, taskID,
func (o *ProverTask) GetAssignedProverTasks(ctx context.Context, limit int) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("proving_status", int(types.RollerAssigned))
db = db.Where("proving_status", int(types.ProverAssigned))
db = db.Limit(limit)
var proverTasks []ProverTask
@@ -146,7 +146,7 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,
}
// UpdateProverTaskProvingStatus updates the proving_status of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus, dbTX ...*gorm.DB) error {
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
@@ -162,7 +162,7 @@ func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofTyp
}
// UpdateAllProverTaskProvingStatusOfTaskID updates all the proving_status of a specific task id.
func (o *ProverTask) UpdateAllProverTaskProvingStatusOfTaskID(ctx context.Context, proofType message.ProofType, taskID string, status types.RollerProveStatus, dbTX ...*gorm.DB) error {
func (o *ProverTask) UpdateAllProverTaskProvingStatusOfTaskID(ctx context.Context, proofType message.ProofType, taskID string, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]

View File

@@ -9,8 +9,8 @@ import (
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used for recover types.Transactions, the from of types.TransactionData field is missing.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
Transactions []*types.TransactionData `json:"transactions"`
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}
// BatchInfo contains the BlockBatch's main info
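Note: only the Go identifier changes here; the JSON tag stays withdraw_trie_root, so block traces serialized before the rename still decode into the new field. A small sketch of that compatibility, assuming the fork's common package:

package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/scroll-tech/go-ethereum/common" // assumed fork import path
)

// wrappedBlockLite mirrors just the renamed field: new Go name, old JSON key.
type wrappedBlockLite struct {
	WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}

func main() {
	// A payload written before the rename still uses the old key...
	legacy := []byte(`{"withdraw_trie_root":"0x` + strings.Repeat("0", 63) + `1"}`)
	var b wrappedBlockLite
	if err := json.Unmarshal(legacy, &b); err != nil {
		panic(err)
	}
	fmt.Println(b.WithdrawRoot.Hex()) // ...0001

	// ...and re-encoding keeps emitting the old key, so consumers are unaffected.
	out, err := json.Marshal(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}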

View File

@@ -5,17 +5,17 @@ import (
"scroll-tech/common/types/message"
)
// RollersInfo is assigned rollers info of a task (session)
type RollersInfo struct {
// ProversInfo is assigned provers info of a task (session)
type ProversInfo struct {
ID string `json:"id"`
RollerStatusList []*RollerStatus `json:"rollers"`
ProverStatusList []*ProverStatus `json:"provers"`
StartTimestamp int64 `json:"start_timestamp"`
ProveType message.ProofType `json:"prove_type,omitempty"`
}
// RollerStatus is the roller name and roller prove status
type RollerStatus struct {
// ProverStatus is the prover name and prover prove status
type ProverStatus struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
Status types.RollerProveStatus `json:"status"`
Status types.ProverProveStatus `json:"status"`
}

View File

@@ -31,7 +31,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/cron"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/orm"
)
@@ -61,7 +61,7 @@ func randomURL() string {
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
func setupCoordinator(t *testing.T, rollersPerSession uint8, wsURL string, resetDB bool) (*http.Server, *cron.Collector) {
func setupCoordinator(t *testing.T, proversPerSession uint8, wsURL string, resetDB bool) (*http.Server, *cron.Collector) {
var err error
db, err = database.InitDB(dbCfg)
assert.NoError(t, err)
@@ -72,8 +72,8 @@ func setupCoordinator(t *testing.T, rollersPerSession uint8, wsURL string, reset
}
conf := config.Config{
RollerManagerConfig: &config.RollerManagerConfig{
RollersPerSession: rollersPerSession,
ProverManagerConfig: &config.ProverManagerConfig{
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{MockMode: true},
CollectionTime: 1,
TokenTimeToLive: 5,
@@ -85,7 +85,7 @@ func setupCoordinator(t *testing.T, rollersPerSession uint8, wsURL string, reset
tmpAPI := api.RegisterAPIs(&conf, db)
handler, _, err := utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], tmpAPI, flate.NoCompression)
assert.NoError(t, err)
rollermanager.InitRollerManager(db)
provermanager.InitProverManager(db)
return handler, proofCollector
}
@@ -139,7 +139,7 @@ func TestApis(t *testing.T) {
t.Run("TestInvalidProof", testInvalidProof)
t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
t.Run("TestTimeoutProof", testTimeoutProof)
t.Run("TestIdleRollerSelection", testIdleRollerSelection)
t.Run("TestIdleProverSelection", testIdleProverSelection)
t.Run("TestGracefulRestart", testGracefulRestart)
// Teardown
@@ -157,14 +157,14 @@ func testHandshake(t *testing.T) {
proofCollector.Stop()
}()
roller1 := newMockRoller(t, "roller_test", wsURL, message.ProofTypeChunk)
defer roller1.close()
prover1 := newMockProver(t, "prover_test", wsURL, message.ProofTypeChunk)
defer prover1.close()
roller2 := newMockRoller(t, "roller_test", wsURL, message.ProofTypeBatch)
defer roller2.close()
prover2 := newMockProver(t, "prover_test", wsURL, message.ProofTypeBatch)
defer prover2.close()
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 1, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, 1, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
}
func testFailedHandshake(t *testing.T) {
@@ -177,7 +177,7 @@ func testFailedHandshake(t *testing.T) {
}()
// prepare
name := "roller_test"
name := "prover_test"
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -222,7 +222,7 @@ func testFailedHandshake(t *testing.T) {
_, err = c.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
assert.Error(t, err)
assert.Equal(t, 0, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 0, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
}
func testSeveralConnections(t *testing.T) {
@@ -236,25 +236,25 @@ func testSeveralConnections(t *testing.T) {
var (
batch = 200
eg = errgroup.Group{}
rollers = make([]*mockRoller, batch)
provers = make([]*mockProver, batch)
)
for i := 0; i < batch; i += 2 {
idx := i
eg.Go(func() error {
rollers[idx] = newMockRoller(t, "roller_test_"+strconv.Itoa(idx), wsURL, message.ProofTypeChunk)
rollers[idx+1] = newMockRoller(t, "roller_test_"+strconv.Itoa(idx+1), wsURL, message.ProofTypeBatch)
provers[idx] = newMockProver(t, "prover_test_"+strconv.Itoa(idx), wsURL, message.ProofTypeChunk)
provers[idx+1] = newMockProver(t, "prover_test_"+strconv.Itoa(idx+1), wsURL, message.ProofTypeBatch)
return nil
})
}
assert.NoError(t, eg.Wait())
// check roller's idle connections
assert.Equal(t, batch/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, batch/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
// check prover's idle connections
assert.Equal(t, batch/2, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, batch/2, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
// close connection
for _, roller := range rollers {
roller.close()
for _, prover := range provers {
prover.close()
}
var (
@@ -264,11 +264,11 @@ func testSeveralConnections(t *testing.T) {
for {
select {
case <-tick:
if rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
if provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk) == 0 {
return
}
case <-tickStop:
t.Error("roller connect is blocked")
t.Error("prover connect is blocked")
return
}
}
@@ -282,33 +282,33 @@ func testValidProof(t *testing.T) {
collector.Stop()
}()
// create mock rollers.
rollers := make([]*mockRoller, 6)
for i := 0; i < len(rollers); i++ {
// create mock provers.
provers := make([]*mockProver, 6)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL, proofType)
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), wsURL, proofType)
// only roller 0 & 1 submit valid proofs.
// only prover 0 & 1 submit valid proofs.
proofStatus := generatedFailed
if i <= 1 {
proofStatus = verifiedSuccess
}
rollers[i].waitTaskAndSendProof(t, time.Second, false, proofStatus)
provers[i].waitTaskAndSendProof(t, time.Second, false, proofStatus)
}
defer func() {
// close connection
for _, roller := range rollers {
roller.close()
for _, prover := range provers {
prover.close()
}
}()
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 3, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, 3, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -350,26 +350,26 @@ func testInvalidProof(t *testing.T) {
collector.Stop()
}()
// create mock rollers.
rollers := make([]*mockRoller, 6)
for i := 0; i < len(rollers); i++ {
// create mock provers.
provers := make([]*mockProver, 6)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL, proofType)
rollers[i].waitTaskAndSendProof(t, time.Second, false, verifiedFailed)
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), wsURL, proofType)
provers[i].waitTaskAndSendProof(t, time.Second, false, verifiedFailed)
}
defer func() {
// close connection
for _, roller := range rollers {
roller.close()
for _, prover := range provers {
prover.close()
}
}()
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 3, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, 3, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -411,26 +411,26 @@ func testProofGeneratedFailed(t *testing.T) {
collector.Stop()
}()
// create mock rollers.
rollers := make([]*mockRoller, 6)
for i := 0; i < len(rollers); i++ {
// create mock provers.
provers := make([]*mockProver, 6)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL, proofType)
rollers[i].waitTaskAndSendProof(t, time.Second, false, generatedFailed)
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), wsURL, proofType)
provers[i].waitTaskAndSendProof(t, time.Second, false, generatedFailed)
}
defer func() {
// close connection
for _, roller := range rollers {
roller.close()
for _, prover := range provers {
prover.close()
}
}()
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 3, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, 3, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -472,16 +472,16 @@ func testTimeoutProof(t *testing.T) {
collector.Stop()
}()
// create first chunk & batch mock roller, that will not send any proof.
chunkRoller1 := newMockRoller(t, "roller_test"+strconv.Itoa(0), wsURL, message.ProofTypeChunk)
batchRoller1 := newMockRoller(t, "roller_test"+strconv.Itoa(1), wsURL, message.ProofTypeBatch)
// create first chunk & batch mock prover, that will not send any proof.
chunkProver1 := newMockProver(t, "prover_test"+strconv.Itoa(0), wsURL, message.ProofTypeChunk)
batchProver1 := newMockProver(t, "prover_test"+strconv.Itoa(1), wsURL, message.ProofTypeBatch)
defer func() {
// close connection
chunkRoller1.close()
batchRoller1.close()
chunkProver1.close()
batchProver1.close()
}()
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 1, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, 1, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -492,7 +492,7 @@ func testTimeoutProof(t *testing.T) {
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
// verify proof status, it should be assigned, because roller didn't send any proof
// verify proof status, it should be assigned, because prover didn't send any proof
ok := utils.TryTimes(30, func() bool {
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
if err != nil {
@@ -506,20 +506,20 @@ func testTimeoutProof(t *testing.T) {
})
assert.Falsef(t, !ok, "failed to check proof status")
// create second mock roller, that will send valid proof.
chunkRoller2 := newMockRoller(t, "roller_test"+strconv.Itoa(2), wsURL, message.ProofTypeChunk)
chunkRoller2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
batchRoller2 := newMockRoller(t, "roller_test"+strconv.Itoa(3), wsURL, message.ProofTypeBatch)
batchRoller2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
// create second mock prover, that will send valid proof.
chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), wsURL, message.ProofTypeChunk)
chunkProver2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), wsURL, message.ProofTypeBatch)
batchProver2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
defer func() {
// close connection
chunkRoller2.close()
batchRoller2.close()
chunkProver2.close()
batchProver2.close()
}()
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 1, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, 1, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
// verify proof status, it should be verified now, because second roller sent valid proof
// verify proof status, it should be verified now, because second prover sent valid proof
ok = utils.TryTimes(200, func() bool {
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
if err != nil {
@@ -534,7 +534,7 @@ func testTimeoutProof(t *testing.T) {
assert.Falsef(t, !ok, "failed to check proof status")
}
func testIdleRollerSelection(t *testing.T) {
func testIdleProverSelection(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
handler, collector := setupCoordinator(t, 1, wsURL, true)
@@ -543,27 +543,27 @@ func testIdleRollerSelection(t *testing.T) {
collector.Stop()
}()
// create mock rollers.
rollers := make([]*mockRoller, 20)
for i := 0; i < len(rollers); i++ {
// create mock provers.
provers := make([]*mockProver, 20)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL, proofType)
rollers[i].waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), wsURL, proofType)
provers[i].waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
}
defer func() {
// close connection
for _, roller := range rollers {
roller.close()
for _, prover := range provers {
prover.close()
}
}()
assert.Equal(t, len(rollers)/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, len(rollers)/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, len(provers)/2, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, len(provers)/2, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -610,24 +610,24 @@ func testGracefulRestart(t *testing.T) {
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
assert.NoError(t, err)
// create mock roller
chunkRoller := newMockRoller(t, "roller_test", wsURL, message.ProofTypeChunk)
batchRoller := newMockRoller(t, "roller_test", wsURL, message.ProofTypeBatch)
// wait 10 seconds, coordinator restarts before roller submits proof
chunkRoller.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)
batchRoller.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)
// create mock prover
chunkProver := newMockProver(t, "prover_test", wsURL, message.ProofTypeChunk)
batchProver := newMockProver(t, "prover_test", wsURL, message.ProofTypeBatch)
// wait 10 seconds, coordinator restarts before prover submits proof
chunkProver.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)
batchProver.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)
// wait for coordinator to dispatch task
<-time.After(5 * time.Second)
// the coordinator will delete the roller if the subscription is closed.
chunkRoller.close()
batchRoller.close()
// the coordinator will delete the prover if the subscription is closed.
chunkProver.close()
batchProver.close()
provingStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskAssigned, provingStatus)
// Close rollerManager and ws handler.
// Close proverManager and ws handler.
handler.Shutdown(context.Background())
collector.Stop()
@@ -638,7 +638,7 @@ func testGracefulRestart(t *testing.T) {
newCollector.Stop()
}()
// at this point, roller haven't submitted
// at this point, prover haven't submitted
status, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskAssigned, status)
@@ -646,12 +646,12 @@ func testGracefulRestart(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskUnassigned, status) // chunk proofs not ready yet
// will overwrite the roller client for `SubmitProof`
chunkRoller.waitTaskAndSendProof(t, time.Second, true, verifiedSuccess)
batchRoller.waitTaskAndSendProof(t, time.Second, true, verifiedSuccess)
// will overwrite the prover client for `SubmitProof`
chunkProver.waitTaskAndSendProof(t, time.Second, true, verifiedSuccess)
batchProver.waitTaskAndSendProof(t, time.Second, true, verifiedSuccess)
defer func() {
chunkRoller.close()
batchRoller.close()
chunkProver.close()
batchProver.close()
}()
// verify proof status
@@ -662,8 +662,8 @@ func testGracefulRestart(t *testing.T) {
for {
select {
case <-tick:
// this proves that the roller submits to the new coordinator,
// because the roller client for `submitProof` has been overwritten
// this proves that the prover submits to the new coordinator,
// because the prover client for `submitProof` has been overwritten
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
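Note: the status checks in these tests poll with utils.TryTimes until the expected proving status appears or the attempt budget runs out. The helper below is a hypothetical stand-in, not the project's utils.TryTimes (whose pause between attempts is not shown here), capturing the same retry-until-true pattern:

package main

import (
	"fmt"
	"time"
)

// pollUntil retries a condition a fixed number of times with a pause between
// attempts and reports whether it ever became true. It mirrors the shape of
// the utils.TryTimes calls above; the interval handling is an assumption.
func pollUntil(attempts int, interval time.Duration, cond func() bool) bool {
	for i := 0; i < attempts; i++ {
		if cond() {
			return true
		}
		time.Sleep(interval)
	}
	return false
}

func main() {
	ready := time.Now().Add(250 * time.Millisecond)
	ok := pollUntil(10, 100*time.Millisecond, func() bool {
		// In the tests the closure queries the ORM, e.g. GetProvingStatusByHash,
		// and compares the result against the expected proving status.
		return time.Now().After(ready)
	})
	fmt.Println(ok) // true once the condition holds within the attempt budget
}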

View File

@@ -25,8 +25,8 @@ const (
generatedFailed
)
type mockRoller struct {
rollerName string
type mockProver struct {
proverName string
privKey *ecdsa.PrivateKey
proofType message.ProofType
@@ -40,26 +40,26 @@ type mockRoller struct {
stopCh chan struct{}
}
func newMockRoller(t *testing.T, rollerName string, wsURL string, proofType message.ProofType) *mockRoller {
func newMockProver(t *testing.T, proverName string, wsURL string, proofType message.ProofType) *mockProver {
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
roller := &mockRoller{
rollerName: rollerName,
prover := &mockProver{
proverName: proverName,
privKey: privKey,
proofType: proofType,
wsURL: wsURL,
taskCh: make(chan *message.TaskMsg, 4),
stopCh: make(chan struct{}),
}
roller.client, roller.sub, err = roller.connectToCoordinator()
prover.client, prover.sub, err = prover.connectToCoordinator()
assert.NoError(t, err)
return roller
return prover
}
// connectToCoordinator sets up a websocket client to connect to the roller manager.
func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscription, error) {
// connectToCoordinator sets up a websocket client to connect to the prover manager.
func (r *mockProver) connectToCoordinator() (*client2.Client, ethereum.Subscription, error) {
// Create connection.
client, err := client2.Dial(r.wsURL)
if err != nil {
@@ -69,8 +69,8 @@ func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscript
// create a new ws connection
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.rollerName,
RollerType: r.proofType,
Name: r.proverName,
ProverType: r.proofType,
},
}
_ = authMsg.SignWithKey(r.privKey)
@@ -90,7 +90,7 @@ func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscript
return client, sub, nil
}
func (r *mockRoller) releaseTasks() {
func (r *mockProver) releaseTasks() {
r.taskCache.Range(func(key, value any) bool {
r.taskCh <- value.(*message.TaskMsg)
r.taskCache.Delete(key)
@@ -98,10 +98,10 @@ func (r *mockRoller) releaseTasks() {
})
}
// Wait for the proof task, after receiving the proof task, roller submits proof after proofTime secs.
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, proofStatus proofStatus) {
// simulating the case that the roller first disconnects and then reconnects to the coordinator
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
// Wait for the proof task, after receiving the proof task, prover submits proof after proofTime secs.
func (r *mockProver) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, proofStatus proofStatus) {
// simulating the case that the prover first disconnects and then reconnects to the coordinator
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeProver()`
if reconnect {
var err error
r.client, r.sub, err = r.connectToCoordinator()
@@ -118,7 +118,7 @@ func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration,
go r.loop(t, r.client, proofTime, proofStatus, r.stopCh)
}
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, proofStatus proofStatus, stopCh chan struct{}) {
func (r *mockProver) loop(t *testing.T, client *client2.Client, proofTime time.Duration, proofStatus proofStatus, stopCh chan struct{}) {
for {
select {
case task := <-r.taskCh:
@@ -150,7 +150,7 @@ func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.D
}
}
func (r *mockRoller) close() {
func (r *mockProver) close() {
close(r.stopCh)
r.sub.Unsubscribe()
}
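Note: the handshake in connectToCoordinator boils down to signing an identity payload that names the prover and its proof type; the coordinator rejects connections whose payload or signature it cannot verify (see testFailedHandshake above). A condensed sketch of just that step; the crypto import path and the error return of SignWithKey are assumptions, and any token exchange in the elided lines is omitted:

package sketch // illustrative placement

import (
	"github.com/scroll-tech/go-ethereum/crypto" // assumed fork import path

	"scroll-tech/common/types/message"
)

// buildAuthMsg prepares the signed handshake message a prover presents when
// registering with the coordinator.
func buildAuthMsg(name string, proofType message.ProofType) (*message.AuthMsg, error) {
	privKey, err := crypto.GenerateKey()
	if err != nil {
		return nil, err
	}
	authMsg := &message.AuthMsg{
		Identity: &message.Identity{
			Name:       name,
			ProverType: proofType,
		},
	}
	// The mock prover discards this error; a production caller should not.
	if err := authMsg.SignWithKey(privKey); err != nil {
		return nil, err
	}
	return authMsg, nil
}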

View File

@@ -24,13 +24,13 @@ comment
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';
create unique index l1_message_hash_uindex
on l1_message (msg_hash);
on l1_message (msg_hash) where deleted_at IS NULL;
create unique index l1_message_nonce_uindex
on l1_message (queue_index);
on l1_message (queue_index) where deleted_at IS NULL;
create index l1_message_height_index
on l1_message (height);
on l1_message (height) where deleted_at IS NULL;
-- +goose StatementEnd
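Note: this and the following migration hunks all make the same change: plain unique indexes become partial indexes restricted to rows where deleted_at IS NULL. With gorm-style soft deletion a deleted row keeps its msg_hash, number, or index value, so re-inserting the same value would otherwise trip the unique constraint. A minimal gorm sketch of the behaviour the partial index is meant to permit; the model, table, and DSN below are illustrative, not the real schema:

package main

import (
	"fmt"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

// demoMessage is an illustrative soft-deleted model: the gorm.DeletedAt field
// makes Delete() set deleted_at instead of removing the row.
type demoMessage struct {
	ID        uint           `gorm:"primaryKey"`
	MsgHash   string         `gorm:"column:msg_hash"`
	DeletedAt gorm.DeletedAt `gorm:"column:deleted_at"`
}

func main() {
	dsn := "host=localhost user=postgres dbname=demo sslmode=disable" // placeholder DSN
	db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
	if err != nil {
		panic(err)
	}

	db.Create(&demoMessage{MsgHash: "0xabc"})
	db.Where("msg_hash = ?", "0xabc").Delete(&demoMessage{}) // soft delete: only sets deleted_at

	// Under the old `unique (msg_hash)` index this insert collides with the
	// soft-deleted row; a partial index `unique (msg_hash) where deleted_at IS NULL`
	// constrains live rows only, so the re-insert succeeds.
	err = db.Create(&demoMessage{MsgHash: "0xabc"}).Error
	fmt.Println(err)
}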

View File

@@ -22,10 +22,10 @@ comment
on column l1_block.oracle_status is 'undefined, pending, importing, imported, failed';
create unique index l1_block_hash_uindex
on l1_block (hash);
on l1_block (hash) where deleted_at IS NULL;
create unique index l1_block_number_uindex
on l1_block (number);
on l1_block (number) where deleted_at IS NULL;
-- +goose StatementEnd

View File

@@ -9,7 +9,8 @@ create table l2_block
parent_hash VARCHAR NOT NULL,
header TEXT NOT NULL,
transactions TEXT NOT NULL,
withdraw_trie_root VARCHAR NOT NULL,
withdraw_root VARCHAR NOT NULL,
state_root VARCHAR NOT NULL,
tx_num INTEGER NOT NULL,
gas_used BIGINT NOT NULL,
block_timestamp NUMERIC NOT NULL,
@@ -24,13 +25,13 @@ create table l2_block
);
create unique index l2_block_hash_uindex
on l2_block (hash);
on l2_block (hash) where deleted_at IS NULL;
create unique index l2_block_number_uindex
on l2_block (number);
on l2_block (number) where deleted_at IS NULL;
create index l2_block_chunk_hash_index
on l2_block (chunk_hash);
on l2_block (chunk_hash) where deleted_at IS NULL;
-- +goose StatementEnd

View File

@@ -13,6 +13,10 @@ create table chunk
total_l1_messages_popped_before BIGINT NOT NULL,
total_l1_messages_popped_in_chunk INTEGER NOT NULL,
start_block_time BIGINT NOT NULL,
parent_chunk_hash VARCHAR NOT NULL,
state_root VARCHAR NOT NULL,
parent_chunk_state_root VARCHAR NOT NULL,
withdraw_root VARCHAR NOT NULL,
-- proof
proving_status SMALLINT NOT NULL DEFAULT 1,
@@ -38,13 +42,13 @@ comment
on column chunk.proving_status is 'undefined, unassigned, assigned, proved, verified, failed';
create unique index chunk_index_uindex
on chunk (index);
on chunk (index) where deleted_at IS NULL;
create unique index chunk_hash_uindex
on chunk (hash);
on chunk (hash) where deleted_at IS NULL;
create index batch_hash_index
on chunk (batch_hash);
on chunk (batch_hash) where deleted_at IS NULL;
-- +goose StatementEnd

View File

@@ -12,6 +12,7 @@ create table batch
end_chunk_hash VARCHAR NOT NULL,
state_root VARCHAR NOT NULL,
withdraw_root VARCHAR NOT NULL,
parent_batch_hash VARCHAR NOT NULL,
batch_header BYTEA NOT NULL,
-- proof
@@ -40,10 +41,10 @@ create table batch
);
create unique index batch_index_uindex
on batch (index);
on batch (index) where deleted_at IS NULL;
create unique index batch_hash_uindex
on batch (hash);
on batch (hash) where deleted_at IS NULL;
comment
on column batch.chunk_proofs_status is 'undefined, pending, ready';

View File

@@ -32,7 +32,7 @@ comment
on column prover_task.task_type is 'undefined, chunk, batch';
comment
on column prover_task.proving_status is 'undefined, roller assigned, roller proof valid, roller proof invalid';
on column prover_task.proving_status is 'undefined, prover assigned, prover proof valid, prover proof invalid';
comment
on column prover_task.failure_type is 'undefined';

View File

@@ -7,6 +7,6 @@ use (
./coordinator
./database
./prover-stats-api
./roller
./prover
./tests/integration-test
)

View File

@@ -26,7 +26,7 @@ var (
TaskID: "1",
ProverPublicKey: proverPubkey,
ProverName: "prover-0",
ProvingStatus: int16(types.RollerAssigned),
ProvingStatus: int16(types.ProverAssigned),
Reward: decimal.NewFromInt(10),
}
@@ -34,7 +34,7 @@ var (
TaskID: "2",
ProverPublicKey: proverPubkey,
ProverName: "prover-1",
ProvingStatus: int16(types.RollerAssigned),
ProvingStatus: int16(types.ProverAssigned),
Reward: decimal.NewFromInt(12),
}

View File

@@ -67,9 +67,9 @@ func TestProverTaskOrm(t *testing.T) {
proverTask := ProverTask{
TaskID: "test-hash",
ProverName: "roller-0",
ProverName: "prover-0",
ProverPublicKey: "0",
ProvingStatus: int16(types.RollerAssigned),
ProvingStatus: int16(types.ProverAssigned),
Reward: decimal.NewFromBigInt(reward, 0),
}
@@ -84,7 +84,7 @@ func TestProverTaskOrm(t *testing.T) {
assert.Equal(t, resultReward, reward)
assert.Equal(t, resultReward.String(), "18446744073709551616")
proverTask.ProvingStatus = int16(types.RollerProofValid)
proverTask.ProvingStatus = int16(types.ProverProofValid)
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
getTask, err = proverTaskOrm.GetProverTasksByHash(context.Background(), "test-hash")
@@ -102,9 +102,9 @@ func TestProverTaskOrmUint256(t *testing.T) {
rewardUint256.SetString("115792089237316195423570985008687907853269984665640564039457584007913129639935", 10)
proverTask := ProverTask{
TaskID: "test-hash",
ProverName: "roller-0",
ProverName: "prover-0",
ProverPublicKey: "0",
ProvingStatus: int16(types.RollerAssigned),
ProvingStatus: int16(types.ProverAssigned),
Reward: decimal.NewFromBigInt(rewardUint256, 0),
}

View File

@@ -12,7 +12,7 @@ import (
"gorm.io/gorm"
)
// ProverTask is assigned rollers info of chunk/batch proof prover task
// ProverTask is assigned provers info of chunk/batch proof prover task
type ProverTask struct {
db *gorm.DB `gorm:"column:-"`

View File

@@ -123,7 +123,7 @@ var (
TaskID: "1",
ProverPublicKey: proverPubkey,
ProverName: "prover-0",
ProvingStatus: int16(types.RollerAssigned),
ProvingStatus: int16(types.ProverAssigned),
Reward: decimal.NewFromInt(10),
}
@@ -131,7 +131,7 @@ var (
TaskID: "2",
ProverPublicKey: proverPubkey,
ProverName: "prover-1",
ProvingStatus: int16(types.RollerAssigned),
ProvingStatus: int16(types.ProverAssigned),
Reward: decimal.NewFromInt(12),
}
)

Some files were not shown because too many files have changed in this diff