Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-12 07:28:08 -05:00

Compare commits: 13 commits, develop...feat/refac
| Author | SHA1 | Date |
|---|---|---|
| | e69fc7f64a | |
| | 05e287fd07 | |
| | 467dfbe37c | |
| | 258812b92c | |
| | 3fa646ab29 | |
| | ae826cd86f | |
| | e78c89c8e9 | |
| | a3c18b99a4 | |
| | d94dcafed0 | |
| | 27d6538b82 | |
| | f6d4ab087a | |
| | 51bbd0d56f | |
| | f166787d00 | |
Makefile (13 changes)
@@ -1,7 +1,5 @@
 .PHONY: check update dev_docker build_test_docker run_test_docker clean

-ZKP_VERSION=release-1220
-
 help: ## Display this help message
 	@grep -h \
 		-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -40,16 +38,5 @@ build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
 run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
 	docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app scroll_test_image

-test_zkp: ## Test zkp prove and verify, roller/prover generates the proof and coordinator/verifier verifies it
-	mkdir -p test_params
-	wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params19 -O ./test_params/params19
-	wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params26 -O ./test_params/params26
-	wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_seed -O test_seed
-	rm -rf ./roller/assets/test_params && mv test_params ./roller/assets/ && mv test_seed ./roller/assets/
-	cd ./roller && make test-gpu-prover
-	rm -rf ./coordinator/assets/test_params && mv ./roller/assets/test_params ./coordinator/assets/ && mv ./roller/assets/agg_proof ./coordinator/assets/
-	cd ./coordinator && make test-gpu-verifier
-
 clean: ## Empty out the bin folder
 	@rm -rf build/bin
File diff suppressed because one or more lines are too long
@@ -73,6 +73,19 @@ func TestPackFinalizeBatchWithProof(t *testing.T) {
 	assert.NoError(err)
 }

+func TestPackImportGenesisBatch(t *testing.T) {
+	assert := assert.New(t)
+
+	l1RollupABI, err := ScrollChainMetaData.GetAbi()
+	assert.NoError(err)
+
+	batchHeader := []byte{}
+	stateRoot := common.Hash{}
+
+	_, err = l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
+	assert.NoError(err)
+}
+
 func TestPackRelayL1Message(t *testing.T) {
 	assert := assert.New(t)
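For reference, the calldata this test packs is the 4-byte function selector followed by the ABI encoding of the two arguments. A minimal sketch of where the selector comes from; the Solidity signature here is an assumption inferred from the Go argument types (`[]byte` maps to `bytes`, `common.Hash` to `bytes32`):

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// Assumed method signature, inferred from the Go call
	// Pack("importGenesisBatch", batchHeader, stateRoot).
	sig := []byte("importGenesisBatch(bytes,bytes32)")

	// The selector is the first four bytes of keccak256(signature);
	// abi.Pack prepends it to the encoded arguments.
	fmt.Printf("selector: 0x%x\n", crypto.Keccak256(sig)[:4])
}
```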
@@ -83,7 +83,7 @@ func action(ctx *cli.Context) error {
 		log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
 		return err
 	}
-	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
+	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */)
 	if err != nil {
 		log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
 		return err
@@ -31,6 +31,7 @@ func init() {
 	app.Usage = "The Scroll Rollup Relayer"
 	app.Version = version.Version
 	app.Flags = append(app.Flags, cutils.CommonFlags...)
+	app.Flags = append(app.Flags, cutils.RollupRelayerFlags...)
 	app.Commands = []*cli.Command{}
 	app.Before = func(ctx *cli.Context) error {
 		return cutils.LogSetup(ctx)
@@ -70,7 +71,8 @@ func action(ctx *cli.Context) error {
 		return err
 	}

-	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
+	initGenesis := ctx.Bool(cutils.ImportGenesisFlag.Name)
+	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
 	if err != nil {
 		log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
 		return err
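The wiring above follows the usual urfave/cli v2 pattern: declare a `BoolFlag`, register it on the app, and read it in the action with `ctx.Bool`. A minimal, self-contained sketch of that pattern (names mirror the diff; the surrounding app is simplified):

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

// Mirrors cutils.ImportGenesisFlag from the diff.
var importGenesisFlag = cli.BoolFlag{
	Name:  "import-genesis",
	Usage: "Import genesis batch into L1 contract during startup",
	Value: false, // off by default; pass --import-genesis to enable
}

func main() {
	app := cli.NewApp()
	app.Flags = append(app.Flags, &importGenesisFlag)
	app.Action = func(ctx *cli.Context) error {
		// Same lookup the rollup relayer performs before constructing
		// the Layer2Relayer.
		initGenesis := ctx.Bool(importGenesisFlag.Name)
		fmt.Println("initGenesis:", initGenesis)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

Started with `--import-genesis`, the relayer imports and commits the genesis batch once during startup; without the flag it starts exactly as before.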
@@ -3,8 +3,10 @@ package relayer

 import (
 	"context"
 	"errors"
+	"fmt"
+	"math/big"
 	"sync"
 	"time"

 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
@@ -42,6 +44,7 @@ type Layer2Relayer struct {

 	l2Client *ethclient.Client

 	db           *gorm.DB
 	batchOrm     *orm.Batch
+	chunkOrm     *orm.Chunk
 	l2BlockOrm   *orm.L2Block
@@ -78,7 +81,7 @@ type Layer2Relayer struct {
 }

 // NewLayer2Relayer will return a new instance of Layer2RelayerClient
-func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
+func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool) (*Layer2Relayer, error) {
 	// @todo use different sender for relayer, block commit and proof finalize
 	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
 	if err != nil {
@@ -115,6 +118,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.

 	layer2Relayer := &Layer2Relayer{
 		ctx: ctx,
 		db:  db,

 		batchOrm:     orm.NewBatch(db),
+		chunkOrm:     orm.NewChunk(db),
 		l2MessageOrm: orm.NewL2Message(db),
@@ -142,10 +146,127 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
 		processingCommitment:   sync.Map{},
 		processingFinalization: sync.Map{},
 	}

+	// Initialize genesis before we do anything else
+	if initGenesis {
+		if err := layer2Relayer.initializeGenesis(); err != nil {
+			return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
+		}
+	}
+
 	go layer2Relayer.handleConfirmLoop(ctx)
 	return layer2Relayer, nil
 }

+func (r *Layer2Relayer) initializeGenesis() error {
+	if count, err := r.batchOrm.GetBatchCount(r.ctx); err != nil {
+		return fmt.Errorf("failed to get batch count: %v", err)
+	} else if count > 0 {
+		log.Info("genesis already imported", "batch count", count)
+		return nil
+	}
+
+	genesis, err := r.l2Client.HeaderByNumber(r.ctx, big.NewInt(0))
+	if err != nil {
+		return fmt.Errorf("failed to retrieve L2 genesis header: %v", err)
+	}
+
+	log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
+
+	chunk := &bridgeTypes.Chunk{
+		Blocks: []*bridgeTypes.WrappedBlock{{
+			Header:           genesis,
+			Transactions:     nil,
+			WithdrawTrieRoot: common.Hash{},
+		}},
+	}
+
+	err = r.db.Transaction(func(dbTX *gorm.DB) error {
+		var dbChunk *orm.Chunk
+		dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, dbTX)
+		if err != nil {
+			return fmt.Errorf("failed to insert chunk: %v", err)
+		}
+
+		if err = r.chunkOrm.UpdateProvingStatus(r.ctx, dbChunk.Hash, types.ProvingTaskVerified, dbTX); err != nil {
+			return fmt.Errorf("failed to update genesis chunk proving status: %v", err)
+		}
+
+		var batch *orm.Batch
+		batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*bridgeTypes.Chunk{chunk}, dbTX)
+		if err != nil {
+			return fmt.Errorf("failed to insert batch: %v", err)
+		}
+
+		if err = r.chunkOrm.UpdateBatchHashInRange(r.ctx, 0, 0, batch.Hash, dbTX); err != nil {
+			return fmt.Errorf("failed to update batch hash for chunks: %v", err)
+		}
+
+		if err = r.batchOrm.UpdateProvingStatus(r.ctx, batch.Hash, types.ProvingTaskVerified, dbTX); err != nil {
+			return fmt.Errorf("failed to update genesis batch proving status: %v", err)
+		}
+
+		if err = r.batchOrm.UpdateRollupStatus(r.ctx, batch.Hash, types.RollupFinalized, dbTX); err != nil {
+			return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
+		}
+
+		// commit genesis batch on L1
+		// note: we do this inside the DB transaction so that we can revert all DB changes if this step fails
+		return r.commitGenesisBatch(batch.Hash, batch.BatchHeader, common.HexToHash(batch.StateRoot))
+	})
+
+	if err != nil {
+		return fmt.Errorf("update genesis transaction failed: %v", err)
+	}
+
+	log.Info("successfully imported genesis chunk and batch")
+
+	return nil
+}
+
+func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
+	// encode "importGenesisBatch" transaction calldata
+	calldata, err := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
+	if err != nil {
+		return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, err)
+	}
+
+	// submit genesis batch to L1 rollup contract
+	txHash, err := r.rollupSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
+	if err != nil {
+		return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
+	}
+	log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash)
+
+	// wait for confirmation
+	// we assume that no other transactions are sent before initializeGenesis completes
+	ticker := time.NewTicker(10 * time.Second)
+	defer ticker.Stop()
+
+	for {
+		select {
+		// print progress
+		case <-ticker.C:
+			log.Info("Waiting for confirmation, pending count: %d", r.rollupSender.PendingCount())
+
+		// timeout
+		case <-time.After(5 * time.Minute):
+			return fmt.Errorf("import genesis timeout after 5 minutes, original txHash: %v", txHash)
+
+		// handle confirmation
+		case confirmation := <-r.rollupSender.ConfirmChan():
+			if confirmation.ID != batchHash {
+				return fmt.Errorf("unexpected import genesis confirmation id, expected: %v, got: %v", batchHash, confirmation.ID)
+			}
+			if !confirmation.IsSuccessful {
+				return fmt.Errorf("import genesis batch tx failed")
+			}
+			log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash)
+			return nil
+		}
+	}
+}
+
 // ProcessGasPriceOracle imports gas price to layer1
 func (r *Layer2Relayer) ProcessGasPriceOracle() {
 	batch, err := r.batchOrm.GetLatestBatch(r.ctx)
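Two details of the confirmation loop above are worth flagging. First, `time.After` inside a `for`/`select` arms a fresh timer on every iteration, so with the ticker firing every 10 seconds the 5-minute timeout case appears unreachable. Second, the progress log passes a printf verb (`%d`) to a structured key-value logger. A sketch of the loop with a hard deadline, using a simplified confirmation channel as a stand-in (this is an illustration, not the committed code):

```go
package sketch

import (
	"errors"
	"log"
	"time"
)

// waitConfirm is an illustrative stand-in for the wait loop in
// commitGenesisBatch; the confirmation channel is simplified to a
// bool. The timer is armed once before the loop, so the 5-minute
// deadline holds no matter how often the ticker fires.
func waitConfirm(confirm <-chan bool) error {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()

	deadline := time.NewTimer(5 * time.Minute) // armed once, never re-armed
	defer deadline.Stop()

	for {
		select {
		case <-ticker.C:
			log.Println("still waiting for confirmation")
		case <-deadline.C:
			return errors.New("import genesis timeout after 5 minutes")
		case ok := <-confirm:
			if !ok {
				return errors.New("import genesis batch tx failed")
			}
			return nil
		}
	}
}
```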
@@ -35,7 +35,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
 func testCreateNewRelayer(t *testing.T) {
 	db := setupL2RelayerDB(t)
 	defer bridgeUtils.CloseDB(db)
-	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
+	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
 	assert.NoError(t, err)
 	assert.NotNil(t, relayer)
 }
@@ -45,7 +45,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
 	defer bridgeUtils.CloseDB(db)

 	l2Cfg := cfg.L2Config
-	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
+	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
 	assert.NoError(t, err)

 	l2BlockOrm := orm.NewL2Block(db)
@@ -57,12 +57,12 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
 	dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
 	assert.NoError(t, err)
 	batchOrm := orm.NewBatch(db)
-	batchHash, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*bridgeTypes.Chunk{chunk1, chunk2})
+	batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*bridgeTypes.Chunk{chunk1, chunk2})
 	assert.NoError(t, err)

 	relayer.ProcessPendingBatches()

-	statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
+	statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
 	assert.NoError(t, err)
 	assert.Equal(t, 1, len(statuses))
 	assert.Equal(t, types.RollupCommitting, statuses[0])
@@ -73,36 +73,36 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
 	defer bridgeUtils.CloseDB(db)

 	l2Cfg := cfg.L2Config
-	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
+	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
 	assert.NoError(t, err)
 	batchOrm := orm.NewBatch(db)
-	batchHash, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
+	batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
 	assert.NoError(t, err)

-	err = batchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
+	err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
 	assert.NoError(t, err)

-	err = batchOrm.UpdateProvingStatus(context.Background(), batchHash, types.ProvingTaskVerified)
+	err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
 	assert.NoError(t, err)

 	relayer.ProcessCommittedBatches()

-	statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
+	statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
 	assert.NoError(t, err)
 	assert.Equal(t, 1, len(statuses))
 	assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])

-	err = batchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
+	err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
 	assert.NoError(t, err)
 	proof := &message.AggProof{
 		Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
 		FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
 	}
-	err = batchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
+	err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
 	assert.NoError(t, err)

 	relayer.ProcessCommittedBatches()
-	statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
+	statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
 	assert.NoError(t, err)
 	assert.Equal(t, 1, len(statuses))
 	assert.Equal(t, types.RollupFinalizing, statuses[0])
@@ -113,26 +113,26 @@ func testL2RelayerSkipBatches(t *testing.T) {
 	defer bridgeUtils.CloseDB(db)

 	l2Cfg := cfg.L2Config
-	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
+	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
 	assert.NoError(t, err)

 	batchOrm := orm.NewBatch(db)
 	createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus) string {
-		batchHash, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
+		batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
 		assert.NoError(t, err)

-		err = batchOrm.UpdateRollupStatus(context.Background(), batchHash, rollupStatus)
+		err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, rollupStatus)
 		assert.NoError(t, err)

 		proof := &message.AggProof{
 			Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
 			FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
 		}
-		err = batchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
+		err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
 		assert.NoError(t, err)
-		err = batchOrm.UpdateProvingStatus(context.Background(), batchHash, provingStatus)
+		err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, provingStatus)
 		assert.NoError(t, err)
-		return batchHash
+		return batch.Hash
 	}

 	skipped := []string{
@@ -177,7 +177,7 @@ func testL2RelayerRollupConfirm(t *testing.T) {
 	l2Cfg := cfg.L2Config
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
+	l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
 	assert.NoError(t, err)

 	// Simulate message confirmations.
@@ -187,9 +187,9 @@ func testL2RelayerRollupConfirm(t *testing.T) {
 	batchOrm := orm.NewBatch(db)
 	batchHashes := make([]string, len(processingKeys))
 	for i := range batchHashes {
-		var err error
-		batchHashes[i], err = batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
+		batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
 		assert.NoError(t, err)
+		batchHashes[i] = batch.Hash
 	}

 	for i, key := range processingKeys[:2] {
@@ -235,17 +235,17 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
 	defer bridgeUtils.CloseDB(db)

 	batchOrm := orm.NewBatch(db)
-	batchHash1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
+	batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
 	assert.NoError(t, err)

-	batchHash2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
+	batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
 	assert.NoError(t, err)

 	// Create and set up the Layer2 Relayer.
 	l2Cfg := cfg.L2Config
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
+	l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
 	assert.NoError(t, err)

 	// Simulate message confirmations.
@@ -255,8 +255,8 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
 	}

 	confirmations := []BatchConfirmation{
-		{batchHash: batchHash1, isSuccessful: true},
-		{batchHash: batchHash2, isSuccessful: false},
+		{batchHash: batch1.Hash, isSuccessful: true},
+		{batchHash: batch2.Hash, isSuccessful: false},
 	}

 	for _, confirmation := range confirmations {
@@ -283,7 +283,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
 	db := setupL2RelayerDB(t)
 	defer bridgeUtils.CloseDB(db)

-	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
+	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
 	assert.NoError(t, err)
 	assert.NotNil(t, relayer)
@@ -72,14 +72,13 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
 	endChunkIndex := dbChunks[numChunks-1].Index
 	endChunkHash := dbChunks[numChunks-1].Hash
 	err = p.db.Transaction(func(dbTX *gorm.DB) error {
-		var batchHash string
-		batchHash, err = p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
-		if err != nil {
-			return err
+		batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
+		if dbErr != nil {
+			return dbErr
 		}
-		err = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batchHash, dbTX)
-		if err != nil {
-			return err
+		dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
+		if dbErr != nil {
+			return dbErr
 		}
 		return nil
 	})
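Both this call site and `initializeGenesis` in the relayer lean on the same gorm contract: `db.Transaction` runs the closure inside a transaction, rolls every write back if the closure returns a non-nil error, and commits if it returns nil. A minimal sketch of that contract (the callback names are illustrative):

```go
package sketch

import "gorm.io/gorm"

// runAtomically shows the gorm transaction contract the diff relies
// on: if either step fails, the write performed by the first step is
// rolled back as well; returning nil commits both writes atomically.
func runAtomically(db *gorm.DB, insertBatch, linkChunks func(*gorm.DB) error) error {
	return db.Transaction(func(dbTX *gorm.DB) error {
		if err := insertBatch(dbTX); err != nil {
			return err // triggers rollback
		}
		if err := linkChunks(dbTX); err != nil {
			return err // rolls back insertBatch's write too
		}
		return nil // commit
	})
}
```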
@@ -44,10 +44,7 @@ type L2WatcherClient struct {

 	*ethclient.Client

-	db           *gorm.DB
 	l2BlockOrm   *orm.L2Block
-	chunkOrm     *orm.Chunk
-	batchOrm     *orm.Batch
 	l1MessageOrm *orm.L1Message
 	l2MessageOrm *orm.L2Message
@@ -77,12 +74,9 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
 	w := L2WatcherClient{
 		ctx:    ctx,
-		db:     db,
 		Client: client,

 		l2BlockOrm:   orm.NewL2Block(db),
-		chunkOrm:     orm.NewChunk(db),
-		batchOrm:     orm.NewBatch(db),
 		l1MessageOrm: orm.NewL1Message(db),
 		l2MessageOrm: l2MessageOrm,
 		processedMsgHeight: uint64(savedHeight),
@@ -98,78 +92,9 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
 		stopped: 0,
 	}

-	// Initialize genesis before we do anything else
-	if err := w.initializeGenesis(); err != nil {
-		panic(fmt.Sprintf("failed to initialize L2 genesis batch, err: %v", err))
-	}
-
 	return &w
 }

-func (w *L2WatcherClient) initializeGenesis() error {
-	if count, err := w.batchOrm.GetBatchCount(w.ctx); err != nil {
-		return fmt.Errorf("failed to get batch count: %v", err)
-	} else if count > 0 {
-		log.Info("genesis already imported", "batch count", count)
-		return nil
-	}
-
-	genesis, err := w.HeaderByNumber(w.ctx, big.NewInt(0))
-	if err != nil {
-		return fmt.Errorf("failed to retrieve L2 genesis header: %v", err)
-	}
-
-	log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
-
-	chunk := &bridgeTypes.Chunk{
-		Blocks: []*bridgeTypes.WrappedBlock{{
-			Header:           genesis,
-			Transactions:     nil,
-			WithdrawTrieRoot: common.Hash{},
-		}},
-	}
-
-	err = w.db.Transaction(func(dbTX *gorm.DB) error {
-		var dbChunk *orm.Chunk
-		dbChunk, err = w.chunkOrm.InsertChunk(w.ctx, chunk, dbTX)
-		if err != nil {
-			return fmt.Errorf("failed to insert chunk: %v", err)
-		}
-
-		if err = w.chunkOrm.UpdateProvingStatus(w.ctx, dbChunk.Hash, types.ProvingTaskVerified, dbTX); err != nil {
-			return fmt.Errorf("failed to update genesis chunk proving status: %v", err)
-		}
-
-		var batchHash string
-		batchHash, err = w.batchOrm.InsertBatch(w.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*bridgeTypes.Chunk{chunk}, dbTX)
-		if err != nil {
-			return fmt.Errorf("failed to insert batch: %v", err)
-		}
-
-		if err = w.chunkOrm.UpdateBatchHashInRange(w.ctx, 0, 0, batchHash, dbTX); err != nil {
-			return fmt.Errorf("failed to update batch hash for chunks: %v", err)
-		}
-
-		if err = w.batchOrm.UpdateProvingStatus(w.ctx, batchHash, types.ProvingTaskVerified, dbTX); err != nil {
-			return fmt.Errorf("failed to update genesis batch proving status: %v", err)
-		}
-
-		if err = w.batchOrm.UpdateRollupStatus(w.ctx, batchHash, types.RollupFinalized, dbTX); err != nil {
-			return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
-		}
-
-		return nil
-	})
-
-	if err != nil {
-		return fmt.Errorf("update genesis transaction failed: %v", err)
-	}
-
-	log.Info("successfully imported genesis chunk and batch")
-
-	return nil
-}
-
 const blockTracesFetchLimit = uint64(10)

 // TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
@@ -189,9 +189,9 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
 }

 // InsertBatch inserts a new batch into the database.
-func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*bridgeTypes.Chunk, dbTX ...*gorm.DB) (string, error) {
+func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
 	if len(chunks) == 0 {
-		return "", errors.New("invalid args")
+		return nil, errors.New("invalid args")
 	}

 	db := o.db
@@ -202,7 +202,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
 	parentBatch, err := o.GetLatestBatch(ctx)
 	if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
 		log.Error("failed to get the latest batch", "err", err)
-		return "", err
+		return nil, err
 	}

 	var batchIndex uint64
@@ -221,7 +221,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
 		parentBatchHeader, err = bridgeTypes.DecodeBatchHeader(parentBatch.BatchHeader)
 		if err != nil {
 			log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
-			return "", err
+			return nil, err
 		}

 		totalL1MessagePoppedBefore = parentBatchHeader.TotalL1MessagePopped()
@@ -233,7 +233,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
 		log.Error("failed to create batch header",
 			"index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
 			"parent hash", parentBatchHash, "number of chunks", len(chunks), "err", err)
-		return "", err
+		return nil, err
 	}

 	numChunks := len(chunks)
@@ -255,10 +255,10 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex

 	if err := db.WithContext(ctx).Create(&newBatch).Error; err != nil {
 		log.Error("failed to insert batch", "batch", newBatch, "err", err)
-		return "", err
+		return nil, err
 	}

-	return newBatch.Hash, nil
+	return &newBatch, nil
 }

 // UpdateSkippedBatches updates the skipped batches in the database.
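The net effect of the signature change: callers now get the whole inserted row back rather than just its hash, which is what lets the relayer's genesis path read `batch.BatchHeader` and `batch.StateRoot` without a second query. A hypothetical caller (names illustrative; `orm`, `bridgeTypes`, and `log` refer to the repo-internal packages used throughout the diff):

```go
// insertAndLog sketches the new calling convention after the change:
// InsertBatch returns the full *orm.Batch record, so any column of
// the new row is available, not only the hash string.
func insertAndLog(ctx context.Context, batchOrm *orm.Batch, chunks []*bridgeTypes.Chunk) (string, error) {
	batch, err := batchOrm.InsertBatch(ctx, 0, 0, "", "", chunks)
	if err != nil {
		return "", err
	}
	log.Info("inserted batch", "index", batch.Index, "hash", batch.Hash)
	return batch.Hash, nil
}
```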
@@ -188,20 +188,22 @@ func TestBatchOrm(t *testing.T) {
 	assert.NoError(t, err)
 	assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())

-	hash1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
+	batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
 	assert.NoError(t, err)
+	hash1 := batch1.Hash

-	batch1, err := batchOrm.GetBatchByIndex(context.Background(), 0)
+	batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
 	assert.NoError(t, err)
 	batchHeader1, err := bridgeTypes.DecodeBatchHeader(batch1.BatchHeader)
 	assert.NoError(t, err)
 	batchHash1 := batchHeader1.Hash().Hex()
 	assert.Equal(t, hash1, batchHash1)

-	hash2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
+	batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
 	assert.NoError(t, err)
+	hash2 := batch2.Hash

-	batch2, err := batchOrm.GetBatchByIndex(context.Background(), 1)
+	batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
 	assert.NoError(t, err)
 	batchHeader2, err := bridgeTypes.DecodeBatchHeader(batch2.BatchHeader)
 	assert.NoError(t, err)
@@ -68,7 +68,7 @@ func testImportL2GasPrice(t *testing.T) {
 	prepareContracts(t)

 	l2Cfg := bridgeApp.Config.L2Config
-	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
+	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
 	assert.NoError(t, err)

 	// add fake chunk
@@ -29,7 +29,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {

 	// Create L2Relayer
 	l2Cfg := bridgeApp.Config.L2Config
-	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
+	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
 	assert.NoError(t, err)

 	// Create L1Watcher
@@ -5,8 +5,6 @@ import (
 	"database/sql"
 	"fmt"
 	"time"
-
-	"scroll-tech/common/types/message"
 )

 // L1BlockStatus represents current l1 block processing status
@@ -161,11 +159,18 @@ type RollerStatus struct {

 // SessionInfo is assigned rollers info of a block batch (session)
 type SessionInfo struct {
-	ID             string                   `json:"id"`
-	Rollers        map[string]*RollerStatus `json:"rollers"`
-	StartTimestamp int64                    `json:"start_timestamp"`
-	Attempts       uint8                    `json:"attempts,omitempty"`
-	ProveType      message.ProveType        `json:"prove_type,omitempty"`
+	ID              int        `json:"id" db:"id"`
+	TaskID          string     `json:"task_id" db:"task_id"`
+	RollerPublicKey string     `json:"roller_public_key" db:"roller_public_key"`
+	ProveType       int        `json:"prove_type" db:"prove_type"`
+	RollerName      string     `json:"roller_name" db:"roller_name"`
+	ProvingStatus   int        `json:"proving_status" db:"proving_status"`
+	FailureType     int        `json:"failure_type" db:"failure_type"`
+	Reward          uint64     `json:"reward" db:"reward"`
+	Proof           []byte     `json:"proof" db:"proof"`
+	CreatedAt       *time.Time `json:"created_at" db:"created_at"`
+	UpdatedAt       *time.Time `json:"updated_at" db:"updated_at"`
+	DeletedAt       *time.Time `json:"deleted_at" db:"deleted_at"`
 }

 // ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
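The struct now mirrors a database row, one row per roller per task, instead of an in-memory map keyed by session ID; the added `db` tags are the column mapping. A hypothetical query helper for the new layout (this assumes github.com/jmoiron/sqlx, which is what the tag style suggests, and a Postgres-style placeholder; the repo's actual ORM method names may differ):

```go
package sketch

import (
	"time"

	"github.com/jmoiron/sqlx"
)

// SessionInfo is abbreviated here to the fields the helper touches.
type SessionInfo struct {
	TaskID          string     `db:"task_id"`
	RollerPublicKey string     `db:"roller_public_key"`
	ProvingStatus   int        `db:"proving_status"`
	CreatedAt       *time.Time `db:"created_at"`
}

// getSessionInfosByTaskID loads every roller row attached to one task;
// grouping rows by task_id is what restorePrevSessions does after a
// coordinator restart.
func getSessionInfosByTaskID(db *sqlx.DB, taskID string) ([]*SessionInfo, error) {
	var rows []*SessionInfo
	err := db.Select(&rows,
		"SELECT task_id, roller_public_key, proving_status, created_at FROM session_info WHERE task_id = $1",
		taskID)
	return rows, err
}
```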
@@ -16,6 +16,10 @@ var (
 		&MetricsAddr,
 		&MetricsPort,
 	}
+	// RollupRelayerFlags contains flags only used in rollup-relayer
+	RollupRelayerFlags = []cli.Flag{
+		&ImportGenesisFlag,
+	}
 	// ConfigFileFlag load json type config file.
 	ConfigFileFlag = cli.StringFlag{
 		Name: "config",
@@ -66,4 +70,10 @@ var (
 		Category: "METRICS",
 		Value:    6060,
 	}
+	// ImportGenesisFlag import genesis batch during startup
+	ImportGenesisFlag = cli.BoolFlag{
+		Name:  "import-genesis",
+		Usage: "Import genesis batch into L1 contract during startup",
+		Value: false,
+	}
 )
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )

-var tag = "v4.0.1"
+var tag = "v4.0.2"

 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
@@ -71,7 +71,7 @@ Reference testnet [run_deploy_contracts.sh](https://github.com/scroll-tech/testn

 ## Deployment using Foundry

-Note: The Foundry scripts take parameters like `CHAIN_ID_L2` and `L1_ZK_ROLLUP_PROXY_ADDR` as environment variables.
+Note: The Foundry scripts take parameters like `CHAIN_ID_L2` and `L1_SCROLL_CHAIN_PROXY_ADDR` as environment variables.

 ```bash
 # allexport
@@ -101,4 +101,4 @@ $ source .env.l2_addresses
 # Initialize contracts
 $ forge script scripts/foundry/InitializeL1BridgeContracts.s.sol:InitializeL1BridgeContracts --rpc-url $SCROLL_L1_RPC --broadcast
 $ forge script scripts/foundry/InitializeL2BridgeContracts.s.sol:InitializeL2BridgeContracts --rpc-url $SCROLL_L2_RPC --broadcast
 ```
@@ -11,7 +11,7 @@ async function main() {

 	const ScrollChainCommitmentVerifier = await ethers.getContractFactory("ScrollChainCommitmentVerifier", deployer);

-	const L1ScrollChainAddress = process.env.L1_ZK_ROLLUP_PROXY_ADDR!;
+	const L1ScrollChainAddress = process.env.L1_SCROLL_CHAIN_PROXY_ADDR!;
 	let PoseidonUnit2Address = process.env.POSEIDON_UNIT2_ADDR;

 	if (!PoseidonUnit2Address) {
@@ -7,18 +7,18 @@ import {console} from "forge-std/console.sol";
 import {ProxyAdmin} from "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol";
 import {TransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";

+import {EnforcedTxGateway} from "../../src/L1/gateways/EnforcedTxGateway.sol";
 import {L1CustomERC20Gateway} from "../../src/L1/gateways/L1CustomERC20Gateway.sol";
 import {L1ERC1155Gateway} from "../../src/L1/gateways/L1ERC1155Gateway.sol";
 import {L1ERC721Gateway} from "../../src/L1/gateways/L1ERC721Gateway.sol";
 import {L1ETHGateway} from "../../src/L1/gateways/L1ETHGateway.sol";
 import {L1GatewayRouter} from "../../src/L1/gateways/L1GatewayRouter.sol";
+import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
 import {L1ScrollMessenger} from "../../src/L1/L1ScrollMessenger.sol";
 import {L1StandardERC20Gateway} from "../../src/L1/gateways/L1StandardERC20Gateway.sol";
 import {L1WETHGateway} from "../../src/L1/gateways/L1WETHGateway.sol";
-import {EnforcedTxGateway} from "../../src/L1/gateways/EnforcedTxGateway.sol";
-import {RollupVerifier} from "../../src/libraries/verifier/RollupVerifier.sol";
-import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
 import {L2GasPriceOracle} from "../../src/L1/rollup/L2GasPriceOracle.sol";
+import {MultipleVersionRollupVerifier} from "../../src/L1/rollup/MultipleVersionRollupVerifier.sol";
 import {ScrollChain} from "../../src/L1/rollup/ScrollChain.sol";
 import {Whitelist} from "../../src/L2/predeploys/Whitelist.sol";
@@ -29,14 +29,14 @@ contract DeployL1BridgeContracts is Script {

     address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
     address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
+    address L1_ZKEVM_VERIFIER_ADDR = vm.envAddress("L1_ZKEVM_VERIFIER_ADDR");

     ProxyAdmin proxyAdmin;

     function run() external {
         vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);

-        // note: the RollupVerifier library is deployed implicitly
-
+        deployMultipleVersionRollupVerifier();
         deployProxyAdmin();
         deployL1Whitelist();
         deployL1MessageQueue();
@@ -55,6 +55,12 @@ contract DeployL1BridgeContracts is Script {
         vm.stopBroadcast();
     }

+    function deployMultipleVersionRollupVerifier() internal {
+        MultipleVersionRollupVerifier rollupVerifier = new MultipleVersionRollupVerifier(L1_ZKEVM_VERIFIER_ADDR);
+
+        logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));
+    }
+
     function deployProxyAdmin() internal {
         proxyAdmin = new ProxyAdmin();
@@ -76,8 +82,8 @@ contract DeployL1BridgeContracts is Script {
             new bytes(0)
         );

-        logAddress("L1_ZK_ROLLUP_IMPLEMENTATION_ADDR", address(impl));
-        logAddress("L1_ZK_ROLLUP_PROXY_ADDR", address(proxy));
+        logAddress("L1_SCROLL_CHAIN_IMPLEMENTATION_ADDR", address(impl));
+        logAddress("L1_SCROLL_CHAIN_PROXY_ADDR", address(proxy));
     }

     function deployL1MessageQueue() internal {
@@ -170,8 +176,8 @@ contract DeployL1BridgeContracts is Script {
             new bytes(0)
         );

-        logAddress("ENFORCED_TX_GATEWAY_IMPLEMENTATION_ADDR", address(impl));
-        logAddress("ENFORCED_TX_GATEWAY_PROXY_ADDR", address(proxy));
+        logAddress("L1_ENFORCED_TX_GATEWAY_IMPLEMENTATION_ADDR", address(impl));
+        logAddress("L1_ENFORCED_TX_GATEWAY_PROXY_ADDR", address(proxy));
     }

     function deployL1CustomERC20Gateway() internal {
@@ -1,36 +1,36 @@
 // SPDX-License-Identifier: UNLICENSED
 pragma solidity ^0.8.10;

-import { Script } from "forge-std/Script.sol";
-import { console } from "forge-std/console.sol";
+import {Script} from "forge-std/Script.sol";
+import {console} from "forge-std/console.sol";

-import { ScrollChainCommitmentVerifier } from "../../src/L1/rollup/ScrollChainCommitmentVerifier.sol";
+import {ScrollChainCommitmentVerifier} from "../../src/L1/rollup/ScrollChainCommitmentVerifier.sol";

 contract DeployScrollChainCommitmentVerifier is Script {
     uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");

-    address L1_ZK_ROLLUP_PROXY_ADDR = vm.envAddress("L1_ZK_ROLLUP_PROXY_ADDR");
+    address L1_SCROLL_CHAIN_PROXY_ADDR = vm.envAddress("L1_SCROLL_CHAIN_PROXY_ADDR");

     address POSEIDON_UNIT2_ADDR = vm.envAddress("POSEIDON_UNIT2_ADDR");

     function run() external {
         vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);

         deployScrollChainCommitmentVerifier();

         vm.stopBroadcast();
     }

     function deployScrollChainCommitmentVerifier() internal {
         ScrollChainCommitmentVerifier verifier = new ScrollChainCommitmentVerifier(
             POSEIDON_UNIT2_ADDR,
-            L1_ZK_ROLLUP_PROXY_ADDR
+            L1_SCROLL_CHAIN_PROXY_ADDR
         );

         logAddress("L1_SCROLL_CHAIN_COMMITMENT_VERIFIER", address(verifier));
     }

     function logAddress(string memory name, address addr) internal view {
         console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
     }
 }
@@ -20,14 +20,13 @@ contract InitializeL1BridgeContracts is Script {
     uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");

     uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");
-    uint256 MAX_L2_TX_IN_CHUNK = vm.envOr("MAX_L2_TX_IN_CHUNK", uint256(44));
+    uint256 MAX_L2_TX_IN_CHUNK = vm.envUint("MAX_L2_TX_IN_CHUNK");
     address L1_ROLLUP_OPERATOR_ADDR = vm.envAddress("L1_ROLLUP_OPERATOR_ADDR");

     address L1_FEE_VAULT_ADDR = vm.envAddress("L1_FEE_VAULT_ADDR");

     address L1_WHITELIST_ADDR = vm.envAddress("L1_WHITELIST_ADDR");
-    address L1_ZK_ROLLUP_PROXY_ADDR = vm.envAddress("L1_ZK_ROLLUP_PROXY_ADDR");
-    address L1_ROLLUP_VERIFIER_ADDR = vm.envAddress("L1_ROLLUP_VERIFIER_ADDR");
+    address L1_SCROLL_CHAIN_PROXY_ADDR = vm.envAddress("L1_SCROLL_CHAIN_PROXY_ADDR");
     address L1_MESSAGE_QUEUE_PROXY_ADDR = vm.envAddress("L1_MESSAGE_QUEUE_PROXY_ADDR");
     address L2_GAS_PRICE_ORACLE_PROXY_ADDR = vm.envAddress("L2_GAS_PRICE_ORACLE_PROXY_ADDR");
     address L1_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L1_SCROLL_MESSENGER_PROXY_ADDR");
@@ -38,7 +37,8 @@ contract InitializeL1BridgeContracts is Script {
     address L1_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ETH_GATEWAY_PROXY_ADDR");
     address L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
     address L1_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_WETH_GATEWAY_PROXY_ADDR");
-    address ENFORCED_TX_GATEWAY_PROXY_ADDR = vm.envAddress("ENFORCED_TX_GATEWAY_PROXY_ADDR");
+    address L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR = vm.envAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR");
+    address L1_ENFORCED_TX_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ENFORCED_TX_GATEWAY_PROXY_ADDR");

     address L2_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L2_SCROLL_MESSENGER_PROXY_ADDR");
     address L2_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L2_GATEWAY_ROUTER_PROXY_ADDR");
@@ -55,8 +55,12 @@ contract InitializeL1BridgeContracts is Script {
         vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);

         // initialize ScrollChain
-        ScrollChain(L1_ZK_ROLLUP_PROXY_ADDR).initialize(L1_MESSAGE_QUEUE_PROXY_ADDR, L1_ROLLUP_VERIFIER_ADDR, MAX_L2_TX_IN_CHUNK);
-        ScrollChain(L1_ZK_ROLLUP_PROXY_ADDR).updateSequencer(L1_ROLLUP_OPERATOR_ADDR, true);
+        ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).initialize(
+            L1_MESSAGE_QUEUE_PROXY_ADDR,
+            L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR,
+            MAX_L2_TX_IN_CHUNK
+        );
+        ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).updateSequencer(L1_ROLLUP_OPERATOR_ADDR, true);

         // initialize L2GasPriceOracle
         L2GasPriceOracle(L2_GAS_PRICE_ORACLE_PROXY_ADDR).initialize(0, 0, 0, 0);
@@ -65,8 +69,8 @@ contract InitializeL1BridgeContracts is Script {
         // initialize L1MessageQueue
         L1MessageQueue(L1_MESSAGE_QUEUE_PROXY_ADDR).initialize(
             L1_SCROLL_MESSENGER_PROXY_ADDR,
-            L1_ZK_ROLLUP_PROXY_ADDR,
-            ENFORCED_TX_GATEWAY_PROXY_ADDR,
+            L1_SCROLL_CHAIN_PROXY_ADDR,
+            L1_ENFORCED_TX_GATEWAY_PROXY_ADDR,
             L2_GAS_PRICE_ORACLE_PROXY_ADDR,
             10000000
         );
@@ -75,13 +79,13 @@ contract InitializeL1BridgeContracts is Script {
         L1ScrollMessenger(payable(L1_SCROLL_MESSENGER_PROXY_ADDR)).initialize(
             L2_SCROLL_MESSENGER_PROXY_ADDR,
             L1_FEE_VAULT_ADDR,
-            L1_ZK_ROLLUP_PROXY_ADDR,
+            L1_SCROLL_CHAIN_PROXY_ADDR,
             L1_MESSAGE_QUEUE_PROXY_ADDR
         );

         // initialize EnforcedTxGateway
-        EnforcedTxGateway(payable(ENFORCED_TX_GATEWAY_PROXY_ADDR)).initialize(
-            L2_SCROLL_MESSENGER_PROXY_ADDR,
+        EnforcedTxGateway(payable(L1_ENFORCED_TX_GATEWAY_PROXY_ADDR)).initialize(
             L1_MESSAGE_QUEUE_PROXY_ADDR,
             L1_FEE_VAULT_ADDR
         );
@@ -15,7 +15,7 @@ async function main() {

 	const [deployer] = await ethers.getSigners();

-	const rollupAddr = process.env.L1_ZK_ROLLUP_PROXY_ADDR || addressFile.get("ScrollChain.proxy") || "0x";
+	const rollupAddr = process.env.L1_SCROLL_CHAIN_PROXY_ADDR || addressFile.get("ScrollChain.proxy") || "0x";
 	console.log("Using rollup proxy address:", rollupAddr);

 	const ScrollChain = await ethers.getContractAt("ScrollChain", rollupAddr, deployer);
@@ -51,10 +51,12 @@ func (m *Manager) ListRollers() ([]*RollerInfo, error) {
 			PublicKey: pk,
 		}
 		for id, sess := range m.sessions {
-			if _, ok := sess.info.Rollers[pk]; ok {
-				info.ActiveSessionStartTime = time.Unix(sess.info.StartTimestamp, 0)
-				info.ActiveSession = id
-				break
+			for _, sessionInfo := range sess.sessionInfos {
+				if sessionInfo.RollerPublicKey == pk {
+					info.ActiveSessionStartTime = *sessionInfo.CreatedAt
+					info.ActiveSession = id
+					break
+				}
 			}
 		}
 		res = append(res, info)
@@ -66,14 +68,16 @@ func (m *Manager) ListRollers() ([]*RollerInfo, error) {
 func newSessionInfo(sess *session, status types.ProvingStatus, errMsg string, finished bool) *SessionInfo {
 	now := time.Now()
 	var nameList []string
-	for pk := range sess.info.Rollers {
-		nameList = append(nameList, sess.info.Rollers[pk].Name)
+	var taskID string
+	for _, sessionInfo := range sess.sessionInfos {
+		taskID = sessionInfo.TaskID
+		nameList = append(nameList, sessionInfo.RollerName)
 	}
 	info := SessionInfo{
-		ID:              sess.info.ID,
+		ID:              taskID,
 		Status:          status.String(),
 		AssignedRollers: nameList,
-		StartTime:       time.Unix(sess.info.StartTimestamp, 0),
+		StartTime:       *sess.sessionInfos[0].CreatedAt,
 		Error:           errMsg,
 	}
 	if finished {
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -57,7 +57,7 @@ type rollerProofStatus struct {

 // Contains all the information on an ongoing proof generation session.
 type session struct {
-	info *types.SessionInfo
+	sessionInfos []*types.SessionInfo
 	// finish channel is used to pass the public key of the rollers who finished proving process.
 	finishChan chan rollerProofStatus
 }
@@ -248,24 +248,20 @@ func (m *Manager) restorePrevSessions() {
 		log.Error("failed to recover roller session info from db", "error", err)
 		return
 	}

+	sessionInfosMaps := make(map[string][]*types.SessionInfo)
 	for _, v := range prevSessions {
+		log.Info("restore roller info for session", "session start time", v.CreatedAt, "session id", v.TaskID, "roller name",
+			v.RollerName, "prove type", v.ProveType, "public key", v.RollerPublicKey, "proof status", v.ProvingStatus)
+		sessionInfosMaps[v.TaskID] = append(sessionInfosMaps[v.TaskID], v)
+	}
+
+	for taskID, sessionInfos := range sessionInfosMaps {
 		sess := &session{
-			info:       v,
-			finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
+			sessionInfos: sessionInfos,
+			finishChan:   make(chan rollerProofStatus, proofAndPkBufferSize),
 		}
-		m.sessions[sess.info.ID] = sess
-
-		log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
-		for _, roller := range sess.info.Rollers {
-			log.Info(
-				"restore roller info for session",
-				"session id", sess.info.ID,
-				"roller name", roller.Name,
-				"prove type", sess.info.ProveType,
-				"public key", roller.PublicKey,
-				"proof status", roller.Status)
-		}
-
+		m.sessions[taskID] = sess
 		go m.CollectProofs(sess)
 	}
@@ -287,36 +283,40 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
 	if !ok {
 		return fmt.Errorf("proof generation session for id %v does not existID", msg.ID)
 	}
-	proofTime := time.Since(time.Unix(sess.info.StartTimestamp, 0))
+
+	var tmpSessionInfo *types.SessionInfo
+	for _, si := range sess.sessionInfos {
+		// get the send session info of this proof msg
+		if si.TaskID == msg.ID && si.RollerPublicKey == pk {
+			tmpSessionInfo = si
+		}
+	}
+
+	if tmpSessionInfo == nil {
+		return fmt.Errorf("proof generation session for id %v pk:%s does not existID", msg.ID, pk)
+	}
+
+	proofTime := time.Since(*tmpSessionInfo.CreatedAt)
 	proofTimeSec := uint64(proofTime.Seconds())

-	// Ensure this roller is eligible to participate in the session.
-	roller, ok := sess.info.Rollers[pk]
-	if !ok {
-		return fmt.Errorf("roller %s %s (%s) is not eligible to partake in proof session %v", roller.Name, sess.info.ProveType, roller.PublicKey, msg.ID)
-	}
-	if roller.Status == types.RollerProofValid {
+	if types.RollerProveStatus(tmpSessionInfo.ProvingStatus) == types.RollerProofValid {
 		// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
 		// TODO: Defend invalid proof resubmissions by one of the following two methods:
 		// (i) slash the roller for each submission of invalid proof
 		// (ii) set the maximum failure retry times
 		log.Warn(
 			"roller has already submitted valid proof in proof session",
-			"roller name", roller.Name,
-			"roller pk", roller.PublicKey,
-			"prove type", sess.info.ProveType,
+			"roller name", tmpSessionInfo.RollerName,
+			"roller pk", tmpSessionInfo.RollerPublicKey,
+			"prove type", tmpSessionInfo.ProveType,
 			"proof id", msg.ID,
 		)
 		return nil
 	}
-	log.Info(
-		"handling zk proof",
-		"proof id", msg.ID,
-		"roller name", roller.Name,
-		"roller pk", roller.PublicKey,
-		"prove type", sess.info.ProveType,
-		"proof time", proofTimeSec,
-	)
+
+	log.Info("handling zk proof", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName, "roller pk",
+		tmpSessionInfo.RollerPublicKey, "prove type", tmpSessionInfo.ProveType, "proof time", proofTimeSec)

 	defer func() {
 		// TODO: maybe we should use db tx for the whole process?
@@ -344,12 +344,12 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {

 	if msg.Status != message.StatusOk {
 		coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
-		m.updateMetricRollerProofsGeneratedFailedTimeTimer(roller.PublicKey, proofTime)
+		m.updateMetricRollerProofsGeneratedFailedTimeTimer(tmpSessionInfo.RollerPublicKey, proofTime)
 		log.Info(
 			"proof generated by roller failed",
 			"proof id", msg.ID,
-			"roller name", roller.Name,
-			"roller pk", roller.PublicKey,
+			"roller name", tmpSessionInfo.RollerName,
+			"roller pk", tmpSessionInfo.RollerPublicKey,
 			"prove type", msg.Type,
 			"proof time", proofTimeSec,
 			"error", msg.Error,
@@ -383,8 +383,8 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
 	if verifyErr != nil {
 		// TODO: this is only a temp workaround for testnet, we should return err in real cases
 		success = false
-		log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", roller.Name,
-			"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
+		log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName,
+			"roller pk", tmpSessionInfo.RollerPublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
 		// TODO: Roller needs to be slashed if proof is invalid.
 	}
@@ -411,18 +411,32 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
 		}

 		coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
-		m.updateMetricRollerProofsVerifiedSuccessTimeTimer(roller.PublicKey, proofTime)
-		log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", roller.Name,
-			"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec)
+		m.updateMetricRollerProofsVerifiedSuccessTimeTimer(tmpSessionInfo.RollerPublicKey, proofTime)
+		log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName,
+			"roller pk", tmpSessionInfo.RollerPublicKey, "prove type", msg.Type, "proof time", proofTimeSec)
 	} else {
 		coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
-		m.updateMetricRollerProofsVerifiedFailedTimeTimer(roller.PublicKey, proofTime)
-		log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", roller.Name,
-			"roller pk", roller.PublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
+		m.updateMetricRollerProofsVerifiedFailedTimeTimer(tmpSessionInfo.RollerPublicKey, proofTime)
+		log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", tmpSessionInfo.RollerName,
+			"roller pk", tmpSessionInfo.RollerPublicKey, "prove type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
 	}
 	return nil
 }

+// checkAttempts use the count of session info to check the attempts
+func (m *Manager) checkAttempts(hash string) bool {
+	sessionInfos, err := m.orm.GetSessionInfosByHashes([]string{hash})
+	if err != nil {
+		log.Error("get session info error", "hash id", hash, "error", err)
+		return true
+	}
+
+	if len(sessionInfos) >= int(m.cfg.SessionAttempts) {
+		return true
+	}
+	return false
+}
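checkAttempts makes the retry bound a property of the stored data rather than an in-memory counter: each assignment writes one session_info row per (task, roller) pair, so a task's attempt count is the number of rows carrying its TaskID and it survives coordinator restarts. An illustrative in-memory equivalent (hypothetical helper, not from the diff):

```go
// attemptsExhausted mirrors checkAttempts' logic over an in-memory
// slice of task IDs (one element per stored session_info row): once
// the rows referencing a task reach the configured limit, the session
// is not replayed again.
func attemptsExhausted(rowTaskIDs []string, taskID string, maxAttempts int) bool {
	count := 0
	for _, id := range rowTaskIDs {
		if id == taskID {
			count++
		}
	}
	return count >= maxAttempts
}
```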
// CollectProofs collects proofs corresponding to a proof generation session.
func (m *Manager) CollectProofs(sess *session) {
coordinatorSessionsActiveNumberGauge.Inc(1)

@@ -433,47 +447,52 @@ func (m *Manager) CollectProofs(sess *session) {
//Execute after timeout, set in config.json. Consider all rollers failed.
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
// Check if session can be replayed
if sess.info.Attempts < m.cfg.SessionAttempts {
if len(sess.sessionInfos) == 0 {
log.Error("there is no session info in session")
return
}

if !m.checkAttempts(sess.sessionInfos[0].TaskID) {
var success bool
if sess.info.ProveType == message.AggregatorProve {
if message.ProveType(sess.sessionInfos[0].ProveType) == message.AggregatorProve {
success = m.StartAggProofGenerationSession(nil, sess)
} else if sess.info.ProveType == message.BasicProve {
} else if message.ProveType(sess.sessionInfos[0].ProveType) == message.BasicProve {
success = m.StartBasicProofGenerationSession(nil, sess)
}
if success {
m.mu.Lock()
for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
for _, v := range sess.sessionInfos {
m.freeTaskIDForRoller(v.RollerPublicKey, v.TaskID)
}
m.mu.Unlock()
log.Info("Retrying session", "session id:", sess.info.ID)
log.Info("Retrying session", "session id:", sess.sessionInfos[0].TaskID)
return
}
}
// record failed session.
errMsg := "proof generation session ended without receiving any valid proofs"
m.addFailedSession(sess, errMsg)
log.Warn(errMsg, "session id", sess.info.ID)
log.Warn(errMsg, "session id", sess.sessionInfos[0].TaskID)
// Set status as skipped.
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
if sess.info.ProveType == message.BasicProve {
if err := m.orm.UpdateProvingStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset basic task_status as Unassigned", "id", sess.info.ID, "err", err)
if message.ProveType(sess.sessionInfos[0].ProveType) == message.BasicProve {
if err := m.orm.UpdateProvingStatus(sess.sessionInfos[0].TaskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset basic task_status as Unassigned", "id", sess.sessionInfos[0].TaskID, "err", err)
}
}
if sess.info.ProveType == message.AggregatorProve {
if err := m.orm.UpdateAggTaskStatus(sess.info.ID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset aggregator task_status as Unassigned", "id", sess.info.ID, "err", err)
if message.ProveType(sess.sessionInfos[0].ProveType) == message.AggregatorProve {
if err := m.orm.UpdateAggTaskStatus(sess.sessionInfos[0].TaskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset aggregator task_status as Unassigned", "id", sess.sessionInfos[0].TaskID, "err", err)
}
}

m.mu.Lock()
for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
for _, v := range sess.sessionInfos {
m.freeTaskIDForRoller(v.RollerPublicKey, v.TaskID)
}
delete(m.sessions, sess.info.ID)
delete(m.sessions, sess.sessionInfos[0].TaskID)
m.mu.Unlock()
coordinatorSessionsTimeoutTotalCounter.Inc(1)
return

@@ -481,7 +500,12 @@ func (m *Manager) CollectProofs(sess *session) {
//Execute after one of the roller finishes sending proof, return early if all rollers had sent results.
case ret := <-sess.finishChan:
m.mu.Lock()
sess.info.Rollers[ret.pk].Status = ret.status
for idx := range sess.sessionInfos {
if sess.sessionInfos[idx].RollerPublicKey == ret.pk {
sess.sessionInfos[idx].ProvingStatus = int(ret.status)
}
}

if sess.isSessionFailed() {
if ret.typ == message.BasicProve {
if err := m.orm.UpdateProvingStatus(ret.id, types.ProvingTaskFailed); err != nil {

@@ -493,12 +517,13 @@ func (m *Manager) CollectProofs(sess *session) {
log.Error("failed to update aggregator proving_status as failed", "msg.ID", ret.id, "error", err)
}
}

coordinatorSessionsFailedTotalCounter.Inc(1)
}
if err := m.orm.SetSessionInfo(sess.info); err != nil {

if err := m.orm.UpdateSessionInfoProvingStatus(m.ctx, ret.id, ret.pk, ret.status); err != nil {
log.Error("db set session info fail", "pk", ret.pk, "error", err)
}

//Check if all rollers have finished their tasks, and rollers with valid results are indexed by public key.
finished, validRollers := sess.isRollersFinished()

@@ -508,11 +533,10 @@ func (m *Manager) CollectProofs(sess *session) {
randIndex := rand.Int63n(int64(len(validRollers)))
_ = validRollers[randIndex]
// TODO: reward winner

for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
for _, sessionInfo := range sess.sessionInfos {
m.freeTaskIDForRoller(sessionInfo.RollerPublicKey, sessionInfo.TaskID)
delete(m.sessions, sessionInfo.TaskID)
}
delete(m.sessions, sess.info.ID)
m.mu.Unlock()

coordinatorSessionsSuccessTotalCounter.Inc(1)
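
One detail the hunks above lean on: each proof result arrives on sess.finishChan as a single value ret, read for its id, pk, typ, and status fields. The element type rollerProofStatus is named elsewhere in this diff but never spelled out; inferred from that usage, its shape is presumably along these lines (an assumption, not code from the diff):

type rollerProofStatus struct {
	id     string                  // task ID the proof belongs to
	pk     string                  // public key of the reporting roller
	typ    message.ProveType       // message.BasicProve or message.AggregatorProve
	status types.RollerProveStatus // outcome used to update proving_status
}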
@@ -528,14 +552,16 @@ func (m *Manager) CollectProofs(sess *session) {
// validRollers also records the public keys of rollers who have finished their tasks correctly as index.
func (s *session) isRollersFinished() (bool, []string) {
var validRollers []string
for pk, roller := range s.info.Rollers {
if roller.Status == types.RollerProofValid {
validRollers = append(validRollers, pk)
for _, sessionInfo := range s.sessionInfos {
if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofValid {
validRollers = append(validRollers, sessionInfo.RollerPublicKey)
continue
}
if roller.Status == types.RollerProofInvalid {

if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofInvalid {
continue
}

// Some rollers are still proving.
return false, nil
}

@@ -543,8 +569,8 @@ func (s *session) isRollersFinished() (bool, []string) {
}

func (s *session) isSessionFailed() bool {
for _, roller := range s.info.Rollers {
if roller.Status != types.RollerProofInvalid {
for _, sessionInfo := range s.sessionInfos {
if types.RollerProveStatus(sessionInfo.ProvingStatus) != types.RollerProofInvalid {
return false
}
}
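
Both helpers now fold over the per-roller session_info rows instead of the old Rollers map. To make the aggregation rules concrete, a sketch with hypothetical statuses (illustration only, not code from this diff):

infos := []*types.SessionInfo{
	{ProvingStatus: int(types.RollerProofValid)}, // counts toward validRollers
	{ProvingStatus: int(types.RollerAssigned)},   // still proving
}
// isRollersFinished over infos yields (false, nil): one roller has not reported yet.
// isSessionFailed yields false: a session only fails once every row is RollerProofInvalid.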
@@ -573,7 +599,9 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
if task != nil {
taskID = task.Hash
} else {
taskID = prevSession.info.ID
if len(prevSession.sessionInfos) != 0 {
taskID = prevSession.sessionInfos[0].TaskID
}
}
if m.GetNumberOfIdleRollers(message.BasicProve) == 0 {
log.Warn("no idle basic roller when starting proof generation session", "id", taskID)

@@ -612,7 +640,7 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
}

// Dispatch task to basic rollers.
rollers := make(map[string]*types.RollerStatus)
var sessionInfos []*types.SessionInfo
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.BasicProve)
if roller == nil {

@@ -626,10 +654,27 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
continue
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
now := time.Now()
tmpSessionInfo := types.SessionInfo{
TaskID: taskID,
RollerPublicKey: roller.PublicKey,
ProveType: int(message.BasicProve),
RollerName: roller.Name,
CreatedAt: &now,
ProvingStatus: int(types.RollerAssigned),
}
// Store session info.
if err = m.orm.SetSessionInfo(&tmpSessionInfo); err != nil {
log.Error("db set session info fail", "session id", taskID, "error", err)
return false
}
sessionInfos = append(sessionInfos, &tmpSessionInfo)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.BasicProve, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", tmpSessionInfo.ProvingStatus)

}
// No roller assigned.
if len(rollers) == 0 {
if len(sessionInfos) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle basic rollers", m.GetNumberOfIdleRollers(message.BasicProve))
return false
}

@@ -642,33 +687,8 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS

// Create a proof generation session.
sess := &session{
info: &types.SessionInfo{
ID: taskID,
Rollers: rollers,
ProveType: message.BasicProve,
StartTimestamp: time.Now().Unix(),
Attempts: 1,
},
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
if prevSession != nil {
sess.info.Attempts += prevSession.info.Attempts
}

for _, roller := range sess.info.Rollers {
log.Info(
"assigned proof to roller",
"session id", sess.info.ID,
"session type", sess.info.ProveType,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof status", roller.Status)
}

// Store session info.
if err = m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "session id", sess.info.ID, "error", err)
return false
sessionInfos: sessionInfos,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}

m.mu.Lock()

@@ -685,7 +705,9 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
if task != nil {
taskID = task.ID
} else {
taskID = prevSession.info.ID
if len(prevSession.sessionInfos) > 0 {
taskID = prevSession.sessionInfos[0].TaskID
}
}
if m.GetNumberOfIdleRollers(message.AggregatorProve) == 0 {
log.Warn("no idle common roller when starting proof generation session", "id", taskID)

@@ -715,7 +737,7 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
}

// Dispatch task to basic rollers.
rollers := make(map[string]*types.RollerStatus)
var sessionInfos []*types.SessionInfo
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.AggregatorProve)
if roller == nil {

@@ -732,11 +754,29 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
continue
}

now := time.Now()
tmpSessionInfo := types.SessionInfo{
TaskID: taskID,
RollerPublicKey: roller.PublicKey,
ProveType: int(message.AggregatorProve),
RollerName: roller.Name,
CreatedAt: &now,
ProvingStatus: int(types.RollerAssigned),
}
// Store session info.
if err = m.orm.SetSessionInfo(&tmpSessionInfo); err != nil {
log.Error("db set session info fail", "session id", taskID, "error", err)
return false
}

m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
sessionInfos = append(sessionInfos, &tmpSessionInfo)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.AggregatorProve, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", tmpSessionInfo.ProvingStatus)
}
// No roller assigned.
if len(rollers) == 0 {
if len(sessionInfos) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle aggregator rollers", m.GetNumberOfIdleRollers(message.AggregatorProve))
return false
}

@@ -749,33 +789,8 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio

// Create a proof generation session.
sess := &session{
info: &types.SessionInfo{
ID: taskID,
Rollers: rollers,
ProveType: message.AggregatorProve,
StartTimestamp: time.Now().Unix(),
Attempts: 1,
},
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
if prevSession != nil {
sess.info.Attempts += prevSession.info.Attempts
}

for _, roller := range sess.info.Rollers {
log.Info(
"assigned proof to roller",
"session id", sess.info.ID,
"session type", sess.info.ProveType,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof status", roller.Status)
}

// Store session info.
if err = m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "session id", sess.info.ID, "error", err)
return false
sessionInfos: sessionInfos,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}

m.mu.Lock()

@@ -789,7 +804,11 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
func (m *Manager) addFailedSession(sess *session, errMsg string) {
m.mu.Lock()
defer m.mu.Unlock()
m.failedSessionInfos[sess.info.ID] = newSessionInfo(sess, types.ProvingTaskFailed, errMsg, true)
if len(sess.sessionInfos) == 0 {
log.Error("add failed session to manager failure as the empty session info")
return
}
m.failedSessionInfos[sess.sessionInfos[0].TaskID] = newSessionInfo(sess, types.ProvingTaskFailed, errMsg, true)
}

// VerifyToken verifies pukey for token and expiration time

@@ -53,8 +53,8 @@ func (m *Manager) reloadRollerAssignedTasks(pubkey string) *cmap.ConcurrentMap {
defer m.mu.RUnlock()
taskIDs := cmap.New()
for id, sess := range m.sessions {
for pk, roller := range sess.info.Rollers {
if pk == pubkey && roller.Status == types.RollerAssigned {
for _, sessionInfo := range sess.sessionInfos {
if sessionInfo.RollerPublicKey == pubkey && sessionInfo.ProvingStatus == int(types.RollerAssigned) {
taskIDs.Set(id, struct{}{})
}
}

@@ -4,6 +4,7 @@ package verifier_test

import (
"encoding/json"
"flag"
"io"
"os"
"testing"

@@ -16,23 +17,23 @@ import (
"github.com/stretchr/testify/assert"
)

const (
paramsPath = "../assets/test_params"
aggVkPath = "../assets/agg_vk"
proofPath = "../assets/agg_proof"
var (
paramsPath = flag.String("params", "/assets/test_params", "params dir")
aggVkPath = flag.String("vk", "/assets/agg_vk", "aggregation proof verification key path")
proofPath = flag.String("proof", "/assets/agg_proof", "aggregation proof path")
)

func TestFFI(t *testing.T) {
as := assert.New(t)
cfg := &config.VerifierConfig{
MockMode: false,
ParamsPath: paramsPath,
AggVkPath: aggVkPath,
ParamsPath: *paramsPath,
AggVkPath: *aggVkPath,
}
v, err := verifier.NewVerifier(cfg)
as.NoError(err)

f, err := os.Open(proofPath)
f, err := os.Open(*proofPath)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
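
The hard-coded const paths become flag.String flags, which return *string, hence the dereferences (*paramsPath, *aggVkPath, *proofPath) in the config literal. Because the testing framework parses registered flags before tests run, the asset locations can now be overridden per invocation; one documented way to pass them through (the paths here are purely illustrative):

// go test ./verifier -run TestFFI -args \
//     -params /data/test_params -vk /data/agg_vk -proof /data/agg_proof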
@@ -3,12 +3,21 @@

create table session_info
(
hash VARCHAR NOT NULL,
rollers_info BYTEA NOT NULL
);
id BIGSERIAL PRIMARY KEY,
task_id VARCHAR NOT NULL,
roller_public_key VARCHAR NOT NULL,
prove_type INTEGER DEFAULT 0,
roller_name VARCHAR NOT NULL,
proving_status INTEGER DEFAULT 1,
failure_type INTEGER DEFAULT 0,
reward BIGINT DEFAULT 0,
proof BYTEA DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL,

create unique index session_info_hash_uindex
on session_info (hash);
CONSTRAINT uk_session_unique UNIQUE (task_id, roller_public_key)
);

-- +goose StatementEnd

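The Go model that rides on this table is the reworked types.SessionInfo used throughout the coordinator hunks above. The diff never shows the full struct; a plausible sketch consistent with the columns and with the fields exercised above (the db tags, and any field beyond TaskID, RollerPublicKey, ProveType, RollerName, ProvingStatus, and CreatedAt, are assumptions) looks like:

// Sketch only: field set taken from the migration columns; tags assumed.
type SessionInfo struct {
	ID              int64      `db:"id"`
	TaskID          string     `db:"task_id"`
	RollerPublicKey string     `db:"roller_public_key"`
	ProveType       int        `db:"prove_type"`
	RollerName      string     `db:"roller_name"`
	ProvingStatus   int        `db:"proving_status"`
	FailureType     int        `db:"failure_type"`
	Reward          int64      `db:"reward"`
	Proof           []byte     `db:"proof"`
	CreatedAt       *time.Time `db:"created_at"`
}

The rows.StructScan call in the ORM hunk below depends on tags like these lining up with the column names.
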
@@ -44,6 +44,7 @@ type BlockTraceOrm interface {
type SessionInfoOrm interface {
GetSessionInfosByHashes(hashes []string) ([]*types.SessionInfo, error)
SetSessionInfo(rollersInfo *types.SessionInfo) error
UpdateSessionInfoProvingStatus(ctx context.Context, taskID string, pk string, status types.RollerProveStatus) error
}

// AggTaskOrm is aggregator task

@@ -1,11 +1,11 @@
package orm

import (
"encoding/json"

"github.com/jmoiron/sqlx"
"context"

"scroll-tech/common/types"

"github.com/jmoiron/sqlx"
)

type sessionInfoOrm struct {

@@ -23,7 +23,7 @@ func (o *sessionInfoOrm) GetSessionInfosByHashes(hashes []string) ([]*types.Sess
if len(hashes) == 0 {
return nil, nil
}
query, args, err := sqlx.In("SELECT rollers_info FROM session_info WHERE hash IN (?);", hashes)
query, args, err := sqlx.In("SELECT * FROM session_info WHERE task_id IN (?);", hashes)
if err != nil {
return nil, err
}

@@ -35,15 +35,11 @@ func (o *sessionInfoOrm) GetSessionInfosByHashes(hashes []string) ([]*types.Sess

var sessionInfos []*types.SessionInfo
for rows.Next() {
var infoBytes []byte
if err = rows.Scan(&infoBytes); err != nil {
var sessionInfo types.SessionInfo
if err = rows.StructScan(&sessionInfo); err != nil {
return nil, err
}
sessionInfo := &types.SessionInfo{}
if err = json.Unmarshal(infoBytes, sessionInfo); err != nil {
return nil, err
}
sessionInfos = append(sessionInfos, sessionInfo)
sessionInfos = append(sessionInfos, &sessionInfo)
}
if err = rows.Err(); err != nil {
return nil, err

@@ -53,11 +49,15 @@ func (o *sessionInfoOrm) GetSessionInfosByHashes(hashes []string) ([]*types.Sess
}

func (o *sessionInfoOrm) SetSessionInfo(rollersInfo *types.SessionInfo) error {
infoBytes, err := json.Marshal(rollersInfo)
if err != nil {
return err
}
sqlStr := "INSERT INTO session_info (hash, rollers_info) VALUES ($1, $2) ON CONFLICT (hash) DO UPDATE SET rollers_info = EXCLUDED.rollers_info;"
_, err = o.db.Exec(sqlStr, rollersInfo.ID, infoBytes)
sqlStr := "INSERT INTO session_info (task_id, roller_public_key, prove_type, roller_name, proving_status) VALUES ($1, $2, $3, $4, $5) ON CONFLICT (task_id, roller_public_key) DO UPDATE SET proving_status = EXCLUDED.proving_status;"
_, err := o.db.Exec(sqlStr, rollersInfo.TaskID, rollersInfo.RollerPublicKey, rollersInfo.ProveType, rollersInfo.RollerName, rollersInfo.ProvingStatus)
return err
}

// UpdateSessionInfoProvingStatus update the session info proving status
func (o *sessionInfoOrm) UpdateSessionInfoProvingStatus(ctx context.Context, taskID string, pk string, status types.RollerProveStatus) error {
if _, err := o.db.ExecContext(ctx, o.db.Rebind("update session_info set proving_status = ? where task_id = ? and roller_public_key = ?;"), int(status), taskID, pk); err != nil {
return err
}
return nil
}
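
Together the two writers give each (task_id, roller_public_key) pair an upsert-then-update lifecycle: SetSessionInfo creates or refreshes the assignment row, and UpdateSessionInfoProvingStatus later flips only proving_status. A hypothetical call sequence (recordResult is illustrative, not from this diff):

func recordResult(ctx context.Context, orm SessionInfoOrm, info *types.SessionInfo) error {
	// Upsert the assignment row, keyed by (task_id, roller_public_key).
	if err := orm.SetSessionInfo(info); err != nil {
		return err
	}
	// When the roller reports back, touch only the status column.
	return orm.UpdateSessionInfoProvingStatus(ctx, info.TaskID, info.RollerPublicKey, types.RollerProofValid)
}
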
@@ -410,31 +410,29 @@ func testOrmSessionInfo(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 0, len(sessionInfos))

now := time.Now()
sessionInfo := types.SessionInfo{
ID: batchHash,
Rollers: map[string]*types.RollerStatus{
"0": {
PublicKey: "0",
Name: "roller-0",
Status: types.RollerAssigned,
},
},
StartTimestamp: time.Now().Unix()}
TaskID: batchHash,
RollerName: "roller-0",
RollerPublicKey: "0",
ProvingStatus: int(types.RollerAssigned),
CreatedAt: &now,
}

// insert
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
sessionInfos, err = ormSession.GetSessionInfosByHashes(hashes)
assert.NoError(t, err)
assert.Equal(t, 1, len(sessionInfos))
assert.Equal(t, sessionInfo, *sessionInfos[0])
assert.Equal(t, sessionInfo.RollerName, sessionInfos[0].RollerName)

// update
sessionInfo.Rollers["0"].Status = types.RollerProofValid
sessionInfo.ProvingStatus = int(types.RollerProofValid)
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
sessionInfos, err = ormSession.GetSessionInfosByHashes(hashes)
assert.NoError(t, err)
assert.Equal(t, 1, len(sessionInfos))
assert.Equal(t, sessionInfo, *sessionInfos[0])
assert.Equal(t, sessionInfo.ProvingStatus, sessionInfos[0].ProvingStatus)

// delete
assert.NoError(t, ormBatch.UpdateProvingStatus(batchHash, types.ProvingTaskVerified))

File diff suppressed because one or more lines are too long
@@ -4,6 +4,7 @@ package prover_test

import (
"encoding/json"
"flag"
"io"
"os"
"path/filepath"

@@ -16,23 +17,23 @@ import (
"scroll-tech/roller/prover"
)

const (
paramsPath = "../assets/test_params"
seedPath = "../assets/test_seed"
tracesPath = "../assets/traces"
proofDumpPath = "agg_proof"
var (
paramsPath = flag.String("params", "/assets/test_params", "params dir")
seedPath = flag.String("seed", "/assets/test_seed", "seed path")
tracesPath = flag.String("traces", "/assets/traces", "traces dir")
proofDumpPath = flag.String("dump", "/assets/agg_proof", "the path proofs dump to")
)

func TestFFI(t *testing.T) {
as := assert.New(t)
cfg := &config.ProverConfig{
ParamsPath: paramsPath,
SeedPath: seedPath,
ParamsPath: *paramsPath,
SeedPath: *seedPath,
}
prover, err := prover.NewProver(cfg)
as.NoError(err)

files, err := os.ReadDir(tracesPath)
files, err := os.ReadDir(*tracesPath)
as.NoError(err)

traces := make([]*types.BlockTrace, 0)

@@ -41,7 +42,7 @@ func TestFFI(t *testing.T) {
f *os.File
byt []byte
)
f, err = os.Open(filepath.Join(tracesPath, file.Name()))
f, err = os.Open(filepath.Join(*tracesPath, file.Name()))
as.NoError(err)
byt, err = io.ReadAll(f)
as.NoError(err)

@@ -54,10 +55,10 @@ func TestFFI(t *testing.T) {
t.Log("prove success")

// dump the proof
os.RemoveAll(proofDumpPath)
os.RemoveAll(*proofDumpPath)
proofByt, err := json.Marshal(proof)
as.NoError(err)
proofFile, err := os.Create(proofDumpPath)
proofFile, err := os.Create(*proofDumpPath)
as.NoError(err)
_, err = proofFile.Write(proofByt)
as.NoError(err)
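
The prover test gets the same const-to-flag treatment; -dump additionally chooses where the generated proof is written, so its output can be handed straight to the verifier test above. A hypothetical run (paths illustrative):

// go test ./prover -run TestFFI -args \
//     -params /data/test_params -seed /data/test_seed -traces /data/traces -dump /data/agg_proof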
@@ -231,30 +231,38 @@ func (r *Roller) prove() error {

var traces []*types.BlockTrace
traces, err = r.getSortedTracesByHashes(task.Task.BlockHashes)
if err != nil {
return err
}
// If FFI panic during Prove, the roller will restart and re-enter prove() function,
// the proof will not be submitted.
var proof *message.AggProof
proof, err = r.prover.Prove(task.Task.ID, traces)
if err != nil {
proofMsg = &message.ProofDetail{
Status: message.StatusProofError,
Error: err.Error(),
Error: "get traces failed",
ID: task.Task.ID,
Type: task.Task.Type,
Proof: &message.AggProof{},
Proof: nil,
}
log.Error("prove block failed!", "task-id", task.Task.ID)
log.Error("get traces failed!", "task-id", task.Task.ID, "err", err)
} else {
proofMsg = &message.ProofDetail{
Status: message.StatusOk,
ID: task.Task.ID,
Type: task.Task.Type,
Proof: proof,
// If FFI panic during Prove, the roller will restart and re-enter prove() function,
// the proof will not be submitted.
var proof *message.AggProof
proof, err = r.prover.Prove(task.Task.ID, traces)
if err != nil {
proofMsg = &message.ProofDetail{
Status: message.StatusProofError,
Error: err.Error(),
ID: task.Task.ID,
Type: task.Task.Type,
Proof: nil,
}
log.Error("prove block failed!", "task-id", task.Task.ID)
} else {
proofMsg = &message.ProofDetail{
Status: message.StatusOk,
ID: task.Task.ID,
Type: task.Task.Type,
Proof: proof,
}
log.Info("prove block successfully!", "task-id", task.Task.ID)
}
log.Info("prove block successfully!", "task-id", task.Task.ID)
}
} else {
// when the roller has more than 3 times panic,

@@ -264,7 +272,7 @@ func (r *Roller) prove() error {
Error: "zk proving panic",
ID: task.Task.ID,
Type: task.Task.Type,
Proof: &message.AggProof{},
Proof: nil,
}
}
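
Net effect of the rework: a failed trace fetch no longer aborts prove() with a bare return err but is reported like any other proving failure, trace-fetch and proving errors carry distinct messages, and every failure path now sends Proof: nil instead of an empty &message.AggProof{}. The branch structure, condensed into a sketch (buildProofMsg is illustrative, not the diff's code):

func buildProofMsg(taskID string, typ message.ProveType, proof *message.AggProof, traceErr, proveErr error) *message.ProofDetail {
	switch {
	case traceErr != nil:
		// Could not assemble the input traces at all.
		return &message.ProofDetail{Status: message.StatusProofError, Error: "get traces failed", ID: taskID, Type: typ, Proof: nil}
	case proveErr != nil:
		// Traces were fine, but proof generation failed.
		return &message.ProofDetail{Status: message.StatusProofError, Error: proveErr.Error(), ID: taskID, Type: typ, Proof: nil}
	default:
		return &message.ProofDetail{Status: message.StatusOk, ID: taskID, Type: typ, Proof: proof}
	}
}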