Compare commits


3 Commits

Péter Garamvölgyi · 65e0b671ff · 2023-02-18 23:29:34 +01:00
feat: import genesis batch during startup (#299)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>

HAOYUatHZ · 3849d1bcc9 · 2023-02-18 18:59:23 +08:00
build: update version to alpha-v1.0 (#301)

Lawliet-Chan · f33bfffd85 · 2023-02-18 18:53:57 +08:00
feat(roller&coordinator): upgrade lizkp to zkevm-0215 version (#281)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
Co-authored-by: Ubuntu <ubuntu@ip-172-31-9-248.us-west-2.compute.internal>
50 changed files with 45417 additions and 86099 deletions


@@ -26,7 +26,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
- toolchain: nightly-2022-08-23
+ toolchain: nightly-2022-12-10
override: true
components: rustfmt, clippy
- name: Install Go


@@ -26,7 +26,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
- toolchain: nightly-2022-08-23
+ toolchain: nightly-2022-12-10
override: true
components: rustfmt, clippy
- name: Install Go


@@ -26,7 +26,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
- toolchain: nightly-2022-08-23
+ toolchain: nightly-2022-12-10
override: true
components: rustfmt, clippy
- name: Install Go
@@ -42,6 +42,8 @@ jobs:
- name: Test
run: |
make roller
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./prover/lib
+ export CHAIN_ID=534353
go test -v ./...
check:
runs-on: ubuntu-latest

Jenkinsfile

@@ -13,6 +13,8 @@ pipeline {
environment {
GO111MODULE = 'on'
PATH="/home/ubuntu/.cargo/bin:$PATH"
+ LD_LIBRARY_PATH="$LD_LIBRARY_PATH:./coordinator/verifier/lib"
+ CHAIN_ID='534353'
// LOG_DOCKER = 'true'
}
stages {
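
Both CI configurations above now export the native ZKP library path and a CHAIN_ID before running the Go tests. The diff does not show where CHAIN_ID is consumed; a minimal sketch, assuming the code reads it through os.Getenv (chainIDFromEnv is a hypothetical helper):

package config

import (
	"os"
	"strconv"
)

// chainIDFromEnv reads the CHAIN_ID exported by CI (534353 above) and
// falls back to a default when the variable is unset or malformed.
func chainIDFromEnv(fallback int64) int64 {
	v := os.Getenv("CHAIN_ID")
	if v == "" {
		return fallback
	}
	id, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return fallback
	}
	return id
}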


@@ -1,5 +1,7 @@
.PHONY: check update dev_docker clean
+ ZKP_VERSION=release-1220
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -29,5 +31,15 @@ dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/
+ test_zkp: ## Test zkp prove and verify, roller/prover generates the proof and coordinator/verifier verifies it
+ mkdir -p test_params
+ wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params19 -O ./test_params/params19
+ wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params26 -O ./test_params/params26
+ wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_seed -O test_seed
+ rm -rf ./roller/assets/test_params && mv test_params ./roller/assets/ && mv test_seed ./roller/assets/
+ cd ./roller && make test-gpu-prover
+ rm -rf ./coordinator/assets/test_params && mv ./roller/assets/test_params ./coordinator/assets/ && mv ./roller/assets/agg_proof ./coordinator/assets/
+ cd ./coordinator && make test-gpu-verifier
clean: ## Empty out the bin folder
@rm -rf build/bin


@@ -16,8 +16,7 @@
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000,
"pending_limit": 500
"min_balance": 100000000000000000000
},
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
@@ -40,8 +39,7 @@
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000,
"pending_limit": 500
"min_balance": 100000000000000000000
},
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"


@@ -30,8 +30,7 @@ type SenderConfig struct {
// The transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
TxType string `json:"tx_type"`
// The min balance set for check and set balance for sender's accounts.
- MinBalance *big.Int `json:"min_balance,omitempty"`
- PendingLimit int64 `json:"pending_limit,omitempty"`
+ MinBalance *big.Int `json:"min_balance,omitempty"`
}
// RelayerConfig loads relayer configuration items.
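
With pending_limit gone, the sender section of the JSON config decodes into the slimmed struct above. A minimal decoding sketch, assuming only the two fields visible in this hunk (the real SenderConfig carries more, e.g. escalation and gas-price settings):

package config

import (
	"encoding/json"
	"math/big"
)

// senderConfig mirrors only the fields shown in this hunk; the actual
// bridge SenderConfig declares additional fields.
type senderConfig struct {
	TxType     string   `json:"tx_type"`
	MinBalance *big.Int `json:"min_balance,omitempty"`
}

func parseSender(raw []byte) (*senderConfig, error) {
	cfg := new(senderConfig)
	if err := json.Unmarshal(raw, cfg); err != nil {
		return nil, err
	}
	// *big.Int unmarshals the 21-digit min_balance losslessly,
	// which is why the field is not a plain int64.
	return cfg, nil
}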


@@ -3,17 +3,15 @@ package l1
import (
"context"
"errors"
"fmt"
"math/big"
"time"
// not sure if this will cause problems when relaying with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log"
"modernc.org/mathutil"
"scroll-tech/common/utils"
"scroll-tech/database/orm"
@@ -57,7 +55,7 @@ func NewLayer1Relayer(ctx context.Context, db orm.L1MessageOrm, cfg *config.Rela
return nil, err
}
- layer1 := &Layer1Relayer{
+ return &Layer1Relayer{
ctx: ctx,
sender: sender,
db: db,
@@ -65,89 +63,7 @@ func NewLayer1Relayer(ctx context.Context, db orm.L1MessageOrm, cfg *config.Rela
cfg: cfg,
stopCh: make(chan struct{}),
confirmationCh: sender.ConfirmChan(),
}
// Deal with broken transactions.
if err = layer1.prepare(ctx); err != nil {
return nil, err
}
return layer1, nil
}
// prepare runs the check logic and blocks until it has finished.
func (r *Layer1Relayer) prepare(ctx context.Context) error {
go func(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case cfm := <-r.confirmationCh:
if !cfm.IsSuccessful {
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, orm.MsgConfirmed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
}
}
}(ctx)
if err := r.checkSubmittedMessages(); err != nil {
log.Error("failed to init layer1 submitted tx", "err", err)
return err
}
// Wait forever until the sender is empty.
utils.TryTimes(-1, func() bool {
return r.sender.PendingCount() == 0
})
return nil
}
func (r *Layer1Relayer) checkSubmittedMessages() error {
var blockNumber uint64
BEGIN:
msgs, err := r.db.GetL1Messages(
map[string]interface{}{"status": orm.MsgSubmitted},
fmt.Sprintf("AND height > %d", blockNumber),
fmt.Sprintf("ORDER BY height ASC LIMIT %d", 100),
)
if err != nil || len(msgs) == 0 {
return err
}
for msg := msgs[0]; len(msgs) > 0; { //nolint:staticcheck
// If pending txs pool is full, wait a while and retry.
if r.sender.IsFull() {
log.Warn("layer1 sender pending tx reaches pending limit")
time.Sleep(time.Millisecond * 500)
continue
}
msg, msgs = msgs[0], msgs[1:]
blockNumber = mathutil.MaxUint64(blockNumber, msg.Height)
data, err := r.packRelayMessage(msg)
if err != nil {
continue
}
err = r.sender.LoadOrSendTx(
common.HexToHash(msg.Layer2Hash),
msg.MsgHash,
&r.cfg.MessengerContractAddress,
big.NewInt(0),
data,
)
if err != nil {
log.Error("failed to load or send l1 submitted tx", "msg hash", msg.MsgHash, "err", err)
}
}
goto BEGIN
}, nil
}
// ProcessSavedEvents relays saved unprocessed cross-domain transactions to the destination blockchain
@@ -173,7 +89,7 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
}
}
- func (r *Layer1Relayer) packRelayMessage(msg *orm.L1Message) ([]byte, error) {
+ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
// @todo add support to relay multiple messages
from := common.HexToAddress(msg.Sender)
target := common.HexToAddress(msg.Target)
@@ -191,16 +107,9 @@ func (r *Layer1Relayer) packRelayMessage(msg *orm.L1Message) ([]byte, error) {
if err != nil {
log.Error("Failed to pack relayMessage", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return nil, err
}
return data, nil
}
func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
data, err := r.packRelayMessage(msg)
if err != nil {
return err
}
hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgExpired)


@@ -27,13 +27,15 @@ func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*B
return nil, err
}
// Note: initialize watcher before relayer to keep DB consistent.
// Otherwise, there will be a race condition between watcher.initializeGenesis and relayer.ProcessPendingBatches.
l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.BatchProposerConfig, cfg.L2MessengerAddress, orm)
relayer, err := NewLayer2Relayer(ctx, orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.BatchProposerConfig, cfg.L2MessengerAddress, orm)
return &Backend{
cfg: cfg,
l2Watcher: l2Watcher,
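
The note in this hunk is the crux of the fix: NewL2WatcherClient now imports the genesis batch synchronously, so the watcher must be fully constructed before the relayer exists and can start ProcessPendingBatches. A condensed sketch of the ordering invariant, using only names from this diff:

// Construct the watcher first: NewL2WatcherClient runs
// initializeGenesis before returning, so the genesis batch is in the
// DB before the relayer is built. Building the relayer first would
// let ProcessPendingBatches race against the genesis import.
l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.BatchProposerConfig, cfg.L2MessengerAddress, orm)
relayer, err := NewLayer2Relayer(ctx, orm, cfg.RelayerConfig)
if err != nil {
	return nil, err
}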


@@ -58,7 +58,7 @@ func (w *batchProposer) tryProposeBatch() {
if blocks[0].GasUsed > w.batchGasThreshold {
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
- if err = w.createBatchForBlocks(blocks[:1]); err != nil {
+ if _, err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
@@ -66,7 +66,7 @@ func (w *batchProposer) tryProposeBatch() {
if blocks[0].TxNum > w.batchTxNumThreshold {
log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
- if err = w.createBatchForBlocks(blocks[:1]); err != nil {
+ if _, err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
@@ -93,15 +93,15 @@ func (w *batchProposer) tryProposeBatch() {
return
}
- if err = w.createBatchForBlocks(blocks); err != nil {
+ if _, err = w.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
}
}
- func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {
+ func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) (string, error) {
dbTx, err := w.orm.Beginx()
if err != nil {
- return err
+ return "", err
}
var dbTxErr error
@@ -128,13 +128,13 @@ func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {
batchID, dbTxErr = w.orm.NewBatchInDBTx(dbTx, startBlock, endBlock, startBlock.ParentHash, txNum, gasUsed)
if dbTxErr != nil {
- return dbTxErr
+ return "", dbTxErr
}
if dbTxErr = w.orm.SetBatchIDForBlocksInDBTx(dbTx, blockIDs, batchID); dbTxErr != nil {
- return dbTxErr
+ return "", dbTxErr
}
dbTxErr = dbTx.Commit()
- return dbTxErr
+ return batchID, dbTxErr
}
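
createBatchForBlocks now returns the new batch ID so that callers which post-process the batch can use it; tryProposeBatch simply discards it with _, err = ... . A usage sketch modeled on the genesis-import call site later in this comparison (error wrapping is illustrative):

// Create a batch for the given blocks, then mark it as proved,
// mirroring how initializeGenesis consumes the returned ID.
batchID, err := w.batchProposer.createBatchForBlocks(blocks)
if err != nil {
	return fmt.Errorf("failed to create batch: %v", err)
}
if err := w.orm.UpdateProvingStatus(batchID, orm.ProvingTaskProved); err != nil {
	return fmt.Errorf("failed to update proving status: %v", err)
}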


@@ -40,7 +40,7 @@ func testBatchProposer(t *testing.T) {
// Insert traces into db.
assert.NoError(t, db.InsertBlockTraces([]*types.BlockTrace{trace2, trace3}))
- id := utils.ComputeBatchID(trace3.Header.Hash(), trace2.Header.ParentHash, big.NewInt(1))
+ id := utils.ComputeBatchID(trace3.Header.Hash(), trace2.Header.ParentHash, big.NewInt(0))
proposer := newBatchProposer(&config.BatchProposerConfig{
ProofGenerationFreq: 1,


@@ -2,20 +2,28 @@ package l2
import (
"context"
"errors"
"fmt"
"math/big"
"runtime"
"sync"
"time"
// not sure if this will cause problems when relaying with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/utils"
"golang.org/x/sync/errgroup"
"modernc.org/mathutil"
"scroll-tech/database"
"scroll-tech/database/orm"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
"scroll-tech/bridge/sender"
"scroll-tech/bridge/utils"
)
// Layer2Relayer is responsible for
@@ -30,11 +38,13 @@ type Layer2Relayer struct {
db database.OrmFactory
cfg *config.RelayerConfig
messageSender *sender.Sender
messageCh <-chan *sender.Confirmation
messageSender *sender.Sender
messageCh <-chan *sender.Confirmation
l1MessengerABI *abi.ABI
rollupSender *sender.Sender
rollupCh <-chan *sender.Confirmation
l1RollupABI *abi.ABI
// A list of processing message.
// key(string): confirmation ID, value(string): layer2 hash.
@@ -66,65 +76,326 @@ func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
return nil, err
}
- layer2 := &Layer2Relayer{
+ return &Layer2Relayer{
ctx: ctx,
db: db,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
l1MessengerABI: bridge_abi.L1MessengerMetaABI,
rollupSender: rollupSender,
rollupCh: rollupSender.ConfirmChan(),
l1RollupABI: bridge_abi.RollupMetaABI,
cfg: cfg,
processingMessage: sync.Map{},
processingCommitment: sync.Map{},
processingFinalization: sync.Map{},
stopCh: make(chan struct{}),
}
// Deal with broken transactions.
if err = layer2.prepare(ctx); err != nil {
return nil, err
}
return layer2, nil
}, nil
}
// prepare runs the check logic and blocks until it has finished.
func (r *Layer2Relayer) prepare(ctx context.Context) error {
go func(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case confirmation := <-r.messageCh:
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupCh:
r.handleConfirmation(confirmation)
}
const processMsgLimit = 100
// ProcessSavedEvents relays saved unprocessed cross-domain transactions to the destination blockchain
func (r *Layer2Relayer) ProcessSavedEvents() {
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
return
}
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2Messages(
map[string]interface{}{"status": orm.MsgPending},
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
)
if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
return
}
// process messages in batches
batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
for size := 0; len(msgs) > 0; msgs = msgs[size:] {
if size = len(msgs); size > batchSize {
size = batchSize
}
}(ctx)
var g errgroup.Group
for _, msg := range msgs[:size] {
msg := msg
g.Go(func() error {
return r.processSavedEvent(msg, batch.Index)
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process l2 saved event", "err", err)
}
return
}
}
}
if err := r.checkSubmittedMessages(); err != nil {
log.Error("failed to init layer2 submitted tx", "err", err)
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
// @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockHeight: big.NewInt(int64(msg.Height)),
BatchIndex: big.NewInt(0).SetUint64(index),
MerkleProof: make([]byte, 0),
}
from := common.HexToAddress(msg.Sender)
target := common.HexToAddress(msg.Target)
value, ok := big.NewInt(0).SetString(msg.Value, 10)
if !ok {
// @todo maybe panic?
log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
// TODO: need to skip this message by changing its status to MsgError
}
fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
deadline := big.NewInt(int64(msg.Deadline))
msgNonce := big.NewInt(int64(msg.Nonce))
calldata := common.Hex2Bytes(msg.Calldata)
data, err := r.l1MessengerABI.Pack("relayMessageWithProof", from, target, value, fee, deadline, msgNonce, calldata, proof)
if err != nil {
log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return err
}
if err := r.checkCommittingBatches(); err != nil {
log.Error("failed to init layer2 committed tx", "err", err)
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
}
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
}
return err
}
log.Info("relayMessageWithProof to layer1", "msgHash", msg.MsgHash, "txhash", hash.String())
if err := r.checkFinalizingBatches(); err != nil {
log.Error("failed to init layer2 finalized tx", "err", err)
// save status in db
// @todo handle db error
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
if err != nil {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err
}
// Wait forever until message sender and roller sender are empty.
utils.TryTimes(-1, func() bool {
return r.messageSender.PendingCount() == 0 && r.rollupSender.PendingCount() == 0
})
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
return nil
}
// ProcessPendingBatches submits batch data to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBatches() {
// batches are sorted by batch index in increasing order
batchesInDB, err := r.db.GetPendingBatches(1)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
}
if len(batchesInDB) == 0 {
return
}
id := batchesInDB[0]
// @todo add support to relay multiple batches
batches, err := r.db.GetBlockBatches(map[string]interface{}{"id": id})
if err != nil || len(batches) == 0 {
log.Error("Failed to GetBlockBatches", "batch_id", id, "err", err)
return
}
batch := batches[0]
traces, err := r.db.GetBlockTraces(map[string]interface{}{"batch_id": id}, "ORDER BY number ASC")
if err != nil || len(traces) == 0 {
log.Error("Failed to GetBlockTraces", "batch_id", id, "err", err)
return
}
layer2Batch := &bridge_abi.IZKRollupLayer2Batch{
BatchIndex: batch.Index,
ParentHash: common.HexToHash(batch.ParentHash),
Blocks: make([]bridge_abi.IZKRollupLayer2BlockHeader, len(traces)),
}
parentHash := common.HexToHash(batch.ParentHash)
for i, trace := range traces {
layer2Batch.Blocks[i] = bridge_abi.IZKRollupLayer2BlockHeader{
BlockHash: trace.Header.Hash(),
ParentHash: parentHash,
BaseFee: trace.Header.BaseFee,
StateRoot: trace.StorageTrace.RootAfter,
BlockHeight: trace.Header.Number.Uint64(),
GasUsed: 0,
Timestamp: trace.Header.Time,
ExtraData: make([]byte, 0),
Txs: make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.Transactions)),
}
for j, tx := range trace.Transactions {
layer2Batch.Blocks[i].Txs[j] = bridge_abi.IZKRollupLayer2Transaction{
Caller: tx.From,
Nonce: tx.Nonce,
Gas: tx.Gas,
GasPrice: tx.GasPrice.ToInt(),
Value: tx.Value.ToInt(),
Data: common.Hex2Bytes(tx.Data),
R: tx.R.ToInt(),
S: tx.S.ToInt(),
V: tx.V.ToInt().Uint64(),
}
if tx.To != nil {
layer2Batch.Blocks[i].Txs[j].Target = *tx.To
}
layer2Batch.Blocks[i].GasUsed += trace.ExecutionResults[j].Gas
}
// for next iteration
parentHash = layer2Batch.Blocks[i].BlockHash
}
data, err := r.l1RollupABI.Pack("commitBatch", layer2Batch)
if err != nil {
log.Error("Failed to pack commitBatch", "id", id, "index", batch.Index, "err", err)
return
}
txID := id + "-commit"
// add suffix `-commit` to avoid duplication with finalize tx in unit tests
hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
}
return
}
log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
}
r.processingCommitment.Store(txID, id)
}
// ProcessCommittedBatches submits proofs to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() {
// set skipped batches in a single db operation
if count, err := r.db.UpdateSkippedBatches(); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
} else if count > 0 {
log.Info("Skipping batches", "count", count)
}
// batches are sorted by batch index in increasing order
batches, err := r.db.GetCommittedBatches(1)
if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err)
return
}
if len(batches) == 0 {
return
}
id := batches[0]
// @todo add support to relay multiple batches
status, err := r.db.GetProvingStatusByID(id)
if err != nil {
log.Error("GetProvingStatusByID failed", "id", id, "err", err)
return
}
switch status {
case orm.ProvingTaskUnassigned, orm.ProvingTaskAssigned:
// The proof for this block is not ready yet.
return
case orm.ProvingTaskProved:
// It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
}
case orm.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "id", id)
success := false
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "id", id)
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
}
}
}()
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByID(id)
if err != nil {
log.Warn("fetch get proof by id failed", "id", id, "err", err)
return
}
if proofBuffer == nil || instanceBuffer == nil {
log.Warn("proof or instance not ready", "id", id)
return
}
if len(proofBuffer)%32 != 0 {
log.Error("proof buffer has wrong length", "id", id, "length", len(proofBuffer))
return
}
if len(instanceBuffer)%32 != 0 {
log.Warn("instance buffer has wrong length", "id", id, "length", len(instanceBuffer))
return
}
proof := utils.BufferToUint256Le(proofBuffer)
instance := utils.BufferToUint256Le(instanceBuffer)
data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(id), proof, instance)
if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err)
return
}
txID := id + "-finalize"
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
hash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("finalizeBatchWithProof in layer1 failed", "id", id, "err", err)
}
return
}
log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
}
success = true
r.processingFinalization.Store(txID, id)
default:
log.Error("encounter unreachable case in ProcessCommittedBatches",
"block_status", status,
)
}
}
// Start the relayer process
func (r *Layer2Relayer) Start() {
loop := func(ctx context.Context, f func()) {


@@ -1,171 +0,0 @@
package l2
import (
"errors"
"fmt"
"math/big"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"modernc.org/mathutil"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/sender"
"scroll-tech/database/orm"
)
func (r *Layer2Relayer) checkCommittingBatches() error {
var batchIndex uint64
BEGIN:
batches, err := r.db.GetBlockBatches(
map[string]interface{}{"rollup_status": orm.RollupCommitting},
fmt.Sprintf("AND index > %d", batchIndex),
fmt.Sprintf("ORDER BY index ASC LIMIT %d", 10),
)
if err != nil || len(batches) == 0 {
return err
}
for batch := batches[0]; len(batches) > 0; { //nolint:staticcheck
// If pending txs pool is full, wait a while and retry.
if r.rollupSender.IsFull() {
log.Warn("layer2 rollup sender pending committed tx reaches pending limit")
time.Sleep(time.Millisecond * 500)
continue
}
batch, batches = batches[0], batches[1:]
id := batch.ID
batchIndex = mathutil.MaxUint64(batchIndex, batch.Index)
txStr, err := r.db.GetCommitTxHash(id)
if err != nil {
log.Error("failed to get commit_tx_hash from block_batch", "err", err)
continue
}
_, data, err := r.packCommitBatch(id)
if err != nil {
log.Error("failed to load or send committed tx", "batch id", id, "err", err)
continue
}
txID := id + "-commit"
err = r.rollupSender.LoadOrSendTx(
common.HexToHash(txStr.String),
txID,
&r.cfg.RollupContractAddress,
big.NewInt(0),
data,
)
if err != nil {
log.Error("failed to load or send tx", "batch id", id, "err", err)
} else {
r.processingCommitment.Store(txID, id)
}
}
goto BEGIN
}
func (r *Layer2Relayer) packCommitBatch(id string) (*orm.BlockBatch, []byte, error) {
batches, err := r.db.GetBlockBatches(map[string]interface{}{"id": id})
if err != nil || len(batches) == 0 {
log.Error("Failed to GetBlockBatches", "batch_id", id, "err", err)
return nil, nil, err
}
batch := batches[0]
traces, err := r.db.GetBlockTraces(map[string]interface{}{"batch_id": id}, "ORDER BY number ASC")
if err != nil || len(traces) == 0 {
log.Error("Failed to GetBlockTraces", "batch_id", id, "err", err)
return nil, nil, err
}
layer2Batch := &bridge_abi.IZKRollupLayer2Batch{
BatchIndex: batch.Index,
ParentHash: common.HexToHash(batch.ParentHash),
Blocks: make([]bridge_abi.IZKRollupLayer2BlockHeader, len(traces)),
}
parentHash := common.HexToHash(batch.ParentHash)
for i, trace := range traces {
layer2Batch.Blocks[i] = bridge_abi.IZKRollupLayer2BlockHeader{
BlockHash: trace.Header.Hash(),
ParentHash: parentHash,
BaseFee: trace.Header.BaseFee,
StateRoot: trace.StorageTrace.RootAfter,
BlockHeight: trace.Header.Number.Uint64(),
GasUsed: 0,
Timestamp: trace.Header.Time,
ExtraData: make([]byte, 0),
Txs: make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.Transactions)),
}
for j, tx := range trace.Transactions {
layer2Batch.Blocks[i].Txs[j] = bridge_abi.IZKRollupLayer2Transaction{
Caller: tx.From,
Nonce: tx.Nonce,
Gas: tx.Gas,
GasPrice: tx.GasPrice.ToInt(),
Value: tx.Value.ToInt(),
Data: common.Hex2Bytes(tx.Data),
R: tx.R.ToInt(),
S: tx.S.ToInt(),
V: tx.V.ToInt().Uint64(),
}
if tx.To != nil {
layer2Batch.Blocks[i].Txs[j].Target = *tx.To
}
layer2Batch.Blocks[i].GasUsed += trace.ExecutionResults[j].Gas
}
// for next iteration
parentHash = layer2Batch.Blocks[i].BlockHash
}
data, err := bridge_abi.RollupMetaABI.Pack("commitBatch", layer2Batch)
if err != nil {
log.Error("Failed to pack commitBatch", "id", id, "index", batch.Index, "err", err)
return nil, nil, err
}
return batch, data, nil
}
// ProcessPendingBatches submits batch data to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBatches() {
// batches are sorted by batch index in increasing order
batchesInDB, err := r.db.GetPendingBatches(1)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
}
if len(batchesInDB) == 0 {
return
}
id := batchesInDB[0]
// @todo add support to relay multiple batches
batch, data, err := r.packCommitBatch(id)
if err != nil {
return
}
txID := id + "-commit"
// add suffix `-commit` to avoid duplication with finalize tx in unit tests
hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
}
return
}
log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
}
r.processingCommitment.Store(txID, id)
}


@@ -1,195 +0,0 @@
package l2
import (
"errors"
"fmt"
"math/big"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"modernc.org/mathutil"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/sender"
"scroll-tech/bridge/utils"
"scroll-tech/database/orm"
)
func (r *Layer2Relayer) checkFinalizingBatches() error {
var (
batchLimit = 10
batchIndex uint64
)
BEGIN:
batches, err := r.db.GetBlockBatches(
map[string]interface{}{"rollup_status": orm.RollupFinalizing},
fmt.Sprintf("AND index > %d", batchIndex),
fmt.Sprintf("ORDER BY index ASC LIMIT %d", batchLimit),
)
if err != nil || len(batches) == 0 {
return err
}
for batch := batches[0]; len(batches) > 0; { //nolint:staticcheck
// If pending txs pool is full, wait a while and retry.
if r.rollupSender.IsFull() {
log.Warn("layer2 rollup sender pending finalized tx reaches pending limit")
time.Sleep(time.Millisecond * 500)
continue
}
batch, batches = batches[0], batches[1:]
id := batch.ID
batchIndex = mathutil.MaxUint64(batchIndex, batch.Index)
txStr, err := r.db.GetFinalizeTxHash(id)
if err != nil {
log.Error("failed to get finalize_tx_hash from block_batch", "err", err)
continue
}
data, err := r.packFinalizeBatch(id)
if err != nil {
log.Error("failed to pack finalize data", "err", err)
continue
}
txID := id + "-finalize"
err = r.rollupSender.LoadOrSendTx(
common.HexToHash(txStr.String),
txID,
&r.cfg.RollupContractAddress,
big.NewInt(0),
data,
)
if err != nil {
log.Error("failed to load or send finalized tx", "batch id", id, "err", err)
} else {
r.processingFinalization.Store(txID, id)
}
}
goto BEGIN
}
func (r *Layer2Relayer) packFinalizeBatch(id string) ([]byte, error) {
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByID(id)
if err != nil {
log.Warn("fetch get proof by id failed", "id", id, "err", err)
return nil, err
}
if proofBuffer == nil || instanceBuffer == nil {
log.Warn("proof or instance not ready", "id", id)
return nil, err
}
if len(proofBuffer)%32 != 0 {
log.Error("proof buffer has wrong length", "id", id, "length", len(proofBuffer))
return nil, err
}
if len(instanceBuffer)%32 != 0 {
log.Warn("instance buffer has wrong length", "id", id, "length", len(instanceBuffer))
return nil, err
}
proof := utils.BufferToUint256Le(proofBuffer)
instance := utils.BufferToUint256Le(instanceBuffer)
data, err := bridge_abi.RollupMetaABI.Pack("finalizeBatchWithProof", common.HexToHash(id), proof, instance)
if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err)
return nil, err
}
return data, nil
}
// ProcessCommittedBatches submits proofs to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() {
// set skipped batches in a single db operation
if count, err := r.db.UpdateSkippedBatches(); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
} else if count > 0 {
log.Info("Skipping batches", "count", count)
}
// batches are sorted by batch index in increasing order
batches, err := r.db.GetCommittedBatches(1)
if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err)
return
}
if len(batches) == 0 {
return
}
id := batches[0]
// @todo add support to relay multiple batches
status, err := r.db.GetProvingStatusByID(id)
if err != nil {
log.Error("GetProvingStatusByID failed", "id", id, "err", err)
return
}
switch status {
case orm.ProvingTaskUnassigned, orm.ProvingTaskAssigned:
// The proof for this block is not ready yet.
return
case orm.ProvingTaskProved:
// It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
}
case orm.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "id", id)
success := false
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "id", id)
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
}
}
}()
// Pack finalize data.
data, err := r.packFinalizeBatch(id)
if err != nil {
return
}
txID := id + "-finalize"
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
hash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("finalizeBatchWithProof in layer1 failed", "id", id, "err", err)
}
return
}
log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
}
success = true
r.processingFinalization.Store(txID, id)
default:
log.Error("encounter unreachable case in ProcessCommittedBatches",
"block_status", status,
)
}
}


@@ -1,183 +0,0 @@
package l2
import (
"errors"
"fmt"
"math/big"
"runtime"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/errgroup"
"modernc.org/mathutil"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/sender"
"scroll-tech/database/orm"
)
const processMsgLimit = 100
func (r *Layer2Relayer) checkSubmittedMessages() error {
var nonce uint64
BEGIN:
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2Messages(
map[string]interface{}{"status": orm.MsgSubmitted},
fmt.Sprintf("AND nonce > %d", nonce),
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
)
if err != nil || len(msgs) == 0 {
return err
}
var batch *orm.BlockBatch
for msg := msgs[0]; len(msgs) > 0; { //nolint:staticcheck
// If pending pool is full, wait a while and retry.
if r.messageSender.IsFull() {
log.Warn("layer2 message tx sender is full")
time.Sleep(time.Millisecond * 500)
continue
}
msg, msgs = msgs[0], msgs[1:]
nonce = mathutil.MaxUint64(nonce, msg.Nonce)
// Get batch by block number.
if batch == nil || msg.Height < batch.StartBlockNumber || msg.Height > batch.EndBlockNumber {
batches, err := r.db.GetBlockBatches(
map[string]interface{}{},
fmt.Sprintf("AND start_block_number <= %d AND end_block_number >= %d", msg.Height, msg.Height),
)
// If get batch failed, stop and return immediately.
if err != nil || len(batches) == 0 {
return err
}
batch = batches[0]
}
data, err := r.packRelayMessage(msg, batch.Index)
if err != nil {
continue
}
err = r.messageSender.LoadOrSendTx(
common.HexToHash(msg.Layer1Hash),
msg.MsgHash,
&r.cfg.MessengerContractAddress,
big.NewInt(0),
data,
)
if err != nil {
log.Error("failed to load or send l2 submitted tx", "batch id", batch.ID, "msg hash", msg.MsgHash, "err", err)
} else {
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
}
}
goto BEGIN
}
// ProcessSavedEvents relays saved unprocessed cross-domain transactions to the destination blockchain
func (r *Layer2Relayer) ProcessSavedEvents() {
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
return
}
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2Messages(
map[string]interface{}{"status": orm.MsgPending},
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
)
if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
return
}
// process messages in batches
batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
for size := 0; len(msgs) > 0; msgs = msgs[size:] {
if size = len(msgs); size > batchSize {
size = batchSize
}
var g errgroup.Group
for _, msg := range msgs[:size] {
msg := msg
g.Go(func() error {
return r.processSavedEvent(msg, batch.Index)
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process l2 saved event", "err", err)
}
return
}
}
}
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
data, err := r.packRelayMessage(msg, index)
if err != nil {
return err
}
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
}
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
}
return err
}
log.Info("relayMessageWithProof to layer1", "msgHash", msg.MsgHash, "txhash", hash.String())
// save status in db
// @todo handle db error
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
if err != nil {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err
}
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
return nil
}
func (r *Layer2Relayer) packRelayMessage(msg *orm.L2Message, index uint64) ([]byte, error) {
// @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockHeight: big.NewInt(int64(msg.Height)),
BatchIndex: big.NewInt(0).SetUint64(index),
MerkleProof: make([]byte, 0),
}
from := common.HexToAddress(msg.Sender)
target := common.HexToAddress(msg.Target)
value, ok := big.NewInt(0).SetString(msg.Value, 10)
if !ok {
// @todo maybe panic?
log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
// TODO: need to skip this message by changing its status to MsgError
}
fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
deadline := big.NewInt(int64(msg.Deadline))
msgNonce := big.NewInt(int64(msg.Nonce))
calldata := common.Hex2Bytes(msg.Calldata)
data, err := bridge_abi.L1MessengerMetaABI.Pack("relayMessageWithProof", from, target, value, fee, deadline, msgNonce, calldata, proof)
if err != nil {
log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return nil, err
}
return data, nil
}
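
ProcessSavedEvents, both in this deleted file and in its new home in relayer.go, relays messages with a bounded fan-out: it slices the pending list into chunks and runs each chunk through an errgroup, sized by CPU count and sender accounts. A self-contained sketch of that pattern with the DB and sender calls stubbed out; the sizing rule is copied from the diff:

package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sync/errgroup"
)

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	msgs := []int{1, 2, 3, 4, 5, 6, 7} // stand-ins for *orm.L2Message
	accounts := 3                      // stand-in for messageSender.NumberOfAccounts()

	// Same sizing rule as the relayer: roughly half the usable CPUs,
	// but never more goroutines than there are sender accounts.
	batchSize := minInt((runtime.GOMAXPROCS(0)+1)/2, accounts)

	for size := 0; len(msgs) > 0; msgs = msgs[size:] {
		if size = len(msgs); size > batchSize {
			size = batchSize
		}
		var g errgroup.Group
		for _, m := range msgs[:size] {
			m := m // capture the loop variable for the closure
			g.Go(func() error {
				fmt.Println("processing message", m)
				return nil // a real handler would return processSavedEvent's error
			})
		}
		// Stop at the first failing chunk, as the relayer does.
		if err := g.Wait(); err != nil {
			return
		}
	}
}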


@@ -67,7 +67,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
savedHeight = 0
}
- return &WatcherClient{
+ w := WatcherClient{
ctx: ctx,
Client: client,
orm: orm,
@@ -79,6 +79,75 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
stopped: 0,
batchProposer: newBatchProposer(bpCfg, orm),
}
// Initialize genesis before we do anything else
if err := w.initializeGenesis(); err != nil {
panic(fmt.Sprintf("failed to initialize L2 genesis batch, err: %v", err))
}
return &w
}
func (w *WatcherClient) initializeGenesis() error {
if count, err := w.orm.GetBatchCount(); err != nil {
return fmt.Errorf("failed to get batch count: %v", err)
} else if count > 0 {
log.Info("genesis already imported")
return nil
}
genesis, err := w.HeaderByNumber(w.ctx, big.NewInt(0))
if err != nil {
return fmt.Errorf("failed to retrieve L2 genesis header: %v", err)
}
// EIP1559 is disabled so the RPC won't return baseFeePerGas. However, l2geth
// still uses BaseFee when calculating the block hash. If we keep it as <nil>
// here the genesis hash will not match.
genesis.BaseFee = big.NewInt(0)
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
trace := &types.BlockTrace{
Coinbase: nil,
Header: genesis,
Transactions: []*types.TransactionData{},
StorageTrace: nil,
ExecutionResults: []*types.ExecutionResult{},
MPTWitness: nil,
}
if err := w.orm.InsertBlockTraces([]*types.BlockTrace{trace}); err != nil {
return fmt.Errorf("failed to insert block traces: %v", err)
}
blocks, err := w.orm.GetUnbatchedBlocks(map[string]interface{}{})
if err != nil {
return err
}
if len(blocks) != 1 {
return fmt.Errorf("unexpected number of unbatched blocks in db, expected: 1, actual: %v", len(blocks))
}
batchID, err := w.batchProposer.createBatchForBlocks(blocks)
if err != nil {
return fmt.Errorf("failed to create batch: %v", err)
}
err = w.orm.UpdateProvingStatus(batchID, orm.ProvingTaskProved)
if err != nil {
return fmt.Errorf("failed to update genesis batch proving status: %v", err)
}
err = w.orm.UpdateRollupStatus(w.ctx, batchID, orm.RollupFinalized)
if err != nil {
return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
}
log.Info("successfully imported genesis batch")
return nil
}
// Start the Listening process
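
The BaseFee workaround in initializeGenesis above deserves a note: a header's hash is the keccak of its RLP encoding, and RLP omits a nil BaseFee while encoding an explicit zero, so a nil value would produce a hash that disagrees with l2geth's genesis hash. A sketch of that observation, assuming the scroll-tech/go-ethereum fork behaves like upstream geth here:

package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/core/types"
)

func main() {
	var h types.Header
	nilFeeHash := h.Hash() // BaseFee == nil: field omitted from the RLP list

	h.BaseFee = big.NewInt(0)
	zeroFeeHash := h.Hash() // BaseFee == 0: field encoded explicitly

	// The two hashes differ, which is why initializeGenesis pins
	// BaseFee to zero before trusting genesis.Hash().
	fmt.Println(nilFeeHash != zeroFeeHash) // true
}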


@@ -88,7 +88,6 @@ type Sender struct {
blockNumber uint64 // Current block number on chain.
baseFeePerGas uint64 // Current base fee per gas on chain
- pendingNum int64 // current pending tx count.
pendingTxs sync.Map // Mapping from nonce to pending transaction
confirmCh chan *Confirmation
@@ -150,16 +149,6 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
return sender, nil
}
- // PendingCount return the current pending txs num.
- func (s *Sender) PendingCount() int64 {
- return atomic.LoadInt64(&s.pendingNum)
- }
- // PendingLimit return the maximum pendingTxs can handle.
- func (s *Sender) PendingLimit() int64 {
- return s.config.PendingLimit
- }
// Stop stop the sender module.
func (s *Sender) Stop() {
close(s.stopCh)
@@ -210,27 +199,16 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
}, nil
}
- // IsFull If pendingTxs pool is full return true.
- func (s *Sender) IsFull() bool {
- return atomic.LoadInt64(&s.pendingNum) == s.config.PendingLimit
- }
// SendTransaction send a signed L2tL1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte) (hash common.Hash, err error) {
- if s.IsFull() {
- return common.Hash{}, fmt.Errorf("pending txs is full, pending size: %d", s.config.PendingLimit)
- }
// We occupy the ID, in case some other threads call with the same ID at the same time
if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
}
- atomic.AddInt64(&s.pendingNum, 1)
// get
auth := s.auths.getAccount()
if auth == nil {
s.pendingTxs.Delete(ID) // release the ID on failure
- atomic.AddInt64(&s.pendingNum, -1)
return common.Hash{}, ErrNoAvailableAccount
}
@@ -238,7 +216,6 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
defer func() {
if err != nil {
s.pendingTxs.Delete(ID) // release the ID on failure
- atomic.AddInt64(&s.pendingNum, -1)
}
}()
@@ -266,61 +243,6 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
return
}
func (s *Sender) getTxAndAddr(txHash common.Hash) (*types.Transaction, uint64, common.Address, error) {
tx, isPending, err := s.client.TransactionByHash(s.ctx, txHash)
if err != nil {
return nil, 0, common.Address{}, err
}
sender, err := types.Sender(types.LatestSignerForChainID(s.chainID), tx)
if err != nil {
return nil, 0, common.Address{}, err
}
if isPending {
return tx, s.blockNumber, sender, nil
}
receipt, err := s.client.TransactionReceipt(s.ctx, txHash)
if err != nil {
return nil, 0, common.Address{}, err
}
return tx, receipt.BlockNumber.Uint64(), sender, nil
}
// LoadOrSendTx If the tx already exist in chain load it or resend it.
func (s *Sender) LoadOrSendTx(destTxHash common.Hash, ID string, target *common.Address, value *big.Int, data []byte) error {
tx, blockNumber, from, err := s.getTxAndAddr(destTxHash)
// If this tx already exist load it to the pending.
if err == nil && tx != nil {
auth := s.auths.accounts[from]
var feeData *FeeData
feeData, err = s.getFeeData(auth, target, value, data)
if err != nil {
return err
}
// We occupy the ID, in case some other threads call with the same ID in the same time
if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
return fmt.Errorf("has the repeat tx ID, ID: %s", ID)
}
atomic.AddInt64(&s.pendingNum, 1)
s.pendingTxs.Store(ID, &PendingTransaction{
tx: tx,
id: ID,
signer: auth,
// Record the transaction's block blockNumber.
submitAt: blockNumber,
feeData: feeData,
})
return nil
}
// Tx is dropped from chain node, resend it.
_, err = s.SendTransaction(ID, target, value, data)
return err
}
func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (tx *types.Transaction, err error) {
var (
nonce = auth.Nonce.Uint64()
@@ -466,7 +388,6 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
if (err == nil) && (receipt != nil) {
if receipt.BlockNumber.Uint64() <= confirmed {
s.pendingTxs.Delete(key)
- atomic.AddInt64(&s.pendingNum, -1)
// send confirm message
s.confirmCh <- &Confirmation{
ID: pending.id,
@@ -498,7 +419,6 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
if strings.Contains(err.Error(), "nonce") {
// This key can be deleted
s.pendingTxs.Delete(key)
- atomic.AddInt64(&s.pendingNum, -1)
// Try get receipt by the latest replaced tx hash
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {


@@ -49,7 +49,6 @@ func TestSender(t *testing.T) {
// Setup
setupEnv(t)
t.Run("testLoadOrSendTx", testLoadOrSendTx)
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
t.Run("test 3 account sender", func(t *testing.T) { testBatchSender(t, 3) })
t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
@@ -60,38 +59,6 @@ func TestSender(t *testing.T) {
})
}
func testLoadOrSendTx(t *testing.T) {
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
senderCfg.Confirmations = 0
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
if err != nil {
t.Fatal(err)
}
newSender2, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
if err != nil {
t.Fatal(err)
}
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := "aaa"
hash, err := newSender.SendTransaction(id, &toAddr, big.NewInt(0), nil)
assert.NoError(t, err)
err = newSender2.LoadOrSendTx(hash, id, &toAddr, big.NewInt(0), nil)
assert.NoError(t, err)
select {
case cfm := <-newSender2.ConfirmChan():
assert.Equal(t, true, cfm.IsSuccessful)
assert.Equal(t, hash, cfm.TxHash)
assert.Equal(t, id, cfm.ID)
case <-time.After(time.Second * 10):
t.Error("testLoadOrSendTx test failed because of timeout")
}
}
func testBatchSender(t *testing.T, batchSize int) {
for len(privateKeys) < batchSize {
priv, err := crypto.GenerateKey()


@@ -1,5 +1,5 @@
# Build libzkp dependency
- FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-08-23 as chef
+ FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-12-10 as chef
WORKDIR app
FROM chef as planner
@@ -13,10 +13,11 @@ RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .
RUN cargo build --release
+ RUN find ./ | grep libzktrie.so | xargs -i cp {} /app/target/release/
# Download Go dependencies
- FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-08-23 as base
+ FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-12-10 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
@@ -32,12 +33,14 @@ RUN go mod download -x
FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/verifier/lib
- COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/verifier/lib/
+ COPY --from=zkp-builder /app/target/release/libzkp.a ./coordinator/verifier/lib/
+ COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/verifier/lib/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv verifier/lib /bin/
# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/verifier/lib
# ENV CHAIN_ID=534353
RUN mkdir -p /src/coordinator/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/verifier/lib
COPY --from=builder /bin/coordinator /bin/


@@ -11,6 +11,6 @@ if [ ! -n "${IPC_PATH}" ];then
IPC_PATH="/tmp/l1geth_path.ipc"
fi
- exec geth --mine --datadir "." --unlock 0 --miner.etherbase "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63" --password "./password" --allow-insecure-unlock --nodiscover \
+ exec geth --mine --datadir "." --unlock 0 --password "./password" --allow-insecure-unlock --nodiscover \
--http --http.addr "0.0.0.0" --http.port 8545 --ws --ws.addr "0.0.0.0" --ws.port 8546 --ipcpath ${IPC_PATH}

File diff suppressed because it is too large


@@ -5,11 +5,18 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["dylib"]
crate-type = ["staticlib"]
+ [patch."https://github.com/privacy-scaling-explorations/halo2.git"]
+ halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "scroll-dev-0220" }
+ [patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
+ poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "scroll-dev-0220" }
+ [patch."https://github.com/scroll-tech/zktrie.git"]
+ zktrie = { git = "https://github.com/lispc/zktrie", branch = "scroll-dev-0215" }
[dependencies]
- zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
- types = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
+ zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="goerli-0215" }
+ types = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="goerli-0215" }
log = "0.4"
env_logger = "0.9.0"


@@ -1 +1 @@
- nightly-2022-08-23
+ nightly-2022-12-10


@@ -44,7 +44,7 @@ pub unsafe extern "C" fn create_agg_proof_multi(trace_char: *const c_char) -> *c
let proof = PROVER
.get_mut()
.unwrap()
- .create_agg_circuit_proof_multi(traces.as_slice())
+ .create_agg_circuit_proof_batch(traces.as_slice())
.unwrap();
let proof_bytes = serde_json::to_vec(&proof).unwrap();
vec_to_c_char(proof_bytes)


@@ -214,8 +214,9 @@ func (z *ProofDetail) Hash() ([]byte, error) {
// AggProof includes the proof and public input that are required for verification and rollup.
type AggProof struct {
- Proof []byte `json:"proof"`
- Instance []byte `json:"instance"`
- FinalPair []byte `json:"final_pair"`
- Vk []byte `json:"vk"`
+ Proof      []byte `json:"proof"`
+ Instance   []byte `json:"instance"`
+ FinalPair  []byte `json:"final_pair"`
+ Vk         []byte `json:"vk"`
+ BlockCount uint   `json:"block_count"`
}
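
BlockCount rides along in the same JSON payload the roller already sends, so old and new fields round-trip together. A minimal sketch, assuming it sits in the same package as AggProof:

package message

import (
	"encoding/json"
	"fmt"
)

// exampleAggProofRoundTrip shows BlockCount surviving a JSON round
// trip; the []byte fields marshal as base64 strings.
func exampleAggProofRoundTrip() error {
	in := AggProof{
		Proof:      []byte{0x01},
		Instance:   []byte{0x02},
		FinalPair:  []byte{0x03},
		Vk:         []byte{0x04},
		BlockCount: 2, // number of blocks covered by this aggregated proof
	}
	blob, err := json.Marshal(&in)
	if err != nil {
		return err
	}
	var out AggProof
	if err := json.Unmarshal(blob, &out); err != nil {
		return err
	}
	fmt.Println("block_count =", out.BlockCount) // prints 2
	return nil
}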


@@ -4,7 +4,7 @@ import "time"
// TryTimes runs the function up to the given number of times, stopping once it returns true.
func TryTimes(times int, run func() bool) {
- for i := 0; times == -1 || i < times; i++ {
+ for i := 0; i < times; i++ {
if run() {
return
}
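
TryTimes previously treated times == -1 as retry-forever, a mode the relayers used at startup to drain pending transactions; with that startup logic removed in this comparison, the special case is dropped and a non-positive count now means the body never runs. A usage sketch against the new signature:

// Retry up to five times, stopping early on success.
attempts := 0
TryTimes(5, func() bool {
	attempts++
	return attempts == 3 // succeeds on the third attempt
})
// Note: TryTimes(-1, run) no longer loops forever; it returns immediately.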


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "prealpha-v13.2"
var tag = "alpha-v1.1"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {


@@ -337,7 +337,7 @@ library RollupVerifier {
n.y[1] = uint256(13392588948715843804641432497768002650278120570034223513918757245338268106653);
}
- function get_wx_wg(uint256[] calldata proof, uint256[4] memory instances)
+ function get_wx_wg(uint256[] calldata proof, uint256[6] memory instances)
internal
view
returns (
@@ -354,15 +354,15 @@ library RollupVerifier {
(t0, t1) = (
ecc_mul(
- 13911018583007884881416842514661274050567796652031922980888952067142200734890,
- 6304656948134906299141761906515211516376236447819044970320185642735642777036,
+ 16273630658577275004922498653030603356133576819117084202553121866583118864964,
+ 6490159372778831696763963776713702553449715395136256408127406430701013586737,
instances[0]
)
);
(t0, t1) = (
ecc_mul_add(
- 10634526547038245645834822324032425487434811507756950001533785848774317018670,
- 11025818855933089539342999945076144168100709119485154428833847826982360951459,
+ 21465583338900056601761668793508143213048509206826828900542864688378093593107,
+ 18916078441896187703473496284050716429170517783995157941513585201547834049281,
instances[1],
t0,
t1
@@ -370,23 +370,41 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
- 13485936455723319058155687139769502499697405985650416391707184524158646623799,
- 16234009237501684544798205490615498675425737095147152991328466405207467143566,
+ 6343857336395576108841088300387244434710621968858839561085778033655098739860,
+ 8647137667680968494319179221347060255241434220013711910139382436020093396308,
instances[2],
t0,
t1
)
);
+ (t0, t1) = (
+ ecc_mul_add(
+ 17609998990685530094209191702545036897101285294398654477281719279316619940710,
+ 7891327626892441842954365090016786852185025910332850053066512639794082797200,
+ instances[3],
+ t0,
+ t1
+ )
+ );
+ (t0, t1) = (
+ ecc_mul_add(
+ 1271298011119556361067568041994358027954229594187408866479678256322993207430,
+ 16519855264988006509000373008036578681979317060055767197977112967887569978562,
+ instances[4],
+ t0,
+ t1
+ )
+ );
(m[0], m[1]) = (
ecc_mul_add(
- 21550585789286941025166870525096478397065943995678337623823808437877187678077,
- 4447338868884713453743453617617291019986465683944733951178865127876671635659,
- instances[3],
+ 9106880861932848269529912338578777683259870408474914617967634470292361865683,
+ 3191458938194545761508145121615374474619318896841102235687991186359560600763,
+ instances[5],
t0,
t1
)
);
- update_hash_scalar(18620528901694425296072105892920066495478887717015933899493919566746585676047, absorbing, 0);
+ update_hash_scalar(16714713909008743871958519800387174981836263428094013165455393524274317552599, absorbing, 0);
update_hash_point(m[0], m[1], absorbing, 2);
for (t0 = 0; t0 <= 4; t0++) {
update_hash_point(proof[0 + t0 * 2], proof[1 + t0 * 2], absorbing, 5 + t0 * 3);
@@ -413,31 +431,31 @@ library RollupVerifier {
update_hash_point(proof[137 + t0 * 2], proof[138 + t0 * 2], absorbing, 1 + t0 * 3);
}
m[8] = (squeeze_challenge(absorbing, 13));
- m[9] = (mulmod(m[6], 6143038923529407703646399695489445107254060255791852207908457597807435305312, q_mod));
- m[10] = (mulmod(m[6], 7358966525675286471217089135633860168646304224547606326237275077574224349359, q_mod));
- m[11] = (mulmod(m[6], 11377606117859914088982205826922132024839443553408109299929510653283289974216, q_mod));
- m[12] = (fr_pow(m[6], 33554432));
+ m[9] = (mulmod(m[6], 13446667982376394161563610564587413125564757801019538732601045199901075958935, q_mod));
+ m[10] = (mulmod(m[6], 16569469942529664681363945218228869388192121720036659574609237682362097667612, q_mod));
+ m[11] = (mulmod(m[6], 14803907026430593724305438564799066516271154714737734572920456128449769927233, q_mod));
+ m[12] = (fr_pow(m[6], 67108864));
m[13] = (addmod(m[12], q_mod - 1, q_mod));
- m[14] = (mulmod(21888242219518804655518433051623070663413851959604507555939307129453691614729, m[13], q_mod));
+ m[14] = (mulmod(21888242545679039938882419398440172875981108180010270949818755658014750055173, m[13], q_mod));
t0 = (addmod(m[6], q_mod - 1, q_mod));
m[14] = (fr_div(m[14], t0));
- m[15] = (mulmod(3814514741328848551622746860665626251343731549210296844380905280010844577811, m[13], q_mod));
- t0 = (addmod(m[6], q_mod - 11377606117859914088982205826922132024839443553408109299929510653283289974216, q_mod));
+ m[15] = (mulmod(3495999257316610708652455694658595065970881061159015347599790211259094641512, m[13], q_mod));
+ t0 = (addmod(m[6], q_mod - 14803907026430593724305438564799066516271154714737734572920456128449769927233, q_mod));
m[15] = (fr_div(m[15], t0));
- m[16] = (mulmod(14167635312934689395373925807699824183296350635557349457928542208657273886961, m[13], q_mod));
- t0 = (addmod(m[6], q_mod - 17329448237240114492580865744088056414251735686965494637158808787419781175510, q_mod));
+ m[16] = (mulmod(12851378806584061886934576302961450669946047974813165594039554733293326536714, m[13], q_mod));
+ t0 = (addmod(m[6], q_mod - 11377606117859914088982205826922132024839443553408109299929510653283289974216, q_mod));
m[16] = (fr_div(m[16], t0));
- m[17] = (mulmod(12609034248192017902501772617940356704925468750503023243291639149763830461639, m[13], q_mod));
- t0 = (addmod(m[6], q_mod - 16569469942529664681363945218228869388192121720036659574609237682362097667612, q_mod));
+ m[17] = (mulmod(14638077285440018490948843142723135319134576188472316769433007423695824509066, m[13], q_mod));
+ t0 = (addmod(m[6], q_mod - 3693565015985198455139889557180396682968596245011005461846595820698933079918, q_mod));
m[17] = (fr_div(m[17], t0));
m[18] = (mulmod(12805242257443675784492534138904933930037912868081131057088370227525924812579, m[13], q_mod));
t0 = (addmod(m[6], q_mod - 9741553891420464328295280489650144566903017206473301385034033384879943874347, q_mod));
m[18] = (mulmod(18027939092386982308810165776478549635922357517986691900813373197616541191289, m[13], q_mod));
t0 = (addmod(m[6], q_mod - 17329448237240114492580865744088056414251735686965494637158808787419781175510, q_mod));
m[18] = (fr_div(m[18], t0));
m[19] = (mulmod(6559137297042406441428413756926584610543422337862324541665337888392460442551, m[13], q_mod));
t0 = (addmod(m[6], q_mod - 5723528081196465413808013109680264505774289533922470433187916976440924869204, q_mod));
m[19] = (mulmod(912591536032578604421866340844550116335029274442283291811906603256731601654, m[13], q_mod));
t0 = (addmod(m[6], q_mod - 6047398202650739717314770882059679662647667807426525133977681644606291529311, q_mod));
m[19] = (fr_div(m[19], t0));
m[20] = (mulmod(14811589476322888753142612645486192973009181596950146578897598212834285850868, m[13], q_mod));
t0 = (addmod(m[6], q_mod - 7358966525675286471217089135633860168646304224547606326237275077574224349359, q_mod));
m[20] = (mulmod(17248638560015646562374089181598815896736916575459528793494921668169819478628, m[13], q_mod));
t0 = (addmod(m[6], q_mod - 16569469942529664681363945218228869388192121720036659574609237682362097667612, q_mod));
m[20] = (fr_div(m[20], t0));
t0 = (addmod(m[15], m[16], q_mod));
t0 = (addmod(t0, m[17], q_mod));
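A note on the hunk above: the `fr_pow` exponent moved from 33554432 to 67108864, i.e. the verifier's evaluation domain grew from 2^25 to 2^26. This presumably tracks the setup-parameter swap (params25 → params26, params18 → params19) visible in the download scripts later in this diff. Expressed as Go constants for reference (illustrative only, not code from the repository):

```go
// domain sizes implied by the fr_pow exponents above
const (
	oldDomainSize = 1 << 25 // 33554432, before this change
	newDomainSize = 1 << 26 // 67108864, after this change
)
```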
@@ -724,8 +742,8 @@ library RollupVerifier {
(t0, t1) = (ecc_mul_add_pm(m, proof, 1461486238301980199876269201563775120819706402602, t0, t1));
(t0, t1) = (
ecc_mul_add(
5335172776193682293002595672140655300498265857728161236987288793112411362256,
9153855726472286104461915396077745889260526805920405949557461469033032628222,
1166255827574633395469889753099263335112651429543747917860844891223509395230,
18119530258797056675590474142263379269133137917926199526995010149706608452268,
m[78],
t0,
t1
@@ -733,8 +751,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
9026202793013131831701482540600751978141377016764300618037152689098701087208,
19644677619301694001087044142922327551116787792977369058786364247421954485859,
479654250230311733675045936187074887335076118790675548184957988765243051391,
3100719863754926915077773261837642988281275398456491618898287285885297258973,
m[77],
t0,
t1
@@ -742,8 +760,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
10826234859452509771814283128042282248838241144105945602706734900173561719624,
5628243352113405764051108388315822074832358861640908064883601198703833923438,
3244117516185602927429536955777596704962143625995582449305913349309466588374,
4949447249861524239830935874731901209583893161129086694779290040738731707868,
m[76],
t0,
t1
@@ -751,8 +769,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
9833916648960859819503777242562918959056952519298917148524233826817297321072,
837915750759756490172805968793319594111899487492554675680829218939384285955,
14948547489533026990535642276664751166524290089518721217701084060838942037816,
4158304819018152066924650590566402375351800342702049911667413813453648544913,
m[75],
t0,
t1
@@ -760,8 +778,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
10257805671474982710489846158410183388099935223468876792311814484878286190506,
6925081619093494730602614238209964215162532591387952125009817011864359314464,
12409839630787558779666051790740339639835641801241950167020910758875751567721,
10190386726927990167988725115981898191213252554332296547744162818590468069671,
m[74],
t0,
t1
@@ -769,8 +787,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
4475887208248126488081900175351981014160135345959097965081514547035591501401,
17809934801579097157548855239127693133451078551727048660674021788322026074440,
17970998203939514710036667497443822563987440725661639935300105673829885028203,
5681616020208389658397995048088678631695525787311431942560298329387592854586,
m[73],
t0,
t1
@@ -778,8 +796,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
8808629196631084710334110767449499515582902470045288549019060600095073238105,
13294364470509711632739201553507258372326885785844949555702886281377427438475,
5422170891120229182360564594866246906567981360038071999127508208070564034524,
14722029885921976755274052080011416898514630484317773275415621146460924728182,
m[72],
t0,
t1
@@ -787,8 +805,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
5025513109896000321643874120256520860696240548707294083465215087271048364447,
3512836639252013523316566987122028012000136443005216091303269685639094608348,
3955318928206501525438681058758319558200398421433597349851235741670899388496,
15892053452767975688653514510353871405466169306176036727161401156637227884251,
m[71],
t0,
t1
@@ -796,8 +814,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
20143075587083355112417414887372164250381042430441089145485481665404780784123,
9674175910548207533970570126063643897609459066877075659644076646142886425503,
18451207565454686459225553564649439057698581050443267052774483067774590965003,
4419693978684087696088612463773850574955779922948673330581664932100506990694,
m[70],
t0,
t1
@@ -805,8 +823,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
15449875505347857882486479091299788291220259329814373554032711960946424724459,
18962357525499685082729877436365914814836051345178637509857216081206536249101,
847101878434221983907574308143360385944534458215526175646288607915875901481,
2846353475656269162370753247605184679473264230467654203502980134120309217445,
m[69],
t0,
t1
@@ -814,8 +832,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
8808629196631084710334110767449499515582902470045288549019060600095073238105,
13294364470509711632739201553507258372326885785844949555702886281377427438475,
5422170891120229182360564594866246906567981360038071999127508208070564034524,
14722029885921976755274052080011416898514630484317773275415621146460924728182,
m[68],
t0,
t1
@@ -823,8 +841,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
18539453526841971932568089122596968064597086391356856358866942118522457107863,
3647865108410881496134024808028560930237661296032155096209994441023206530212,
12355852135968866678343538084506414981897123075397230437920965961095525036339,
19173350083521771086213125757940272853888577158427508914933730457941026326040,
m[67],
t0,
t1
@@ -832,8 +850,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
11667150339256836494926506499230187360957884531183800528342644917396989453992,
15540782144062394272475578831064080588044323224200171932910650185556553066875,
21537162186981550637121053147454964150809482185492418377558290311964245821909,
2173324946696678910860567153502925685634606622474439126082176533839311460335,
m[66],
t0,
t1
@@ -841,8 +859,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
19267653195273486172950176174654469275684545789180737280515385961619717720594,
8975180971271331994178632284567744253406636398050840906044549681238954521839,
20702481083445183838662364419201395944400358423071711333544748994437443350157,
21729036491728923882358088642589857779818948470983153549909552615176584955200,
m[65],
t0,
t1
@@ -850,8 +868,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
19156320437354843782276382482504062704637529342417677454208679985931193905144,
12513036134308417802230431028731202760516379532825961661396005403922128650283,
5211075648402252045446907842677410998750480902260529776286467677659191740672,
17759936859541227097052484319437171023743724174885338509498798745592136568923,
m[64],
t0,
t1
@@ -859,8 +877,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
14883199025754033315476980677331963148201112555947054150371482532558947065890,
19913319410736467436640597337700981504577668548125107926660028143291852201132,
5685082624811934526131077036509066197941130699019907200139767495570575867807,
9975752329518147542127949868789945608848626426600733728808879384778577859545,
m[63],
t0,
t1
@@ -868,8 +886,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
18290509522533712126835038141804610778392690327965261406132668236833728306838,
3710298066677974093924183129147170087104393961634393354172472701713090868425,
1845955600044282712468400114813806019045133083112296001842856684609288249746,
6677624509889210837770197526955652810854887548330294041671470991988491766303,
m[62],
t0,
t1
@@ -877,8 +895,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
19363467492491917195052183458025198134822377737629876295496853723068679518308,
5854330679906778271391785925618350923591828884998994352880284635518306250788,
17721426954552427189787075605835833086212392642349293317822925006771731953198,
10818582862561493154030196266254401851195091198556669943079029419869326006448,
m[61],
t0,
t1
@@ -886,8 +904,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
16181109780595136982201896766265118193466959602989950846464420951358063185297,
8811570609098296287610981932552574275858846837699446990256241563674576678567,
10224195420706066705577574946990328089867884648164309818089282930621493257750,
3961164971057442035153270823831734824136501489880082889417523554417504868473,
m[60],
t0,
t1
@@ -895,8 +913,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
10062549363619405779400496848029040530770586215674909583260224093983878118724,
1989582851705118987083736605676322092152792129388805756040224163519806904905,
4155760488117491189818018229959225087159948854404593659816501566044290851616,
7849169269773333823959590214273366557169699873629739076719523623811579483219,
m[59],
t0,
t1
@@ -904,8 +922,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
19016883348721334939078393300672242978266248861945205052660538073783955572863,
1976040209279107904310062622264754919366006151976304093568644070161390236037,
9303688548891777886487749234688027352493881691026887577351708905397127609597,
15420408437274623857443274867832176492025874147466147921781316121716419230415,
m[58],
t0,
t1
@@ -913,8 +931,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
4833170740960210126662783488087087210159995687268566750051519788650425720369,
14321097009933429277686973550787181101481482473464521566076287626133354519061,
1713011977361327447402228333889074876456179272285913377605323580535155713105,
17494574374943878587945090358233307058027002207479570017169918665020362475592,
m[57],
t0,
t1
@@ -922,8 +940,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
10610438624239085062445835373411523076517149007370367578847561825933262473434,
14538778619212692682166219259545768162136692909816914624880992580957990166795,
688560977158667877997491129442687540611216305867558421257325952561991356422,
1877117185103259325255107191485730322497880777053300656925558921917058739650,
m[56],
t0,
t1
@@ -937,12 +955,15 @@ library RollupVerifier {
}
function verify(uint256[] calldata proof, uint256[] calldata target_circuit_final_pair) public view {
uint256[4] memory instances;
uint256[6] memory instances;
instances[0] = target_circuit_final_pair[0] & ((1 << 136) - 1);
instances[1] = (target_circuit_final_pair[0] >> 136) + ((target_circuit_final_pair[1] & 1) << 136);
instances[2] = target_circuit_final_pair[2] & ((1 << 136) - 1);
instances[3] = (target_circuit_final_pair[2] >> 136) + ((target_circuit_final_pair[3] & 1) << 136);
instances[4] = target_circuit_final_pair[4];
instances[5] = target_circuit_final_pair[5];
uint256 x0 = 0;
uint256 x1 = 0;
uint256 y0 = 0;
@@ -961,7 +982,7 @@ library RollupVerifier {
g2_points[1] = get_verify_circuit_g2_n();
checked = pairing(g1_points, g2_points);
require(checked, "verified failed");
require(checked);
g1_points[0].x = target_circuit_final_pair[0];
g1_points[0].y = target_circuit_final_pair[1];
@@ -971,6 +992,6 @@ library RollupVerifier {
g2_points[1] = get_target_circuit_g2_n();
checked = pairing(g1_points, g2_points);
require(checked, "verified failed");
require(checked);
}
}
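The regenerated verifier also widens the public input from four instances to six: words 0 and 2 of `target_circuit_final_pair` are still split into 136-bit limbs, while the two new words (`target_circuit_final_pair[4]` and `[5]`) pass through unchanged. Below is a minimal Go sketch of the same packing, assuming `math/big` inputs; it illustrates the Solidity in the `verify` hunks above and is not code from this repository.

```go
package main

import "math/big"

// splitFinalPair mirrors the instance packing in RollupVerifier.verify:
// instances 0 and 2 are the low 136 bits of words 0 and 2; instances 1
// and 3 take the remaining high bits plus one bit borrowed from words 1
// and 3; instances 4 and 5 (new in this change) are copied as-is.
func splitFinalPair(pair [6]*big.Int) [6]*big.Int {
	one := big.NewInt(1)
	mask136 := new(big.Int).Sub(new(big.Int).Lsh(one, 136), one)

	var inst [6]*big.Int
	inst[0] = new(big.Int).And(pair[0], mask136)
	inst[1] = new(big.Int).Add(
		new(big.Int).Rsh(pair[0], 136),
		new(big.Int).Lsh(new(big.Int).And(pair[1], one), 136),
	)
	inst[2] = new(big.Int).And(pair[2], mask136)
	inst[3] = new(big.Int).Add(
		new(big.Int).Rsh(pair[2], 136),
		new(big.Int).Lsh(new(big.Int).And(pair[3], one), 136),
	)
	inst[4], inst[5] = pair[4], pair[5]
	return inst
}
```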

View File

@@ -14,8 +14,9 @@ test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
libzkp:
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.so ../interface/
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
rm -rf ./verifier/lib && cp -r ../common/libzkp/interface ./verifier/lib
find ../common | grep libzktrie.so | xargs -i cp {} ./verifier/lib/
coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd

File diff suppressed because one or more lines are too long

Binary file not shown.

View File

@@ -3,8 +3,8 @@
package verifier
/*
#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.so -lm -ldl
#cgo gpu LDFLAGS: ${SRCDIR}/lib/libzkp.so -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart
#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -L${SRCDIR}/lib/ -lcudart -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/

View File

@@ -193,7 +193,7 @@ func (o *blockBatchOrm) ResetProvingStatusFor(before ProvingStatus) error {
}
func (o *blockBatchOrm) NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, endBlock *BlockInfo, parentHash string, totalTxNum uint64, totalL2Gas uint64) (string, error) {
row := dbTx.QueryRow("SELECT COALESCE(MAX(index), 0) FROM block_batch;")
row := dbTx.QueryRow("SELECT COALESCE(MAX(index), -1) FROM block_batch;")
// TODO: use *big.Int for this
var index int64
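The `COALESCE` default moving from 0 to -1 changes which index the first batch receives: assuming the next index is computed as `MAX(index) + 1`, an empty `block_batch` table now yields index 0 for the very first batch instead of 1. A hedged sketch of that allocation (hypothetical helper, not code from the diff):

```go
import "github.com/jmoiron/sqlx"

// nextBatchIndex illustrates the effect of the new COALESCE default:
// on an empty block_batch table the query returns -1, so the first
// batch created afterwards gets index 0.
func nextBatchIndex(dbTx *sqlx.Tx) (int64, error) {
	var maxIndex int64
	row := dbTx.QueryRow("SELECT COALESCE(MAX(index), -1) FROM block_batch;")
	if err := row.Scan(&maxIndex); err != nil {
		return 0, err
	}
	return maxIndex + 1, nil // 0 when no batches exist yet
}
```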
@@ -236,29 +236,6 @@ func (o *blockBatchOrm) BatchRecordExist(id string) (bool, error) {
return true, nil
}
func (o *blockBatchOrm) GetBatchesByRollupStatus(status RollupStatus, limit uint64) ([]string, error) {
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, status, limit)
if err != nil {
return nil, err
}
var ids []string
for rows.Next() {
var id string
if err = rows.Scan(&id); err != nil {
break
}
ids = append(ids, id)
}
if len(ids) == 0 || errors.Is(err, sql.ErrNoRows) {
// log.Warn("no pending batches in db", "err", err)
} else if err != nil {
return nil, err
}
return ids, rows.Close()
}
func (o *blockBatchOrm) GetPendingBatches(limit uint64) ([]string, error) {
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, RollupPending, limit)
if err != nil {
@@ -430,6 +407,15 @@ func (o *blockBatchOrm) GetAssignedBatchIDs() ([]string, error) {
return ids, rows.Close()
}
func (o *blockBatchOrm) GetBatchCount() (int64, error) {
row := o.db.QueryRow(`select count(*) from block_batch`)
var count int64
if err := row.Scan(&count); err != nil {
return -1, err
}
return count, nil
}
func (o *blockBatchOrm) UpdateSkippedBatches() (int64, error) {
res, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ? where (proving_status = ? or proving_status = ?) and rollup_status = ?;"), RollupFinalizationSkipped, ProvingTaskSkipped, ProvingTaskFailed, RollupCommitted)
if err != nil {
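`GetBatchCount` is new in this change; one plausible caller-side use is a startup check that only seeds the database when no batch exists yet. A minimal usage sketch, assuming a value `orm` implementing the `BlockBatchOrm` interface (the helper itself and the seeding step are hypothetical, not part of this diff):

```go
// ensureFirstBatch seeds the very first batch only on an empty table.
func ensureFirstBatch(orm BlockBatchOrm) error {
	count, err := orm.GetBatchCount()
	if err != nil {
		return err
	}
	if count == 0 {
		// block_batch is empty: insert the initial batch here (elided)
	}
	return nil
}
```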

View File

@@ -46,7 +46,6 @@ type L1Message struct {
Target string `json:"target" db:"target"`
Calldata string `json:"calldata" db:"calldata"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Status MsgStatus `json:"status" db:"status"`
}
@@ -63,7 +62,6 @@ type L2Message struct {
Target string `json:"target" db:"target"`
Calldata string `json:"calldata" db:"calldata"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Status MsgStatus `json:"status" db:"status"`
}
@@ -147,7 +145,6 @@ type BlockBatchOrm interface {
ResetProvingStatusFor(before ProvingStatus) error
NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, endBlock *BlockInfo, parentHash string, totalTxNum uint64, gasUsed uint64) (string, error)
BatchRecordExist(id string) (bool, error)
GetBatchesByRollupStatus(status RollupStatus, limit uint64) ([]string, error)
GetPendingBatches(limit uint64) ([]string, error)
GetCommittedBatches(limit uint64) ([]string, error)
GetRollupStatus(id string) (RollupStatus, error)
@@ -158,6 +155,7 @@ type BlockBatchOrm interface {
UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalizeTxHash string, status RollupStatus) error
GetAssignedBatchIDs() ([]string, error)
UpdateSkippedBatches() (int64, error)
GetBatchCount() (int64, error)
GetCommitTxHash(id string) (sql.NullString, error) // for unit tests only
GetFinalizeTxHash(id string) (sql.NullString, error) // for unit tests only
@@ -169,7 +167,6 @@ type L1MessageOrm interface {
GetL1MessageByMsgHash(msgHash string) (*L1Message, error)
GetL1MessagesByStatus(status MsgStatus, limit uint64) ([]*L1Message, error)
GetL1ProcessedNonce() (int64, error)
GetL1Messages(fields map[string]interface{}, args ...string) ([]*L1Message, error)
SaveL1Messages(ctx context.Context, messages []*L1Message) error
UpdateLayer2Hash(ctx context.Context, msgHash string, layer2Hash string) error
UpdateLayer1Status(ctx context.Context, msgHash string, status MsgStatus) error

View File

@@ -4,8 +4,6 @@ import (
"context"
"database/sql"
"errors"
"fmt"
"strings"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/log"
@@ -48,7 +46,7 @@ func (m *l1MessageOrm) GetL1MessageByNonce(nonce uint64) (*L1Message, error) {
// GetL1MessagesByStatus fetch list of unprocessed messages given msg status
func (m *l1MessageOrm) GetL1MessagesByStatus(status MsgStatus, limit uint64) ([]*L1Message, error) {
rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, layer2_hash, status FROM l1_message WHERE status = $1 ORDER BY nonce ASC LIMIT $2;`, status, limit)
rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, status FROM l1_message WHERE status = $1 ORDER BY nonce ASC LIMIT $2;`, status, limit)
if err != nil {
return nil, err
}
@@ -89,37 +87,6 @@ func (m *l1MessageOrm) GetL1ProcessedNonce() (int64, error) {
return -1, nil
}
// GetL1Messages get l1 messages by k-v map and args.
func (m *l1MessageOrm) GetL1Messages(fields map[string]interface{}, args ...string) ([]*L1Message, error) {
query := "SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, layer2_hash, status FROM l2_message WHERE 1 = 1 "
for key := range fields {
query += fmt.Sprintf(" AND %s=:%s ", key, key)
}
query = strings.Join(append([]string{query}, args...), " ")
db := m.db
rows, err := db.NamedQuery(db.Rebind(query), fields)
if err != nil {
return nil, err
}
var msgs []*L1Message
for rows.Next() {
msg := &L1Message{}
if err = rows.StructScan(&msg); err != nil {
break
}
msgs = append(msgs, msg)
}
if len(msgs) == 0 || errors.Is(err, sql.ErrNoRows) {
// log.Warn("no unprocessed layer2 messages in db", "err", err)
} else if err != nil {
return nil, err
}
return msgs, rows.Close()
}
// SaveL1Messages batch save a list of layer1 messages
func (m *l1MessageOrm) SaveL1Messages(ctx context.Context, messages []*L1Message) error {
if len(messages) == 0 {
@@ -140,10 +107,9 @@ func (m *l1MessageOrm) SaveL1Messages(ctx context.Context, messages []*L1Message
"deadline": msg.Deadline,
"calldata": msg.Calldata,
"layer1_hash": msg.Layer1Hash,
"layer2_hash": msg.Layer2Hash,
}
}
_, err := m.db.NamedExec(`INSERT INTO public.l1_message (nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, layer2_hash) VALUES (:nonce, :msg_hash, :height, :sender, :target, :value, :fee, :gas_limit, :deadline, :calldata, :layer1_hash, :layer2_hash);`, messageMaps)
_, err := m.db.NamedExec(`INSERT INTO public.l1_message (nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash) VALUES (:nonce, :msg_hash, :height, :sender, :target, :value, :fee, :gas_limit, :deadline, :calldata, :layer1_hash);`, messageMaps)
if err != nil {
nonces := make([]uint64, 0, len(messages))
heights := make([]uint64, 0, len(messages))

View File

@@ -88,9 +88,9 @@ func (m *layer2MessageOrm) GetL2ProcessedNonce() (int64, error) {
return -1, nil
}
// GetL2Messages fetch list of messages given msg status
// GetL2MessagesByStatus fetch list of messages given msg status
func (m *layer2MessageOrm) GetL2Messages(fields map[string]interface{}, args ...string) ([]*L2Message, error) {
query := "SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash, layer1_hash FROM l2_message WHERE 1 = 1 "
query := "SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash FROM l2_message WHERE 1 = 1 "
for key := range fields {
query += fmt.Sprintf("AND %s=:%s ", key, key)
}
@@ -138,12 +138,11 @@ func (m *layer2MessageOrm) SaveL2Messages(ctx context.Context, messages []*L2Mes
"gas_limit": msg.GasLimit,
"deadline": msg.Deadline,
"calldata": msg.Calldata,
"layer1_hash": msg.Layer1Hash,
"layer2_hash": msg.Layer2Hash,
}
}
_, err := m.db.NamedExec(`INSERT INTO public.l2_message (nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, layer2_hash) VALUES (:nonce, :msg_hash, :height, :sender, :target, :value, :fee, :gas_limit, :deadline, :calldata, :layer1_hash, :layer2_hash);`, messageMaps)
_, err := m.db.NamedExec(`INSERT INTO public.l2_message (nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash) VALUES (:nonce, :msg_hash, :height, :sender, :target, :value, :fee, :gas_limit, :deadline, :calldata, :layer2_hash);`, messageMaps)
if err != nil {
nonces := make([]uint64, 0, len(messages))
heights := make([]uint64, 0, len(messages))

View File

@@ -10,8 +10,9 @@ else
endif
libzkp:
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.so ../interface/
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
rm -rf ./prover/lib && cp -r ../common/libzkp/interface ./prover/lib
find ../common | grep libzktrie.so | xargs -i cp {} ./prover/lib/
roller: libzkp ## Build the Roller instance.
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/roller ./cmd

View File

@@ -6,7 +6,14 @@ make clean && make roller
```
## Start
- use config.toml
- Set environment variables
```shell
export CHAIN_ID=534353 # change to correct chain_id
export RUST_MIN_STACK=100000000
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./prover/lib:/usr/local/cuda/ # cuda only for GPU machine
```
- Use config.toml
```shell
./build/bin/roller
```

View File

@@ -1,41 +0,0 @@
{
"coinbase": {
"address": "0x7157f3b0aee00adbe3d8b6609eda9480e141065a",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"header": {
"parentHash": "0xde613062d01fdfb97065e60ac4bc0da9118e80c1e394007b68dafa542e043d53",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
"transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x1",
"number": "0x1",
"gasLimit": "0x37f94131",
"gasUsed": "0x0",
"timestamp": "0x63808894",
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000002e12fa7e17d64b31990ba42a4c726fc620c51ff9be07c1e151ee909f9a43329d0853a8902b60e94da9f3979fb91dec57022b8962c146e3c265c6b4eecc282d0600",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x342770c0",
"hash": "0xfa0235b7e860c08d5156a18c1f4d6fd89eed8202de7f3043bd10d46a4bb3f8c4"
},
"transactions": [],
"storageTrace": {
"rootBefore": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
"rootAfter": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
"proofs": {
"0x7157F3b0AEe00adBe3D8B6609edA9480E141065a": [
"0x0023817270d692108d3f2583c4fdddb93f05840da992233af555384642d2d480e02c05b065c0e03cc9ea9c6f16cd37395379d47225f9adfe626a288ed94807bd46",
"0x0012f68259c5658fa795d5efebf43f2cdda388eb1f15db83e305743c458fce44100b0c36cf61ec8e8522dcac76c3418bff6e2cb91215e5c61fbc0ec735aff79a3a",
"0x0124307d227b4219bed858923ccd524f3a235905a749e4372e26522bc8a4f58e0a04040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000021e19e0c9bab2400000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470000000000000000000000000000000000000000000000000000000000000000020222214dcc294b72e40d2f37111a1f966aaefdbdd000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
]
}
},
"executionResults": []
}

View File

@@ -1,41 +0,0 @@
{
"coinbase": {
"address": "0xcb733b0fd0186ff37e7f717a0889afff71dde477",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"header": {
"parentHash": "0xfa0235b7e860c08d5156a18c1f4d6fd89eed8202de7f3043bd10d46a4bb3f8c4",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
"transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x2",
"number": "0x2",
"gasLimit": "0x37eb42e2",
"gasUsed": "0x0",
"timestamp": "0x63808897",
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000008c3d2a4a86b50b40f9651690270aac1bbb5c9ccba9f8fe199a4d55bd773a88296557a19219888f3019477369c8ef6e544e9bfa2411fbac16563c89826356810a00",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x2da282a8",
"hash": "0x74a0c485b46c9a2817dbc633c8afafca4552dbd8781e5d9510e274b571eae422"
},
"transactions": [],
"storageTrace": {
"rootBefore": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
"rootAfter": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
"proofs": {
"0xCB733b0fd0186FF37e7f717a0889AfFF71DdE477": [
"0x0023817270d692108d3f2583c4fdddb93f05840da992233af555384642d2d480e02c05b065c0e03cc9ea9c6f16cd37395379d47225f9adfe626a288ed94807bd46",
"0x0012f68259c5658fa795d5efebf43f2cdda388eb1f15db83e305743c458fce44100b0c36cf61ec8e8522dcac76c3418bff6e2cb91215e5c61fbc0ec735aff79a3a",
"0x0124307d227b4219bed858923ccd524f3a235905a749e4372e26522bc8a4f58e0a04040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000021e19e0c9bab2400000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470000000000000000000000000000000000000000000000000000000000000000020222214dcc294b72e40d2f37111a1f966aaefdbdd000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
]
}
},
"executionResults": []
}

View File

@@ -1,41 +0,0 @@
{
"coinbase": {
"address": "0x7157f3b0aee00adbe3d8b6609eda9480e141065a",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"header": {
"parentHash": "0x74a0c485b46c9a2817dbc633c8afafca4552dbd8781e5d9510e274b571eae422",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
"transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x2",
"number": "0x3",
"gasLimit": "0x37dd4813",
"gasUsed": "0x0",
"timestamp": "0x6380889a",
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e7578000000000000ec9f6fafe9ec47577b0fac5d63f6abb19bb69c071ca7b21d3b7f014a60ccf5fb3507c3a3c4cf2c1159533f2081a9db54f55831c6a31d652c0ff9ff75ca4d081100",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x27ee3253",
"hash": "0x33292f2ec508af712c7f98dc3799021b4a3391dfa6456ef8041f8aa1556c1bc0"
},
"transactions": [],
"storageTrace": {
"rootBefore": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
"rootAfter": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
"proofs": {
"0x7157F3b0AEe00adBe3D8B6609edA9480E141065a": [
"0x0023817270d692108d3f2583c4fdddb93f05840da992233af555384642d2d480e02c05b065c0e03cc9ea9c6f16cd37395379d47225f9adfe626a288ed94807bd46",
"0x0012f68259c5658fa795d5efebf43f2cdda388eb1f15db83e305743c458fce44100b0c36cf61ec8e8522dcac76c3418bff6e2cb91215e5c61fbc0ec735aff79a3a",
"0x0124307d227b4219bed858923ccd524f3a235905a749e4372e26522bc8a4f58e0a04040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000021e19e0c9bab2400000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470000000000000000000000000000000000000000000000000000000000000000020222214dcc294b72e40d2f37111a1f966aaefdbdd000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
]
}
},
"executionResults": []
}

View File

@@ -1,41 +0,0 @@
{
"coinbase": {
"address": "0xadf5218f7ca8c80d90ff63af5fef486af57c2096",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"header": {
"parentHash": "0x33292f2ec508af712c7f98dc3799021b4a3391dfa6456ef8041f8aa1556c1bc0",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
"transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x2",
"number": "0x4",
"gasLimit": "0x37cf50c2",
"gasUsed": "0x0",
"timestamp": "0x6380889d",
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000006c7674d5a049e0d4e5d884745f98b17df096eb9814ce788e232bb55976ebba271a3b59cf5e5c69eeb08cb6679453e05ccc4f6279d023beb0e392816c16b113df00",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x22f06c09",
"hash": "0x4b1fb45bfaa6e7662cb1331312f10575997b976bbd772332681a9a005adfc329"
},
"transactions": [],
"storageTrace": {
"rootBefore": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
"rootAfter": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
"proofs": {
"0xadf5218f7ca8C80d90Ff63af5FEF486Af57C2096": [
"0x0023817270d692108d3f2583c4fdddb93f05840da992233af555384642d2d480e02c05b065c0e03cc9ea9c6f16cd37395379d47225f9adfe626a288ed94807bd46",
"0x0012f68259c5658fa795d5efebf43f2cdda388eb1f15db83e305743c458fce44100b0c36cf61ec8e8522dcac76c3418bff6e2cb91215e5c61fbc0ec735aff79a3a",
"0x0124307d227b4219bed858923ccd524f3a235905a749e4372e26522bc8a4f58e0a04040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000021e19e0c9bab2400000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470000000000000000000000000000000000000000000000000000000000000000020222214dcc294b72e40d2f37111a1f966aaefdbdd000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
]
}
},
"executionResults": []
}

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

roller/assets/traces/196.json (new file, 42482 lines)

File diff suppressed because one or more lines are too long

View File

@@ -4,8 +4,8 @@
package prover
/*
#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.so -lm -ldl
#cgo gpu LDFLAGS: ${SRCDIR}/lib/libzkp.so -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart
#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -L${SRCDIR}/lib/ -lcudart -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/

View File

@@ -5,6 +5,6 @@ DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
PROJ_DIR=$DIR"/.."
mkdir -p $PROJ_DIR/assets/params
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-0920/test_seed -O $PROJ_DIR/assets/seed
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-0920/test_params/params18 -O $PROJ_DIR/assets/params/params18
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-0920/test_params/params25 -O $PROJ_DIR/assets/params/params25
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-1220/test_seed -O $PROJ_DIR/assets/seed
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-1220/test_params/params19 -O $PROJ_DIR/assets/params/params19
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-1220/test_params/params26 -O $PROJ_DIR/assets/params/params26
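Since the script above only fetches `seed`, `params/params19`, and `params/params26`, a small pre-flight check before launching the roller can catch a failed download early. A hypothetical Go sketch (file names taken from the script; the helper is not part of the repository):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// checkAssets verifies that the files fetched by the download script exist.
func checkAssets(assetsDir string) error {
	for _, f := range []string{"seed", "params/params19", "params/params26"} {
		if _, err := os.Stat(filepath.Join(assetsDir, f)); err != nil {
			return fmt.Errorf("missing asset %s: %w", f, err)
		}
	}
	return nil
}
```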