Compare commits

..

14 Commits

Author SHA1 Message Date
maskpp
4df23100d1 fix(sender): Update estimate gas logic. (#354)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-03-10 20:37:36 +08:00
Lawliet-Chan
ee4c00eb6b fix(roller): fix stack bug (#320)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-03-06 19:59:33 +08:00
Xi Lin
e1ec4d1f05 fix(contract): forbid to call message queue and l2 messenger from l1 (#341)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-03-06 14:31:33 +08:00
Lawliet-Chan
44e27b1110 bugfix(libzkp): fix difficulty (#343) 2023-03-03 22:09:29 +08:00
Péter Garamvölgyi
e78cff529c fix: reduce finalize batch tx frequency (#332) 2023-03-01 09:01:15 +01:00
ChuhanJin
4c0ff9306b fix(build): jenkinsfile tag job optimized and fix (#331)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-03-01 14:39:43 +08:00
Lawliet-Chan
669f3f45b4 Fix(zkevm): fix zkevm bug for goerli. (#334) 2023-03-01 14:28:18 +08:00
HAOYUatHZ
24c7a632f2 fix(db): fix SetMaxOpenConns (#328) 2023-02-28 15:15:15 +08:00
Haichen Shen
cc64c29f56 feat(batch proposer): add time limit to commit batches (#323) 2023-02-25 16:25:09 +08:00
HAOYUatHZ
780d6b326f fix(bridge): fix typos (#321) 2023-02-23 19:41:12 +08:00
Xi Lin
92e70432e4 feat(bridge): only update gas price oracle for exceeding diff threshold (#319) 2023-02-23 19:14:29 +08:00
HAOYUatHZ
6f3eddf773 fix(config): fix typos (#315) 2023-02-23 12:50:09 +08:00
Péter Garamvölgyi
6fcd6b1b6c fix: Flush buffered writer (#314) 2023-02-22 18:01:19 +01:00
Xi Lin
9e2f2c3e9c fix(bridge): fix batch proposer (#312)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-02-22 16:28:29 +01:00
26 changed files with 394 additions and 143 deletions

View File

@@ -20,6 +20,11 @@
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000
},
"gas_oracle_config": {
"min_gas_price": 0,
"gas_price_diff": 50000
},
"finalize_batch_interval_sec": 0,
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
],
@@ -48,6 +53,11 @@
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000
},
"gas_oracle_config": {
"min_gas_price": 0,
"gas_price_diff": 50000
},
"finalize_batch_interval_sec": 0,
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
],
@@ -63,6 +73,7 @@
"batch_gas_threshold": 3000000,
"batch_tx_num_threshold": 44,
"batch_time_sec": 300,
"batch_commit_time_sec": 1200,
"batch_blocks_limit": 100,
"commit_tx_calldata_size_limit": 200000,
"public_input_config": {
@@ -73,6 +84,8 @@
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
}
}

View File

@@ -34,6 +34,8 @@ type BatchProposerConfig struct {
BatchGasThreshold uint64 `json:"batch_gas_threshold"`
// Time waited to generate a batch even if gas_threshold not met
BatchTimeSec uint64 `json:"batch_time_sec"`
// Time waited to commit batches even if the calldata size has not reached CommitTxCalldataSizeLimit
BatchCommitTimeSec uint64 `json:"batch_commit_time_sec"`
// Max number of blocks in a batch
BatchBlocksLimit uint64 `json:"batch_blocks_limit"`
// Commit tx calldata size limit in bytes, target to cap the gas use of commit tx at 2M gas

View File

@@ -42,15 +42,27 @@ type RelayerConfig struct {
// MessengerContractAddress store the scroll messenger contract address.
MessengerContractAddress common.Address `json:"messenger_contract_address"`
// GasPriceOracleContractAddress store the gas price oracle contract address.
GasPriceOracleContractAddress common.Address `json:"gas_proce_oracle_contract_address"`
GasPriceOracleContractAddress common.Address `json:"gas_price_oracle_contract_address"`
// sender config
SenderConfig *SenderConfig `json:"sender_config"`
// gas oracle config
GasOracleConfig *GasOracleConfig `json:"gas_oracle_config"`
// The minimum interval, in seconds, between finalize batch transactions.
FinalizeBatchIntervalSec uint64 `json:"finalize_batch_interval_sec"`
// The private key of the relayer
MessageSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
GasOracleSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
RollupSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
}
// GasOracleConfig The config for updating gas price oracle.
type GasOracleConfig struct {
// MinGasPrice store the minimum gas price to set.
MinGasPrice uint64 `json:"min_gas_price"`
// GasPriceDiff store the percentage of gas price difference.
GasPriceDiff uint64 `json:"gas_price_diff"`
}
// relayerConfigAlias RelayerConfig alias name
type relayerConfigAlias RelayerConfig
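The new gas_oracle_config block and the GasOracleConfig struct are interpreted against the fixed gasPriceDiffPrecision constant (1,000,000) introduced in the relayer files below, so the sample value gas_price_diff = 50000 means a 5% threshold. A minimal, self-contained sketch of that conversion in Go; diffThreshold is an illustrative helper name, not part of the repository:

package main

import "fmt"

const gasPriceDiffPrecision = 1000000 // same precision constant as in the relayers

// diffThreshold returns the absolute gas-price delta that must be exceeded
// before a new oracle update is sent, given the configured diff in parts per million.
func diffThreshold(lastGasPrice, gasPriceDiff uint64) uint64 {
    return lastGasPrice * gasPriceDiff / gasPriceDiffPrecision
}

func main() {
    // With gas_price_diff = 50000 (5%) and a last recorded price of 30 gwei,
    // the price has to move by at least 1.5 gwei to trigger an update.
    fmt.Println(diffThreshold(30_000_000_000, 50000)) // 1500000000
}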

View File

@@ -22,6 +22,12 @@ import (
"scroll-tech/bridge/sender"
)
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
)
// Layer1Relayer is responsible for
// 1. fetch pending L1Message from db
// 2. relay pending message to layer 2 node
@@ -43,6 +49,10 @@ type Layer1Relayer struct {
gasOracleCh <-chan *sender.Confirmation
l1GasOracleABI *abi.ABI
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
stopCh chan struct{}
}
@@ -59,10 +69,20 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKeys[0].PublicKey)
log.Error("new MessageSender failed", "main address", addr.String(), "err", err)
log.Error("new GasOracleSender failed", "main address", addr.String(), "err", err)
return nil, err
}
var minGasPrice uint64
var gasPriceDiff uint64
if cfg.GasOracleConfig != nil {
minGasPrice = cfg.GasOracleConfig.MinGasPrice
gasPriceDiff = cfg.GasOracleConfig.GasPriceDiff
} else {
minGasPrice = 0
gasPriceDiff = defaultGasPriceDiff
}
return &Layer1Relayer{
ctx: ctx,
db: db,
@@ -75,6 +95,9 @@ func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
gasOracleCh: gasOracleSender.ConfirmChan(),
l1GasOracleABI: bridge_abi.L1GasPriceOracleABI,
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
stopCh: make(chan struct{}),
}, nil
@@ -147,25 +170,31 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
block := blocks[0]
if block.GasOracleStatus == types.GasOraclePending {
baseFee := big.NewInt(int64(block.BaseFee))
data, err := r.l1GasOracleABI.Pack("setL1BaseFee", baseFee)
if err != nil {
log.Error("Failed to pack setL1BaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", block.BaseFee, "err", err)
return
}
hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
// last gas price is undefined, or (block.BaseFee >= minGasPrice && the diff threshold is exceeded)
if r.lastGasPrice == 0 || (block.BaseFee >= r.minGasPrice && (block.BaseFee >= r.lastGasPrice+expectedDelta || block.BaseFee <= r.lastGasPrice-expectedDelta)) {
baseFee := big.NewInt(int64(block.BaseFee))
data, err := r.l1GasOracleABI.Pack("setL1BaseFee", baseFee)
if err != nil {
log.Error("Failed to pack setL1BaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", block.BaseFee, "err", err)
return
}
return
}
err = r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return
hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
}
return
}
err = r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return
}
r.lastGasPrice = block.BaseFee
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee)
}
}
}
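Read in isolation, the condition added to ProcessGasPriceOracle sends an update on the very first observation (lastGasPrice == 0) and afterwards only when the new base fee clears minGasPrice and deviates from the last sent value by at least the configured fraction. A hedged restatement of that check as a standalone function (shouldUpdateGasPrice is an illustrative name):

package main

import "fmt"

const gasPriceDiffPrecision = 1000000

// shouldUpdateGasPrice mirrors the check above: always update when no price
// has been recorded yet, otherwise require the new value to clear the minimum
// and to deviate from the last sent price by at least expectedDelta.
func shouldUpdateGasPrice(last, current, minGasPrice, gasPriceDiff uint64) bool {
    if last == 0 {
        return true
    }
    expectedDelta := last * gasPriceDiff / gasPriceDiffPrecision
    return current >= minGasPrice &&
        (current >= last+expectedDelta || current <= last-expectedDelta)
}

func main() {
    // 5% threshold: a 4% move is ignored, a 6% move triggers an update.
    fmt.Println(shouldUpdateGasPrice(100, 104, 0, 50000)) // false
    fmt.Println(shouldUpdateGasPrice(100, 106, 0, 50000)) // true
}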

View File

@@ -64,6 +64,7 @@ type BatchProposer struct {
batchGasThreshold uint64
batchTxNumThreshold uint64
batchBlocksLimit uint64
batchCommitTimeSec uint64
commitCalldataSizeLimit uint64
batchDataBufferSizeLimit uint64
@@ -86,6 +87,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, rela
batchGasThreshold: cfg.BatchGasThreshold,
batchTxNumThreshold: cfg.BatchTxNumThreshold,
batchBlocksLimit: cfg.BatchBlocksLimit,
batchCommitTimeSec: cfg.BatchCommitTimeSec,
commitCalldataSizeLimit: cfg.CommitTxCalldataSizeLimit,
batchDataBufferSizeLimit: 100*cfg.CommitTxCalldataSizeLimit + 1*1024*1024, // @todo: determine the value.
proofGenerationFreq: cfg.ProofGenerationFreq,
@@ -124,6 +126,7 @@ func (p *BatchProposer) Start() {
case <-ticker.C:
p.tryProposeBatch()
p.tryCommitBatches()
}
}
}(ctx)
@@ -225,11 +228,16 @@ func (p *BatchProposer) tryProposeBatch() {
p.proposeBatch(blocks)
}
p.tryCommitBatches()
}
func (p *BatchProposer) tryCommitBatches() {
p.mutex.Lock()
defer p.mutex.Unlock()
if len(p.batchDataBuffer) == 0 {
return
}
// estimate the calldata length to determine whether to commit the pending batches
index := 0
commit := false
@@ -249,7 +257,7 @@ func (p *BatchProposer) tryCommitBatches() {
break
}
}
if !commit {
if !commit && p.batchDataBuffer[0].Timestamp()+p.batchCommitTimeSec > uint64(time.Now().Unix()) {
return
}
@@ -287,14 +295,13 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
return
}
var (
length = len(blocks)
gasUsed, txNum uint64
)
var gasUsed, txNum uint64
reachThreshold := false
// add blocks into batch until reach batchGasThreshold
for i, block := range blocks {
if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) {
blocks = blocks[:i]
reachThreshold = true
break
}
gasUsed += block.GasUsed
@@ -304,7 +311,7 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
// if too few gas gathered, but we don't want to halt, we then check the first block in the batch:
// if it's not old enough we will skip proposing the batch,
// otherwise we will still propose a batch
if length == len(blocks) && blocks[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) {
if !reachThreshold && blocks[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) {
return
}
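With batch_commit_time_sec in place, tryCommitBatches now commits either when the estimated calldata size justifies it or when the oldest buffered batch has been waiting longer than the configured limit. A minimal sketch of that decision, assuming the batch exposes its first-block timestamp as in the BatchData.Timestamp helper added later in this change set (shouldCommit is an illustrative name):

package main

import (
    "fmt"
    "time"
)

// shouldCommit reflects the new rule: commit when the size-based check fires,
// or when the oldest buffered batch is older than batchCommitTimeSec.
func shouldCommit(sizeTriggered bool, oldestBatchTimestamp, batchCommitTimeSec uint64) bool {
    if sizeTriggered {
        return true
    }
    return oldestBatchTimestamp+batchCommitTimeSec <= uint64(time.Now().Unix())
}

func main() {
    oldest := uint64(time.Now().Unix()) - 1500 // first batch buffered 25 minutes ago
    fmt.Println(shouldCommit(false, oldest, 1200)) // true: the 20-minute limit has passed
}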

View File

@@ -29,6 +29,12 @@ import (
"scroll-tech/bridge/utils"
)
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
)
// Layer2Relayer is responsible for
// 1. Committing and finalizing L2 blocks on L1
// 2. Relaying messages from L2 to L1
@@ -55,6 +61,10 @@ type Layer2Relayer struct {
gasOracleCh <-chan *sender.Confirmation
l2GasOracleABI *abi.ABI
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
// A list of processing message.
// key(string): confirmation ID, value(string): layer2 hash.
processingMessage sync.Map
@@ -91,6 +101,16 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db databa
return nil, err
}
var minGasPrice uint64
var gasPriceDiff uint64
if cfg.GasOracleConfig != nil {
minGasPrice = cfg.GasOracleConfig.MinGasPrice
gasPriceDiff = cfg.GasOracleConfig.GasPriceDiff
} else {
minGasPrice = 0
gasPriceDiff = defaultGasPriceDiff
}
return &Layer2Relayer{
ctx: ctx,
db: db,
@@ -109,6 +129,9 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db databa
gasOracleCh: gasOracleSender.ConfirmChan(),
l2GasOracleABI: bridge_abi.L2GasPriceOracleABI,
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
processingMessage: sync.Map{},
processingBatchesCommitment: sync.Map{},
@@ -238,25 +261,32 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
log.Error("Failed to fetch SuggestGasPrice from l2geth", "err", err)
return
}
suggestGasPriceUint64 := uint64(suggestGasPrice.Int64())
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
if err != nil {
log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "block.BaseFee", suggestGasPrice.Uint64(), "err", err)
return
}
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
// last gas price is undefined, or (suggestGasPriceUint64 >= minGasPrice && the diff threshold is exceeded)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64 <= r.lastGasPrice-expectedDelta)) {
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
if err != nil {
log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
return
}
return
}
err = r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
return
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
}
return
}
err = r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
return
}
r.lastGasPrice = suggestGasPriceUint64
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
}
}
}
@@ -324,22 +354,29 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
// batches are sorted by batch index in increasing order
batches, err := r.db.GetCommittedBatches(1)
batchHashes, err := r.db.GetCommittedBatches(1)
if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err)
return
}
if len(batches) == 0 {
if len(batchHashes) == 0 {
return
}
hash := batches[0]
hash := batchHashes[0]
// @todo add support to relay multiple batches
status, err := r.db.GetProvingStatusByHash(hash)
batches, err := r.db.GetBlockBatches(map[string]interface{}{"hash": hash}, "LIMIT 1")
if err != nil {
log.Error("GetProvingStatusByHash failed", "hash", hash, "err", err)
log.Error("Failed to fetch committed L2 batch", "hash", hash, "err", err)
return
}
if len(batches) == 0 {
log.Error("Unexpected result for GetBlockBatches", "hash", hash, "len", 0)
return
}
batch := batches[0]
status := batch.ProvingStatus
switch status {
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
@@ -362,6 +399,34 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
log.Info("Start to roll up zk proof", "hash", hash)
success := false
previousBatch, err := r.db.GetLatestFinalizingOrFinalizedBatch()
// skip submitting proof
if err == nil && uint64(batch.CreatedAt.Sub(*previousBatch.CreatedAt).Seconds()) < r.cfg.FinalizeBatchIntervalSec {
log.Info(
"Not enough time passed, skipping",
"hash", hash,
"createdAt", batch.CreatedAt,
"lastFinalizingHash", previousBatch.Hash,
"lastFinalizingStatus", previousBatch.RollupStatus,
"lastFinalizingCreatedAt", previousBatch.CreatedAt,
)
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
} else {
success = true
}
return
}
// handle unexpected db error
if err != nil && err.Error() != "sql: no rows in result set" {
log.Error("Failed to get latest finalized batch", "err", err)
return
}
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
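The new finalize_batch_interval_sec check throttles finalize transactions: when the batch being processed was created less than the configured interval after the latest finalizing or finalized batch (fetched via GetLatestFinalizingOrFinalizedBatch), its status is set to RollupFinalizationSkipped instead of rolling up a proof. A self-contained sketch of the timing rule (shouldSkipFinalize is an illustrative name):

package main

import (
    "fmt"
    "time"
)

// shouldSkipFinalize mirrors the interval check: skip finalization when the
// current batch was created too soon after the previous finalizing/finalized one.
func shouldSkipFinalize(current, previous time.Time, finalizeBatchIntervalSec uint64) bool {
    return uint64(current.Sub(previous).Seconds()) < finalizeBatchIntervalSec
}

func main() {
    previous := time.Now().Add(-10 * time.Minute)
    current := time.Now()
    // With a 30-minute interval, a batch created 10 minutes after the last one is skipped.
    fmt.Println(shouldSkipFinalize(current, previous, 1800)) // true
}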

View File

@@ -0,0 +1,69 @@
package sender
import (
"math/big"
"sync/atomic"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
)
func (s *Sender) estimateLegacyGas(auth *bind.TransactOpts, contract *common.Address, value *big.Int, input []byte) (*FeeData, error) {
gasPrice, err := s.client.SuggestGasPrice(s.ctx)
if err != nil {
return nil, err
}
gasLimit, err := s.estimateGasLimit(auth, contract, input, gasPrice, nil, nil, value)
if err != nil {
return nil, err
}
return &FeeData{
gasPrice: gasPrice,
gasLimit: gasLimit,
}, nil
}
func (s *Sender) estimateDynamicGas(auth *bind.TransactOpts, contract *common.Address, value *big.Int, input []byte) (*FeeData, error) {
gasTipCap, err := s.client.SuggestGasTipCap(s.ctx)
if err != nil {
return nil, err
}
baseFee := big.NewInt(0)
if feeGas := atomic.LoadUint64(&s.baseFeePerGas); feeGas != 0 {
baseFee.SetUint64(feeGas)
}
gasFeeCap := new(big.Int).Add(
gasTipCap,
new(big.Int).Mul(baseFee, big.NewInt(2)),
)
gasLimit, err := s.estimateGasLimit(auth, contract, input, nil, gasTipCap, gasFeeCap, value)
if err != nil {
return nil, err
}
return &FeeData{
gasLimit: gasLimit,
gasTipCap: gasTipCap,
gasFeeCap: gasFeeCap,
}, nil
}
func (s *Sender) estimateGasLimit(opts *bind.TransactOpts, contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, error) {
msg := ethereum.CallMsg{
From: opts.From,
To: contract,
GasPrice: gasPrice,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Value: value,
Data: input,
}
gasLimit, err := s.client.EstimateGas(s.ctx, msg)
if err != nil {
return 0, err
}
gasLimit = gasLimit * 15 / 10 // 50% extra gas to avoid out-of-gas errors
return gasLimit, nil
}
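For dynamic-fee (EIP-1559) transactions the new helper derives the fee cap from the cached base fee as gasFeeCap = gasTipCap + 2 * baseFeePerGas, which keeps the transaction viable even if the base fee doubles before inclusion, and pads the estimated gas limit by 50%. A standalone illustration of that arithmetic with big.Int, using example values:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    gasTipCap := big.NewInt(2_000_000_000) // 2 gwei priority fee
    baseFee := big.NewInt(15_000_000_000)  // 15 gwei cached base fee

    // gasFeeCap = tip + 2 * baseFee, as in estimateDynamicGas.
    gasFeeCap := new(big.Int).Add(gasTipCap, new(big.Int).Mul(baseFee, big.NewInt(2)))
    fmt.Println(gasFeeCap) // 32000000000

    // The estimated gas limit gets a 50% safety margin, as in estimateGasLimit.
    estimated := uint64(100000)
    fmt.Println(estimated * 15 / 10) // 150000
}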

View File

@@ -12,10 +12,8 @@ import (
"sync/atomic"
"time"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/math"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
@@ -153,37 +151,10 @@ func (s *Sender) NumberOfAccounts() int {
}
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte) (*FeeData, error) {
// estimate gas limit
gasLimit, err := s.client.EstimateGas(s.ctx, geth.CallMsg{From: auth.From, To: target, Value: value, Data: data})
if err != nil {
return nil, err
if s.config.TxType == DynamicFeeTxType {
return s.estimateDynamicGas(auth, target, value, data)
}
gasLimit = gasLimit * 15 / 10 // 50% extra gas to void out of gas error
// @todo change it when Scroll enable EIP1559
if s.config.TxType != DynamicFeeTxType {
// estimate gas price
var gasPrice *big.Int
gasPrice, err = s.client.SuggestGasPrice(s.ctx)
if err != nil {
return nil, err
}
return &FeeData{
gasPrice: gasPrice,
gasLimit: gasLimit,
}, nil
}
gasTipCap, err := s.client.SuggestGasTipCap(s.ctx)
if err != nil {
return nil, err
}
// Make sure feeCap is bigger than txpool's gas price. 1000000000 is l2geth's default pool.gas value.
baseFee := atomic.LoadUint64(&s.baseFeePerGas)
maxFeePerGas := math.BigMax(big.NewInt(int64(baseFee)), big.NewInt(1000000000))
return &FeeData{
gasFeeCap: math.BigMax(maxFeePerGas, gasTipCap),
gasTipCap: math.BigMin(maxFeePerGas, gasTipCap),
gasLimit: gasLimit,
}, nil
return s.estimateLegacyGas(auth, target, value, data)
}
// SendTransaction send a signed L2tL1 transaction.

View File

@@ -1,6 +1,6 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
TAGNAME = ''
pipeline {
agent any
options {
@@ -41,16 +41,38 @@ pipeline {
if (TAGNAME == ""){
return;
}
sh "docker login --username=${dockerUser} --password=${dockerPassword}"
sh "make -C bridge docker"
sh "make -C coordinator docker"
sh "docker tag scrolltech/bridge:latest scrolltech/bridge:${TAGNAME}"
sh "docker tag scrolltech/coordinator:latest scrolltech/coordinator:${TAGNAME}"
sh "docker push scrolltech/bridge:${TAGNAME}"
sh "docker push scrolltech/coordinator:${TAGNAME}"
sh "docker login --username=$dockerUser --password=$dockerPassword"
catchError(buildResult: 'SUCCESS', stageResult: 'SUCCESS') {
script {
try {
sh "docker manifest inspect scrolltech/bridge:$TAGNAME > /dev/null"
} catch (e) {
// only build if the tag does not exist yet
//sh "docker login --username=${dockerUser} --password=${dockerPassword}"
sh "make -C bridge docker"
sh "docker tag scrolltech/bridge:latest scrolltech/bridge:${TAGNAME}"
sh "docker push scrolltech/bridge:${TAGNAME}"
throw e
}
}
}
catchError(buildResult: 'SUCCESS', stageResult: 'SUCCESS') {
script {
try {
sh "docker manifest inspect scrolltech/coordinator:$TAGNAME > /dev/null"
} catch (e) {
// only build if the tag does not exist yet
//sh "docker login --username=${dockerUser} --password=${dockerPassword}"
sh "make -C coordinator docker"
sh "docker tag scrolltech/coordinator:latest scrolltech/coordinator:${TAGNAME}"
sh "docker push scrolltech/coordinator:${TAGNAME}"
throw e
}
}
}
}
}
}
}
}
}
}

View File

@@ -368,9 +368,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae"
[[package]]
name = "bounded-collections"
version = "0.1.5"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a071c348a5ef6da1d3a87166b408170b46002382b1dda83992b5c2208cefb370"
checksum = "de2aff4807e40f478132150d80b031f2461d88f061851afcab537d7600c24120"
dependencies = [
"log",
"parity-scale-codec",
@@ -2902,9 +2902,9 @@ dependencies = [
[[package]]
name = "parity-scale-codec"
version = "3.4.0"
version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac"
checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed"
dependencies = [
"arrayvec 0.7.2",
"bitvec 1.0.1",
@@ -5014,7 +5014,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=goerli-0215#d0d3338663a0b3eee51e9d044ab96a7899d70252"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=goerli-0215#1f7a3c7da2370860087555a11346bd5d96f609fd"
dependencies = [
"base64 0.13.0",
"blake2",
@@ -5682,7 +5682,7 @@ dependencies = [
[[package]]
name = "zkevm"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=goerli-0215#d0d3338663a0b3eee51e9d044ab96a7899d70252"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=goerli-0215#1f7a3c7da2370860087555a11346bd5d96f609fd"
dependencies = [
"anyhow",
"blake2",

View File

@@ -7,6 +7,7 @@ import (
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
@@ -38,6 +39,14 @@ type BatchData struct {
piCfg *PublicInputHashConfig
}
// Timestamp returns the timestamp of the first block in the BatchData.
func (b *BatchData) Timestamp() uint64 {
if len(b.Batch.Blocks) == 0 {
return 0
}
return b.Batch.Blocks[0].Timestamp
}
// Hash calculates the hash of this batch.
func (b *BatchData) Hash() *common.Hash {
if b.hash != nil {
@@ -140,6 +149,7 @@ func NewBatchData(parentBatch *BlockBatch, blockTraces []*types.BlockTrace, piCf
// fill in RLP-encoded transactions
for _, txData := range trace.Transactions {
data, _ := hexutil.Decode(txData.Data)
// right now we only support legacy tx
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
@@ -147,15 +157,12 @@ func NewBatchData(parentBatch *BlockBatch, blockTraces []*types.BlockTrace, piCf
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: []byte(txData.Data),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
var rlpBuf bytes.Buffer
writer := bufio.NewWriter(&rlpBuf)
_ = tx.EncodeRLP(writer)
rlpTxData := rlpBuf.Bytes()
rlpTxData, _ := tx.MarshalBinary()
var txLen [4]byte
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
_, _ = batchTxDataWriter.Write(txLen[:])
@@ -175,6 +182,10 @@ func NewBatchData(parentBatch *BlockBatch, blockTraces []*types.BlockTrace, piCf
}
}
if err := batchTxDataWriter.Flush(); err != nil {
panic("Buffered I/O flush failed")
}
batch.L2Transactions = batchTxDataBuf.Bytes()
batchData.piCfg = piCfg
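The RLP step now goes through types.Transaction.MarshalBinary, which returns the canonical encoding (plain RLP for a legacy transaction) without the buffered-writer round trip, and each encoded transaction is framed with a 4-byte big-endian length prefix before being appended to the batch calldata. A hedged sketch of that framing using only the standard library (appendFramed is an illustrative helper, and the payload bytes are placeholders rather than a real transaction):

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// appendFramed writes a 4-byte big-endian length prefix followed by the
// encoded transaction bytes, matching the layout assembled in NewBatchData.
func appendFramed(buf *bytes.Buffer, rlpTxData []byte) {
    var txLen [4]byte
    binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
    buf.Write(txLen[:])
    buf.Write(rlpTxData)
}

func main() {
    var batchTxData bytes.Buffer
    appendFramed(&batchTxData, []byte{0xf8, 0x6b})
    fmt.Println(batchTxData.Len()) // 6: 4-byte prefix + 2 payload bytes
}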

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "alpha-v1.4"
var tag = "alpha-v1.16"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -262,7 +262,9 @@ contract L2ScrollMessenger is ScrollMessengerBase, PausableUpgradeable, IL2Scrol
bytes memory _message,
bytes32 _xDomainCalldataHash
) internal {
// @todo check `_to` address to avoid attack.
// @todo check more `_to` addresses to avoid attacks.
require(_to != messageQueue, "Forbid to call message queue");
require(_to != address(this), "Forbid to call self");
// @note This usually will never happen, just in case.
require(_from != xDomainMessageSender, "Invalid message sender");

View File

@@ -237,6 +237,7 @@ contract L2CustomERC20GatewayTest is L2GatewayTestBase {
) public {
// blacklist some addresses
hevm.assume(recipient != address(0));
hevm.assume(recipient != address(gateway));
gateway.updateTokenMapping(address(l2Token), address(l1Token));

View File

@@ -0,0 +1,50 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import { DSTestPlus } from "solmate/test/utils/DSTestPlus.sol";
import { L1BlockContainer } from "../L2/predeploys/L1BlockContainer.sol";
import { L1GasPriceOracle } from "../L2/predeploys/L1GasPriceOracle.sol";
import { L2MessageQueue } from "../L2/predeploys/L2MessageQueue.sol";
import { Whitelist } from "../L2/predeploys/Whitelist.sol";
import { L1ScrollMessenger } from "../L1/L1ScrollMessenger.sol";
import { L2ScrollMessenger } from "../L2/L2ScrollMessenger.sol";
contract L2ScrollMessengerTest is DSTestPlus {
L1ScrollMessenger internal l1Messenger;
address internal feeVault;
Whitelist private whitelist;
L2ScrollMessenger internal l2Messenger;
L1BlockContainer internal l1BlockContainer;
L2MessageQueue internal l2MessageQueue;
L1GasPriceOracle internal l1GasOracle;
function setUp() public {
// Deploy L1 contracts
l1Messenger = new L1ScrollMessenger();
// Deploy L2 contracts
whitelist = new Whitelist(address(this));
l1BlockContainer = new L1BlockContainer(address(this));
l2MessageQueue = new L2MessageQueue(address(this));
l1GasOracle = new L1GasPriceOracle(address(this));
l2Messenger = new L2ScrollMessenger(address(l1BlockContainer), address(l1GasOracle), address(l2MessageQueue));
// Initialize L2 contracts
l2Messenger.initialize(address(l1Messenger), feeVault);
l2MessageQueue.initialize();
l2MessageQueue.updateMessenger(address(l2Messenger));
l1GasOracle.updateWhitelist(address(whitelist));
}
function testForbidCallFromL1() external {
hevm.expectRevert("Forbid to call message queue");
l2Messenger.relayMessage(address(this), address(l2MessageQueue), 0, 0, new bytes(0));
hevm.expectRevert("Forbid to call self");
l2Messenger.relayMessage(address(this), address(l2Messenger), 0, 0, new bytes(0));
}
}

View File

@@ -269,6 +269,7 @@ contract L2StandardERC20GatewayTest is L2GatewayTestBase {
) public {
// blacklist some addresses
hevm.assume(recipient != address(0));
hevm.assume(recipient != address(gateway));
amount = bound(amount, 1, l2Token.balanceOf(address(this)));

View File

@@ -223,6 +223,7 @@ contract L2WETHGatewayTest is L2GatewayTestBase {
) public {
// blacklist some addresses
hevm.assume(recipient != address(0));
hevm.assume(recipient != address(gateway));
amount = bound(amount, 1, l2weth.balanceOf(address(this)));

View File

@@ -12,7 +12,9 @@
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"l2_config": {
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc"

View File

@@ -12,8 +12,8 @@ type DBConfig struct {
DSN string `json:"dsn"`
DriverName string `json:"driver_name"`
MaxOpenNum int `json:"maxOpenNum" default:"200"`
MaxIdleNum int `json:"maxIdleNum" default:"20"`
MaxOpenNum int `json:"maxOpenNum"`
MaxIdleNum int `json:"maxIdleNum"`
}
// NewConfig returns a new instance of Config.

View File

@@ -1,4 +1,6 @@
{
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",
"driver_name": "postgres"
"driver_name": "postgres",
"maxOpenNum": 200,
"maxIdleNum": 20
}

View File

@@ -194,6 +194,15 @@ func (o *blockBatchOrm) GetLatestFinalizedBatch() (*types.BlockBatch, error) {
return batch, nil
}
func (o *blockBatchOrm) GetLatestFinalizingOrFinalizedBatch() (*types.BlockBatch, error) {
row := o.db.QueryRowx(`select * from block_batch where index = (select max(index) from block_batch where rollup_status = $1 or rollup_status = $2);`, types.RollupFinalizing, types.RollupFinalized)
batch := &types.BlockBatch{}
if err := row.StructScan(batch); err != nil {
return nil, err
}
return batch, nil
}
func (o *blockBatchOrm) GetCommittedBatches(limit uint64) ([]string, error) {
rows, err := o.db.Queryx(`SELECT hash FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, types.RollupCommitted, limit)
if err != nil {

View File

@@ -63,6 +63,7 @@ type BlockBatchOrm interface {
GetLatestBatch() (*types.BlockBatch, error)
GetLatestCommittedBatch() (*types.BlockBatch, error)
GetLatestFinalizedBatch() (*types.BlockBatch, error)
GetLatestFinalizingOrFinalizedBatch() (*types.BlockBatch, error)
UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error
UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error
UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error

View File

@@ -38,7 +38,7 @@ func NewOrmFactory(cfg *DBConfig) (OrmFactory, error) {
return nil, err
}
db.SetMaxIdleConns(cfg.MaxOpenNum)
db.SetMaxOpenConns(cfg.MaxOpenNum)
db.SetMaxIdleConns(cfg.MaxIdleNum)
if err = db.Ping(); err != nil {
return nil, err

View File

@@ -241,7 +241,7 @@ func (r *Roller) prove() error {
}
} else {
// when the roller has more than 3 times panic,
// it will omit to prove the task, submit StatusProofError and then Pop the task.
// it will omit to prove the task, submit StatusProofError and then Delete the task.
proofMsg = &message.ProofDetail{
Status: message.StatusProofError,
Error: "zk proving panic",
@@ -251,7 +251,7 @@ func (r *Roller) prove() error {
}
defer func() {
_, err = r.stack.Pop()
err = r.stack.Delete(task.Task.ID)
if err != nil {
log.Error("roller stack pop failed!", "err", err)
}

View File

@@ -81,28 +81,12 @@ func (s *Stack) Peek() (*ProvingTask, error) {
return traces, nil
}
// Pop pops the proving-task on the top of Stack.
func (s *Stack) Pop() (*ProvingTask, error) {
var value []byte
if err := s.Update(func(tx *bbolt.Tx) error {
var key []byte
// Delete removes the proving-task with the given taskID from the Stack.
func (s *Stack) Delete(taskID string) error {
return s.Update(func(tx *bbolt.Tx) error {
bu := tx.Bucket(bucket)
c := bu.Cursor()
key, value = c.Last()
return bu.Delete(key)
}); err != nil {
return nil, err
}
if len(value) == 0 {
return nil, ErrEmpty
}
task := &ProvingTask{}
err := json.Unmarshal(value, task)
if err != nil {
return nil, err
}
return task, nil
return bu.Delete([]byte(taskID))
})
}
// UpdateTimes updates the roller prove times of the proving task.
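Replacing Pop with Peek plus Delete means a task is only removed from the bbolt-backed stack after the roller has handled it, so a crash mid-proof no longer loses the task. A hedged sketch of the intended peek-then-delete loop; provingTask and taskStack below are simplified in-memory stand-ins for the roller's persistent types:

package main

import "fmt"

// provingTask is a simplified stand-in for the roller's ProvingTask.
type provingTask struct {
    ID    string
    Times int
}

// taskStack is a simplified in-memory stand-in for the bbolt-backed Stack.
type taskStack struct{ tasks []provingTask }

// Peek returns the most recently pushed task without removing it.
func (s *taskStack) Peek() *provingTask {
    if len(s.tasks) == 0 {
        return nil
    }
    return &s.tasks[len(s.tasks)-1]
}

// Delete removes the task with the given ID, mirroring the new Stack.Delete.
func (s *taskStack) Delete(id string) {
    for i, t := range s.tasks {
        if t.ID == id {
            s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
            return
        }
    }
}

func main() {
    s := &taskStack{tasks: []provingTask{{ID: "1"}, {ID: "2"}}}
    for t := s.Peek(); t != nil; t = s.Peek() {
        fmt.Println("proving task", t.ID) // prove first, while the task stays persisted
        s.Delete(t.ID)                    // remove only once the result has been handled
    }
}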

View File

@@ -36,10 +36,12 @@ func TestStack(t *testing.T) {
}
for i := 2; i >= 0; i-- {
var pop *ProvingTask
pop, err = s.Pop()
var peek *ProvingTask
peek, err = s.Peek()
assert.NoError(t, err)
assert.Equal(t, strconv.Itoa(i), peek.Task.ID)
err = s.Delete(strconv.Itoa(i))
assert.NoError(t, err)
assert.Equal(t, strconv.Itoa(i), pop.Task.ID)
}
// test times
@@ -52,18 +54,13 @@ func TestStack(t *testing.T) {
}
err = s.Push(task)
assert.NoError(t, err)
pop, err := s.Pop()
assert.NoError(t, err)
err = s.Push(pop)
assert.NoError(t, err)
peek, err := s.Peek()
assert.NoError(t, err)
pop2, err := s.Pop()
assert.Equal(t, 0, peek.Times)
err = s.UpdateTimes(peek, 3)
assert.NoError(t, err)
assert.Equal(t, peek, pop2)
s.UpdateTimes(pop2, 1)
peek2, err := s.Peek()
assert.NoError(t, err)
assert.Equal(t, 1, pop2.Times)
assert.Equal(t, 3, peek2.Times)
}