Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits: maskpp/res ... goerli (7 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 97dded9619 | |
| | f48762bf33 | |
| | 42190feb6c | |
| | 56080204c5 | |
| | 2674dfaf69 | |
| | 5bffb151a3 | |
| | fadaec7add | |
.gitignore (vendored)
@@ -3,7 +3,6 @@ assets/params*
assets/seed
coverage.txt
build/bin
*.integration.txt

# misc
sftp-config.json
@@ -1,7 +1,6 @@
package app

import (
    "context"
    "fmt"
    "os"
    "os/signal"
@@ -11,7 +10,6 @@ import (

    "scroll-tech/database"

    "scroll-tech/common/metrics"
    "scroll-tech/common/utils"
    "scroll-tech/common/version"

@@ -51,10 +49,7 @@ func action(ctx *cli.Context) error {
        log.Crit("failed to load config file", "config file", cfgFile, "error", err)
    }

    // Start metrics server.
    metrics.Serve(context.Background(), ctx)

    // Init db connection.
    // init db connection
    var ormFactory database.OrmFactory
    if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
        log.Crit("failed to init db connection", "err", err)
@@ -1,6 +1,6 @@
{
  "l1_config": {
    "confirmations": "0x6",
    "confirmations": 6,
    "endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
    "l1_messenger_address": "0x0000000000000000000000000000000000000000",
    "rollup_contract_address": "0x0000000000000000000000000000000000000000",
@@ -11,13 +11,12 @@
    "endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
    "check_pending_time": 3,
    "escalate_blocks": 100,
    "confirmations": "0x1",
    "confirmations": 1,
    "escalate_multiple_num": 11,
    "escalate_multiple_den": 10,
    "max_gas_price": 10000000000,
    "tx_type": "LegacyTx",
    "min_balance": 100000000000000000000,
    "pending_limit": 500
    "tx_type": "AccessListTx",
    "min_balance": 100000000000000000000
    },
    "message_sender_private_keys": [
      "1212121212121212121212121212121212121212121212121212121212121212"
@@ -25,7 +24,7 @@
    }
  },
  "l2_config": {
    "confirmations": "0x1",
    "confirmations": 1,
    "endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
    "l2_messenger_address": "0x0000000000000000000000000000000000000000",
    "relayer_config": {
@@ -35,13 +34,12 @@
      "endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
      "check_pending_time": 10,
      "escalate_blocks": 100,
      "confirmations": "0x6",
      "confirmations": 6,
      "escalate_multiple_num": 11,
      "escalate_multiple_den": 10,
      "max_gas_price": 10000000000,
      "tx_type": "LegacyTx",
      "min_balance": 100000000000000000000,
      "pending_limit": 500
      "tx_type": "DynamicFeeTx",
      "min_balance": 100000000000000000000
    },
    "message_sender_private_keys": [
      "1212121212121212121212121212121212121212121212121212121212121212"
@@ -1,14 +1,11 @@
package config

import (
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/rpc"
)
import "github.com/scroll-tech/go-ethereum/common"

// L1Config loads l1eth configuration items.
type L1Config struct {
    // Confirmations block height confirmations number.
    Confirmations rpc.BlockNumber `json:"confirmations"`
    Confirmations uint64 `json:"confirmations"`
    // l1 eth node url.
    Endpoint string `json:"endpoint"`
    // The start height to sync event from layer 1
@@ -3,15 +3,13 @@ package config
import (
    "encoding/json"

    "github.com/scroll-tech/go-ethereum/rpc"

    "github.com/scroll-tech/go-ethereum/common"
)

// L2Config loads l2geth configuration items.
type L2Config struct {
    // Confirmations block height confirmations number.
    Confirmations rpc.BlockNumber `json:"confirmations"`
    Confirmations uint64 `json:"confirmations"`
    // l2geth node url.
    Endpoint string `json:"endpoint"`
    // The messenger contract address deployed on layer 2 chain.
@@ -8,7 +8,6 @@ import (

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/rpc"
)

// SenderConfig The config for transaction sender
@@ -20,7 +19,7 @@ type SenderConfig struct {
    // The number of blocks to wait to escalate increase gas price of the transaction.
    EscalateBlocks uint64 `json:"escalate_blocks"`
    // The gap number between a block be confirmed and the latest block.
    Confirmations rpc.BlockNumber `json:"confirmations"`
    Confirmations uint64 `json:"confirmations"`
    // The numerator of gas price escalate multiple.
    EscalateMultipleNum uint64 `json:"escalate_multiple_num"`
    // The denominator of gas price escalate multiple.
@@ -30,8 +29,7 @@ type SenderConfig struct {
    // The transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
    TxType string `json:"tx_type"`
    // The min balance set for check and set balance for sender's accounts.
    MinBalance *big.Int `json:"min_balance,omitempty"`
    PendingLimit int64 `json:"pending_limit,omitempty"`
    MinBalance *big.Int `json:"min_balance,omitempty"`
}

// RelayerConfig loads relayer configuration items.
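Across this diff, the `Confirmations` field of `L1Config`, `L2Config`, and `SenderConfig` switches between `rpc.BlockNumber` and a plain `uint64`, which is also why the JSON values in the config switch between a hex string (`"0x6"`) and a plain integer (`6`). A minimal, standalone sketch (illustrative struct and values, not code from this repo) of why the two encodings are not interchangeable for a `uint64` field:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// senderConfig mimics the shape of the config struct with a plain uint64 field.
type senderConfig struct {
	Confirmations uint64 `json:"confirmations"`
}

func main() {
	// A plain JSON number decodes into uint64 without trouble.
	var ok senderConfig
	if err := json.Unmarshal([]byte(`{"confirmations": 6}`), &ok); err != nil {
		fmt.Println("unexpected error:", err)
	}
	fmt.Println("plain integer decodes fine:", ok.Confirmations)

	// A quoted hex string like "0x6" is rejected by encoding/json for uint64.
	var bad senderConfig
	err := json.Unmarshal([]byte(`{"confirmations": "0x6"}`), &bad)
	fmt.Println("hex string fails for uint64:", err)
}
```

The go-ethereum `rpc.BlockNumber` type, by contrast, unmarshals quoted hex quantities and block tags such as `"latest"`, which is why the hex form appears on the other side of the diff.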
@@ -5,7 +5,7 @@ go 1.18
require (
    github.com/iden3/go-iden3-crypto v0.0.13
    github.com/orcaman/concurrent-map v1.0.0
    github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
    github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
    github.com/stretchr/testify v1.8.0
    github.com/urfave/cli/v2 v2.10.2
    golang.org/x/sync v0.1.0
@@ -32,14 +32,14 @@ require (
    github.com/rjeczalik/notify v0.9.1 // indirect
    github.com/rogpeppe/go-internal v1.8.1 // indirect
    github.com/russross/blackfriday/v2 v2.1.0 // indirect
    github.com/scroll-tech/zktrie v0.4.3 // indirect
    github.com/scroll-tech/zktrie v0.3.1 // indirect
    github.com/shirou/gopsutil v3.21.11+incompatible // indirect
    github.com/tklauser/go-sysconf v0.3.10 // indirect
    github.com/tklauser/numcpus v0.4.0 // indirect
    github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
    github.com/yusufpapurcu/wmi v1.2.2 // indirect
    golang.org/x/crypto v0.6.0 // indirect
    golang.org/x/sys v0.5.0 // indirect
    golang.org/x/crypto v0.5.0 // indirect
    golang.org/x/sys v0.4.0 // indirect
    gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
)
@@ -350,10 +350,11 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -424,8 +425,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -540,8 +541,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -553,7 +554,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -26,7 +26,7 @@ func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*B
        return nil, err
    }

    relayer, err := NewLayer1Relayer(ctx, orm, cfg.RelayerConfig)
    relayer, err := NewLayer1Relayer(ctx, client, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
    if err != nil {
        return nil, err
    }
@@ -3,17 +3,16 @@ package l1
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
// not sure if this will make problems when relay with l1geth
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"modernc.org/mathutil"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/database/orm"
|
||||
|
||||
@@ -30,6 +29,7 @@ import (
|
||||
// @todo It's better to be triggered by watcher.
|
||||
type Layer1Relayer struct {
|
||||
ctx context.Context
|
||||
client *ethclient.Client
|
||||
sender *sender.Sender
|
||||
|
||||
db orm.L1MessageOrm
|
||||
@@ -43,7 +43,7 @@ type Layer1Relayer struct {
|
||||
}
|
||||
|
||||
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
|
||||
func NewLayer1Relayer(ctx context.Context, db orm.L1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
|
||||
func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1ConfirmNum int64, db orm.L1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
|
||||
l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
|
||||
if err != nil {
|
||||
log.Warn("new L2MessengerABI failed", "err", err)
|
||||
@@ -57,97 +57,16 @@ func NewLayer1Relayer(ctx context.Context, db orm.L1MessageOrm, cfg *config.Rela
|
||||
return nil, err
|
||||
}
|
||||
|
||||
layer1 := &Layer1Relayer{
|
||||
return &Layer1Relayer{
|
||||
ctx: ctx,
|
||||
client: ethClient,
|
||||
sender: sender,
|
||||
db: db,
|
||||
l2MessengerABI: l2MessengerABI,
|
||||
cfg: cfg,
|
||||
stopCh: make(chan struct{}),
|
||||
confirmationCh: sender.ConfirmChan(),
|
||||
}
|
||||
|
||||
// Deal with broken transactions.
|
||||
if err = layer1.prepare(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return layer1, nil
|
||||
}
|
||||
|
||||
// prepare to run check logic and until it's finished.
|
||||
func (r *Layer1Relayer) prepare(ctx context.Context) error {
|
||||
go func(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case cfm := <-r.confirmationCh:
|
||||
if !cfm.IsSuccessful {
|
||||
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
|
||||
} else {
|
||||
// @todo handle db error
|
||||
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, orm.MsgConfirmed, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
|
||||
}
|
||||
log.Info("transaction confirmed in layer2", "confirmation", cfm)
|
||||
}
|
||||
}
|
||||
}
|
||||
}(ctx)
|
||||
|
||||
if err := r.checkSubmittedMessages(); err != nil {
|
||||
log.Error("failed to init layer1 submitted tx", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait forever util sender is empty.
|
||||
utils.TryTimes(-1, func() bool {
|
||||
return r.sender.PendingCount() == 0
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Layer1Relayer) checkSubmittedMessages() error {
|
||||
var blockNumber uint64
|
||||
BEGIN:
|
||||
msgs, err := r.db.GetL1Messages(
|
||||
map[string]interface{}{"status": orm.MsgSubmitted},
|
||||
fmt.Sprintf("AND height > %d", blockNumber),
|
||||
fmt.Sprintf("ORDER BY height ASC LIMIT %d", 100),
|
||||
)
|
||||
if err != nil || len(msgs) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
for msg := msgs[0]; len(msgs) > 0; { //nolint:staticcheck
|
||||
// If pending txs pool is full, wait a while and retry.
|
||||
if r.sender.IsFull() {
|
||||
log.Warn("layer1 sender pending tx reaches pending limit")
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
continue
|
||||
}
|
||||
msg, msgs = msgs[0], msgs[1:]
|
||||
|
||||
blockNumber = mathutil.MaxUint64(blockNumber, msg.Height)
|
||||
|
||||
data, err := r.packRelayMessage(msg)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
err = r.sender.LoadOrSendTx(
|
||||
common.HexToHash(msg.Layer2Hash),
|
||||
msg.MsgHash,
|
||||
&r.cfg.MessengerContractAddress,
|
||||
big.NewInt(0),
|
||||
data,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error("failed to load or send l1 submitted tx", "msg hash", msg.MsgHash, "err", err)
|
||||
}
|
||||
}
|
||||
goto BEGIN
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
|
||||
@@ -173,7 +92,7 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer1Relayer) packRelayMessage(msg *orm.L1Message) ([]byte, error) {
|
||||
func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
|
||||
// @todo add support to relay multiple messages
|
||||
from := common.HexToAddress(msg.Sender)
|
||||
target := common.HexToAddress(msg.Target)
|
||||
@@ -191,17 +110,10 @@ func (r *Layer1Relayer) packRelayMessage(msg *orm.L1Message) ([]byte, error) {
|
||||
if err != nil {
|
||||
log.Error("Failed to pack relayMessage", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
|
||||
// TODO: need to skip this message by changing its status to MsgError
|
||||
return nil, err
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
|
||||
data, err := r.packRelayMessage(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
|
||||
|
||||
hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, 0)
|
||||
if err != nil && err.Error() == "execution reverted: Message expired" {
|
||||
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgExpired)
|
||||
}
|
||||
@@ -222,6 +134,8 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
|
||||
|
||||
// Start the relayer process
|
||||
func (r *Layer1Relayer) Start() {
|
||||
log.Info("Starting l1/relayer")
|
||||
|
||||
go func() {
|
||||
// trigger by timer
|
||||
ticker := time.NewTicker(3 * time.Second)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
    "context"
    "testing"

    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/stretchr/testify/assert"

    "scroll-tech/database/migrate"
@@ -19,7 +20,10 @@ func testCreateNewL1Relayer(t *testing.T) {
    assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
    defer db.Close()

    relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
    client, err := ethclient.Dial(l1gethImg.Endpoint())
    assert.NoError(t, err)

    relayer, err := NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
    assert.NoError(t, err)
    defer relayer.Stop()
@@ -11,8 +11,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/orm"
|
||||
@@ -21,10 +19,6 @@ import (
|
||||
"scroll-tech/bridge/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
bridgeL1MsgSyncHeightGauge = metrics.NewRegisteredGauge("bridge/l1/msg/sync/height", nil)
|
||||
)
|
||||
|
||||
type relayedMessage struct {
|
||||
msgHash common.Hash
|
||||
txHash common.Hash
|
||||
@@ -44,7 +38,7 @@ type Watcher struct {
|
||||
db database.OrmFactory
|
||||
|
||||
// The number of new blocks to wait for a block to be confirmed
|
||||
confirmations rpc.BlockNumber
|
||||
confirmations uint64
|
||||
messengerAddress common.Address
|
||||
messengerABI *abi.ABI
|
||||
|
||||
@@ -59,7 +53,7 @@ type Watcher struct {
|
||||
|
||||
// NewWatcher returns a new instance of Watcher. The instance will be not fully prepared,
|
||||
// and still needs to be finalized and ran by calling `watcher.Start`.
|
||||
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress common.Address, rollupAddress common.Address, db database.OrmFactory) *Watcher {
|
||||
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations uint64, messengerAddress common.Address, rollupAddress common.Address, db database.OrmFactory) *Watcher {
|
||||
savedHeight, err := db.GetLayer1LatestWatchedHeight()
|
||||
if err != nil {
|
||||
log.Warn("Failed to fetch height from db", "err", err)
|
||||
@@ -87,6 +81,8 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
|
||||
|
||||
// Start the Watcher module.
|
||||
func (w *Watcher) Start() {
|
||||
log.Info("Starting l1/watcher")
|
||||
|
||||
go func() {
|
||||
ticker := time.NewTicker(10 * time.Second)
|
||||
defer ticker.Stop()
|
||||
@@ -97,13 +93,12 @@ func (w *Watcher) Start() {
                return

            default:
                number, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
                blockNumber, err := w.client.BlockNumber(w.ctx)
                if err != nil {
                    log.Error("failed to get block number", "err", err)
                    log.Error("Failed to get block number", "err", err)
                    continue
                }

                if err := w.FetchContractEvent(number); err != nil {
                if err := w.FetchContractEvent(blockNumber); err != nil {
                    log.Error("Failed to fetch bridge contract", "err", err)
                }
            }
@@ -125,7 +120,7 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
    }()

    fromBlock := int64(w.processedMsgHeight) + 1
    toBlock := int64(blockHeight)
    toBlock := int64(blockHeight) - int64(w.confirmations)

    for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
        to := from + contractEventsBlocksFetchLimit - 1
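With `confirmations` now a plain count, the watcher in this hunk fetches the raw chain head via `w.client.BlockNumber` and applies the confirmation depth itself when choosing `toBlock`, instead of relying on `utils.GetLatestConfirmedBlockNumber` to return an already-confirmed height. A small standalone sketch of that arithmetic (the helper name and numbers are illustrative, not from the repo):

```go
package main

import "fmt"

// latestConfirmedBlock mirrors `toBlock := int64(blockHeight) - int64(w.confirmations)`
// from the new FetchContractEvent path: only blocks at least `confirmations` behind the
// raw chain head are scanned for bridge events.
func latestConfirmedBlock(head, confirmations uint64) int64 {
	return int64(head) - int64(confirmations)
}

func main() {
	// With head 105 and 6 confirmations, events are fetched up to block 99.
	fmt.Println(latestConfirmedBlock(105, 6))
}
```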
@@ -158,7 +153,6 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
|
||||
}
|
||||
if len(logs) == 0 {
|
||||
w.processedMsgHeight = uint64(to)
|
||||
bridgeL1MsgSyncHeightGauge.Update(to)
|
||||
continue
|
||||
}
|
||||
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
|
||||
@@ -223,7 +217,6 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
|
||||
}
|
||||
|
||||
w.processedMsgHeight = uint64(to)
|
||||
bridgeL1MsgSyncHeightGauge.Update(to)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -2,20 +2,28 @@ package l2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
// not sure if this will make problems when relay with l1geth
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"modernc.org/mathutil"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/orm"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/config"
|
||||
"scroll-tech/bridge/sender"
|
||||
"scroll-tech/bridge/utils"
|
||||
)
|
||||
|
||||
// Layer2Relayer is responsible for
|
||||
@@ -30,11 +38,13 @@ type Layer2Relayer struct {
|
||||
db database.OrmFactory
|
||||
cfg *config.RelayerConfig
|
||||
|
||||
messageSender *sender.Sender
|
||||
messageCh <-chan *sender.Confirmation
|
||||
messageSender *sender.Sender
|
||||
messageCh <-chan *sender.Confirmation
|
||||
l1MessengerABI *abi.ABI
|
||||
|
||||
rollupSender *sender.Sender
|
||||
rollupCh <-chan *sender.Confirmation
|
||||
l1RollupABI *abi.ABI
|
||||
|
||||
// A list of processing message.
|
||||
// key(string): confirmation ID, value(string): layer2 hash.
|
||||
@@ -66,103 +76,375 @@ func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
|
||||
return nil, err
|
||||
}
|
||||
|
||||
layer2 := &Layer2Relayer{
|
||||
return &Layer2Relayer{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
messageSender: messageSender,
|
||||
messageCh: messageSender.ConfirmChan(),
|
||||
l1MessengerABI: bridge_abi.L1MessengerMetaABI,
|
||||
rollupSender: rollupSender,
|
||||
rollupCh: rollupSender.ConfirmChan(),
|
||||
l1RollupABI: bridge_abi.RollupMetaABI,
|
||||
cfg: cfg,
|
||||
processingMessage: sync.Map{},
|
||||
processingCommitment: sync.Map{},
|
||||
processingFinalization: sync.Map{},
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
// Deal with broken transactions.
|
||||
if err = layer2.prepare(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return layer2, nil
|
||||
}, nil
|
||||
}
|
||||
|
||||
// prepare to run check logic and until it's finished.
|
||||
func (r *Layer2Relayer) prepare(ctx context.Context) error {
|
||||
go func(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case confirmation := <-r.messageCh:
|
||||
r.handleConfirmation(confirmation)
|
||||
case confirmation := <-r.rollupCh:
|
||||
r.handleConfirmation(confirmation)
|
||||
}
|
||||
const processMsgLimit = 100
|
||||
|
||||
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
|
||||
func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
batch, err := r.db.GetLatestFinalizedBatch()
|
||||
if err != nil {
|
||||
log.Error("GetLatestFinalizedBatch failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
// msgs are sorted by nonce in increasing order
|
||||
msgs, err := r.db.GetL2Messages(
|
||||
map[string]interface{}{"status": orm.MsgPending},
|
||||
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
|
||||
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
// process messages in batches
|
||||
batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
|
||||
for size := 0; len(msgs) > 0; msgs = msgs[size:] {
|
||||
if size = len(msgs); size > batchSize {
|
||||
size = batchSize
|
||||
}
|
||||
}(ctx)
|
||||
var g errgroup.Group
|
||||
for _, msg := range msgs[:size] {
|
||||
msg := msg
|
||||
g.Go(func() error {
|
||||
return r.processSavedEvent(msg, batch.Index)
|
||||
})
|
||||
}
|
||||
if err := g.Wait(); err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("failed to process l2 saved event", "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := r.checkSubmittedMessages(); err != nil {
|
||||
log.Error("failed to init layer2 submitted tx", "err", err)
|
||||
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
|
||||
// @todo fetch merkle proof from l2geth
|
||||
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
|
||||
|
||||
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
|
||||
BlockHeight: big.NewInt(int64(msg.Height)),
|
||||
BatchIndex: big.NewInt(0).SetUint64(index),
|
||||
MerkleProof: make([]byte, 0),
|
||||
}
|
||||
from := common.HexToAddress(msg.Sender)
|
||||
target := common.HexToAddress(msg.Target)
|
||||
value, ok := big.NewInt(0).SetString(msg.Value, 10)
|
||||
if !ok {
|
||||
// @todo maybe panic?
|
||||
log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
|
||||
// TODO: need to skip this message by changing its status to MsgError
|
||||
}
|
||||
fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
|
||||
deadline := big.NewInt(int64(msg.Deadline))
|
||||
msgNonce := big.NewInt(int64(msg.Nonce))
|
||||
calldata := common.Hex2Bytes(msg.Calldata)
|
||||
data, err := r.l1MessengerABI.Pack("relayMessageWithProof", from, target, value, fee, deadline, msgNonce, calldata, proof)
|
||||
if err != nil {
|
||||
log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
|
||||
// TODO: need to skip this message by changing its status to MsgError
|
||||
return err
|
||||
}
|
||||
|
||||
if err := r.checkCommittingBatches(); err != nil {
|
||||
log.Error("failed to init layer2 committed tx", "err", err)
|
||||
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, 0)
|
||||
if err != nil && err.Error() == "execution reverted: Message expired" {
|
||||
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgExpired)
|
||||
}
|
||||
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
|
||||
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
|
||||
}
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
log.Info("relayMessageWithProof to layer1", "msgHash", msg.MsgHash, "txhash", hash.String())
|
||||
|
||||
if err := r.checkFinalizingBatches(); err != nil {
|
||||
log.Error("failed to init layer2 finalized tx", "err", err)
|
||||
// save status in db
|
||||
// @todo handle db error
|
||||
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
|
||||
if err != nil {
|
||||
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait forever until message sender and roller sender are empty.
|
||||
utils.TryTimes(-1, func() bool {
|
||||
return r.messageSender.PendingCount() == 0 && r.rollupSender.PendingCount() == 0
|
||||
})
|
||||
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessPendingBatches submit batch data to layer 1 rollup contract
|
||||
func (r *Layer2Relayer) ProcessPendingBatches(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
// batches are sorted by batch index in increasing order
|
||||
batchesInDB, err := r.db.GetPendingBatches(1)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch pending L2 batches", "err", err)
|
||||
return
|
||||
}
|
||||
if len(batchesInDB) == 0 {
|
||||
return
|
||||
}
|
||||
id := batchesInDB[0]
|
||||
// @todo add support to relay multiple batches
|
||||
|
||||
batches, err := r.db.GetBlockBatches(map[string]interface{}{"id": id})
|
||||
if err != nil || len(batches) == 0 {
|
||||
log.Error("Failed to GetBlockBatches", "batch_id", id, "err", err)
|
||||
return
|
||||
}
|
||||
batch := batches[0]
|
||||
|
||||
traces, err := r.db.GetBlockTraces(map[string]interface{}{"batch_id": id}, "ORDER BY number ASC")
|
||||
if err != nil || len(traces) == 0 {
|
||||
log.Error("Failed to GetBlockTraces", "batch_id", id, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
layer2Batch := &bridge_abi.IZKRollupLayer2Batch{
|
||||
BatchIndex: batch.Index,
|
||||
ParentHash: common.HexToHash(batch.ParentHash),
|
||||
Blocks: make([]bridge_abi.IZKRollupLayer2BlockHeader, len(traces)),
|
||||
}
|
||||
|
||||
parentHash := common.HexToHash(batch.ParentHash)
|
||||
for i, trace := range traces {
|
||||
layer2Batch.Blocks[i] = bridge_abi.IZKRollupLayer2BlockHeader{
|
||||
BlockHash: trace.Header.Hash(),
|
||||
ParentHash: parentHash,
|
||||
BaseFee: trace.Header.BaseFee,
|
||||
StateRoot: trace.StorageTrace.RootAfter,
|
||||
BlockHeight: trace.Header.Number.Uint64(),
|
||||
GasUsed: 0,
|
||||
Timestamp: trace.Header.Time,
|
||||
ExtraData: make([]byte, 0),
|
||||
Txs: make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.Transactions)),
|
||||
}
|
||||
for j, tx := range trace.Transactions {
|
||||
layer2Batch.Blocks[i].Txs[j] = bridge_abi.IZKRollupLayer2Transaction{
|
||||
Caller: tx.From,
|
||||
Nonce: tx.Nonce,
|
||||
Gas: tx.Gas,
|
||||
GasPrice: tx.GasPrice.ToInt(),
|
||||
Value: tx.Value.ToInt(),
|
||||
Data: common.Hex2Bytes(tx.Data),
|
||||
R: tx.R.ToInt(),
|
||||
S: tx.S.ToInt(),
|
||||
V: tx.V.ToInt().Uint64(),
|
||||
}
|
||||
if tx.To != nil {
|
||||
layer2Batch.Blocks[i].Txs[j].Target = *tx.To
|
||||
}
|
||||
layer2Batch.Blocks[i].GasUsed += trace.ExecutionResults[j].Gas
|
||||
}
|
||||
|
||||
// for next iteration
|
||||
parentHash = layer2Batch.Blocks[i].BlockHash
|
||||
}
|
||||
|
||||
data, err := r.l1RollupABI.Pack("commitBatch", layer2Batch)
|
||||
if err != nil {
|
||||
log.Error("Failed to pack commitBatch", "id", id, "index", batch.Index, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
txID := id + "-commit"
|
||||
// add suffix `-commit` to avoid duplication with finalize tx in unit tests
|
||||
hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
|
||||
|
||||
if err != nil && err.Error() == "execution reverted: Parent batch hasn't been committed" {
|
||||
|
||||
// check parent is committing
|
||||
batches, err = r.db.GetBlockBatches(map[string]interface{}{"end_block_hash": batch.ParentHash})
|
||||
if err != nil || len(batches) == 0 {
|
||||
log.Error("Failed to get parent batch from db", "batch_id", id, "parent_hash", batch.ParentHash, "err", err)
|
||||
return
|
||||
}
|
||||
parentBatch := batches[0]
|
||||
|
||||
if parentBatch.RollupStatus >= orm.RollupCommitting {
|
||||
// retry with manual gas estimation
|
||||
gasLimit := estimateCommitBatchGas(len(data), len(layer2Batch.Blocks))
|
||||
hash, err = r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, gasLimit)
|
||||
log.Info("commitBatch tx resent with manual gas estimation ", "id", id, "index", batch.Index, "gasLimit", gasLimit, "hash", hash.String(), "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
|
||||
|
||||
// record and sync with db, @todo handle db error
|
||||
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
|
||||
if err != nil {
|
||||
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
|
||||
}
|
||||
r.processingCommitment.Store(txID, id)
|
||||
}
|
||||
|
||||
// ProcessCommittedBatches submit proof to layer 1 rollup contract
|
||||
func (r *Layer2Relayer) ProcessCommittedBatches(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
|
||||
// set skipped batches in a single db operation
|
||||
if count, err := r.db.UpdateSkippedBatches(); err != nil {
|
||||
log.Error("UpdateSkippedBatches failed", "err", err)
|
||||
// continue anyway
|
||||
} else if count > 0 {
|
||||
log.Info("Skipping batches", "count", count)
|
||||
}
|
||||
|
||||
// batches are sorted by batch index in increasing order
|
||||
batches, err := r.db.GetCommittedBatches(1)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch committed L2 batches", "err", err)
|
||||
return
|
||||
}
|
||||
if len(batches) == 0 {
|
||||
return
|
||||
}
|
||||
id := batches[0]
|
||||
// @todo add support to relay multiple batches
|
||||
|
||||
status, err := r.db.GetProvingStatusByID(id)
|
||||
if err != nil {
|
||||
log.Error("GetProvingStatusByID failed", "id", id, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
switch status {
|
||||
case orm.ProvingTaskUnassigned, orm.ProvingTaskAssigned:
|
||||
// The proof for this block is not ready yet.
|
||||
return
|
||||
|
||||
case orm.ProvingTaskProved:
|
||||
// It's an intermediate state. The roller manager received the proof but has not verified
|
||||
// the proof yet. We don't roll up the proof until it's verified.
|
||||
return
|
||||
|
||||
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
|
||||
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
|
||||
|
||||
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
|
||||
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
|
||||
}
|
||||
|
||||
case orm.ProvingTaskVerified:
|
||||
log.Info("Start to roll up zk proof", "id", id)
|
||||
success := false
|
||||
|
||||
defer func() {
|
||||
// TODO: need to revisit this and have a more fine-grained error handling
|
||||
if !success {
|
||||
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "id", id)
|
||||
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
|
||||
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByID(id)
|
||||
if err != nil {
|
||||
log.Warn("fetch get proof by id failed", "id", id, "err", err)
|
||||
return
|
||||
}
|
||||
if proofBuffer == nil || instanceBuffer == nil {
|
||||
log.Warn("proof or instance not ready", "id", id)
|
||||
return
|
||||
}
|
||||
if len(proofBuffer)%32 != 0 {
|
||||
log.Error("proof buffer has wrong length", "id", id, "length", len(proofBuffer))
|
||||
return
|
||||
}
|
||||
if len(instanceBuffer)%32 != 0 {
|
||||
log.Warn("instance buffer has wrong length", "id", id, "length", len(instanceBuffer))
|
||||
return
|
||||
}
|
||||
|
||||
proof := utils.BufferToUint256Le(proofBuffer)
|
||||
instance := utils.BufferToUint256Le(instanceBuffer)
|
||||
data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(id), proof, instance)
|
||||
if err != nil {
|
||||
log.Error("Pack finalizeBatchWithProof failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
txID := id + "-finalize"
|
||||
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
|
||||
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
|
||||
hash := &txHash
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("finalizeBatchWithProof in layer1 failed", "id", id, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
|
||||
|
||||
// record and sync with db, @todo handle db error
|
||||
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
|
||||
if err != nil {
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
|
||||
}
|
||||
success = true
|
||||
r.processingFinalization.Store(txID, id)
|
||||
|
||||
default:
|
||||
log.Error("encounter unreachable case in ProcessCommittedBatches",
|
||||
"block_status", status,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Start the relayer process
|
||||
func (r *Layer2Relayer) Start() {
|
||||
loop := func(ctx context.Context, f func()) {
|
||||
log.Info("Starting l2/relayer")
|
||||
|
||||
go func() {
|
||||
// trigger by timer
|
||||
ticker := time.NewTicker(time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
f()
|
||||
var wg = sync.WaitGroup{}
|
||||
wg.Add(3)
|
||||
go r.ProcessSavedEvents(&wg)
|
||||
go r.ProcessPendingBatches(&wg)
|
||||
go r.ProcessCommittedBatches(&wg)
|
||||
wg.Wait()
|
||||
case confirmation := <-r.messageCh:
|
||||
r.handleConfirmation(confirmation)
|
||||
case confirmation := <-r.rollupCh:
|
||||
r.handleConfirmation(confirmation)
|
||||
case <-r.stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
ctx, cancel := context.WithCancel(r.ctx)
|
||||
|
||||
go loop(ctx, r.ProcessSavedEvents)
|
||||
go loop(ctx, r.ProcessPendingBatches)
|
||||
go loop(ctx, r.ProcessCommittedBatches)
|
||||
|
||||
go func(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case confirmation := <-r.messageCh:
|
||||
r.handleConfirmation(confirmation)
|
||||
case confirmation := <-r.rollupCh:
|
||||
r.handleConfirmation(confirmation)
|
||||
}
|
||||
}
|
||||
}(ctx)
|
||||
|
||||
<-r.stopCh
|
||||
cancel()
|
||||
}()
|
||||
}
|
||||
|
||||
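Both versions of `Layer2Relayer.Start` visible in the hunk above drive the relayer from a ticker inside a `select` that also watches for shutdown; one side of the diff factors this into a `loop(ctx, f)` helper that runs a processing function once per tick until the context is cancelled. A self-contained sketch of that ticker-loop pattern (names, interval, and the printed message are illustrative only, not the repo's code):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// loop runs f once per tick until ctx is cancelled, mirroring the
// `loop := func(ctx context.Context, f func())` helper in the diff.
func loop(ctx context.Context, interval time.Duration, f func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			f()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3500*time.Millisecond)
	defer cancel()
	go loop(ctx, time.Second, func() { fmt.Println("tick: process batches") })
	<-ctx.Done()
}
```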
@@ -212,3 +494,12 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
    }
    log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}

func estimateCommitBatchGas(callDataLength int, numBlocks int) uint64 {
    gasLimit := uint64(0)
    gasLimit += 16 * uint64(callDataLength)   // calldata cost
    gasLimit += 4*2100 + 3*22100              // fixed cost per batch
    gasLimit += 4 * 22100 * uint64(numBlocks) // cost per block in batch
    gasLimit = gasLimit * 12 / 10             // apply multiplier
    return gasLimit
}
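`estimateCommitBatchGas` above derives a manual gas limit for a resent `commitBatch` transaction from the calldata size and block count, then adds a 20% margin (`* 12 / 10`). A standalone copy of the formula with one worked example (the input sizes below are made up for illustration):

```go
package main

import "fmt"

// estimateCommitBatchGas reproduces the formula shown in the diff:
// 16 gas per calldata byte, a fixed per-batch term, a per-block term,
// and a 20% safety margin on top.
func estimateCommitBatchGas(callDataLength, numBlocks int) uint64 {
	gasLimit := uint64(0)
	gasLimit += 16 * uint64(callDataLength)   // calldata cost
	gasLimit += 4*2100 + 3*22100              // fixed cost per batch
	gasLimit += 4 * 22100 * uint64(numBlocks) // cost per block in batch
	return gasLimit * 12 / 10                 // apply multiplier
}

func main() {
	// For 50,000 bytes of calldata and 4 blocks:
	// 800,000 + 74,700 + 353,600 = 1,228,300, times 1.2 = 1,473,960 gas.
	fmt.Println(estimateCommitBatchGas(50_000, 4))
}
```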
@@ -1,171 +0,0 @@
|
||||
package l2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"modernc.org/mathutil"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/sender"
|
||||
|
||||
"scroll-tech/database/orm"
|
||||
)
|
||||
|
||||
func (r *Layer2Relayer) checkCommittingBatches() error {
|
||||
var batchIndex uint64
|
||||
BEGIN:
|
||||
batches, err := r.db.GetBlockBatches(
|
||||
map[string]interface{}{"rollup_status": orm.RollupCommitting},
|
||||
fmt.Sprintf("AND index > %d", batchIndex),
|
||||
fmt.Sprintf("ORDER BY index ASC LIMIT %d", 10),
|
||||
)
|
||||
if err != nil || len(batches) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
for batch := batches[0]; len(batches) > 0; { //nolint:staticcheck
|
||||
// If pending txs pool is full, wait a while and retry.
|
||||
if r.rollupSender.IsFull() {
|
||||
log.Warn("layer2 rollup sender pending committed tx reaches pending limit")
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
continue
|
||||
}
|
||||
batch, batches = batches[0], batches[1:]
|
||||
|
||||
id := batch.ID
|
||||
batchIndex = mathutil.MaxUint64(batchIndex, batch.Index)
|
||||
|
||||
txStr, err := r.db.GetCommitTxHash(id)
|
||||
if err != nil {
|
||||
log.Error("failed to get commit_tx_hash from block_batch", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
_, data, err := r.packCommitBatch(id)
|
||||
if err != nil {
|
||||
log.Error("failed to load or send committed tx", "batch id", id, "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
txID := id + "-commit"
|
||||
err = r.rollupSender.LoadOrSendTx(
|
||||
common.HexToHash(txStr.String),
|
||||
txID,
|
||||
&r.cfg.RollupContractAddress,
|
||||
big.NewInt(0),
|
||||
data,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error("failed to load or send tx", "batch id", id, "err", err)
|
||||
} else {
|
||||
r.processingCommitment.Store(txID, id)
|
||||
}
|
||||
}
|
||||
goto BEGIN
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) packCommitBatch(id string) (*orm.BlockBatch, []byte, error) {
|
||||
batches, err := r.db.GetBlockBatches(map[string]interface{}{"id": id})
|
||||
if err != nil || len(batches) == 0 {
|
||||
log.Error("Failed to GetBlockBatches", "batch_id", id, "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
batch := batches[0]
|
||||
|
||||
traces, err := r.db.GetBlockTraces(map[string]interface{}{"batch_id": id}, "ORDER BY number ASC")
|
||||
if err != nil || len(traces) == 0 {
|
||||
log.Error("Failed to GetBlockTraces", "batch_id", id, "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
layer2Batch := &bridge_abi.IZKRollupLayer2Batch{
|
||||
BatchIndex: batch.Index,
|
||||
ParentHash: common.HexToHash(batch.ParentHash),
|
||||
Blocks: make([]bridge_abi.IZKRollupLayer2BlockHeader, len(traces)),
|
||||
}
|
||||
|
||||
parentHash := common.HexToHash(batch.ParentHash)
|
||||
for i, trace := range traces {
|
||||
layer2Batch.Blocks[i] = bridge_abi.IZKRollupLayer2BlockHeader{
|
||||
BlockHash: trace.Header.Hash(),
|
||||
ParentHash: parentHash,
|
||||
BaseFee: trace.Header.BaseFee,
|
||||
StateRoot: trace.StorageTrace.RootAfter,
|
||||
BlockHeight: trace.Header.Number.Uint64(),
|
||||
GasUsed: 0,
|
||||
Timestamp: trace.Header.Time,
|
||||
ExtraData: make([]byte, 0),
|
||||
Txs: make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.Transactions)),
|
||||
}
|
||||
for j, tx := range trace.Transactions {
|
||||
layer2Batch.Blocks[i].Txs[j] = bridge_abi.IZKRollupLayer2Transaction{
|
||||
Caller: tx.From,
|
||||
Nonce: tx.Nonce,
|
||||
Gas: tx.Gas,
|
||||
GasPrice: tx.GasPrice.ToInt(),
|
||||
Value: tx.Value.ToInt(),
|
||||
Data: common.Hex2Bytes(tx.Data),
|
||||
R: tx.R.ToInt(),
|
||||
S: tx.S.ToInt(),
|
||||
V: tx.V.ToInt().Uint64(),
|
||||
}
|
||||
if tx.To != nil {
|
||||
layer2Batch.Blocks[i].Txs[j].Target = *tx.To
|
||||
}
|
||||
layer2Batch.Blocks[i].GasUsed += trace.ExecutionResults[j].Gas
|
||||
}
|
||||
|
||||
// for next iteration
|
||||
parentHash = layer2Batch.Blocks[i].BlockHash
|
||||
}
|
||||
|
||||
data, err := bridge_abi.RollupMetaABI.Pack("commitBatch", layer2Batch)
|
||||
if err != nil {
|
||||
log.Error("Failed to pack commitBatch", "id", id, "index", batch.Index, "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
return batch, data, nil
|
||||
}
|
||||
|
||||
// ProcessPendingBatches submit batch data to layer 1 rollup contract
|
||||
func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
// batches are sorted by batch index in increasing order
|
||||
batchesInDB, err := r.db.GetPendingBatches(1)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch pending L2 batches", "err", err)
|
||||
return
|
||||
}
|
||||
if len(batchesInDB) == 0 {
|
||||
return
|
||||
}
|
||||
id := batchesInDB[0]
|
||||
// @todo add support to relay multiple batches
|
||||
|
||||
batch, data, err := r.packCommitBatch(id)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
txID := id + "-commit"
|
||||
// add suffix `-commit` to avoid duplication with finalize tx in unit tests
|
||||
hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
|
||||
|
||||
// record and sync with db, @todo handle db error
|
||||
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
|
||||
if err != nil {
|
||||
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
|
||||
}
|
||||
r.processingCommitment.Store(txID, id)
|
||||
}
|
||||
@@ -1,195 +0,0 @@
|
||||
package l2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"modernc.org/mathutil"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/sender"
|
||||
"scroll-tech/bridge/utils"
|
||||
|
||||
"scroll-tech/database/orm"
|
||||
)
|
||||
|
||||
func (r *Layer2Relayer) checkFinalizingBatches() error {
|
||||
var (
|
||||
batchLimit = 10
|
||||
batchIndex uint64
|
||||
)
|
||||
BEGIN:
|
||||
batches, err := r.db.GetBlockBatches(
|
||||
map[string]interface{}{"rollup_status": orm.RollupFinalizing},
|
||||
fmt.Sprintf("AND index > %d", batchIndex),
|
||||
fmt.Sprintf("ORDER BY index ASC LIMIT %d", batchLimit),
|
||||
)
|
||||
if err != nil || len(batches) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
for batch := batches[0]; len(batches) > 0; { //nolint:staticcheck
|
||||
// If pending txs pool is full, wait a while and retry.
|
||||
if r.rollupSender.IsFull() {
|
||||
log.Warn("layer2 rollup sender pending finalized tx reaches pending limit")
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
continue
|
||||
}
|
||||
batch, batches = batches[0], batches[1:]
|
||||
|
||||
id := batch.ID
|
||||
batchIndex = mathutil.MaxUint64(batchIndex, batch.Index)
|
||||
|
||||
txStr, err := r.db.GetFinalizeTxHash(id)
|
||||
if err != nil {
|
||||
log.Error("failed to get finalize_tx_hash from block_batch", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
data, err := r.packFinalizeBatch(id)
|
||||
if err != nil {
|
||||
log.Error("failed to pack finalize data", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
txID := id + "-finalize"
|
||||
err = r.rollupSender.LoadOrSendTx(
|
||||
common.HexToHash(txStr.String),
|
||||
txID,
|
||||
&r.cfg.RollupContractAddress,
|
||||
big.NewInt(0),
|
||||
data,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error("failed to load or send finalized tx", "batch id", id, "err", err)
|
||||
} else {
|
||||
r.processingFinalization.Store(txID, id)
|
||||
}
|
||||
}
|
||||
goto BEGIN
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) packFinalizeBatch(id string) ([]byte, error) {
|
||||
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByID(id)
|
||||
if err != nil {
|
||||
log.Warn("fetch get proof by id failed", "id", id, "err", err)
|
||||
return nil, err
|
||||
}
|
||||
if proofBuffer == nil || instanceBuffer == nil {
|
||||
log.Warn("proof or instance not ready", "id", id)
|
||||
return nil, err
|
||||
}
|
||||
if len(proofBuffer)%32 != 0 {
|
||||
log.Error("proof buffer has wrong length", "id", id, "length", len(proofBuffer))
|
||||
return nil, err
|
||||
}
|
||||
if len(instanceBuffer)%32 != 0 {
|
||||
log.Warn("instance buffer has wrong length", "id", id, "length", len(instanceBuffer))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
proof := utils.BufferToUint256Le(proofBuffer)
|
||||
instance := utils.BufferToUint256Le(instanceBuffer)
|
||||
data, err := bridge_abi.RollupMetaABI.Pack("finalizeBatchWithProof", common.HexToHash(id), proof, instance)
|
||||
if err != nil {
|
||||
log.Error("Pack finalizeBatchWithProof failed", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// ProcessCommittedBatches submit proof to layer 1 rollup contract
|
||||
func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
// set skipped batches in a single db operation
|
||||
if count, err := r.db.UpdateSkippedBatches(); err != nil {
|
||||
log.Error("UpdateSkippedBatches failed", "err", err)
|
||||
// continue anyway
|
||||
} else if count > 0 {
|
||||
log.Info("Skipping batches", "count", count)
|
||||
}
|
||||
|
||||
// batches are sorted by batch index in increasing order
|
||||
batches, err := r.db.GetCommittedBatches(1)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch committed L2 batches", "err", err)
|
||||
return
|
||||
}
|
||||
if len(batches) == 0 {
|
||||
return
|
||||
}
|
||||
id := batches[0]
|
||||
// @todo add support to relay multiple batches
|
||||
|
||||
status, err := r.db.GetProvingStatusByID(id)
|
||||
if err != nil {
|
||||
log.Error("GetProvingStatusByID failed", "id", id, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
switch status {
|
||||
case orm.ProvingTaskUnassigned, orm.ProvingTaskAssigned:
|
||||
// The proof for this block is not ready yet.
|
||||
return
|
||||
|
||||
case orm.ProvingTaskProved:
|
||||
// It's an intermediate state. The roller manager received the proof but has not verified
|
||||
// the proof yet. We don't roll up the proof until it's verified.
|
||||
return
|
||||
|
||||
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
|
||||
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
|
||||
|
||||
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
|
||||
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
|
||||
}
|
||||
|
||||
case orm.ProvingTaskVerified:
|
||||
log.Info("Start to roll up zk proof", "id", id)
|
||||
success := false
|
||||
|
||||
defer func() {
|
||||
// TODO: need to revisit this and have a more fine-grained error handling
|
||||
if !success {
|
||||
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "id", id)
|
||||
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
|
||||
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Pack finalize data.
|
||||
data, err := r.packFinalizeBatch(id)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
txID := id + "-finalize"
|
||||
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
|
||||
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
|
||||
hash := &txHash
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
log.Error("finalizeBatchWithProof in layer1 failed", "id", id, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
|
||||
|
||||
// record and sync with db, @todo handle db error
|
||||
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
|
||||
if err != nil {
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
|
||||
}
|
||||
success = true
|
||||
r.processingFinalization.Store(txID, id)
|
||||
|
||||
default:
|
||||
log.Error("encounter unreachable case in ProcessCommittedBatches",
|
||||
"block_status", status,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,183 +0,0 @@
|
||||
package l2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"modernc.org/mathutil"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/sender"
|
||||
|
||||
"scroll-tech/database/orm"
|
||||
)
|
||||
|
||||
const processMsgLimit = 100
|
||||
|
||||
func (r *Layer2Relayer) checkSubmittedMessages() error {
|
||||
var nonce uint64
|
||||
BEGIN:
|
||||
// msgs are sorted by nonce in increasing order
|
||||
msgs, err := r.db.GetL2Messages(
|
||||
map[string]interface{}{"status": orm.MsgSubmitted},
|
||||
fmt.Sprintf("AND nonce > %d", nonce),
|
||||
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
|
||||
)
|
||||
if err != nil || len(msgs) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
var batch *orm.BlockBatch
|
||||
for msg := msgs[0]; len(msgs) > 0; { //nolint:staticcheck
|
||||
// If pending pool is full, wait a while and retry.
|
||||
if r.messageSender.IsFull() {
|
||||
log.Warn("layer2 message tx sender is full")
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
continue
|
||||
}
|
||||
msg, msgs = msgs[0], msgs[1:]
|
||||
nonce = mathutil.MaxUint64(nonce, msg.Nonce)
|
||||
|
||||
// Get batch by block number.
|
||||
if batch == nil || msg.Height < batch.StartBlockNumber || msg.Height > batch.EndBlockNumber {
|
||||
batches, err := r.db.GetBlockBatches(
|
||||
map[string]interface{}{},
|
||||
fmt.Sprintf("AND start_block_number <= %d AND end_block_number >= %d", msg.Height, msg.Height),
|
||||
)
|
||||
// If get batch failed, stop and return immediately.
|
||||
if err != nil || len(batches) == 0 {
|
||||
return err
|
||||
}
|
||||
batch = batches[0]
|
||||
}
|
||||
|
||||
data, err := r.packRelayMessage(msg, batch.Index)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
err = r.messageSender.LoadOrSendTx(
|
||||
common.HexToHash(msg.Layer1Hash),
|
||||
msg.MsgHash,
|
||||
&r.cfg.MessengerContractAddress,
|
||||
big.NewInt(0),
|
||||
data,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error("failed to load or send l2 submitted tx", "batch id", batch.ID, "msg hash", msg.MsgHash, "err", err)
|
||||
} else {
|
||||
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
|
||||
}
|
||||
}
|
||||
goto BEGIN
|
||||
}

// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
func (r *Layer2Relayer) ProcessSavedEvents() {
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
return
}

// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2Messages(
map[string]interface{}{"status": orm.MsgPending},
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
)

if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
return
}

// process messages in batches
batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
for size := 0; len(msgs) > 0; msgs = msgs[size:] {
if size = len(msgs); size > batchSize {
size = batchSize
}
var g errgroup.Group
for _, msg := range msgs[:size] {
msg := msg
g.Go(func() error {
return r.processSavedEvent(msg, batch.Index)
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process l2 saved event", "err", err)
}
return
}
}
}
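ProcessSavedEvents drains pending messages in bounded batches and fans each batch out with errgroup. A standalone, runnable sketch of the same pattern, keeping only the batching logic (the account cap from NumberOfAccounts is omitted here):

```go
package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sync/errgroup"
)

func main() {
	msgs := []int{1, 2, 3, 4, 5, 6, 7}
	// same concurrency cap the relayer uses: roughly half of GOMAXPROCS
	batchSize := (runtime.GOMAXPROCS(0) + 1) / 2

	for size := 0; len(msgs) > 0; msgs = msgs[size:] {
		if size = len(msgs); size > batchSize {
			size = batchSize
		}
		var g errgroup.Group
		for _, m := range msgs[:size] {
			m := m // capture the loop variable, as the relayer does
			g.Go(func() error {
				fmt.Println("processing message", m)
				return nil
			})
		}
		if err := g.Wait(); err != nil {
			return // first error stops the whole run, matching the relayer
		}
	}
}
```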

func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
data, err := r.packRelayMessage(msg, index)
if err != nil {
return err
}

hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
}
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
}
return err
}
log.Info("relayMessageWithProof to layer1", "msgHash", msg.MsgHash, "txhash", hash.String())

// save status in db
// @todo handle db error
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
if err != nil {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err
}
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
return nil
}
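processSavedEvent matches the raw revert reason string returned by the node because these two failures are terminal rather than transient. A hedged, self-contained sketch of that classification; the local status type is illustrative and only mirrors the orm constants used above:

```go
package main

import (
	"errors"
	"fmt"
)

type msgStatus int

const (
	msgRetryLater msgStatus = iota // unknown error: keep the normal retry path
	msgExpired
	msgConfirmed
)

// classifyRelayError maps the two known terminal reverts to final statuses.
func classifyRelayError(err error) msgStatus {
	switch err.Error() {
	case "execution reverted: Message expired":
		return msgExpired
	case "execution reverted: Message successfully executed":
		return msgConfirmed
	default:
		return msgRetryLater
	}
}

func main() {
	err := errors.New("execution reverted: Message expired")
	fmt.Println(classifyRelayError(err) == msgExpired) // true
}
```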

func (r *Layer2Relayer) packRelayMessage(msg *orm.L2Message, index uint64) ([]byte, error) {
// @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)

proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockHeight: big.NewInt(int64(msg.Height)),
BatchIndex: big.NewInt(0).SetUint64(index),
MerkleProof: make([]byte, 0),
}
from := common.HexToAddress(msg.Sender)
target := common.HexToAddress(msg.Target)
value, ok := big.NewInt(0).SetString(msg.Value, 10)
if !ok {
// @todo maybe panic?
log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
// TODO: need to skip this message by changing its status to MsgError
}
fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
deadline := big.NewInt(int64(msg.Deadline))
msgNonce := big.NewInt(int64(msg.Nonce))
calldata := common.Hex2Bytes(msg.Calldata)
data, err := bridge_abi.L1MessengerMetaABI.Pack("relayMessageWithProof", from, target, value, fee, deadline, msgNonce, calldata, proof)
if err != nil {
log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return nil, err
}
return data, nil
}
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -94,7 +95,10 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
|
||||
err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupFinalized)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessSavedEvents()
|
||||
var wg = sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
relayer.ProcessSavedEvents(&wg)
|
||||
wg.Wait()
|
||||
|
||||
msg, err := db.GetL2MessageByNonce(templateL2Message[0].Nonce)
|
||||
assert.NoError(t, err)
|
||||
@@ -150,7 +154,10 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
// err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupPending)
|
||||
// assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBatches()
|
||||
var wg = sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
relayer.ProcessPendingBatches(&wg)
|
||||
wg.Wait()
|
||||
|
||||
// Check if Rollup Result is changed successfully
|
||||
status, err := db.GetRollupStatus(batchID)
|
||||
@@ -187,7 +194,10 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
|
||||
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessCommittedBatches()
|
||||
var wg = sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
relayer.ProcessCommittedBatches(&wg)
|
||||
wg.Wait()
|
||||
|
||||
status, err := db.GetRollupStatus(batchID)
|
||||
assert.NoError(t, err)
|
||||
@@ -244,7 +254,10 @@ func testL2RelayerSkipBatches(t *testing.T) {
|
||||
createBatch(orm.RollupCommitted, orm.ProvingTaskVerified),
|
||||
}
|
||||
|
||||
relayer.ProcessCommittedBatches()
|
||||
var wg = sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
relayer.ProcessCommittedBatches(&wg)
|
||||
wg.Wait()
|
||||
|
||||
for _, id := range skipped {
|
||||
status, err := db.GetRollupStatus(id)
|
||||
|
||||
@@ -14,8 +14,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/event"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
bridge_abi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/utils"
|
||||
@@ -26,11 +24,6 @@ import (
|
||||
"scroll-tech/bridge/config"
|
||||
)
|
||||
|
||||
// Metrics
|
||||
var (
|
||||
bridgeL2MsgSyncHeightGauge = metrics.NewRegisteredGauge("bridge/l2/msg/sync/height", nil)
|
||||
)
|
||||
|
||||
type relayedMessage struct {
|
||||
msgHash common.Hash
|
||||
txHash common.Hash
|
||||
@@ -46,7 +39,7 @@ type WatcherClient struct {
|
||||
|
||||
orm database.OrmFactory
|
||||
|
||||
confirmations rpc.BlockNumber
|
||||
confirmations uint64
|
||||
messengerAddress common.Address
|
||||
messengerABI *abi.ABI
|
||||
|
||||
@@ -60,7 +53,7 @@ type WatcherClient struct {
|
||||
}
|
||||
|
||||
// NewL2WatcherClient take a l2geth instance to generate a l2watcherclient instance
|
||||
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
|
||||
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
|
||||
savedHeight, err := orm.GetLayer2LatestWatchedHeight()
|
||||
if err != nil {
|
||||
log.Warn("fetch height from db failed", "err", err)
|
||||
@@ -83,6 +76,8 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
|
||||
|
||||
// Start the Listening process
|
||||
func (w *WatcherClient) Start() {
|
||||
log.Info("Starting l2/watcher")
|
||||
|
||||
go func() {
|
||||
if reflect.ValueOf(w.orm).IsNil() {
|
||||
panic("must run L2 watcher with DB")
|
||||
@@ -101,12 +96,19 @@ func (w *WatcherClient) Start() {
|
||||
return
|
||||
|
||||
case <-ticker.C:
|
||||
number, err := utils.GetLatestConfirmedBlockNumber(ctx, w.Client, w.confirmations)
|
||||
// get current height
|
||||
number, err := w.BlockNumber(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get block number", "err", err)
|
||||
log.Error("failed to get_BlockNumber", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if number >= w.confirmations {
|
||||
number = number - w.confirmations
|
||||
} else {
|
||||
number = 0
|
||||
}
|
||||
|
||||
w.tryFetchRunningMissingBlocks(ctx, number)
|
||||
}
|
||||
}
|
||||
@@ -123,12 +125,19 @@ func (w *WatcherClient) Start() {
|
||||
return
|
||||
|
||||
case <-ticker.C:
|
||||
number, err := utils.GetLatestConfirmedBlockNumber(ctx, w.Client, w.confirmations)
|
||||
// get current height
|
||||
number, err := w.BlockNumber(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get block number", "err", err)
|
||||
log.Error("failed to get_BlockNumber", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if number >= w.confirmations {
|
||||
number = number - w.confirmations
|
||||
} else {
|
||||
number = 0
|
||||
}
|
||||
|
||||
w.FetchContractEvent(number)
|
||||
}
|
||||
}
|
||||
@@ -256,7 +265,6 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
|
||||
}
|
||||
if len(logs) == 0 {
|
||||
w.processedMsgHeight = uint64(to)
|
||||
bridgeL2MsgSyncHeightGauge.Update(to)
|
||||
continue
|
||||
}
|
||||
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
|
||||
@@ -289,7 +297,6 @@ func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
|
||||
}
|
||||
|
||||
w.processedMsgHeight = uint64(to)
|
||||
bridgeL2MsgSyncHeightGauge.Update(to)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/bridge/config"
|
||||
@@ -37,7 +36,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
|
||||
defer rc.Stop()
|
||||
|
||||
l1cfg := cfg.L1Config
|
||||
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
|
||||
l1cfg.RelayerConfig.SenderConfig.Confirmations = 0
|
||||
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKeys)
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -45,7 +44,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
|
||||
numTransactions := 3
|
||||
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
|
||||
for i := 0; i < numTransactions; i++ {
|
||||
_, err = newSender.SendTransaction(strconv.Itoa(1000+i), &toAddress, big.NewInt(1000000000), nil)
|
||||
_, err = newSender.SendTransaction(strconv.Itoa(1000+i), &toAddress, big.NewInt(1000000000), nil, 0)
|
||||
assert.NoError(t, err)
|
||||
<-newSender.ConfirmChan()
|
||||
}
|
||||
@@ -191,8 +190,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
|
||||
confirmations := rpc.LatestBlockNumber
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, bpCfg, contractAddr, db)
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, db)
|
||||
}
|
||||
|
||||
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
|
||||
|
||||
@@ -20,8 +20,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/bridge/utils"
|
||||
|
||||
"scroll-tech/bridge/config"
|
||||
)
|
||||
|
||||
@@ -88,7 +86,6 @@ type Sender struct {
|
||||
|
||||
blockNumber uint64 // Current block number on chain.
|
||||
baseFeePerGas uint64 // Current base fee per gas on chain
|
||||
pendingNum int64 // current pending tx count.
|
||||
pendingTxs sync.Map // Mapping from nonce to pending transaction
|
||||
confirmCh chan *Confirmation
|
||||
|
||||
@@ -123,15 +120,6 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var baseFeePerGas uint64
|
||||
if config.TxType == DynamicFeeTxType {
|
||||
if header.BaseFee != nil {
|
||||
baseFeePerGas = header.BaseFee.Uint64()
|
||||
} else {
|
||||
return nil, errors.New("DynamicFeeTxType not supported, header.BaseFee nil")
|
||||
}
|
||||
}
|
||||
|
||||
sender := &Sender{
|
||||
ctx: ctx,
|
||||
config: config,
|
||||
@@ -140,7 +128,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
|
||||
auths: auths,
|
||||
confirmCh: make(chan *Confirmation, 128),
|
||||
blockNumber: header.Number.Uint64(),
|
||||
baseFeePerGas: baseFeePerGas,
|
||||
baseFeePerGas: header.BaseFee.Uint64(),
|
||||
pendingTxs: sync.Map{},
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
@@ -150,16 +138,6 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
|
||||
return sender, nil
|
||||
}
|
||||
|
||||
// PendingCount return the current pending txs num.
|
||||
func (s *Sender) PendingCount() int64 {
|
||||
return atomic.LoadInt64(&s.pendingNum)
|
||||
}
|
||||
|
||||
// PendingLimit return the maximum pendingTxs can handle.
|
||||
func (s *Sender) PendingLimit() int64 {
|
||||
return s.config.PendingLimit
|
||||
}
|
||||
|
||||
// Stop stop the sender module.
|
||||
func (s *Sender) Stop() {
|
||||
close(s.stopCh)
|
||||
@@ -176,18 +154,21 @@ func (s *Sender) NumberOfAccounts() int {
return len(s.auths.accounts)
}

func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte) (*FeeData, error) {
// estimate gas limit
gasLimit, err := s.client.EstimateGas(s.ctx, geth.CallMsg{From: auth.From, To: target, Value: value, Data: data})
if err != nil {
return nil, err
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte, gasLimit uint64) (*FeeData, error) {
if gasLimit == 0 {
// estimate gas limit
var err error
gasLimit, err = s.client.EstimateGas(s.ctx, geth.CallMsg{From: auth.From, To: target, Value: value, Data: data})
if err != nil {
return nil, err
}
gasLimit = gasLimit * 15 / 10 // 50% extra gas to void out of gas error
}
gasLimit = gasLimit * 15 / 10 // 50% extra gas to void out of gas error

// @todo change it when Scroll enable EIP1559
if s.config.TxType != DynamicFeeTxType {
// estimate gas price
var gasPrice *big.Int
gasPrice, err = s.client.SuggestGasPrice(s.ctx)
gasPrice, err := s.client.SuggestGasPrice(s.ctx)
if err != nil {
return nil, err
}
@@ -210,27 +191,16 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
}, nil
}
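For callers that pass gasLimit == 0, the new getFeeData still estimates gas and pads it by 50%. A standalone sketch of that estimate-and-pad step, assuming a local RPC endpoint and go-ethereum's ethclient; the endpoint and target address are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	geth "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // assumed endpoint
	if err != nil {
		log.Fatal(err)
	}
	to := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")

	// estimate gas for a plain value transfer, then add the same 50% headroom
	gasLimit, err := client.EstimateGas(context.Background(), geth.CallMsg{
		To:    &to,
		Value: big.NewInt(0),
	})
	if err != nil {
		log.Fatal(err)
	}
	gasLimit = gasLimit * 15 / 10 // 50% extra gas, mirroring the sender
	fmt.Println("padded gas limit:", gasLimit)
}
```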

// IsFull If pendingTxs pool is full return true.
func (s *Sender) IsFull() bool {
return atomic.LoadInt64(&s.pendingNum) == s.config.PendingLimit
}

// SendTransaction send a signed L2tL1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte) (hash common.Hash, err error) {
if s.IsFull() {
return common.Hash{}, fmt.Errorf("pending txs is full, pending size: %d", s.config.PendingLimit)
}
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, gasLimit uint64) (hash common.Hash, err error) {
// We occupy the ID, in case some other threads call with the same ID in the same time
if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
}
atomic.AddInt64(&s.pendingNum, 1)

// get
auth := s.auths.getAccount()
if auth == nil {
s.pendingTxs.Delete(ID) // release the ID on failure
atomic.AddInt64(&s.pendingNum, -1)
return common.Hash{}, ErrNoAvailableAccount
}

@@ -238,7 +208,6 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
defer func() {
if err != nil {
s.pendingTxs.Delete(ID) // release the ID on failure
atomic.AddInt64(&s.pendingNum, -1)
}
}()

@@ -247,9 +216,10 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
tx *types.Transaction
)
// estimate gas fee
if feeData, err = s.getFeeData(auth, target, value, data); err != nil {
if feeData, err = s.getFeeData(auth, target, value, data, gasLimit); err != nil {
return
}

if tx, err = s.createAndSendTx(auth, feeData, target, value, data, nil); err == nil {
// add pending transaction to queue
pending := &PendingTransaction{
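Call sites change accordingly, as the updated tests in this compare show (`SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)`). A hedged sketch of both modes of the new signature; the package, function, and ID names here are illustrative:

```go
package senderexample

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"

	"scroll-tech/bridge/sender"
)

// sendWithAndWithoutLimit shows the two modes of the new gasLimit argument.
func sendWithAndWithoutLimit(s *sender.Sender) error {
	toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")

	// gasLimit == 0: the sender estimates gas itself and pads it by 50%.
	if _, err := s.SendTransaction("msg-estimated", &toAddr, big.NewInt(0), nil, 0); err != nil {
		return err
	}

	// gasLimit != 0: the caller-provided limit is used without an estimation call.
	_, err := s.SendTransaction("msg-pinned", &toAddr, big.NewInt(0), nil, 1_000_000)
	return err
}
```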
@@ -266,61 +236,6 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Sender) getTxAndAddr(txHash common.Hash) (*types.Transaction, uint64, common.Address, error) {
|
||||
tx, isPending, err := s.client.TransactionByHash(s.ctx, txHash)
|
||||
if err != nil {
|
||||
return nil, 0, common.Address{}, err
|
||||
}
|
||||
|
||||
sender, err := types.Sender(types.LatestSignerForChainID(s.chainID), tx)
|
||||
if err != nil {
|
||||
return nil, 0, common.Address{}, err
|
||||
}
|
||||
|
||||
if isPending {
|
||||
return tx, s.blockNumber, sender, nil
|
||||
}
|
||||
|
||||
receipt, err := s.client.TransactionReceipt(s.ctx, txHash)
|
||||
if err != nil {
|
||||
return nil, 0, common.Address{}, err
|
||||
}
|
||||
return tx, receipt.BlockNumber.Uint64(), sender, nil
|
||||
}
|
||||
|
||||
// LoadOrSendTx If the tx already exist in chain load it or resend it.
|
||||
func (s *Sender) LoadOrSendTx(destTxHash common.Hash, ID string, target *common.Address, value *big.Int, data []byte) error {
|
||||
tx, blockNumber, from, err := s.getTxAndAddr(destTxHash)
|
||||
// If this tx already exist load it to the pending.
|
||||
if err == nil && tx != nil {
|
||||
auth := s.auths.accounts[from]
|
||||
var feeData *FeeData
|
||||
feeData, err = s.getFeeData(auth, target, value, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We occupy the ID, in case some other threads call with the same ID in the same time
|
||||
if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
|
||||
return fmt.Errorf("has the repeat tx ID, ID: %s", ID)
|
||||
}
|
||||
atomic.AddInt64(&s.pendingNum, 1)
|
||||
s.pendingTxs.Store(ID, &PendingTransaction{
|
||||
tx: tx,
|
||||
id: ID,
|
||||
signer: auth,
|
||||
// Record the transaction's block blockNumber.
|
||||
submitAt: blockNumber,
|
||||
feeData: feeData,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// Tx is dropped from chain node, resend it.
|
||||
_, err = s.SendTransaction(ID, target, value, data)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (tx *types.Transaction, err error) {
|
||||
var (
|
||||
nonce = auth.Nonce.Uint64()
|
||||
@@ -441,20 +356,11 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
|
||||
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
|
||||
}
|
||||
|
||||
// checkPendingTransaction checks the confirmation status of pending transactions against the latest confirmed block number.
|
||||
// If a transaction hasn't been confirmed after a certain number of blocks, it will be resubmitted with an increased gas price.
|
||||
func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64) {
|
||||
// CheckPendingTransaction Check pending transaction given number of blocks to wait before confirmation.
|
||||
func (s *Sender) CheckPendingTransaction(header *types.Header) {
|
||||
number := header.Number.Uint64()
|
||||
atomic.StoreUint64(&s.blockNumber, number)
|
||||
|
||||
if s.config.TxType == DynamicFeeTxType {
|
||||
if header.BaseFee != nil {
|
||||
atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
|
||||
} else {
|
||||
log.Error("DynamicFeeTxType not supported, header.BaseFee nil")
|
||||
}
|
||||
}
|
||||
|
||||
atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
|
||||
s.pendingTxs.Range(func(key, value interface{}) bool {
|
||||
// ignore empty id, since we use empty id to occupy pending task
|
||||
if value == nil || reflect.ValueOf(value).IsNil() {
|
||||
@@ -464,9 +370,8 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
|
||||
pending := value.(*PendingTransaction)
|
||||
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
|
||||
if (err == nil) && (receipt != nil) {
|
||||
if receipt.BlockNumber.Uint64() <= confirmed {
|
||||
if number >= receipt.BlockNumber.Uint64()+s.config.Confirmations {
|
||||
s.pendingTxs.Delete(key)
|
||||
atomic.AddInt64(&s.pendingNum, -1)
|
||||
// send confirm message
|
||||
s.confirmCh <- &Confirmation{
|
||||
ID: pending.id,
|
||||
@@ -498,7 +403,6 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
|
||||
if strings.Contains(err.Error(), "nonce") {
|
||||
// This key can be deleted
|
||||
s.pendingTxs.Delete(key)
|
||||
atomic.AddInt64(&s.pendingNum, -1)
|
||||
// Try get receipt by the latest replaced tx hash
|
||||
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
|
||||
if (err == nil) && (receipt != nil) {
|
||||
@@ -540,14 +444,7 @@ func (s *Sender) loop(ctx context.Context) {
|
||||
log.Error("failed to get latest head", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
confirmed, err := utils.GetLatestConfirmedBlockNumber(s.ctx, s.client, s.config.Confirmations)
|
||||
if err != nil {
|
||||
log.Error("failed to get latest confirmed block number", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
s.checkPendingTransaction(header, confirmed)
|
||||
s.CheckPendingTransaction(header)
|
||||
case <-checkBalanceTicker.C:
|
||||
// Check and set balance.
|
||||
_ = s.auths.checkAndSetBalances(ctx)
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
cmap "github.com/orcaman/concurrent-map"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/docker"
|
||||
@@ -49,7 +48,6 @@ func TestSender(t *testing.T) {
|
||||
// Setup
|
||||
setupEnv(t)
|
||||
|
||||
t.Run("testLoadOrSendTx", testLoadOrSendTx)
|
||||
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
|
||||
t.Run("test 3 account sender", func(t *testing.T) { testBatchSender(t, 3) })
|
||||
t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
|
||||
@@ -60,38 +58,6 @@ func TestSender(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func testLoadOrSendTx(t *testing.T) {
|
||||
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
|
||||
senderCfg.Confirmations = 0
|
||||
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
newSender2, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
|
||||
id := "aaa"
|
||||
|
||||
hash, err := newSender.SendTransaction(id, &toAddr, big.NewInt(0), nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = newSender2.LoadOrSendTx(hash, id, &toAddr, big.NewInt(0), nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
select {
|
||||
case cfm := <-newSender2.ConfirmChan():
|
||||
assert.Equal(t, true, cfm.IsSuccessful)
|
||||
assert.Equal(t, hash, cfm.TxHash)
|
||||
assert.Equal(t, id, cfm.ID)
|
||||
case <-time.After(time.Second * 10):
|
||||
t.Error("testLoadOrSendTx test failed because of timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func testBatchSender(t *testing.T, batchSize int) {
|
||||
for len(privateKeys) < batchSize {
|
||||
priv, err := crypto.GenerateKey()
|
||||
@@ -102,7 +68,7 @@ func testBatchSender(t *testing.T, batchSize int) {
|
||||
}
|
||||
|
||||
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
|
||||
senderCfg.Confirmations = rpc.LatestBlockNumber
|
||||
senderCfg.Confirmations = 0
|
||||
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -121,7 +87,7 @@ func testBatchSender(t *testing.T, batchSize int) {
|
||||
for i := 0; i < TXBatch; i++ {
|
||||
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
|
||||
id := strconv.Itoa(i + index*1000)
|
||||
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
|
||||
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
|
||||
if errors.Is(err, sender.ErrNoAvailableAccount) {
|
||||
<-time.After(time.Second)
|
||||
continue
|
||||
|
||||
@@ -4,20 +4,18 @@ import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"math/big"
|
||||
"scroll-tech/common/docker"
|
||||
"testing"
|
||||
|
||||
"scroll-tech/bridge/config"
|
||||
"scroll-tech/bridge/mock_bridge"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/bridge/config"
|
||||
"scroll-tech/bridge/mock_bridge"
|
||||
|
||||
"scroll-tech/common/docker"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -65,10 +63,10 @@ func setupEnv(t *testing.T) {
|
||||
// Load config.
|
||||
cfg, err = config.NewConfig("../config.json")
|
||||
assert.NoError(t, err)
|
||||
cfg.L1Config.Confirmations = rpc.LatestBlockNumber
|
||||
cfg.L1Config.Confirmations = 0
|
||||
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
|
||||
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
|
||||
cfg.L2Config.Confirmations = rpc.LatestBlockNumber
|
||||
cfg.L2Config.Confirmations = 0
|
||||
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
|
||||
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
|
||||
|
||||
|
||||
@@ -3,20 +3,19 @@ package tests
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/migrate"
|
||||
"scroll-tech/database/orm"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"scroll-tech/bridge/l1"
|
||||
"scroll-tech/bridge/l2"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/migrate"
|
||||
"scroll-tech/database/orm"
|
||||
|
||||
"scroll-tech/bridge/l1"
|
||||
"scroll-tech/bridge/l2"
|
||||
)
|
||||
|
||||
func testRelayL2MessageSucceed(t *testing.T) {
|
||||
@@ -26,6 +25,9 @@ func testRelayL2MessageSucceed(t *testing.T) {
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(3)
|
||||
|
||||
prepareContracts(t)
|
||||
|
||||
// Create L2Relayer
|
||||
@@ -35,12 +37,11 @@ func testRelayL2MessageSucceed(t *testing.T) {
|
||||
defer l2Relayer.Stop()
|
||||
|
||||
// Create L2Watcher
|
||||
confirmations := rpc.LatestBlockNumber
|
||||
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)
|
||||
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, 0, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)
|
||||
|
||||
// Create L1Watcher
|
||||
l1Cfg := cfg.L1Config
|
||||
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
|
||||
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
|
||||
|
||||
// send message through l2 messenger contract
|
||||
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
|
||||
@@ -110,7 +111,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// process pending batch and check status
|
||||
l2Relayer.ProcessPendingBatches()
|
||||
l2Relayer.ProcessPendingBatches(&wg)
|
||||
status, err := db.GetRollupStatus(batchID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.RollupCommitting, status)
|
||||
@@ -131,7 +132,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
|
||||
assert.Equal(t, orm.RollupCommitted, status)
|
||||
|
||||
// process committed batch and check status
|
||||
l2Relayer.ProcessCommittedBatches()
|
||||
l2Relayer.ProcessCommittedBatches(&wg)
|
||||
status, err = db.GetRollupStatus(batchID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.RollupFinalizing, status)
|
||||
@@ -152,7 +153,7 @@ func testRelayL2MessageSucceed(t *testing.T) {
|
||||
assert.Equal(t, orm.RollupFinalized, status)
|
||||
|
||||
// process l2 messages
|
||||
l2Relayer.ProcessSavedEvents()
|
||||
l2Relayer.ProcessSavedEvents(&wg)
|
||||
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, msg.Status, orm.MsgSubmitted)
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/migrate"
|
||||
"scroll-tech/database/orm"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"scroll-tech/bridge/l1"
|
||||
@@ -34,7 +35,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
|
||||
// Create L1Watcher
|
||||
l1Cfg := cfg.L1Config
|
||||
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
|
||||
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
|
||||
|
||||
// add some blocks to db
|
||||
var traces []*types.BlockTrace
|
||||
@@ -78,8 +79,11 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
err = dbTx.Commit()
|
||||
assert.NoError(t, err)
|
||||
|
||||
var wg = sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
// process pending batch and check status
|
||||
l2Relayer.ProcessPendingBatches()
|
||||
l2Relayer.ProcessPendingBatches(&wg)
|
||||
wg.Wait()
|
||||
|
||||
status, err := db.GetRollupStatus(batchID)
|
||||
assert.NoError(t, err)
|
||||
@@ -108,8 +112,10 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
wg.Add(1)
|
||||
// process committed batch and check status
|
||||
l2Relayer.ProcessCommittedBatches()
|
||||
l2Relayer.ProcessCommittedBatches(&wg)
|
||||
wg.Wait()
|
||||
|
||||
status, err = db.GetRollupStatus(batchID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -1,56 +0,0 @@
package utils

import (
"context"
"fmt"
"math/big"

"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
)

type ethClient interface {
BlockNumber(ctx context.Context) (uint64, error)
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
}

// GetLatestConfirmedBlockNumber get confirmed block number by rpc.BlockNumber type.
func GetLatestConfirmedBlockNumber(ctx context.Context, client ethClient, confirm rpc.BlockNumber) (uint64, error) {
switch true {
case confirm == rpc.SafeBlockNumber || confirm == rpc.FinalizedBlockNumber:
var tag *big.Int
if confirm == rpc.FinalizedBlockNumber {
tag = big.NewInt(int64(rpc.FinalizedBlockNumber))
} else {
tag = big.NewInt(int64(rpc.SafeBlockNumber))
}

header, err := client.HeaderByNumber(ctx, tag)
if err != nil {
return 0, err
}
if !header.Number.IsInt64() {
return 0, fmt.Errorf("received invalid block confirm: %v", header.Number)
}
return header.Number.Uint64(), nil
case confirm == rpc.LatestBlockNumber:
number, err := client.BlockNumber(ctx)
if err != nil {
return 0, err
}
return number, nil
case confirm.Int64() >= 0: // If it's positive integer, consider it as a certain confirm value.
number, err := client.BlockNumber(ctx)
if err != nil {
return 0, err
}
cfmNum := uint64(confirm.Int64())

if number >= cfmNum {
return number - cfmNum, nil
}
return 0, nil
default:
return 0, fmt.Errorf("unknown confirmation type: %v", confirm)
}
}
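This compare drops the GetLatestConfirmedBlockNumber helper entirely; the watchers and the sender above now treat confirmations as a plain uint64 and clamp the subtraction at zero. A minimal, runnable sketch of that replacement arithmetic (the sample values mirror the deleted test below):

```go
package main

import "fmt"

// latestConfirmed returns the most recent block considered confirmed,
// clamping at zero when the chain is shorter than the confirmation depth.
func latestConfirmed(latest, confirmations uint64) uint64 {
	if latest >= confirmations {
		return latest - confirmations
	}
	return 0
}

func main() {
	fmt.Println(latestConfirmed(7, 6)) // 1
	fmt.Println(latestConfirmed(5, 6)) // 0
}
```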
@@ -1,107 +0,0 @@
|
||||
package utils_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common/math"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/bridge/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
tests = []struct {
|
||||
input string
|
||||
mustFail bool
|
||||
expected rpc.BlockNumber
|
||||
}{
|
||||
{`"0x"`, true, rpc.BlockNumber(0)},
|
||||
{`"0x0"`, false, rpc.BlockNumber(0)},
|
||||
{`"0X1"`, false, rpc.BlockNumber(1)},
|
||||
{`"0x00"`, true, rpc.BlockNumber(0)},
|
||||
{`"0x01"`, true, rpc.BlockNumber(0)},
|
||||
{`"0x1"`, false, rpc.BlockNumber(1)},
|
||||
{`"0x12"`, false, rpc.BlockNumber(18)},
|
||||
{`"0x7fffffffffffffff"`, false, rpc.BlockNumber(math.MaxInt64)},
|
||||
{`"0x8000000000000000"`, true, rpc.BlockNumber(0)},
|
||||
{"0", true, rpc.BlockNumber(0)},
|
||||
{`"ff"`, true, rpc.BlockNumber(0)},
|
||||
{`"safe"`, false, rpc.SafeBlockNumber},
|
||||
{`"finalized"`, false, rpc.FinalizedBlockNumber},
|
||||
{`"pending"`, false, rpc.PendingBlockNumber},
|
||||
{`"latest"`, false, rpc.LatestBlockNumber},
|
||||
{`"earliest"`, false, rpc.EarliestBlockNumber},
|
||||
{`someString`, true, rpc.BlockNumber(0)},
|
||||
{`""`, true, rpc.BlockNumber(0)},
|
||||
{``, true, rpc.BlockNumber(0)},
|
||||
}
|
||||
)
|
||||
|
||||
func TestUnmarshalJSON(t *testing.T) {
|
||||
for i, test := range tests {
|
||||
var num rpc.BlockNumber
|
||||
err := json.Unmarshal([]byte(test.input), &num)
|
||||
if test.mustFail && err == nil {
|
||||
t.Errorf("Test %d should fail", i)
|
||||
continue
|
||||
}
|
||||
if !test.mustFail && err != nil {
|
||||
t.Errorf("Test %d should pass but got err: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if num != test.expected {
|
||||
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalJSON(t *testing.T) {
|
||||
for i, test := range tests {
|
||||
var num rpc.BlockNumber
|
||||
want, err := json.Marshal(test.expected)
|
||||
assert.Nil(t, err)
|
||||
if !test.mustFail {
|
||||
err = json.Unmarshal([]byte(test.input), &num)
|
||||
assert.Nil(t, err)
|
||||
got, err := json.Marshal(&num)
|
||||
assert.Nil(t, err)
|
||||
if string(want) != string(got) {
|
||||
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type MockEthClient struct {
|
||||
val uint64
|
||||
}
|
||||
|
||||
func (e MockEthClient) BlockNumber(ctx context.Context) (uint64, error) {
|
||||
return e.val, nil
|
||||
}
|
||||
|
||||
func (e MockEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
|
||||
return &types.Header{Number: new(big.Int).SetUint64(e.val)}, nil
|
||||
}
|
||||
|
||||
func TestGetLatestConfirmedBlockNumber(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := MockEthClient{}
|
||||
|
||||
client.val = 5
|
||||
confirmed, err := utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, uint64(0), confirmed)
|
||||
|
||||
client.val = 7
|
||||
confirmed, err = utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, uint64(1), confirmed)
|
||||
}
|
||||
@@ -32,15 +32,12 @@ RUN go mod download -x
|
||||
FROM base as builder
|
||||
COPY . .
|
||||
RUN cp -r ./common/libzkp/interface ./coordinator/verifier/lib
|
||||
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/verifier/lib/
|
||||
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv verifier/lib /bin/
|
||||
COPY --from=zkp-builder /app/target/release/libzkp.a ./coordinator/verifier/lib/
|
||||
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd
|
||||
|
||||
# Pull coordinator into a second stage deploy alpine container
|
||||
FROM ubuntu:20.04
|
||||
|
||||
RUN mkdir -p /src/coordinator/verifier/lib
|
||||
COPY --from=builder /bin/lib /src/coordinator/verifier/lib
|
||||
COPY --from=builder /bin/coordinator /bin/
|
||||
|
||||
|
||||
ENTRYPOINT ["/bin/coordinator"]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
GO_VERSION := 1.18
|
||||
PYTHON_VERSION := 3.10
|
||||
RUST_VERSION := nightly-2022-12-10
|
||||
RUST_VERSION := nightly-2022-08-23
|
||||
|
||||
.PHONY: all go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder py-runner
|
||||
|
||||
|
||||
@@ -4,4 +4,4 @@ FROM golang:1.18-alpine
|
||||
|
||||
# RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
|
||||
|
||||
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates openssl-dev
|
||||
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
FROM golang:1.18-alpine
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10
|
||||
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
|
||||
|
||||
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates openssl-dev
|
||||
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates
|
||||
|
||||
# RUN apk add --no-cache libc6-compat
|
||||
# RUN apk add --no-cache gcompat
|
||||
|
||||
@@ -14,7 +14,7 @@ ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
ENV CARGO_HOME=/root/.cargo
|
||||
|
||||
# Add Toolchain
|
||||
RUN rustup toolchain install nightly-2022-12-10
|
||||
RUN rustup toolchain install nightly-2022-08-23
|
||||
|
||||
# TODO: make this ARG
|
||||
ENV CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
ARG ALPINE_VERSION=3.15
|
||||
FROM alpine:${ALPINE_VERSION}
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10
|
||||
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
|
||||
|
||||
RUN apk add --no-cache \
|
||||
ca-certificates \
|
||||
openssl-dev \
|
||||
gcc \
|
||||
git \
|
||||
musl-dev
|
||||
|
||||
@@ -13,4 +13,4 @@ RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
|
||||
# Add Toolchain
|
||||
RUN rustup toolchain install nightly-2022-12-10
|
||||
RUN rustup toolchain install nightly-2022-08-23
|
||||
|
||||
@@ -11,6 +11,6 @@ if [ ! -n "${IPC_PATH}" ];then
|
||||
IPC_PATH="/tmp/l1geth_path.ipc"
|
||||
fi
|
||||
|
||||
exec geth --mine --datadir "." --unlock 0 --miner.etherbase "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63" --password "./password" --allow-insecure-unlock --nodiscover \
|
||||
exec geth --mine --datadir "." --unlock 0 --password "./password" --allow-insecure-unlock --nodiscover \
|
||||
--http --http.addr "0.0.0.0" --http.port 8545 --ws --ws.addr "0.0.0.0" --ws.port 8546 --ipcpath ${IPC_PATH}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM scrolltech/l2geth:prealpha-v5.1
|
||||
FROM scrolltech/l2geth:prealpha-v4.2
|
||||
|
||||
RUN mkdir -p /l2geth/keystore
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ require (
|
||||
github.com/mattn/go-colorable v0.1.8
|
||||
github.com/mattn/go-isatty v0.0.14
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/urfave/cli/v2 v2.10.2
|
||||
)
|
||||
@@ -67,7 +67,7 @@ require (
|
||||
github.com/rogpeppe/go-internal v1.8.1 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.4.3 // indirect
|
||||
github.com/scroll-tech/zktrie v0.3.1 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
|
||||
@@ -77,12 +77,12 @@ require (
|
||||
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.6.0 // indirect
|
||||
golang.org/x/crypto v0.5.0 // indirect
|
||||
golang.org/x/mod v0.7.0 // indirect
|
||||
golang.org/x/net v0.6.0 // indirect
|
||||
golang.org/x/net v0.5.0 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
golang.org/x/text v0.7.0 // indirect
|
||||
golang.org/x/sys v0.4.0 // indirect
|
||||
golang.org/x/text v0.6.0 // indirect
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
|
||||
golang.org/x/tools v0.3.0 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
|
||||
@@ -404,10 +404,11 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
|
||||
github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
|
||||
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
|
||||
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
@@ -481,8 +482,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
|
||||
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -540,8 +541,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
|
||||
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -605,8 +606,8 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -618,8 +619,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
|
||||
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
||||
@@ -5,7 +5,7 @@ edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
[lib]
|
||||
crate-type = ["dylib"]
|
||||
crate-type = ["staticlib"]
|
||||
|
||||
[dependencies]
|
||||
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
|
||||
|
||||
@@ -1,53 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/metrics/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
// Serve starts the metrics server on the given address, will be closed when the given
|
||||
// context is canceled.
|
||||
func Serve(ctx context.Context, c *cli.Context) {
|
||||
if !c.Bool(utils.MetricsEnabled.Name) {
|
||||
return
|
||||
}
|
||||
|
||||
address := net.JoinHostPort(
|
||||
c.String(utils.MetricsAddr.Name),
|
||||
strconv.Itoa(c.Int(utils.MetricsPort.Name)),
|
||||
)
|
||||
|
||||
server := &http.Server{
|
||||
Addr: address,
|
||||
Handler: prometheus.Handler(metrics.DefaultRegistry),
|
||||
ReadTimeout: rpc.DefaultHTTPTimeouts.ReadTimeout,
|
||||
WriteTimeout: rpc.DefaultHTTPTimeouts.WriteTimeout,
|
||||
IdleTimeout: rpc.DefaultHTTPTimeouts.IdleTimeout,
|
||||
}
|
||||
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
if err := server.Close(); err != nil {
|
||||
log.Error("Failed to close metrics server", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Info("Starting metrics server", "address", address)
|
||||
|
||||
go func() {
|
||||
if err := server.ListenAndServe(); err != nil {
|
||||
log.Error("start metrics server error", "error", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
@@ -12,9 +12,6 @@ var (
|
||||
&LogFileFlag,
|
||||
&LogJSONFormat,
|
||||
&LogDebugFlag,
|
||||
&MetricsEnabled,
|
||||
&MetricsAddr,
|
||||
&MetricsPort,
|
||||
}
|
||||
// ConfigFileFlag load json type config file.
|
||||
ConfigFileFlag = cli.StringFlag{
|
||||
@@ -45,25 +42,4 @@ var (
|
||||
Name: "log.debug",
|
||||
Usage: "Prepends log messages with call-site location (file and line number)",
|
||||
}
|
||||
// MetricsEnabled enable metrics collection and reporting
|
||||
MetricsEnabled = cli.BoolFlag{
|
||||
Name: "metrics",
|
||||
Usage: "Enable metrics collection and reporting",
|
||||
Category: "METRICS",
|
||||
Value: false,
|
||||
}
|
||||
// MetricsAddr is listening address of Metrics reporting server
|
||||
MetricsAddr = cli.StringFlag{
|
||||
Name: "metrics.addr",
|
||||
Usage: "Metrics reporting server listening address",
|
||||
Category: "METRICS",
|
||||
Value: "0.0.0.0",
|
||||
}
|
||||
// MetricsPort is listening port of Metrics reporting server
|
||||
MetricsPort = cli.IntFlag{
|
||||
Name: "metrics.port",
|
||||
Usage: "Metrics reporting server listening port",
|
||||
Category: "METRICS",
|
||||
Value: 6060,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -38,14 +38,10 @@ func StartHTTPEndpoint(endpoint string, apis []rpc.API) (*http.Server, net.Addr,
|
||||
}
|
||||
|
||||
// StartWSEndpoint starts the WS RPC endpoint.
|
||||
func StartWSEndpoint(endpoint string, apis []rpc.API, compressionLevel int) (*http.Server, net.Addr, error) {
|
||||
func StartWSEndpoint(endpoint string, apis []rpc.API) (*http.Server, net.Addr, error) {
|
||||
handler, addr, err := StartHTTPEndpoint(endpoint, apis)
|
||||
if err == nil {
|
||||
srv := (handler.Handler).(*rpc.Server)
|
||||
err = srv.SetCompressionLevel(compressionLevel)
|
||||
if err != nil {
|
||||
log.Error("failed to set ws compression level", "compression level", compressionLevel, "err", err)
|
||||
}
|
||||
handler.Handler = srv.WebsocketHandler(nil)
|
||||
}
|
||||
return handler, addr, err
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
@@ -60,7 +59,7 @@ func TestStartWSEndpoint(t *testing.T) {
|
||||
Namespace: "test",
|
||||
Service: new(testService),
|
||||
},
|
||||
}, flate.NoCompression)
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
defer handler.Shutdown(context.Background())
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ import "time"

// TryTimes try run several times until the function return true.
func TryTimes(times int, run func() bool) {
for i := 0; times == -1 || i < times; i++ {
for i := 0; i < times; i++ {
if run() {
return
}
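With the `times == -1` retry-forever mode removed in this hunk, callers have to pass a finite attempt count. A small usage sketch, assuming this is the TryTimes exported from scroll-tech/common/utils:

```go
package main

import (
	"fmt"

	"scroll-tech/common/utils"
)

func main() {
	attempt := 0
	// retry up to 5 times; the callback returns true once the work succeeds
	utils.TryTimes(5, func() bool {
		attempt++
		fmt.Println("attempt", attempt)
		return attempt == 3
	})
}
```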
@@ -5,7 +5,7 @@ import (
"runtime/debug"
)

var tag = "prealpha-v13.2"
var tag = "prealpha-v11.14"

var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
Submodule contracts/lib/forge-std updated: 662ae0d693...cb69e9c07f
@@ -13,34 +13,17 @@ import { L2ERC721Gateway } from "../../src/L2/gateways/L2ERC721Gateway.sol";
|
||||
import { L2GatewayRouter } from "../../src/L2/gateways/L2GatewayRouter.sol";
|
||||
import { L2ScrollMessenger } from "../../src/L2/L2ScrollMessenger.sol";
|
||||
import { L2StandardERC20Gateway } from "../../src/L2/gateways/L2StandardERC20Gateway.sol";
|
||||
import { L2TxFeeVault } from "../../src/L2/predeploys/L2TxFeeVault.sol";
|
||||
import { Whitelist } from "../../src/L2/predeploys/Whitelist.sol";
|
||||
import { ScrollStandardERC20 } from "../../src/libraries/token/ScrollStandardERC20.sol";
|
||||
import { ScrollStandardERC20Factory } from "../../src/libraries/token/ScrollStandardERC20Factory.sol";
|
||||
|
||||
contract DeployL2BridgeContracts is Script {
|
||||
uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
|
||||
address L1_TX_FEE_RECIPIENT_ADDR = vm.envAddress("L1_TX_FEE_RECIPIENT_ADDR");
|
||||
|
||||
L2ScrollMessenger messenger;
|
||||
ProxyAdmin proxyAdmin;
|
||||
|
||||
address L2_SCROLL_MESSENGER_PREDEPLOY_ADDR = vm.envOr("L2_SCROLL_MESSENGER_PREDEPLOY_ADDR", address(0));
|
||||
address L2_TX_FEE_VAULT_PREDEPLOY_ADDR = vm.envOr("L2_TX_FEE_VAULT_PREDEPLOY_ADDR", address(0));
|
||||
address L2_PROXY_ADMIN_PREDEPLOY_ADDR = vm.envOr("L2_PROXY_ADMIN_PREDEPLOY_ADDR", address(0));
|
||||
address L2_STANDARD_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_STANDARD_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR", address(0));
|
||||
address L2_GATEWAY_ROUTER_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_GATEWAY_ROUTER_PROXY_PREDEPLOY_ADDR", address(0));
|
||||
address L2_SCROLL_STANDARD_ERC20_FACTORY_PREDEPLOY_ADDR = vm.envOr("L2_SCROLL_STANDARD_ERC20_FACTORY_PREDEPLOY_ADDR", address(0));
|
||||
address L2_CUSTOM_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_CUSTOM_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR", address(0));
|
||||
address L2_ERC721_GATEWAY_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_ERC721_GATEWAY_PROXY_PREDEPLOY_ADDR", address(0));
|
||||
address L2_ERC1155_GATEWAY_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_ERC1155_GATEWAY_PROXY_PREDEPLOY_ADDR", address(0));
|
||||
address L2_WHITELIST_PREDEPLOY_ADDR = vm.envOr("L2_WHITELIST_PREDEPLOY_ADDR", address(0));
|
||||
|
||||
function run() external {
|
||||
vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
|
||||
|
||||
deployL2ScrollMessenger();
|
||||
deployTxFeeVault();
|
||||
deployProxyAdmin();
|
||||
deployL2StandardERC20Gateway();
|
||||
deployL2GatewayRouter();
|
||||
@@ -48,51 +31,24 @@ contract DeployL2BridgeContracts is Script {
|
||||
deployL2CustomERC20Gateway();
|
||||
deployL2ERC721Gateway();
|
||||
deployL2ERC1155Gateway();
|
||||
deployL2Whitelist();
|
||||
|
||||
vm.stopBroadcast();
|
||||
}
|
||||
|
||||
function deployL2ScrollMessenger() internal {
|
||||
if (L2_SCROLL_MESSENGER_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_SCROLL_MESSENGER_ADDR", address(L2_SCROLL_MESSENGER_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
address owner = vm.addr(L2_DEPLOYER_PRIVATE_KEY);
|
||||
messenger = new L2ScrollMessenger(owner);
|
||||
L2ScrollMessenger l2ScrollMessenger = new L2ScrollMessenger(owner);
|
||||
|
||||
logAddress("L2_SCROLL_MESSENGER_ADDR", address(messenger));
|
||||
}
|
||||
|
||||
function deployTxFeeVault() internal {
|
||||
if (L2_TX_FEE_VAULT_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_TX_FEE_VAULT_ADDR", address(L2_TX_FEE_VAULT_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
L2TxFeeVault feeVault = new L2TxFeeVault(address(messenger), L1_TX_FEE_RECIPIENT_ADDR);
|
||||
|
||||
logAddress("L2_TX_FEE_VAULT_ADDR", address(feeVault));
|
||||
logAddress("L2_SCROLL_MESSENGER_ADDR", address(l2ScrollMessenger));
|
||||
}
|
||||
|
||||
function deployProxyAdmin() internal {
|
||||
if (L2_PROXY_ADMIN_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_PROXY_ADMIN_ADDR", address(L2_PROXY_ADMIN_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
proxyAdmin = new ProxyAdmin();
|
||||
|
||||
logAddress("L2_PROXY_ADMIN_ADDR", address(proxyAdmin));
|
||||
}
|
||||
|
||||
function deployL2StandardERC20Gateway() internal {
|
||||
if (L2_STANDARD_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR", address(L2_STANDARD_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
L2StandardERC20Gateway impl = new L2StandardERC20Gateway();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
|
||||
|
||||
@@ -101,11 +57,6 @@ contract DeployL2BridgeContracts is Script {
|
||||
}
|
||||
|
||||
function deployL2GatewayRouter() internal {
|
||||
if (L2_GATEWAY_ROUTER_PROXY_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_GATEWAY_ROUTER_PROXY_ADDR", address(L2_GATEWAY_ROUTER_PROXY_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
L2GatewayRouter impl = new L2GatewayRouter();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
|
||||
|
||||
@@ -114,11 +65,6 @@ contract DeployL2BridgeContracts is Script {
|
||||
}
|
||||
|
||||
function deployScrollStandardERC20Factory() internal {
|
||||
if (L2_SCROLL_STANDARD_ERC20_FACTORY_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR", address(L2_SCROLL_STANDARD_ERC20_FACTORY_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
ScrollStandardERC20 tokenImpl = new ScrollStandardERC20();
|
||||
ScrollStandardERC20Factory scrollStandardERC20Factory = new ScrollStandardERC20Factory(address(tokenImpl));
|
||||
|
||||
@@ -127,11 +73,6 @@ contract DeployL2BridgeContracts is Script {
|
||||
}
|
||||
|
||||
function deployL2CustomERC20Gateway() internal {
|
||||
if (L2_CUSTOM_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR", address(L2_CUSTOM_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
L2CustomERC20Gateway impl = new L2CustomERC20Gateway();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
|
||||
|
||||
@@ -140,11 +81,6 @@ contract DeployL2BridgeContracts is Script {
|
||||
}
|
||||
|
||||
function deployL2ERC721Gateway() internal {
|
||||
if (L2_ERC721_GATEWAY_PROXY_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_ERC721_GATEWAY_PROXY_ADDR", address(L2_ERC721_GATEWAY_PROXY_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
L2ERC721Gateway impl = new L2ERC721Gateway();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
|
||||
|
||||
@@ -153,11 +89,6 @@ contract DeployL2BridgeContracts is Script {
|
||||
}
|
||||
|
||||
function deployL2ERC1155Gateway() internal {
|
||||
if (L2_ERC1155_GATEWAY_PROXY_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_ERC1155_GATEWAY_PROXY_ADDR", address(L2_ERC1155_GATEWAY_PROXY_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
L2ERC1155Gateway impl = new L2ERC1155Gateway();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
|
||||
|
||||
@@ -165,18 +96,6 @@ contract DeployL2BridgeContracts is Script {
|
||||
logAddress("L2_ERC1155_GATEWAY_PROXY_ADDR", address(proxy));
|
||||
}
|
||||
|
||||
function deployL2Whitelist() internal {
|
||||
if (L2_WHITELIST_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_WHITELIST_ADDR", address(L2_WHITELIST_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
address owner = vm.addr(L2_DEPLOYER_PRIVATE_KEY);
|
||||
Whitelist whitelist = new Whitelist(owner);
|
||||
|
||||
logAddress("L2_WHITELIST_ADDR", address(whitelist));
|
||||
}
|
||||
|
||||
function logAddress(string memory name, address addr) internal {
|
||||
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
|
||||
}
|
||||
|
||||
@@ -3,13 +3,11 @@ pragma solidity ^0.8.10;
|
||||
|
||||
import { Script } from "forge-std/Script.sol";
|
||||
|
||||
import { L2ScrollMessenger } from "../../src/L2/L2ScrollMessenger.sol";
|
||||
import { L2CustomERC20Gateway } from "../../src/L2/gateways/L2CustomERC20Gateway.sol";
|
||||
import { L2ERC1155Gateway } from "../../src/L2/gateways/L2ERC1155Gateway.sol";
|
||||
import { L2ERC721Gateway } from "../../src/L2/gateways/L2ERC721Gateway.sol";
|
||||
import { L2GatewayRouter } from "../../src/L2/gateways/L2GatewayRouter.sol";
|
||||
import { L2StandardERC20Gateway } from "../../src/L2/gateways/L2StandardERC20Gateway.sol";
|
||||
import { Whitelist } from "../../src/L2/predeploys/Whitelist.sol";
|
||||
import { ScrollStandardERC20Factory } from "../../src/libraries/token/ScrollStandardERC20Factory.sol";
|
||||
|
||||
contract InitializeL2BridgeContracts is Script {
|
||||
@@ -22,14 +20,12 @@ contract InitializeL2BridgeContracts is Script {
|
||||
address L1_ERC1155_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ERC1155_GATEWAY_PROXY_ADDR");
|
||||
|
||||
address L2_SCROLL_MESSENGER_ADDR = vm.envAddress("L2_SCROLL_MESSENGER_ADDR");
|
||||
address L2_TX_FEE_VAULT_ADDR = vm.envAddress("L2_TX_FEE_VAULT_ADDR");
|
||||
address L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
|
||||
address L2_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L2_GATEWAY_ROUTER_PROXY_ADDR");
|
||||
address L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR = vm.envAddress("L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR");
|
||||
address L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR");
|
||||
address L2_ERC721_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ERC721_GATEWAY_PROXY_ADDR");
|
||||
address L2_ERC1155_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ERC1155_GATEWAY_PROXY_ADDR");
|
||||
address L2_WHITELIST_ADDR = vm.envAddress("L2_WHITELIST_ADDR");
|
||||
|
||||
function run() external {
|
||||
vm.startBroadcast(deployerPrivateKey);
|
||||
@@ -73,21 +69,6 @@ contract InitializeL2BridgeContracts is Script {
|
||||
L2_SCROLL_MESSENGER_ADDR
|
||||
);
|
||||
|
||||
// whitelist contracts which can call sendMessage
|
||||
{
|
||||
address[] memory gateways = new address[](6);
|
||||
gateways[0] = L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR;
|
||||
gateways[1] = L2_GATEWAY_ROUTER_PROXY_ADDR;
|
||||
gateways[2] = L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR;
|
||||
gateways[3] = L2_ERC1155_GATEWAY_PROXY_ADDR;
|
||||
gateways[4] = L2_ERC721_GATEWAY_PROXY_ADDR;
|
||||
gateways[5] = L2_TX_FEE_VAULT_ADDR;
|
||||
Whitelist(L2_WHITELIST_ADDR).updateWhitelistStatus(gateways, true);
|
||||
}
|
||||
|
||||
// update whitelist contract for messenger
|
||||
L2ScrollMessenger(payable(L2_SCROLL_MESSENGER_ADDR)).updateWhitelist(L2_WHITELIST_ADDR);
|
||||
|
||||
vm.stopBroadcast();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,10 +86,12 @@ contract L2ScrollMessenger is ScrollMessengerBase, OwnableBase, IL2ScrollMesseng
        uint256 _deadline,
        uint256 _nonce,
        bytes memory _message
    ) external override onlyWhitelistedSender(msg.sender) {
    ) external override {
        // anti reentrance
        require(xDomainMessageSender == ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER, "already in execution");

        // @todo only privileged accounts can call

        // solhint-disable-next-line not-rely-on-time
        require(_deadline >= block.timestamp, "Message expired");

@@ -1,14 +0,0 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import { FeeVault } from "../../libraries/FeeVault.sol";
|
||||
|
||||
/// @title L2TxFeeVault
|
||||
/// @notice The `L2TxFeeVault` contract collects all L2 transaction fees and allows withdrawing these fees to a predefined L1 address.
|
||||
/// The minimum withdrawal amount is 10 ether.
|
||||
contract L2TxFeeVault is FeeVault {
|
||||
/// @param _messenger The address of L2ScrollMessenger.
|
||||
/// @param _recipient The fee recipient address on L1.
|
||||
constructor(address _messenger, address _recipient) FeeVault(_messenger, _recipient, 10 ether) {}
|
||||
}
|
||||
@@ -1,108 +0,0 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
// MIT License
|
||||
|
||||
// Copyright (c) 2022 Optimism
|
||||
// Copyright (c) 2022 Scroll
|
||||
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import { IL2ScrollMessenger } from "../L2/IL2ScrollMessenger.sol";
|
||||
|
||||
/**
|
||||
* @title FeeVault
|
||||
* @notice The FeeVault contract contains the basic logic for the various different vault contracts
|
||||
* used to hold fee revenue generated by the L2 system.
|
||||
*/
|
||||
abstract contract FeeVault {
|
||||
/**
|
||||
* @notice Emits each time that a withdrawal occurs.
|
||||
*
|
||||
* @param value Amount that was withdrawn (in wei).
|
||||
* @param to Address that the funds were sent to.
|
||||
* @param from Address that triggered the withdrawal.
|
||||
*/
|
||||
event Withdrawal(uint256 value, address to, address from);
|
||||
|
||||
/**
|
||||
* @notice Minimum balance before a withdrawal can be triggered.
|
||||
*/
|
||||
uint256 public MIN_WITHDRAWAL_AMOUNT;
|
||||
|
||||
/**
|
||||
* @notice Scroll L2 messenger address.
|
||||
*/
|
||||
address public MESSENGER;
|
||||
|
||||
/**
|
||||
* @notice Wallet that will receive the fees on L1.
|
||||
*/
|
||||
address public RECIPIENT;
|
||||
|
||||
/**
|
||||
* @notice Total amount of wei processed by the contract.
|
||||
*/
|
||||
uint256 public totalProcessed;
|
||||
|
||||
/**
|
||||
* @param _recipient Wallet that will receive the fees on L1.
|
||||
* @param _minWithdrawalAmount Minimum balance before a withdrawal can be triggered.
|
||||
*/
|
||||
constructor(
|
||||
address _messenger,
|
||||
address _recipient,
|
||||
uint256 _minWithdrawalAmount
|
||||
) {
|
||||
MIN_WITHDRAWAL_AMOUNT = _minWithdrawalAmount;
|
||||
MESSENGER = _messenger;
|
||||
RECIPIENT = _recipient;
|
||||
}
|
||||
|
||||
/**
|
||||
* @notice Allow the contract to receive ETH.
|
||||
*/
|
||||
receive() external payable {}
|
||||
|
||||
/**
|
||||
* @notice Triggers a withdrawal of funds to the L1 fee wallet.
|
||||
*/
|
||||
function withdraw() external {
|
||||
uint256 value = address(this).balance;
|
||||
|
||||
require(
|
||||
value >= MIN_WITHDRAWAL_AMOUNT,
|
||||
"FeeVault: withdrawal amount must be greater than minimum withdrawal amount"
|
||||
);
|
||||
|
||||
unchecked {
|
||||
totalProcessed += value;
|
||||
}
|
||||
|
||||
emit Withdrawal(value, RECIPIENT, msg.sender);
|
||||
|
||||
IL2ScrollMessenger(MESSENGER).sendMessage{ value: value }(
|
||||
RECIPIENT,
|
||||
0, // no fee provided
|
||||
bytes(""), // no message (simple eth transfer)
|
||||
0 // _gasLimit is not used for eth transfers
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import { DSTestPlus } from "solmate/test/utils/DSTestPlus.sol";
|
||||
|
||||
import { MockScrollMessenger } from "./mocks/MockScrollMessenger.sol";
|
||||
import { L2TxFeeVault } from "../L2/predeploys/L2TxFeeVault.sol";
|
||||
|
||||
contract L2TxFeeVaultTest is DSTestPlus {
|
||||
MockScrollMessenger private messenger;
|
||||
L2TxFeeVault private vault;
|
||||
|
||||
function setUp() public {
|
||||
messenger = new MockScrollMessenger();
|
||||
vault = new L2TxFeeVault(address(messenger), address(1));
|
||||
}
|
||||
|
||||
function testCantWithdrawBelowMinimum() public {
|
||||
hevm.deal(address(vault), 9 ether);
|
||||
hevm.expectRevert("FeeVault: withdrawal amount must be greater than minimum withdrawal amount");
|
||||
vault.withdraw();
|
||||
}
|
||||
|
||||
function testWithdrawOnce() public {
|
||||
hevm.deal(address(vault), 11 ether);
|
||||
vault.withdraw();
|
||||
assertEq(address(messenger).balance, 11 ether);
|
||||
assertEq(vault.totalProcessed(), 11 ether);
|
||||
}
|
||||
|
||||
function testWithdrawTwice() public {
|
||||
hevm.deal(address(vault), 11 ether);
|
||||
vault.withdraw();
|
||||
assertEq(address(messenger).balance, 11 ether);
|
||||
assertEq(vault.totalProcessed(), 11 ether);
|
||||
|
||||
hevm.deal(address(vault), 22 ether);
|
||||
vault.withdraw();
|
||||
assertEq(address(messenger).balance, 33 ether);
|
||||
assertEq(vault.totalProcessed(), 33 ether);
|
||||
}
|
||||
}
|
||||
@@ -14,7 +14,7 @@ test:
	go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...

libzkp:
	cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.so ../interface/
	cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
	rm -rf ./verifier/lib && cp -r ../common/libzkp/interface ./verifier/lib

coordinator: libzkp ## Builds the Coordinator instance.

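For context: the libzkp rule above copies either the shared object (libzkp.so) or the static archive (libzkp.a) into the interface directory, and the Go side links whichever artifact is present through a cgo directive; the verifier hunk later in this diff shows the actual lines. A minimal sketch of that preamble, assuming the lib/ layout produced by the rule above:

package verifier

/*
#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl
#include <stdlib.h>
#include "./lib/libzkp.h"
*/
import "C"
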
File diff suppressed because one or more lines are too long
@@ -103,7 +103,7 @@ func action(ctx *cli.Context) error {
		"%s:%d",
		ctx.String(wsListenAddrFlag.Name),
		ctx.Int(wsPortFlag.Name)),
		apis, cfg.RollerManagerConfig.CompressionLevel)
		apis)
	if err != nil {
		log.Crit("Could not start WS api", "error", err)
	}

@@ -1,7 +1,7 @@
{
  "roller_manager_config": {
    "compression_level": 9,
    "rollers_per_session": 1,
    "verifier_endpoint": "/tmp/verifier.sock",
    "collection_time": 180,
    "token_time_to_live": 60,
    "verifier": {

@@ -12,7 +12,6 @@ import (

// RollerManagerConfig loads sequencer configuration items.
type RollerManagerConfig struct {
	CompressionLevel int `json:"compression_level,omitempty"`
	// asc or desc (default: asc)
	OrderSession string `json:"order_session,omitempty"`
	// The amount of rollers to pick per proof generation session.

@@ -5,7 +5,7 @@ go 1.18
|
||||
require (
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/urfave/cli/v2 v2.10.2
|
||||
golang.org/x/sync v0.1.0
|
||||
@@ -29,14 +29,14 @@ require (
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.4.3 // indirect
|
||||
github.com/scroll-tech/zktrie v0.3.1 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.10 // indirect
|
||||
github.com/tklauser/numcpus v0.4.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.6.0 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
golang.org/x/crypto v0.5.0 // indirect
|
||||
golang.org/x/sys v0.4.0 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
@@ -349,10 +349,11 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
|
||||
github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
|
||||
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
|
||||
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
@@ -423,8 +424,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
|
||||
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -539,8 +540,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -552,7 +553,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
||||
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
||||
@@ -1,7 +1,6 @@
package coordinator_test

import (
	"compress/flate"
	"context"
	"crypto/ecdsa"
	"crypto/rand"
@@ -491,7 +490,7 @@ func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, rollersPerSession
	assert.NoError(t, rollerManager.Start())

	// start ws service
	handler, _, err = utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs(), flate.NoCompression)
	handler, _, err = utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs())
	assert.NoError(t, err)

	return rollerManager, handler

@@ -3,8 +3,8 @@
package verifier

/*
#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.so -lm -ldl
#cgo gpu LDFLAGS: ${SRCDIR}/lib/libzkp.so -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart
#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl
#cgo gpu LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart
#include <stdlib.h>
#include "./lib/libzkp.h"
*/

@@ -16,23 +16,17 @@ import (
	"github.com/stretchr/testify/assert"
)

const (
	paramsPath = "../assets/test_params"
	aggVkPath = "../assets/agg_vk"
	proofPath = "../assets/agg_proof"
)

func TestFFI(t *testing.T) {
	as := assert.New(t)
	cfg := &config.VerifierConfig{
		MockMode: false,
		ParamsPath: paramsPath,
		AggVkPath: aggVkPath,
		ParamsPath: "../assets/test_params",
		AggVkPath: "../assets/agg_vk",
	}
	v, err := verifier.NewVerifier(cfg)
	as.NoError(err)

	f, err := os.Open(proofPath)
	f, err := os.Open("../assets/agg_proof")
	as.NoError(err)
	byt, err := io.ReadAll(f)
	as.NoError(err)

@@ -7,7 +7,7 @@ require (
|
||||
github.com/lib/pq v1.10.6
|
||||
github.com/mattn/go-sqlite3 v1.14.14
|
||||
github.com/pressly/goose/v3 v3.7.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/urfave/cli/v2 v2.10.2
|
||||
)
|
||||
@@ -24,12 +24,12 @@ require (
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.4.3 // indirect
|
||||
github.com/scroll-tech/zktrie v0.3.1 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.6.0 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
golang.org/x/crypto v0.5.0 // indirect
|
||||
golang.org/x/sys v0.4.0 // indirect
|
||||
golang.org/x/tools v0.3.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
@@ -339,10 +339,11 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
|
||||
github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
|
||||
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
|
||||
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
@@ -411,8 +412,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
|
||||
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -525,8 +526,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
|
||||
@@ -236,29 +236,6 @@ func (o *blockBatchOrm) BatchRecordExist(id string) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) GetBatchesByRollupStatus(status RollupStatus, limit uint64) ([]string, error) {
|
||||
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, status, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ids []string
|
||||
for rows.Next() {
|
||||
var id string
|
||||
if err = rows.Scan(&id); err != nil {
|
||||
break
|
||||
}
|
||||
ids = append(ids, id)
|
||||
}
|
||||
if len(ids) == 0 || errors.Is(err, sql.ErrNoRows) {
|
||||
// log.Warn("no pending batches in db", "err", err)
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ids, rows.Close()
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) GetPendingBatches(limit uint64) ([]string, error) {
|
||||
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, RollupPending, limit)
|
||||
if err != nil {
|
||||
|
||||
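For context, the hunk above covers the batch queries on block_batch; a hypothetical caller sketch built only on the GetPendingBatches helper shown here (the function name commitPendingBatches, the orm import path, and the limit of 10 are illustrative assumptions, not taken from this diff):

// Sketch: poll up to 10 batch IDs that are still pending rollup and hand
// each one to a commit routine (omitted here).
func commitPendingBatches(o orm.BlockBatchOrm) error {
	ids, err := o.GetPendingBatches(10)
	if err != nil {
		return err
	}
	for _, id := range ids {
		_ = id // submit batch `id` to L1 ...
	}
	return nil
}
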
@@ -46,7 +46,6 @@ type L1Message struct {
|
||||
Target string `json:"target" db:"target"`
|
||||
Calldata string `json:"calldata" db:"calldata"`
|
||||
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
|
||||
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
|
||||
Status MsgStatus `json:"status" db:"status"`
|
||||
}
|
||||
|
||||
@@ -63,7 +62,6 @@ type L2Message struct {
|
||||
Target string `json:"target" db:"target"`
|
||||
Calldata string `json:"calldata" db:"calldata"`
|
||||
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
|
||||
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
|
||||
Status MsgStatus `json:"status" db:"status"`
|
||||
}
|
||||
|
||||
@@ -147,7 +145,6 @@ type BlockBatchOrm interface {
|
||||
ResetProvingStatusFor(before ProvingStatus) error
|
||||
NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, endBlock *BlockInfo, parentHash string, totalTxNum uint64, gasUsed uint64) (string, error)
|
||||
BatchRecordExist(id string) (bool, error)
|
||||
GetBatchesByRollupStatus(status RollupStatus, limit uint64) ([]string, error)
|
||||
GetPendingBatches(limit uint64) ([]string, error)
|
||||
GetCommittedBatches(limit uint64) ([]string, error)
|
||||
GetRollupStatus(id string) (RollupStatus, error)
|
||||
@@ -169,7 +166,6 @@ type L1MessageOrm interface {
|
||||
GetL1MessageByMsgHash(msgHash string) (*L1Message, error)
|
||||
GetL1MessagesByStatus(status MsgStatus, limit uint64) ([]*L1Message, error)
|
||||
GetL1ProcessedNonce() (int64, error)
|
||||
GetL1Messages(fields map[string]interface{}, args ...string) ([]*L1Message, error)
|
||||
SaveL1Messages(ctx context.Context, messages []*L1Message) error
|
||||
UpdateLayer2Hash(ctx context.Context, msgHash string, layer2Hash string) error
|
||||
UpdateLayer1Status(ctx context.Context, msgHash string, status MsgStatus) error
|
||||
|
||||
@@ -4,8 +4,6 @@ import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
@@ -48,7 +46,7 @@ func (m *l1MessageOrm) GetL1MessageByNonce(nonce uint64) (*L1Message, error) {
|
||||
|
||||
// GetL1MessagesByStatus fetch list of unprocessed messages given msg status
|
||||
func (m *l1MessageOrm) GetL1MessagesByStatus(status MsgStatus, limit uint64) ([]*L1Message, error) {
|
||||
rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, layer2_hash, status FROM l1_message WHERE status = $1 ORDER BY nonce ASC LIMIT $2;`, status, limit)
|
||||
rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, status FROM l1_message WHERE status = $1 ORDER BY nonce ASC LIMIT $2;`, status, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -89,37 +87,6 @@ func (m *l1MessageOrm) GetL1ProcessedNonce() (int64, error) {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
// GetL1Messages get l1 messages by k-v map and args.
|
||||
func (m *l1MessageOrm) GetL1Messages(fields map[string]interface{}, args ...string) ([]*L1Message, error) {
|
||||
query := "SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, layer2_hash, status FROM l2_message WHERE 1 = 1 "
|
||||
for key := range fields {
|
||||
query += fmt.Sprintf(" AND %s=:%s ", key, key)
|
||||
}
|
||||
query = strings.Join(append([]string{query}, args...), " ")
|
||||
|
||||
db := m.db
|
||||
rows, err := db.NamedQuery(db.Rebind(query), fields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var msgs []*L1Message
|
||||
for rows.Next() {
|
||||
msg := &L1Message{}
|
||||
if err = rows.StructScan(&msg); err != nil {
|
||||
break
|
||||
}
|
||||
msgs = append(msgs, msg)
|
||||
}
|
||||
if len(msgs) == 0 || errors.Is(err, sql.ErrNoRows) {
|
||||
// log.Warn("no unprocessed layer2 messages in db", "err", err)
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return msgs, rows.Close()
|
||||
}
|
||||
|
||||
// SaveL1Messages batch save a list of layer1 messages
|
||||
func (m *l1MessageOrm) SaveL1Messages(ctx context.Context, messages []*L1Message) error {
|
||||
if len(messages) == 0 {
|
||||
@@ -140,10 +107,9 @@ func (m *l1MessageOrm) SaveL1Messages(ctx context.Context, messages []*L1Message
|
||||
"deadline": msg.Deadline,
|
||||
"calldata": msg.Calldata,
|
||||
"layer1_hash": msg.Layer1Hash,
|
||||
"layer2_hash": msg.Layer2Hash,
|
||||
}
|
||||
}
|
||||
_, err := m.db.NamedExec(`INSERT INTO public.l1_message (nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, layer2_hash) VALUES (:nonce, :msg_hash, :height, :sender, :target, :value, :fee, :gas_limit, :deadline, :calldata, :layer1_hash, :layer2_hash);`, messageMaps)
|
||||
_, err := m.db.NamedExec(`INSERT INTO public.l1_message (nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash) VALUES (:nonce, :msg_hash, :height, :sender, :target, :value, :fee, :gas_limit, :deadline, :calldata, :layer1_hash);`, messageMaps)
|
||||
if err != nil {
|
||||
nonces := make([]uint64, 0, len(messages))
|
||||
heights := make([]uint64, 0, len(messages))
|
||||
|
||||
@@ -88,9 +88,9 @@ func (m *layer2MessageOrm) GetL2ProcessedNonce() (int64, error) {
	return -1, nil
}

// GetL2Messages fetch list of messages given msg status
// GetL2MessagesByStatus fetch list of messages given msg status
func (m *layer2MessageOrm) GetL2Messages(fields map[string]interface{}, args ...string) ([]*L2Message, error) {
	query := "SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash, layer1_hash FROM l2_message WHERE 1 = 1 "
	query := "SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash FROM l2_message WHERE 1 = 1 "
	for key := range fields {
		query += fmt.Sprintf("AND %s=:%s ", key, key)
	}
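The query builder above turns every key of the fields map into an "AND key=:key" clause and then appends the raw args fragments to the statement (the matching L1 helper shown earlier in this diff does the same). A hypothetical call-site fragment; the receiver m, the status variable, and the trailing fragments are illustrative assumptions:

// Sketch: select messages with a given status, oldest nonce first, capped at
// 100 rows; the map key must name a column of l2_message.
msgs, err := m.GetL2Messages(
	map[string]interface{}{"status": status}, // rendered as "AND status=:status"
	"ORDER BY nonce ASC",
	"LIMIT 100",
)
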
@@ -138,12 +138,11 @@ func (m *layer2MessageOrm) SaveL2Messages(ctx context.Context, messages []*L2Mes
|
||||
"gas_limit": msg.GasLimit,
|
||||
"deadline": msg.Deadline,
|
||||
"calldata": msg.Calldata,
|
||||
"layer1_hash": msg.Layer1Hash,
|
||||
"layer2_hash": msg.Layer2Hash,
|
||||
}
|
||||
}
|
||||
|
||||
_, err := m.db.NamedExec(`INSERT INTO public.l2_message (nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, layer2_hash) VALUES (:nonce, :msg_hash, :height, :sender, :target, :value, :fee, :gas_limit, :deadline, :calldata, :layer1_hash, :layer2_hash);`, messageMaps)
|
||||
_, err := m.db.NamedExec(`INSERT INTO public.l2_message (nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash) VALUES (:nonce, :msg_hash, :height, :sender, :target, :value, :fee, :gas_limit, :deadline, :calldata, :layer2_hash);`, messageMaps)
|
||||
if err != nil {
|
||||
nonces := make([]uint64, 0, len(messages))
|
||||
heights := make([]uint64, 0, len(messages))
|
||||
|
||||
@@ -256,7 +256,6 @@ go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg=
|
||||
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
|
||||
@@ -270,11 +269,9 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
@@ -295,6 +292,5 @@ gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUk
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o=
|
||||
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
|
||||
rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
|
||||
|
||||
@@ -10,7 +10,7 @@ else
endif

libzkp:
	cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.so ../interface/
	cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
	rm -rf ./prover/lib && cp -r ../common/libzkp/interface ./prover/lib

roller: libzkp ## Build the Roller instance.

@@ -1,41 +1,200 @@
|
||||
{
|
||||
"coinbase": {
|
||||
"address": "0x7157f3b0aee00adbe3d8b6609eda9480e141065a",
|
||||
"nonce": 0,
|
||||
"balance": "0x0",
|
||||
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
},
|
||||
"header": {
|
||||
"parentHash": "0xde613062d01fdfb97065e60ac4bc0da9118e80c1e394007b68dafa542e043d53",
|
||||
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
"miner": "0x0000000000000000000000000000000000000000",
|
||||
"stateRoot": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
|
||||
"transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"difficulty": "0x1",
|
||||
"number": "0x1",
|
||||
"gasLimit": "0x37f94131",
|
||||
"gasUsed": "0x0",
|
||||
"timestamp": "0x63808894",
|
||||
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000002e12fa7e17d64b31990ba42a4c726fc620c51ff9be07c1e151ee909f9a43329d0853a8902b60e94da9f3979fb91dec57022b8962c146e3c265c6b4eecc282d0600",
|
||||
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"nonce": "0x0000000000000000",
|
||||
"baseFeePerGas": "0x342770c0",
|
||||
"hash": "0xfa0235b7e860c08d5156a18c1f4d6fd89eed8202de7f3043bd10d46a4bb3f8c4"
|
||||
},
|
||||
"transactions": [],
|
||||
"storageTrace": {
|
||||
"rootBefore": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
|
||||
"rootAfter": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
|
||||
"proofs": {
|
||||
"0x7157F3b0AEe00adBe3D8B6609edA9480E141065a": [
|
||||
"0x0023817270d692108d3f2583c4fdddb93f05840da992233af555384642d2d480e02c05b065c0e03cc9ea9c6f16cd37395379d47225f9adfe626a288ed94807bd46",
|
||||
"0x0012f68259c5658fa795d5efebf43f2cdda388eb1f15db83e305743c458fce44100b0c36cf61ec8e8522dcac76c3418bff6e2cb91215e5c61fbc0ec735aff79a3a",
|
||||
"0x0124307d227b4219bed858923ccd524f3a235905a749e4372e26522bc8a4f58e0a04040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000021e19e0c9bab2400000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470000000000000000000000000000000000000000000000000000000000000000020222214dcc294b72e40d2f37111a1f966aaefdbdd000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
|
||||
]
|
||||
}
|
||||
},
|
||||
"executionResults": []
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": {
|
||||
"blockTrace": {
|
||||
"number": "0x1",
|
||||
"hash": "0x5366b507fd5ec49c1090655d6858835823b179b1b2773a45654687e0db4ec627",
|
||||
"gasLimit": 939082033,
|
||||
"difficulty": "0x2",
|
||||
"baseFee": "0x342770c0",
|
||||
"coinbase": {
|
||||
"address": "0x4cb1ab63af5d8931ce09673ebd8ae2ce16fd6571",
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
"time": 1663875646,
|
||||
"transactions": []
|
||||
},
|
||||
"storageTrace": {
|
||||
"rootBefore": "0x1358bd270133c112737e834e13d3fe6381d0cf9aea7afecb46d19188d078c451",
|
||||
"rootAfter": "0x1358bd270133c112737e834e13d3fe6381d0cf9aea7afecb46d19188d078c451",
|
||||
"proofs": {
|
||||
"0x4cb1aB63aF5D8931Ce09673EbD8ae2ce16fD6571": [
|
||||
"0x001a4f0d7d9eb169b9a45c37b1a2995ef5d15849e7a582cb935ad18ed10363bfd91bdb4da71c0bc7067be54de6667dc1a8e2e4032141815a5fee2ea58014657014",
|
||||
"0x0129bdbea092f4f7e6de593fd1a16ddb50b1c2a6297d4ae141a60f8da631e4817504040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003635c9adc5dea00000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000204cb1ab63af5d8931ce09673ebd8ae2ce16fd6571000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
|
||||
]
|
||||
}
|
||||
},
|
||||
"executionResults": [],
|
||||
"mptwitness": [
|
||||
{
|
||||
"address": "0x4cb1ab63af5d8931ce09673ebd8ae2ce16fd6571",
|
||||
"accountKey": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x51c478d08891d146cbfe7aea9acfd08163fed3134e837e7312c1330127bd5813",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1470651480a52eee5f5a81412103e4e2a8c17d66e64de57b06c70b1ca74ddb1b",
|
||||
"sibling": "0xd9bf6303d18ed15a93cb82a5e74958d1f55e99a2b1375ca4b969b19e7d0d4f1a"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xad23a3af3faa69c7bb5215f7a927404429cdeea43d07430790241bebdce9270b",
|
||||
"sibling": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x51c478d08891d146cbfe7aea9acfd08163fed3134e837e7312c1330127bd5813",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1470651480a52eee5f5a81412103e4e2a8c17d66e64de57b06c70b1ca74ddb1b",
|
||||
"sibling": "0xd9bf6303d18ed15a93cb82a5e74958d1f55e99a2b1375ca4b969b19e7d0d4f1a"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xad23a3af3faa69c7bb5215f7a927404429cdeea43d07430790241bebdce9270b",
|
||||
"sibling": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "0x4cb1ab63af5d8931ce09673ebd8ae2ce16fd6571",
|
||||
"accountKey": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x51c478d08891d146cbfe7aea9acfd08163fed3134e837e7312c1330127bd5813",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1470651480a52eee5f5a81412103e4e2a8c17d66e64de57b06c70b1ca74ddb1b",
|
||||
"sibling": "0xd9bf6303d18ed15a93cb82a5e74958d1f55e99a2b1375ca4b969b19e7d0d4f1a"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xad23a3af3faa69c7bb5215f7a927404429cdeea43d07430790241bebdce9270b",
|
||||
"sibling": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x51c478d08891d146cbfe7aea9acfd08163fed3134e837e7312c1330127bd5813",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1470651480a52eee5f5a81412103e4e2a8c17d66e64de57b06c70b1ca74ddb1b",
|
||||
"sibling": "0xd9bf6303d18ed15a93cb82a5e74958d1f55e99a2b1375ca4b969b19e7d0d4f1a"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xad23a3af3faa69c7bb5215f7a927404429cdeea43d07430790241bebdce9270b",
|
||||
"sibling": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "0x4cb1ab63af5d8931ce09673ebd8ae2ce16fd6571",
|
||||
"accountKey": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x51c478d08891d146cbfe7aea9acfd08163fed3134e837e7312c1330127bd5813",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1470651480a52eee5f5a81412103e4e2a8c17d66e64de57b06c70b1ca74ddb1b",
|
||||
"sibling": "0xd9bf6303d18ed15a93cb82a5e74958d1f55e99a2b1375ca4b969b19e7d0d4f1a"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xad23a3af3faa69c7bb5215f7a927404429cdeea43d07430790241bebdce9270b",
|
||||
"sibling": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x51c478d08891d146cbfe7aea9acfd08163fed3134e837e7312c1330127bd5813",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1470651480a52eee5f5a81412103e4e2a8c17d66e64de57b06c70b1ca74ddb1b",
|
||||
"sibling": "0xd9bf6303d18ed15a93cb82a5e74958d1f55e99a2b1375ca4b969b19e7d0d4f1a"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xad23a3af3faa69c7bb5215f7a927404429cdeea43d07430790241bebdce9270b",
|
||||
"sibling": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -1,41 +0,0 @@
|
||||
{
|
||||
"coinbase": {
|
||||
"address": "0xadf5218f7ca8c80d90ff63af5fef486af57c2096",
|
||||
"nonce": 0,
|
||||
"balance": "0x0",
|
||||
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
},
|
||||
"header": {
|
||||
"parentHash": "0x33292f2ec508af712c7f98dc3799021b4a3391dfa6456ef8041f8aa1556c1bc0",
|
||||
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
"miner": "0x0000000000000000000000000000000000000000",
|
||||
"stateRoot": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
|
||||
"transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"difficulty": "0x2",
|
||||
"number": "0x4",
|
||||
"gasLimit": "0x37cf50c2",
|
||||
"gasUsed": "0x0",
|
||||
"timestamp": "0x6380889d",
|
||||
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000006c7674d5a049e0d4e5d884745f98b17df096eb9814ce788e232bb55976ebba271a3b59cf5e5c69eeb08cb6679453e05ccc4f6279d023beb0e392816c16b113df00",
|
||||
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"nonce": "0x0000000000000000",
|
||||
"baseFeePerGas": "0x22f06c09",
|
||||
"hash": "0x4b1fb45bfaa6e7662cb1331312f10575997b976bbd772332681a9a005adfc329"
|
||||
},
|
||||
"transactions": [],
|
||||
"storageTrace": {
|
||||
"rootBefore": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
|
||||
"rootAfter": "0x00b5b217bbb123cc3ba125a02c3e85168ef125844d17f5190d0dfca3c847f5e8",
|
||||
"proofs": {
|
||||
"0xadf5218f7ca8C80d90Ff63af5FEF486Af57C2096": [
|
||||
"0x0023817270d692108d3f2583c4fdddb93f05840da992233af555384642d2d480e02c05b065c0e03cc9ea9c6f16cd37395379d47225f9adfe626a288ed94807bd46",
|
||||
"0x0012f68259c5658fa795d5efebf43f2cdda388eb1f15db83e305743c458fce44100b0c36cf61ec8e8522dcac76c3418bff6e2cb91215e5c61fbc0ec735aff79a3a",
|
||||
"0x0124307d227b4219bed858923ccd524f3a235905a749e4372e26522bc8a4f58e0a04040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000021e19e0c9bab2400000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470000000000000000000000000000000000000000000000000000000000000000020222214dcc294b72e40d2f37111a1f966aaefdbdd000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
|
||||
]
|
||||
}
|
||||
},
|
||||
"executionResults": []
|
||||
}
|
||||
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -3,7 +3,7 @@ module scroll-tech/roller
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/urfave/cli/v2 v2.10.2
|
||||
go.etcd.io/bbolt v1.3.6
|
||||
@@ -21,11 +21,11 @@ require (
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.4.3 // indirect
|
||||
github.com/scroll-tech/zktrie v0.3.1 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.6.0 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
golang.org/x/crypto v0.5.0 // indirect
|
||||
golang.org/x/sys v0.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
@@ -323,10 +323,11 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -397,8 +398,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -511,8 +512,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

@@ -4,8 +4,8 @@
package prover

/*
#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.so -lm -ldl
#cgo gpu LDFLAGS: ${SRCDIR}/lib/libzkp.so -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart
#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl
#cgo gpu LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart
#include <stdlib.h>
#include "./lib/libzkp.h"
*/

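The hunk above swaps the prover's cgo link line from the shared object libzkp.so to the static archive libzkp.a. For orientation, a minimal cgo binding of this shape looks roughly like the sketch below; libdemo.a, libdemo.h and demo_add are hypothetical placeholders, not part of this repository.

package demo

/*
#cgo LDFLAGS: ${SRCDIR}/lib/libdemo.a -lm -ldl
#include "./lib/libdemo.h"
*/
import "C"

// Add wraps the hypothetical C function demo_add exposed by the static archive.
func Add(a, b int) int {
	return int(C.demo_add(C.int(a), C.int(b)))
}
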
@@ -17,12 +17,17 @@ import (
)

const (
paramsPath = "../assets/test_params"
seedPath = "../assets/test_seed"
tracesPath = "../assets/traces"
proofDumpPath = "agg_proof"
paramsPath = "../assets/test_params"
seedPath = "../assets/test_seed"
tracesPath = "../assets/traces"
)

type RPCTrace struct {
Jsonrpc string `json:"jsonrpc"`
ID int64 `json:"id"`
Result *types.BlockTrace `json:"result"`
}

func TestFFI(t *testing.T) {
as := assert.New(t)
cfg := &config.ProverConfig{
@@ -45,20 +50,11 @@ func TestFFI(t *testing.T) {
as.NoError(err)
byt, err = io.ReadAll(f)
as.NoError(err)
trace := &types.BlockTrace{}
as.NoError(json.Unmarshal(byt, trace))
traces = append(traces, trace)
rpcTrace := &RPCTrace{}
as.NoError(json.Unmarshal(byt, rpcTrace))
traces = append(traces, rpcTrace.Result)
}
proof, err := prover.Prove(traces)
_, err = prover.Prove(traces)
as.NoError(err)
t.Log("prove success")

// dump the proof
os.RemoveAll(proofDumpPath)
proofByt, err := json.Marshal(proof)
as.NoError(err)
proofFile, err := os.Create(proofDumpPath)
as.NoError(err)
_, err = proofFile.Write(proofByt)
as.NoError(err)
}

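The TestFFI hunk above moves between decoding bare BlockTrace objects and decoding JSON-RPC responses that are unwrapped through the RPCTrace struct before proving. A minimal, self-contained sketch of that decoding step follows; the file name trace.json and the standalone main wrapper are illustrative assumptions, while the struct mirrors the RPCTrace definition shown above.

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/scroll-tech/go-ethereum/core/types" // import path assumed for types.BlockTrace
)

// rpcTrace mirrors the RPCTrace wrapper in the test: the block trace sits
// under the JSON-RPC "result" field.
type rpcTrace struct {
	Jsonrpc string            `json:"jsonrpc"`
	ID      int64             `json:"id"`
	Result  *types.BlockTrace `json:"result"`
}

func main() {
	byt, err := os.ReadFile("trace.json") // hypothetical trace dump
	if err != nil {
		panic(err)
	}
	wrapped := &rpcTrace{}
	if err := json.Unmarshal(byt, wrapped); err != nil {
		panic(err)
	}
	// wrapped.Result is the *types.BlockTrace that prover.Prove consumes.
	fmt.Println("trace decoded:", wrapped.Result != nil)
}
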
@@ -13,7 +13,6 @@ import (
"time"

"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"

"scroll-tech/database"
@@ -120,10 +119,10 @@ func runSender(t *testing.T, endpoint string) *sender.Sender {
Endpoint: endpoint,
CheckPendingTime: 3,
EscalateBlocks: 100,
Confirmations: rpc.LatestBlockNumber,
Confirmations: 0,
EscalateMultipleNum: 11,
EscalateMultipleDen: 10,
TxType: "LegacyTx",
TxType: "DynamicFeeTx",
}, []*ecdsa.PrivateKey{priv})
assert.NoError(t, err)
return newSender

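The runSender hunk above switches the test sender's TxType between "LegacyTx" and "DynamicFeeTx". As a rough illustration of what those strings correspond to, the names line up with go-ethereum's transaction type constants; the helper below is a hypothetical sketch, not the sender package's actual mapping.

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/core/types"
)

// txTypeFromConfig is a hypothetical helper translating the config string
// into a go-ethereum transaction type; the real sender may differ.
func txTypeFromConfig(name string) (uint8, error) {
	switch name {
	case "LegacyTx":
		return types.LegacyTxType, nil
	case "DynamicFeeTx":
		return types.DynamicFeeTxType, nil
	default:
		return 0, fmt.Errorf("unsupported tx_type %q", name)
	}
}

func main() {
	txType, err := txTypeFromConfig("DynamicFeeTx")
	fmt.Println(txType, err) // prints: 2 <nil>
}
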
@@ -3,7 +3,7 @@ module scroll-tech/integration-test
go 1.18

require (
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
github.com/stretchr/testify v1.8.0
)

@@ -14,7 +14,7 @@ require (
github.com/kr/pretty v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/sys v0.3.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

@@ -304,7 +304,8 @@ github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XF
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -362,7 +363,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -473,7 +475,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

@@ -1,16 +1,8 @@
package integration

import (
"crypto/rand"
"io/ioutil"
"math/big"
"net/http"
"strconv"
"strings"
"testing"
"time"

"github.com/stretchr/testify/assert"
)

func TestIntegration(t *testing.T) {
@@ -24,9 +16,6 @@ func TestIntegration(t *testing.T) {
// test bridge service
t.Run("testStartProcess", testStartProcess)

// test monitor metrics
t.Run("testMonitorMetrics", testMonitorMetrics)

t.Cleanup(func() {
free(t)
})
@@ -54,28 +43,3 @@ func testStartProcess(t *testing.T) {
bridgeCmd.WaitExit()
coordinatorCmd.WaitExit()
}

func testMonitorMetrics(t *testing.T) {
// migrate db.
runDBCliApp(t, "reset", "successful to reset")
runDBCliApp(t, "migrate", "current version:")

// Start bridge process with metrics server.
port, _ := rand.Int(rand.Reader, big.NewInt(2000))
svrPort := strconv.FormatInt(port.Int64()+50000, 10)
bridgeCmd := runBridgeApp(t, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)
bridgeCmd.RunApp(func() bool { return bridgeCmd.WaitResult(time.Second*20, "Start bridge successfully") })

// Get monitor metrics.
resp, err := http.Get("http://localhost:" + svrPort)
assert.NoError(t, err)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
assert.NoError(t, err)
bodyStr := string(body)
assert.Equal(t, 200, resp.StatusCode)
assert.Equal(t, true, strings.Contains(bodyStr, "bridge_l1_msg_sync_height"))
assert.Equal(t, true, strings.Contains(bodyStr, "bridge_l2_msg_sync_height"))

bridgeCmd.WaitExit()
}