Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits (13 commits):

| SHA1 |
|---|
| e09b98f0ed |
| a6665264db |
| 517469a55d |
| 7c95208178 |
| 27affe4e6b |
| 0313f1651c |
| 160f4c447a |
| 72f88bae5e |
| daca3ae6eb |
| 073e9e883c |
| cce5c6c62e |
| 1ab9cf2de6 |
| 85e2e7ae94 |
@@ -18,7 +18,7 @@
 ├── <a href="./database">database</a>: Database client and schema definition
 ├── <a href="./prover">prover</a>: Prover client that runs proof generation for zkEVM circuit and aggregation circuit
 ├── <a href="./rollup">rollup</a>: Rollup-related services
-├── <a href="./scroll-contracts">scroll-contracts</a>: solidity code for Scroll L1 bridge and rollup contracts and L2 bridge and pre-deployed contracts.
+├── <a href="https://github.com/scroll-tech/scroll-contracts.git">scroll-contracts</a>: solidity code for Scroll L1 bridge and rollup contracts and L2 bridge and pre-deployed contracts.
 └── <a href="./tests">tests</a>: Integration tests
 </pre>
File diff suppressed because one or more lines are too long
@@ -19,7 +19,9 @@
     "ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
     "GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
     "MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
-    "BatchBridgeGatewayAddr": "0x5Bcfd99c34cf7E06fc756f6f5aE7400504852bc4"
+    "BatchBridgeGatewayAddr": "0x5Bcfd99c34cf7E06fc756f6f5aE7400504852bc4",
+    "GasTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
+    "WrappedTokenGatewayAddr": "0x0000000000000000000000000000000000000000"
   },
   "L2": {
     "confirmation": 0,
@@ -6,6 +6,7 @@ import (
 	"path/filepath"

 	"scroll-tech/common/database"
+	"scroll-tech/common/utils"
 )

 // FetcherConfig is the configuration of Layer1 or Layer2 fetcher.

@@ -30,6 +31,8 @@ type FetcherConfig struct {
 	GatewayRouterAddr       string `json:"GatewayRouterAddr"`
 	MessageQueueAddr        string `json:"MessageQueueAddr"`
 	BatchBridgeGatewayAddr  string `json:"BatchBridgeGatewayAddr"`
+	GasTokenGatewayAddr     string `json:"GasTokenGatewayAddr"`
+	WrappedTokenGatewayAddr string `json:"WrappedTokenGatewayAddr"`
 }

 // RedisConfig redis config

@@ -64,5 +67,11 @@ func NewConfig(file string) (*Config, error) {
 		return nil, err
 	}

+	// Override config with environment variables
+	err = utils.OverrideConfigWithEnv(cfg, "SCROLL_BRIDGE_HISTORY")
+	if err != nil {
+		return nil, err
+	}
+
 	return cfg, nil
 }
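The NewConfig hunks in this compare (bridge-history here, coordinator and rollup later) all wire in the same environment override. A rough sketch of what this enables, assuming the top-level Config struct tags mirror the JSON shown above (an `L2` object with a `confirmation` field) and that the import path guessed below is right:

```go
package main

import (
	"fmt"
	"os"

	"scroll-tech/bridge-history-api/internal/config" // assumed import path
)

func main() {
	// Keys compose as PREFIX + "_" + upper-cased json tag at each nesting
	// level, per the OverrideConfigWithEnv implementation later in this compare.
	os.Setenv("SCROLL_BRIDGE_HISTORY_L2_CONFIRMATION", "6")

	cfg, err := config.NewConfig("conf/config.json") // path is illustrative
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.L2.Confirmation) // 6, overriding whatever the JSON file says
}
```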
@@ -168,6 +168,14 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
 			lastMessage.L2TokenAddress = event.L2Token.String()
 			lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
 			lastMessage.TokenAmounts = utils.ConvertBigIntArrayToString(event.TokenAmounts)
+		case backendabi.L1DepositWrappedTokenSig:
+			event := backendabi.WrappedTokenMessageEvent{}
+			if err := utils.UnpackLog(backendabi.L1WrappedTokenGatewayABI, &event, "DepositWrappedToken", vlog); err != nil {
+				log.Error("Failed to unpack DepositWrappedToken event", "err", err)
+				return nil, nil, err
+			}
+			lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
+			lastMessage.Sender = event.From.String()
 		case backendabi.L1SentMessageEventSig:
 			event := backendabi.L1SentMessageEvent{}
 			if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "SentMessage", vlog); err != nil {
@@ -51,11 +51,8 @@ type L1FetcherLogic struct {
 // NewL1FetcherLogic creates L1 fetcher logic
 func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
 	addressList := []common.Address{
-		common.HexToAddress(cfg.ETHGatewayAddr),
-
 		common.HexToAddress(cfg.StandardERC20GatewayAddr),
 		common.HexToAddress(cfg.CustomERC20GatewayAddr),
-		common.HexToAddress(cfg.WETHGatewayAddr),
 		common.HexToAddress(cfg.DAIGatewayAddr),

 		common.HexToAddress(cfg.ERC721GatewayAddr),

@@ -69,11 +66,8 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 	}

 	gatewayList := []common.Address{
-		common.HexToAddress(cfg.ETHGatewayAddr),
-
 		common.HexToAddress(cfg.StandardERC20GatewayAddr),
 		common.HexToAddress(cfg.CustomERC20GatewayAddr),
-		common.HexToAddress(cfg.WETHGatewayAddr),
 		common.HexToAddress(cfg.DAIGatewayAddr),

 		common.HexToAddress(cfg.ERC721GatewayAddr),

@@ -105,6 +99,26 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 		gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
 	}

+	if common.HexToAddress(cfg.ETHGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.ETHGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.ETHGatewayAddr))
+	}
+
+	if common.HexToAddress(cfg.WETHGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.WETHGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.WETHGatewayAddr))
+	}
+
+	if common.HexToAddress(cfg.GasTokenGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.GasTokenGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.GasTokenGatewayAddr))
+	}
+
+	if common.HexToAddress(cfg.WrappedTokenGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
+	}
+
 	log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)

 	f := &L1FetcherLogic{

@@ -210,7 +224,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
 		Topics: make([][]common.Hash, 1),
 	}

-	query.Topics[0] = make([]common.Hash, 15)
+	query.Topics[0] = make([]common.Hash, 16)
 	query.Topics[0][0] = backendabi.L1DepositETHSig
 	query.Topics[0][1] = backendabi.L1DepositERC20Sig
 	query.Topics[0][2] = backendabi.L1DepositERC721Sig

@@ -226,6 +240,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
 	query.Topics[0][12] = backendabi.L1DropTransactionEventSig
 	query.Topics[0][13] = backendabi.L1ResetDequeuedTransactionEventSig
 	query.Topics[0][14] = backendabi.L1BridgeBatchDepositSig
+	query.Topics[0][15] = backendabi.L1DepositWrappedTokenSig

 	eventLogs, err := f.client.FilterLogs(ctx, query)
 	if err != nil {
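For context on the Topics change above: in go-ethereum's FilterQuery, the inner slice at Topics[0] is an OR list over first-topic values, so widening it from 15 to 16 slots is what admits the new DepositWrappedToken signature. A minimal sketch, with the endpoint and event signature strings as illustrative assumptions rather than values taken from this repo:

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	ethereum "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // illustrative endpoint
	if err != nil {
		panic(err)
	}
	// A log matches when its first topic equals ANY entry in Topics[0].
	query := ethereum.FilterQuery{
		FromBlock: big.NewInt(100),
		ToBlock:   big.NewInt(200),
		Topics: [][]common.Hash{{
			crypto.Keccak256Hash([]byte("DepositETH(address,address,uint256,bytes)")),          // assumed signature
			crypto.Keccak256Hash([]byte("DepositWrappedToken(address,address,uint256,bytes)")), // assumed signature
		}},
	}
	logs, err := client.FilterLogs(context.Background(), query)
	if err != nil {
		panic(err)
	}
	fmt.Println("matched logs:", len(logs))
}
```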
@@ -54,7 +54,6 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient

 		common.HexToAddress(cfg.StandardERC20GatewayAddr),
 		common.HexToAddress(cfg.CustomERC20GatewayAddr),
-		common.HexToAddress(cfg.WETHGatewayAddr),
 		common.HexToAddress(cfg.DAIGatewayAddr),

 		common.HexToAddress(cfg.ERC721GatewayAddr),

@@ -68,7 +67,6 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient

 		common.HexToAddress(cfg.StandardERC20GatewayAddr),
 		common.HexToAddress(cfg.CustomERC20GatewayAddr),
-		common.HexToAddress(cfg.WETHGatewayAddr),
 		common.HexToAddress(cfg.DAIGatewayAddr),

 		common.HexToAddress(cfg.ERC721GatewayAddr),

@@ -100,6 +98,11 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 		gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
 	}

+	if common.HexToAddress(cfg.WETHGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.WETHGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.WETHGatewayAddr))
+	}
+
 	log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)

 	f := &L2FetcherLogic{
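The same non-zero-address guard now appears once per optional gateway in both fetchers; a hypothetical helper could fold the repetition. A sketch only, not part of this change:

```go
package fetcher // hypothetical package name

import "github.com/scroll-tech/go-ethereum/common"

// appendIfConfigured is a hypothetical helper, not part of this diff: it adds
// a gateway to both lists only when its configured address is non-zero.
func appendIfConfigured(addressList, gatewayList []common.Address, hexAddr string) ([]common.Address, []common.Address) {
	addr := common.HexToAddress(hexAddr)
	if addr == (common.Address{}) { // unset or all-zero address: skip
		return addressList, gatewayList
	}
	return append(addressList, addr), append(gatewayList, addr)
}
```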
@@ -9,10 +9,14 @@ import (
 	"math/big"
+	"os"
 	"path/filepath"
+	"reflect"
+	"strconv"
+	"strings"
 	"time"

 	"github.com/modern-go/reflect2"
 	"github.com/scroll-tech/go-ethereum/core"
 	"github.com/scroll-tech/go-ethereum/log"
 )

 // TryTimes try run several times until the function return true.

@@ -78,3 +82,89 @@ func ReadGenesis(genesisPath string) (*core.Genesis, error) {
 	}
 	return genesis, file.Close()
 }
+
+// OverrideConfigWithEnv recursively overrides config values with environment variables
+func OverrideConfigWithEnv(cfg interface{}, prefix string) error {
+	v := reflect.ValueOf(cfg)
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return nil
+	}
+	v = v.Elem()
+
+	t := v.Type()
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		fieldValue := v.Field(i)
+
+		if !fieldValue.CanSet() {
+			continue
+		}
+
+		tag := field.Tag.Get("json")
+		if tag == "" {
+			tag = strings.ToLower(field.Name)
+		}
+
+		envKey := prefix + "_" + strings.ToUpper(tag)
+
+		switch fieldValue.Kind() {
+		case reflect.Ptr:
+			if !fieldValue.IsNil() {
+				err := OverrideConfigWithEnv(fieldValue.Interface(), envKey)
+				if err != nil {
+					return err
+				}
+			}
+		case reflect.Struct:
+			err := OverrideConfigWithEnv(fieldValue.Addr().Interface(), envKey)
+			if err != nil {
+				return err
+			}
+		default:
+			if envValue, exists := os.LookupEnv(envKey); exists {
+				log.Info("Overriding config with env var", "key", envKey)
+				err := setField(fieldValue, envValue)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// setField sets the value of a field based on the environment variable value
+func setField(field reflect.Value, value string) error {
+	switch field.Kind() {
+	case reflect.String:
+		field.SetString(value)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		intValue, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return err
+		}
+		field.SetInt(intValue)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		uintValue, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return err
+		}
+		field.SetUint(uintValue)
+	case reflect.Float32, reflect.Float64:
+		floatValue, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return err
+		}
+		field.SetFloat(floatValue)
+	case reflect.Bool:
+		boolValue, err := strconv.ParseBool(value)
+		if err != nil {
+			return err
+		}
+		field.SetBool(boolValue)
+	default:
+		return fmt.Errorf("unsupported type: %v", field.Kind())
+	}
+	return nil
+}
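To see the recursion and naming scheme end to end, here is a self-contained sketch against a toy struct, assuming the helper is importable as scroll-tech/common/utils exactly as in the hunks above:

```go
package main

import (
	"fmt"
	"os"

	"scroll-tech/common/utils" // import path as used elsewhere in this compare
)

type inner struct {
	Confirmation uint64 `json:"confirmation"`
}

type outer struct {
	L2       inner  `json:"L2"`
	Endpoint string `json:"endpoint"`
}

func main() {
	// Keys are PREFIX, then "_" + strings.ToUpper(jsonTag) per nesting level:
	os.Setenv("DEMO_L2_CONFIRMATION", "6")
	os.Setenv("DEMO_ENDPOINT", "http://localhost:8545")

	cfg := &outer{}
	if err := utils.OverrideConfigWithEnv(cfg, "DEMO"); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // &{L2:{Confirmation:6} Endpoint:http://localhost:8545}
}
```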
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )

-var tag = "v4.4.37"
+var tag = "v4.4.46"

 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
@@ -6,6 +6,7 @@ import (
 	"path/filepath"

 	"scroll-tech/common/database"
+	"scroll-tech/common/utils"
 )

 // ProverManager loads sequencer configuration items.

@@ -72,5 +73,11 @@ func NewConfig(file string) (*Config, error) {
 		return nil, err
 	}

+	// Override config with environment variables
+	err = utils.OverrideConfigWithEnv(cfg, "SCROLL_COORDINATOR")
+	if err != nil {
+		return nil, err
+	}
+
 	return cfg, nil
 }
@@ -6,6 +6,7 @@ import (

 	jwt "github.com/appleboy/gin-jwt/v2"
 	"github.com/gin-gonic/gin"
+	"github.com/scroll-tech/go-ethereum/log"
 	"gorm.io/gorm"

 	"scroll-tech/coordinator/internal/config"

@@ -58,8 +59,19 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
 		return jwt.MapClaims{}
 	}

+	publicKey := v.PublicKey
+	if publicKey == "" {
+		var err error
+		publicKey, err = v.RecoverPublicKeyFromSignature()
+		if err != nil {
+			// do not handle the error here: v.Verify() was already called beforehand, so this should never fail;
+			// log it just in case.
+			log.Error("impossible path: failed to recover public key from signature", "error", err.Error())
+		}
+	}
+
 	return jwt.MapClaims{
-		types.PublicKey:     v.PublicKey,
+		types.PublicKey:     publicKey,
 		types.ProverName:    v.Message.ProverName,
 		types.ProverVersion: v.Message.ProverVersion,
 	}
@@ -59,6 +59,8 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
+	if login.PublicKey != "" {
 		verify, err := login.Verify()
 		if err != nil || !verify {
 			log.Error("auth message verify failure", "prover_name", login.Message.ProverName,
 				"prover_version", login.Message.ProverVersion, "message", login.Message)
 			return errors.New("auth message verify failure")
 		}
+	}

@@ -84,13 +86,14 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
 			vks[vk] = struct{}{}
 		}
 	default:
-		log.Error("invalid prover_type", "value", proverType)
+		log.Error("invalid prover_type", "value", proverType, "prover name", login.Message.ProverName, "prover_version", login.Message.ProverVersion)
 		}
 	}

 	for _, vk := range login.Message.VKs {
 		if _, ok := vks[vk]; !ok {
-			log.Error("vk inconsistency", "prover vk", vk)
+			log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
+				"prover_version", login.Message.ProverVersion, "message", login.Message)
 			if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
 				return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
 					version.Version, login.Message.ProverVersion)
@@ -29,9 +29,8 @@ import (
 type BatchProverTask struct {
 	BaseProverTask

-	batchAttemptsExceedTotal prometheus.Counter
-	batchTaskGetTaskTotal    *prometheus.CounterVec
-	batchTaskGetTaskProver   *prometheus.CounterVec
+	batchTaskGetTaskTotal  *prometheus.CounterVec
+	batchTaskGetTaskProver *prometheus.CounterVec
 }

 // NewBatchProverTask new a batch collector

@@ -47,10 +46,6 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
 			proverTaskOrm:      orm.NewProverTask(db),
 			proverBlockListOrm: orm.NewProverBlockList(db),
 		},
-		batchAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "coordinator_batch_attempts_exceed_total",
-			Help: "Total number of batch attempts exceed.",
-		}),
 		batchTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "coordinator_batch_get_task_total",
 			Help: "Total number of batch get task.",
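Background on the metric types in this hunk (the same change repeats for bundle and chunk tasks below): a plain Counter is a single time series, while a CounterVec fans out one series per label value. A minimal sketch of the register-and-increment pattern, with the label name being an assumption for illustration:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()

	// promauto.With(reg) registers the metric on construction, so a
	// duplicate Name panics at startup instead of failing silently.
	getTaskTotal := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
		Name: "coordinator_batch_get_task_total",
		Help: "Total number of batch get task.",
	}, []string{"prover_name"}) // label name is illustrative

	// One time series per label value:
	getTaskTotal.WithLabelValues("prover-a").Inc()
	getTaskTotal.WithLabelValues("prover-b").Add(2)
}
```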
@@ -27,9 +27,8 @@ import (
 type BundleProverTask struct {
 	BaseProverTask

-	bundleAttemptsExceedTotal prometheus.Counter
-	bundleTaskGetTaskTotal    *prometheus.CounterVec
-	bundleTaskGetTaskProver   *prometheus.CounterVec
+	bundleTaskGetTaskTotal  *prometheus.CounterVec
+	bundleTaskGetTaskProver *prometheus.CounterVec
 }

 // NewBundleProverTask new a bundle collector

@@ -46,10 +45,6 @@ func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *g
 			proverTaskOrm:      orm.NewProverTask(db),
 			proverBlockListOrm: orm.NewProverBlockList(db),
 		},
-		bundleAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "coordinator_bundle_attempts_exceed_total",
-			Help: "Total number of bundle attempts exceed.",
-		}),
 		bundleTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "coordinator_bundle_get_task_total",
 			Help: "Total number of bundle get task.",
@@ -27,9 +27,8 @@ import (
 type ChunkProverTask struct {
 	BaseProverTask

-	chunkAttemptsExceedTotal prometheus.Counter
-	chunkTaskGetTaskTotal    *prometheus.CounterVec
-	chunkTaskGetTaskProver   *prometheus.CounterVec
+	chunkTaskGetTaskTotal  *prometheus.CounterVec
+	chunkTaskGetTaskProver *prometheus.CounterVec
 }

 // NewChunkProverTask new a chunk prover task

@@ -44,10 +43,6 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
 			proverTaskOrm:      orm.NewProverTask(db),
 			proverBlockListOrm: orm.NewProverBlockList(db),
 		},
-		chunkAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "coordinator_chunk_attempts_exceed_total",
-			Help: "Total number of chunk attempts exceed.",
-		}),
 		chunkTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "coordinator_chunk_get_task_total",
 			Help: "Total number of chunk get task.",
@@ -26,6 +26,22 @@ type LoginSchema struct {
 	Token string `json:"token"`
 }

+// TODO: only used for the darwin upgrade; delete in the next upgrade.
+type identity struct {
+	ProverName    string `json:"prover_name"`
+	ProverVersion string `json:"prover_version"`
+	Challenge     string `json:"challenge"`
+}
+
+func (i *identity) Hash() ([]byte, error) {
+	byt, err := rlp.EncodeToBytes(i)
+	if err != nil {
+		return nil, err
+	}
+	hash := crypto.Keccak256Hash(byt)
+	return hash[:], nil
+}
+
 // Message the login message struct
 type Message struct {
 	Challenge     string `form:"challenge" json:"challenge" binding:"required"`

@@ -77,6 +93,28 @@ func (a *LoginParameter) Verify() (bool, error) {
 	return isValid, nil
 }

+// RecoverPublicKeyFromSignature gets the public key from the signature.
+// This method exists for pre-darwin compatibility.
+func (a *LoginParameter) RecoverPublicKeyFromSignature() (string, error) {
+	curieIdentity := identity{
+		ProverName:    a.Message.ProverName,
+		ProverVersion: a.Message.ProverVersion,
+		Challenge:     a.Message.Challenge,
+	}
+
+	hash, err := curieIdentity.Hash()
+	if err != nil {
+		return "", err
+	}
+	sig := common.FromHex(a.Signature)
+	// recover public key
+	pk, err := crypto.SigToPub(hash, sig)
+	if err != nil {
+		return "", err
+	}
+	return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
+}
+
 // Hash returns the hash of the auth message, which should be the message used
 // to construct the Signature.
 func (i *Message) Hash() ([]byte, error) {
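The recovery path relies on ECDSA public-key recovery: given the signed digest and the 65-byte signature, the public key is recomputed rather than transmitted, which is the same trick the coordinator uses above when a pre-darwin prover omits the public_key field. A small roundtrip sketch using go-ethereum's crypto package:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}

	// Sign a 32-byte digest (a stand-in for identity.Hash() above).
	digest := crypto.Keccak256([]byte("demo login payload"))
	sig, err := crypto.Sign(digest, key)
	if err != nil {
		panic(err)
	}

	// Recover the signer's public key from (digest, signature) alone.
	pk, err := crypto.SigToPub(digest, sig)
	if err != nil {
		panic(err)
	}
	fmt.Println(crypto.PubkeyToAddress(*pk) == crypto.PubkeyToAddress(key.PublicKey)) // true
}
```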
@@ -14,6 +14,8 @@ use types::*;

 use crate::{config::Config, key_signer::KeySigner};

+pub use errors::ProofStatusNotOKError;
+
 pub struct CoordinatorClient<'a> {
     api: Api,
     token: Option<String>,
@@ -1,4 +1,6 @@
-use super::types::*;
+use crate::{coordinator_client::ProofStatusNotOKError, types::ProofStatus};
+
+use super::{errors::*, types::*};
 use anyhow::{bail, Result};
 use core::time::Duration;
 use reqwest::{header::CONTENT_TYPE, Url};

@@ -76,7 +78,23 @@ impl Api {
         token: &String,
     ) -> Result<Response<SubmitProofResponseData>> {
         let method = "/coordinator/v1/submit_proof";
-        self.post_with_token(method, req, token).await
+        let response = self
+            .post_with_token::<SubmitProofRequest, Response<SubmitProofResponseData>>(
+                method, req, token,
+            )
+            .await?;
+
+        // when the request's status is already not ok, we mark the error returned
+        // from the coordinator and will ignore it later.
+        if response.errcode == ErrorCode::ErrCoordinatorHandleZkProofFailure
+            && req.status != ProofStatus::Ok
+            && response
+                .errmsg
+                .contains("validator failure proof msg status not ok")
+        {
+            return Err(anyhow::anyhow!(ProofStatusNotOKError));
+        }
+        Ok(response)
     }

     async fn post_with_token<Req, Resp>(
@@ -1,4 +1,5 @@
 use serde::{Deserialize, Deserializer};
+use std::fmt;

 #[derive(Debug, Clone, Copy, PartialEq)]
 pub enum ErrorCode {

@@ -51,3 +52,14 @@ impl<'de> Deserialize<'de> for ErrorCode {
         Ok(ErrorCode::from_i32(v))
     }
 }
+
+// ====================================================
+
+#[derive(Debug, Clone)]
+pub struct ProofStatusNotOKError;
+
+impl fmt::Display for ProofStatusNotOKError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "proof status not ok")
+    }
+}
@@ -37,7 +37,7 @@ struct Args {
     log_file: Option<String>,
 }

-fn main() -> Result<(), Box<dyn std::error::Error>> {
+fn start() -> Result<()> {
     let args = Args::parse();

     if args.version {

@@ -76,3 +76,10 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {

     Ok(())
 }
+
+fn main() {
+    let result = start();
+    if let Err(e) = result {
+        log::error!("main exit with error {:#}", e)
+    }
+}
@@ -1,4 +1,4 @@
-use super::{prover::Prover, task_cache::TaskCache};
+use super::{coordinator_client::ProofStatusNotOKError, prover::Prover, task_cache::TaskCache};
 use anyhow::{Context, Result};
 use std::rc::Rc;

@@ -16,7 +16,11 @@ impl<'a> TaskProcessor<'a> {
         loop {
             log::info!("start a new round.");
             if let Err(err) = self.prove_and_submit() {
-                log::error!("encounter error: {:#}", err);
+                if err.is::<ProofStatusNotOKError>() {
+                    log::info!("proof status not ok, downgrade level to info.");
+                } else {
+                    log::error!("encounter error: {:#}", err);
+                }
             } else {
                 log::info!("prove & submit succeed.");
             }

@@ -54,11 +58,18 @@ impl<'a> TaskProcessor<'a> {
             );
             let result = match self.prover.prove_task(&task_wrapper.task) {
                 Ok(proof_detail) => self.prover.submit_proof(proof_detail, &task_wrapper.task),
-                Err(error) => self.prover.submit_error(
-                    &task_wrapper.task,
-                    super::types::ProofFailureType::NoPanic,
-                    error,
-                ),
+                Err(error) => {
+                    log::error!(
+                        "failed to prove task, id: {}, error: {:#}",
+                        &task_wrapper.task.id,
+                        error
+                    );
+                    self.prover.submit_error(
+                        &task_wrapper.task,
+                        super::types::ProofFailureType::NoPanic,
+                        error,
+                    )
+                }
             };
             return result;
         }
@@ -119,7 +119,6 @@ impl<'a> CircuitsHandlerProvider<'a> {
         if let Some(handler) = &self.current_circuit {
             Ok(handler.clone())
         } else {
-            log::error!("missing cached handler, there must be something wrong.");
             bail!("missing cached handler, there must be something wrong.")
         }
     }

@@ -136,7 +135,6 @@ impl<'a> CircuitsHandlerProvider<'a> {
             self.current_circuit = Some(rc_handler.clone());
             Ok(rc_handler)
         } else {
-            log::error!("missing builder, there must be something wrong.");
             bail!("missing builder, there must be something wrong.")
         }
     }
@@ -19,7 +19,10 @@
       "min_gas_price": 0,
       "gas_price_diff": 50000,
       "l1_base_fee_weight": 0.132,
-      "l1_blob_base_fee_weight": 0.145
+      "l1_blob_base_fee_weight": 0.145,
+      "check_committed_batches_window_minutes": 5,
+      "l1_base_fee_default": 15000000000,
+      "l1_blob_base_fee_default": 1
     },
     "gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313"
   }

@@ -41,7 +44,8 @@
       "max_blob_gas_price": 10000000000000,
       "tx_type": "DynamicFeeTx",
       "check_pending_time": 1,
-      "min_gas_tip": 100000000
+      "min_gas_tip": 100000000,
+      "max_pending_blob_txs": 3
     },
     "gas_oracle_config": {
       "min_gas_price": 0,
@@ -6,6 +6,7 @@ import (
 	"path/filepath"

 	"scroll-tech/common/database"
+	"scroll-tech/common/utils"
 )

 // Config load configuration items.

@@ -28,5 +29,11 @@ func NewConfig(file string) (*Config, error) {
 		return nil, err
 	}

+	// Override config with environment variables
+	err = utils.OverrideConfigWithEnv(cfg, "SCROLL_ROLLUP")
+	if err != nil {
+		return nil, err
+	}
+
 	return cfg, nil
 }
@@ -32,6 +32,8 @@ type SenderConfig struct {
 	MaxBlobGasPrice uint64 `json:"max_blob_gas_price"`
 	// The transaction type to use: LegacyTx, DynamicFeeTx, BlobTx
 	TxType string `json:"tx_type"`
+	// The maximum number of pending blob-carrying transactions
+	MaxPendingBlobTxs int64 `json:"max_pending_blob_txs"`
 }

 // ChainMonitor this config is used to get batch status from chain_monitor API.

@@ -82,6 +84,10 @@ type GasOracleConfig struct {
 	L1BaseFeeWeight float64 `json:"l1_base_fee_weight"`
 	// The weight for L1 blob base fee.
 	L1BlobBaseFeeWeight float64 `json:"l1_blob_base_fee_weight"`
+	// CheckCommittedBatchesWindowMinutes is the time window, in minutes, used to check whether batches were committed recently, deciding whether to update the gas oracle.
+	CheckCommittedBatchesWindowMinutes int    `json:"check_committed_batches_window_minutes"`
+	L1BaseFeeDefault                   uint64 `json:"l1_base_fee_default"`
+	L1BlobBaseFeeDefault               uint64 `json:"l1_blob_base_fee_default"`
 }

 // relayerConfigAlias RelayerConfig alias name
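The weight fields suggest the oracle blends the L1 base fee and blob base fee into one value before relaying it; the exact formula lives in the relayer, not in this diff, so the following is a hypothetical illustration only:

```go
package main

import "fmt"

// weightedL1Fee is a hypothetical illustration of how weights such as
// l1_base_fee_weight=0.132 and l1_blob_base_fee_weight=0.145 could be
// combined; the real computation in the relayer may differ.
func weightedL1Fee(baseFee, blobBaseFee uint64, wBase, wBlob float64) uint64 {
	return uint64(wBase*float64(baseFee) + wBlob*float64(blobBaseFee))
}

func main() {
	// 20 gwei base fee, 1 wei blob base fee (a post-4844 lull):
	fmt.Println(weightedL1Fee(20_000_000_000, 1, 0.132, 0.145)) // 2640000000
}
```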
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"math"
 	"math/big"
+	"time"

 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/scroll-tech/go-ethereum/accounts/abi"

@@ -15,6 +16,7 @@ import (
 	"gorm.io/gorm"

 	"scroll-tech/common/types"
+	"scroll-tech/common/utils"

 	bridgeAbi "scroll-tech/rollup/abi"
 	"scroll-tech/rollup/internal/config"

@@ -43,6 +45,7 @@ type Layer1Relayer struct {

 	l1BlockOrm *orm.L1Block
 	l2BlockOrm *orm.L2Block
+	batchOrm   *orm.Batch

 	metrics *l1RelayerMetrics
 }

@@ -84,6 +87,7 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
 		ctx:        ctx,
 		l1BlockOrm: orm.NewL1Block(db),
 		l2BlockOrm: orm.NewL2Block(db),
+		batchOrm:   orm.NewBatch(db),

 		gasOracleSender: gasOracleSender,
 		l1GasOracleABI:  bridgeAbi.L1GasPriceOracleABI,

@@ -150,6 +154,19 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
 	}

 	if r.shouldUpdateGasOracle(baseFee, blobBaseFee, isCurie) {
+		// A timeout indicates that batch committing has been stuck for a long time, likely because the L1 gas fee spiked.
+		// If we are not committing batches due to high fees, we shouldn't update the oracle, to keep users from paying a high l1_data_fee.
+		// Instead, fall back to the default fee values, since we have probably already pushed the oracle fees to high values.
+		var reachTimeout bool
+		if reachTimeout, err = r.commitBatchReachTimeout(); reachTimeout && err == nil {
+			if r.lastBaseFee == r.cfg.GasOracleConfig.L1BaseFeeDefault && r.lastBlobBaseFee == r.cfg.GasOracleConfig.L1BlobBaseFeeDefault {
+				return
+			}
+			baseFee = r.cfg.GasOracleConfig.L1BaseFeeDefault
+			blobBaseFee = r.cfg.GasOracleConfig.L1BlobBaseFeeDefault
+		} else if err != nil {
+			return
+		}
 		var data []byte
 		if isCurie {
 			data, err = r.l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", new(big.Int).SetUint64(baseFee), new(big.Int).SetUint64(blobBaseFee))

@@ -260,3 +277,18 @@ func (r *Layer1Relayer) shouldUpdateGasOracle(baseFee uint64, blobBaseFee uint64

 	return false
 }
+
+func (r *Layer1Relayer) commitBatchReachTimeout() (bool, error) {
+	fields := map[string]interface{}{
+		"rollup_status IN ?": []types.RollupStatus{types.RollupCommitted, types.RollupFinalizing, types.RollupFinalized},
+	}
+	orderByList := []string{"index DESC"}
+	limit := 1
+	batches, err := r.batchOrm.GetBatches(r.ctx, fields, orderByList, limit)
+	if err != nil {
+		log.Warn("failed to fetch latest committed, finalizing or finalized batch", "err", err)
+		return false, err
+	}
+	// len(batches) == 0 probably shouldn't ever happen, but we need to check it
+	return len(batches) == 0 || utils.NowUTC().Sub(*batches[0].CommittedAt) > time.Duration(r.cfg.GasOracleConfig.CheckCommittedBatchesWindowMinutes)*time.Minute, nil
+}
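commitBatchReachTimeout reduces to "has more than CheckCommittedBatchesWindowMinutes passed since the newest committed batch". A self-contained sketch of that window test, assuming utils.NowUTC is equivalent to time.Now().UTC():

```go
package main

import (
	"fmt"
	"time"
)

// reachTimeout reports whether the last commit is older than the configured
// window, mirroring commitBatchReachTimeout above on plain values.
func reachTimeout(committedAt time.Time, windowMinutes int) bool {
	return time.Now().UTC().Sub(committedAt) > time.Duration(windowMinutes)*time.Minute
}

func main() {
	lastCommit := time.Now().UTC().Add(-7 * time.Minute)
	fmt.Println(reachTimeout(lastCommit, 5))  // true: 7m exceeds the 5m window
	fmt.Println(reachTimeout(lastCommit, 10)) // false: still inside the 10m window
}
```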
@@ -414,6 +414,15 @@ func (r *Layer2Relayer) ProcessPendingBatches() {

 	txHash, err := r.commitSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, blob, fallbackGasLimit)
 	if err != nil {
+		if errors.Is(err, sender.ErrTooManyPendingBlobTxs) {
+			r.metrics.rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal.Inc()
+			log.Debug(
+				"Skipped sending commitBatch tx to L1: too many pending blob txs",
+				"maxPending", r.cfg.SenderConfig.MaxPendingBlobTxs,
+				"err", err,
+			)
+			return
+		}
 		log.Error(
 			"Failed to send commitBatch tx to layer1",
 			"index", dbBatch.Index,

@@ -471,7 +480,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
 		}

 	case types.ProvingTaskVerified:
-		log.Info("Start to roll up zk proof", "hash", batch.Hash)
 		r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
 		if err := r.finalizeBatch(batch, true); err != nil {
 			log.Error("Failed to finalize batch with proof", "index", batch.Index, "hash", batch.Hash, "err", err)

@@ -522,7 +530,7 @@ func (r *Layer2Relayer) ProcessPendingBundles() {
 		}

 	case types.ProvingTaskVerified:
-		log.Info("Start to roll up zk proof", "hash", bundle.Hash)
+		log.Info("Start to roll up zk proof", "bundle hash", bundle.Hash)
 		r.metrics.rollupL2RelayerProcessPendingBundlesFinalizedTotal.Inc()
 		if err := r.finalizeBundle(bundle, true); err != nil {
 			log.Error("Failed to finalize bundle with proof", "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)

@@ -589,11 +597,15 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error

 	var calldata []byte
 	if !r.chainCfg.IsBernoulli(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv0
+		log.Info("Start to roll up zk proof", "batch hash", dbBatch.Hash)
+
 		calldata, err = r.constructFinalizeBatchPayloadCodecV0(dbBatch, dbParentBatch, aggProof)
 		if err != nil {
 			return fmt.Errorf("failed to construct finalizeBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err)
 		}
 	} else if !r.chainCfg.IsCurie(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv1
+		log.Info("Start to roll up zk proof", "batch hash", dbBatch.Hash)
+
 		chunks := make([]*encoding.Chunk, len(dbChunks))
 		for i, c := range dbChunks {
 			blocks, dbErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)

@@ -608,6 +620,8 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
 			return fmt.Errorf("failed to construct finalizeBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err)
 		}
 	} else if !r.chainCfg.IsDarwin(dbChunks[0].StartBlockTime) { // codecv2
+		log.Info("Start to roll up zk proof", "batch hash", dbBatch.Hash)
+
 		chunks := make([]*encoding.Chunk, len(dbChunks))
 		for i, c := range dbChunks {
 			blocks, dbErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
@@ -8,26 +8,27 @@ import (
 )

 type l2RelayerMetrics struct {
-	rollupL2RelayerProcessPendingBatchTotal                     prometheus.Counter
-	rollupL2RelayerProcessPendingBatchSuccessTotal              prometheus.Counter
-	rollupL2RelayerGasPriceOraclerRunTotal                      prometheus.Counter
-	rollupL2RelayerLastGasPrice                                 prometheus.Gauge
-	rollupL2RelayerProcessCommittedBatchesTotal                 prometheus.Counter
-	rollupL2RelayerProcessCommittedBatchesFinalizedTotal        prometheus.Counter
-	rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal prometheus.Counter
-	rollupL2BatchesCommittedConfirmedTotal                      prometheus.Counter
-	rollupL2BatchesCommittedConfirmedFailedTotal                prometheus.Counter
-	rollupL2BatchesFinalizedConfirmedTotal                      prometheus.Counter
-	rollupL2BatchesFinalizedConfirmedFailedTotal                prometheus.Counter
-	rollupL2UpdateGasOracleConfirmedTotal                       prometheus.Counter
-	rollupL2UpdateGasOracleConfirmedFailedTotal                 prometheus.Counter
-	rollupL2ChainMonitorLatestFailedCall                        prometheus.Counter
-	rollupL2ChainMonitorLatestFailedBatchStatus                 prometheus.Counter
-	rollupL2RelayerProcessPendingBundlesTotal                   prometheus.Counter
-	rollupL2RelayerProcessPendingBundlesFinalizedTotal          prometheus.Counter
-	rollupL2RelayerProcessPendingBundlesFinalizedSuccessTotal   prometheus.Counter
-	rollupL2BundlesFinalizedConfirmedTotal                      prometheus.Counter
-	rollupL2BundlesFinalizedConfirmedFailedTotal                prometheus.Counter
+	rollupL2RelayerProcessPendingBatchTotal                         prometheus.Counter
+	rollupL2RelayerProcessPendingBatchSuccessTotal                  prometheus.Counter
+	rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal prometheus.Counter
+	rollupL2RelayerGasPriceOraclerRunTotal                          prometheus.Counter
+	rollupL2RelayerLastGasPrice                                     prometheus.Gauge
+	rollupL2RelayerProcessCommittedBatchesTotal                     prometheus.Counter
+	rollupL2RelayerProcessCommittedBatchesFinalizedTotal            prometheus.Counter
+	rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal     prometheus.Counter
+	rollupL2BatchesCommittedConfirmedTotal                          prometheus.Counter
+	rollupL2BatchesCommittedConfirmedFailedTotal                    prometheus.Counter
+	rollupL2BatchesFinalizedConfirmedTotal                          prometheus.Counter
+	rollupL2BatchesFinalizedConfirmedFailedTotal                    prometheus.Counter
+	rollupL2UpdateGasOracleConfirmedTotal                           prometheus.Counter
+	rollupL2UpdateGasOracleConfirmedFailedTotal                     prometheus.Counter
+	rollupL2ChainMonitorLatestFailedCall                            prometheus.Counter
+	rollupL2ChainMonitorLatestFailedBatchStatus                     prometheus.Counter
+	rollupL2RelayerProcessPendingBundlesTotal                       prometheus.Counter
+	rollupL2RelayerProcessPendingBundlesFinalizedTotal              prometheus.Counter
+	rollupL2RelayerProcessPendingBundlesFinalizedSuccessTotal       prometheus.Counter
+	rollupL2BundlesFinalizedConfirmedTotal                          prometheus.Counter
+	rollupL2BundlesFinalizedConfirmedFailedTotal                    prometheus.Counter
 }

 var (

@@ -46,6 +47,10 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
 			Name: "rollup_layer2_process_pending_batch_success_total",
 			Help: "The total number of layer2 process pending success batch",
 		}),
+		rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "rollup_layer2_process_pending_batch_err_too_many_pending_blob_txs_total",
+			Help: "The total number of layer2 process pending batch failed on too many pending blob txs",
+		}),
 		rollupL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
 			Name: "rollup_layer2_gas_price_oracler_total",
 			Help: "The total number of layer2 gas price oracler run total",
@@ -39,6 +39,11 @@ const (
 	DynamicFeeTxType = "DynamicFeeTx"
 )

+var (
+	// ErrTooManyPendingBlobTxs is returned when the limit of pending blob-carrying transactions is exceeded.
+	ErrTooManyPendingBlobTxs = errors.New("the limit of pending blob-carrying transactions has been exceeded")
+)
+
 // Confirmation struct used to indicate transaction confirmation details
 type Confirmation struct {
 	ContextID string

@@ -180,6 +185,23 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
 	)

 	if blob != nil {
+		// check that the number of pending blob-carrying txs is not too big
+		if s.senderType == types.SenderTypeCommitBatch {
+			var numPendingTransactions int64
+			// Only blob-carrying txs should be counted here, but since blob != nil we know the sender has already switched to blobs.
+			// From now on every SenderTypeCommitBatch tx carries a blob, while some earlier pending txs could still be non-blob.
+			// That overlap can only occur once, at the moment of switching from non-blob to blob (pre- to post-Bernoulli), and it breaks nothing,
+			// so there is no need to additionally check that each counted tx carries a blob.
+			numPendingTransactions, err = s.pendingTransactionOrm.GetCountPendingTransactionsBySenderType(s.ctx, s.senderType)
+			if err != nil {
+				log.Error("failed to count pending transactions", "err", err)
+				return common.Hash{}, fmt.Errorf("failed to count pending transactions, err: %w", err)
+			}
+			if numPendingTransactions >= s.config.MaxPendingBlobTxs {
+				return common.Hash{}, ErrTooManyPendingBlobTxs
+			}
+		}
 		sidecar, err = makeSidecar(blob)
 		if err != nil {
 			log.Error("failed to make sidecar for blob transaction", "error", err)

@@ -518,6 +540,22 @@ func (s *Sender) checkPendingTransaction() {
 			}
 		} else if txnToCheck.Status == types.TxStatusPending && // Only try resubmitting a new transaction based on gas price of the last transaction (status pending) with same ContextID.
 			s.config.EscalateBlocks+txnToCheck.SubmitBlockNumber <= blockNumber {

+			// blockNumber carries the "latest" tag, so check the sender's current nonce to make sure the previous transaction has been confirmed;
+			// otherwise bumping the gas price is not really necessary. Note that while bumping gas prices, the sender also considers L1's new basefee and blobbasefee.
+			currentNonce, err := s.client.NonceAt(s.ctx, common.HexToAddress(txnToCheck.SenderAddress), new(big.Int).SetUint64(blockNumber))
+			if err != nil {
+				log.Error("failed to get current nonce from node", "address", txnToCheck.SenderAddress, "blockNumber", blockNumber, "err", err)
+				return
+			}
+
+			// early return if the previous transaction has not been confirmed yet.
+			// currentNonce is already the confirmed nonce + 1.
+			if tx.Nonce() > currentNonce {
+				log.Debug("previous transaction not yet confirmed, skip bumping gas price", "address", txnToCheck.SenderAddress, "currentNonce", currentNonce, "txNonce", tx.Nonce())
+				continue
+			}
+
 			// It's possible that the pending transaction was marked as failed earlier in this loop (e.g., if one of its replacements has already been confirmed).
 			// Therefore, we fetch the current transaction status again for accuracy before proceeding.
 			status, err := s.pendingTransactionOrm.GetTxStatusByTxHash(s.ctx, tx.Hash())
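ErrTooManyPendingBlobTxs is a sentinel error: callers detect it with errors.Is and back off instead of treating it as a failure, as the Layer2 relayer hunk earlier in this compare does. The mechanics in miniature:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrTooManyPendingBlobTxs = errors.New("the limit of pending blob-carrying transactions has been exceeded")

func send() error {
	// %w keeps the sentinel detectable through wrapping layers.
	return fmt.Errorf("SendTransaction: %w", ErrTooManyPendingBlobTxs)
}

func main() {
	if err := send(); errors.Is(err, ErrTooManyPendingBlobTxs) {
		// Back off quietly, as the relayer does, instead of logging an error.
		fmt.Println("too many pending blob txs; retry next round")
	}
}
```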
@@ -6,6 +6,7 @@ import (
 	"crypto/rand"
 	"errors"
 	"fmt"
+	"math"
 	"math/big"
 	"os"
 	"testing"

@@ -120,7 +121,6 @@ func setupEnv(t *testing.T) {

 func TestSender(t *testing.T) {
 	setupEnv(t)
-
 	t.Run("test new sender", testNewSender)
 	t.Run("test send and retrieve transaction", testSendAndRetrieveTransaction)
 	t.Run("test fallback gas limit", testFallbackGasLimit)

@@ -130,11 +130,13 @@ func TestSender(t *testing.T) {
 	t.Run("test resubmit under priced transaction", testResubmitUnderpricedTransaction)
 	t.Run("test resubmit dynamic fee transaction with rising base fee", testResubmitDynamicFeeTransactionWithRisingBaseFee)
 	t.Run("test resubmit blob transaction with rising base fee and blob base fee", testResubmitBlobTransactionWithRisingBaseFeeAndBlobBaseFee)
+	t.Run("test resubmit nonce gapped transaction", testResubmitNonceGappedTransaction)
 	t.Run("test check pending transaction tx confirmed", testCheckPendingTransactionTxConfirmed)
 	t.Run("test check pending transaction resubmit tx confirmed", testCheckPendingTransactionResubmitTxConfirmed)
 	t.Run("test check pending transaction replaced tx confirmed", testCheckPendingTransactionReplacedTxConfirmed)
 	t.Run("test check pending transaction multiple times with only one transaction pending", testCheckPendingTransactionTxMultipleTimesWithOnlyOneTxPending)
 	t.Run("test blob transaction with blobhash op contract call", testBlobTransactionWithBlobhashOpContractCall)
+	t.Run("test send blob-carrying tx over limit", testSendBlobCarryingTxOverLimit)
 }

 func testNewSender(t *testing.T) {

@@ -517,6 +519,67 @@ func testResubmitBlobTransactionWithRisingBaseFeeAndBlobBaseFee(t *testing.T) {
 	s.Stop()
 }

+func testResubmitNonceGappedTransaction(t *testing.T) {
+	for i, txType := range txTypes {
+		sqlDB, err := db.DB()
+		assert.NoError(t, err)
+		assert.NoError(t, migrate.ResetDB(sqlDB))
+
+		cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
+
+		// Bump gas price, gas tip cap and gas fee cap just enough to touch the minimum threshold of 10% (geth's default config).
+		cfgCopy.EscalateMultipleNum = 110
+		cfgCopy.EscalateMultipleDen = 100
+		cfgCopy.TxType = txType
+
+		// resubmit immediately if not nonce gapped
+		cfgCopy.Confirmations = rpc.LatestBlockNumber
+		cfgCopy.EscalateBlocks = 0
+
+		// stop the background pending-transaction check
+		cfgCopy.CheckPendingTime = math.MaxUint32
+
+		s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
+		assert.NoError(t, err)
+
+		patchGuard1 := gomonkey.ApplyMethodFunc(s.client, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
+			return nil
+		})
+
+		// simulate a transaction that never confirms
+		patchGuard2 := gomonkey.ApplyMethodFunc(s.client, "TransactionReceipt", func(_ context.Context, hash common.Hash) (*gethTypes.Receipt, error) {
+			return nil, errors.New("simulated transaction receipt error")
+		})
+
+		_, err = s.SendTransaction("test-1", &common.Address{}, nil, txBlob[i], 0)
+		assert.NoError(t, err)
+
+		_, err = s.SendTransaction("test-2", &common.Address{}, nil, txBlob[i], 0)
+		assert.NoError(t, err)
+
+		s.checkPendingTransaction()
+
+		txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 10)
+		assert.NoError(t, err)
+		assert.Len(t, txs, 3)
+
+		assert.Equal(t, txs[0].Nonce, txs[1].Nonce)
+		assert.Equal(t, txs[0].Nonce+1, txs[2].Nonce)
+
+		// the first 2 transactions have the same nonce, with one replaced and another pending
+		assert.Equal(t, types.TxStatusReplaced, txs[0].Status)
+		assert.Equal(t, types.TxStatusPending, txs[1].Status)
+
+		// the third transaction has nonce + 1, which will not be replaced due to the nonce gap,
+		// thus the status should be pending
+		assert.Equal(t, types.TxStatusPending, txs[2].Status)
+
+		s.Stop()
+		patchGuard1.Reset()
+		patchGuard2.Reset()
+	}
+}
+
 func testCheckPendingTransactionTxConfirmed(t *testing.T) {
 	for _, txType := range txTypes {
 		sqlDB, err := db.DB()

@@ -818,3 +881,22 @@ func randFieldElement() [32]byte {

 	return gokzg4844.SerializeScalar(r)
 }
+
+func testSendBlobCarryingTxOverLimit(t *testing.T) {
+	cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
+	cfgCopy.TxType = "DynamicFeeTx"
+
+	sqlDB, err := db.DB()
+	assert.NoError(t, err)
+	assert.NoError(t, migrate.ResetDB(sqlDB))
+	s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeCommitBatch, db, nil)
+	assert.NoError(t, err)
+
+	for i := 0; i < int(cfgCopy.MaxPendingBlobTxs); i++ {
+		_, err = s.SendTransaction("0", &common.Address{}, nil, randBlob(), 0)
+		assert.NoError(t, err)
+	}
+	_, err = s.SendTransaction("0", &common.Address{}, nil, randBlob(), 0)
+	assert.ErrorIs(t, err, ErrTooManyPendingBlobTxs)
+	s.Stop()
+}
@@ -85,6 +85,19 @@ func (o *PendingTransaction) GetPendingOrReplacedTransactionsBySenderType(ctx co
 	return transactions, nil
 }

+// GetCountPendingTransactionsBySenderType retrieves the number of pending transactions filtered by sender type.
+func (o *PendingTransaction) GetCountPendingTransactionsBySenderType(ctx context.Context, senderType types.SenderType) (int64, error) {
+	var count int64
+	db := o.db.WithContext(ctx)
+	db = db.Model(&PendingTransaction{})
+	db = db.Where("sender_type = ?", senderType)
+	db = db.Where("status = ?", types.TxStatusPending)
+	if err := db.Count(&count).Error; err != nil {
+		return 0, fmt.Errorf("failed to count pending transactions by sender type, error: %w", err)
+	}
+	return count, nil
+}
+
 // GetConfirmedTransactionsBySenderType retrieves confirmed transactions filtered by sender type, limited to a specified count.
 // for unit test
 func (o *PendingTransaction) GetConfirmedTransactionsBySenderType(ctx context.Context, senderType types.SenderType, limit int) ([]PendingTransaction, error) {
@@ -210,5 +210,6 @@ func TestFunction(t *testing.T) {
 	// l1/l2 gas oracle
 	t.Run("TestImportL1GasPrice", testImportL1GasPrice)
 	t.Run("TestImportL1GasPriceAfterCurie", testImportL1GasPriceAfterCurie)
+	t.Run("TestImportDefaultL1GasPriceDueToL1GasPriceSpike", testImportDefaultL1GasPriceDueToL1GasPriceSpike)
 	t.Run("TestImportL2GasPrice", testImportL2GasPrice)
 }
@@ -4,6 +4,7 @@ import (
 	"context"
 	"math/big"
 	"testing"
+	"time"

 	"github.com/scroll-tech/da-codec/encoding"
 	"github.com/scroll-tech/go-ethereum/common"

@@ -56,6 +57,34 @@ func testImportL1GasPrice(t *testing.T) {
 	assert.Empty(t, blocks[0].OracleTxHash)
 	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)

+	// add a fake batch to pass the commit-batch timeout check
+	chunk := &encoding.Chunk{
+		Blocks: []*encoding.Block{
+			{
+				Header: &gethTypes.Header{
+					Number:     big.NewInt(1),
+					ParentHash: common.Hash{},
+					Difficulty: big.NewInt(0),
+					BaseFee:    big.NewInt(0),
+				},
+				Transactions:   nil,
+				WithdrawRoot:   common.Hash{},
+				RowConsumption: &gethTypes.RowConsumption{},
+			},
+		},
+	}
+	batch := &encoding.Batch{
+		Index:                      0,
+		TotalL1MessagePoppedBefore: 0,
+		ParentBatchHash:            common.Hash{},
+		Chunks:                     []*encoding.Chunk{chunk},
+	}
+	batchOrm := orm.NewBatch(db)
+	dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
+	assert.NoError(t, err)
+	err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted)
+	assert.NoError(t, err)
+
 	// relay gas price
 	l1Relayer.ProcessGasPriceOracle()
 	blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})

@@ -101,6 +130,34 @@ func testImportL1GasPriceAfterCurie(t *testing.T) {
 	assert.Empty(t, blocks[0].OracleTxHash)
 	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)

+	// add a fake batch to pass the commit-batch timeout check
+	chunk := &encoding.Chunk{
+		Blocks: []*encoding.Block{
+			{
+				Header: &gethTypes.Header{
+					Number:     big.NewInt(1),
+					ParentHash: common.Hash{},
+					Difficulty: big.NewInt(0),
+					BaseFee:    big.NewInt(0),
+				},
+				Transactions:   nil,
+				WithdrawRoot:   common.Hash{},
+				RowConsumption: &gethTypes.RowConsumption{},
+			},
+		},
+	}
+	batch := &encoding.Batch{
+		Index:                      0,
+		TotalL1MessagePoppedBefore: 0,
+		ParentBatchHash:            common.Hash{},
+		Chunks:                     []*encoding.Chunk{chunk},
+	}
+	batchOrm := orm.NewBatch(db)
+	dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
+	assert.NoError(t, err)
+	err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted)
+	assert.NoError(t, err)
+
 	// relay gas price
 	l1Relayer.ProcessGasPriceOracle()
 	blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})

@@ -110,6 +167,106 @@ func testImportL1GasPriceAfterCurie(t *testing.T) {
 	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOracleImporting)
 }

+func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) {
+	db := setupDB(t)
+	defer database.CloseDB(db)
+
+	prepareContracts(t)
+
+	l1Cfg := rollupApp.Config.L1Config
+	l1CfgCopy := *l1Cfg
+	// set CheckCommittedBatchesWindowMinutes to zero so the commit-batch timeout check cannot pass
+	l1CfgCopy.RelayerConfig.GasOracleConfig.CheckCommittedBatchesWindowMinutes = 0
+	// Create L1Relayer
+	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1CfgCopy.RelayerConfig, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, relayer.ServiceTypeL1GasOracle, nil)
+	assert.NoError(t, err)
+	defer l1Relayer.StopSenders()
+
+	// Create L1Watcher
+	startHeight, err := l1Client.BlockNumber(context.Background())
+	assert.NoError(t, err)
+	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-2, db, nil)
+
+	// fetch new blocks
+	number, err := l1Client.BlockNumber(context.Background())
+	assert.Greater(t, number-1, startHeight-2)
+	assert.NoError(t, err)
+	err = l1Watcher.FetchBlockHeader(number - 1)
+	assert.NoError(t, err)
+
+	l1BlockOrm := orm.NewL1Block(db)
+	// check db status
+	latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
+	assert.NoError(t, err)
+	assert.Equal(t, number-1, latestBlockHeight)
+	blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
+	assert.NoError(t, err)
+	assert.Equal(t, len(blocks), 1)
+	assert.Empty(t, blocks[0].OracleTxHash)
+	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)
+
+	// add fake batch
+	chunk := &encoding.Chunk{
+		Blocks: []*encoding.Block{
+			{
+				Header: &gethTypes.Header{
+					Number:     big.NewInt(1),
+					ParentHash: common.Hash{},
+					Difficulty: big.NewInt(0),
+					BaseFee:    big.NewInt(0),
+				},
+				Transactions:   nil,
+				WithdrawRoot:   common.Hash{},
+				RowConsumption: &gethTypes.RowConsumption{},
+			},
+		},
+	}
+	batch := &encoding.Batch{
+		Index:                      0,
+		TotalL1MessagePoppedBefore: 0,
+		ParentBatchHash:            common.Hash{},
+		Chunks:                     []*encoding.Chunk{chunk},
+	}
+	batchOrm := orm.NewBatch(db)
+	dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
+	assert.NoError(t, err)
+	err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted)
+	assert.NoError(t, err)
+	time.Sleep(1 * time.Second)
+
+	// relay gas price
+	// the gas price will be relayed as the default value, because with CheckCommittedBatchesWindowMinutes = 0 no committed batch can fall inside the window
+	l1Relayer.ProcessGasPriceOracle()
+	blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
+	assert.NoError(t, err)
+	assert.Equal(t, len(blocks), 1)
+	assert.NotEmpty(t, blocks[0].OracleTxHash)
+	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOracleImporting)
+
+	// fetch new blocks
+	err = l1Watcher.FetchBlockHeader(number)
+	assert.NoError(t, err)
+
+	// check db status
+	latestBlockHeight, err = l1BlockOrm.GetLatestL1BlockHeight(context.Background())
+	assert.NoError(t, err)
+	assert.Equal(t, number, latestBlockHeight)
+	blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
+	assert.NoError(t, err)
+	assert.Equal(t, len(blocks), 1)
+	assert.Empty(t, blocks[0].OracleTxHash)
+	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)
+
+	// relay gas price
+	// the gas price should not be relayed again because it was already set to the default value
+	l1Relayer.ProcessGasPriceOracle()
+	blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
+	assert.NoError(t, err)
+	assert.Equal(t, len(blocks), 1)
+	assert.Empty(t, blocks[0].OracleTxHash)
+	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)
+}
+
 func testImportL2GasPrice(t *testing.T) {
 	db := setupDB(t)
 	defer database.CloseDB(db)
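The fake chunk/batch fixture is pasted verbatim into three tests above; a hypothetical helper in the same test package could remove the duplication. A sketch only, reusing the imports of that test file; the helper name and reduced field set are not part of this change:

```go
// newFakeCommittedBatch is a hypothetical test helper: it inserts a minimal
// one-block batch and marks it committed, so the gas-oracle timeout check
// sees a recent commit. The types and ORM calls are those used in the tests above.
func newFakeCommittedBatch(t *testing.T, db *gorm.DB) {
	chunk := &encoding.Chunk{
		Blocks: []*encoding.Block{{
			Header: &gethTypes.Header{
				Number:     big.NewInt(1),
				Difficulty: big.NewInt(0),
				BaseFee:    big.NewInt(0),
			},
			WithdrawRoot:   common.Hash{},
			RowConsumption: &gethTypes.RowConsumption{},
		}},
	}
	batch := &encoding.Batch{Chunks: []*encoding.Chunk{chunk}}

	batchOrm := orm.NewBatch(db)
	dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
	assert.NoError(t, err)
	assert.NoError(t, batchOrm.UpdateCommitTxHashAndRollupStatus(
		context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted))
}
```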