Compare commits

..

5 Commits

Author SHA1 Message Date
colin
5204ad50e0 fix(bridge-history): unclaimed withdrawals (#1671) 2025-05-28 00:54:27 +08:00
colin
f824fb0efc fix(coordinator): remove initialize euclid VKs (#1669) 2025-05-26 15:42:26 +08:00
colin
a55c7bdc77 refactor(coordinator): remove outdated logic (#1668)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-05-26 14:32:39 +08:00
Alejandro Ranchal-Pedrosa
47b1a037a9 Fix bug sequencer submission strategy and log commit price (#1664)
Co-authored-by: ranchalp <ranchalp@users.noreply.github.com>
2025-05-23 10:02:49 +01:00
Alejandro Ranchal-Pedrosa
ae34020c34 perf(relayer): submission strategy fix logs, use blocktime for submission strategy and log metrics. (#1663)
Co-authored-by: ranchalp <ranchalp@users.noreply.github.com>
Co-authored-by: Jonas Theis <4181434+jonastheis@users.noreply.github.com>
2025-05-22 20:38:10 +08:00
13 changed files with 98 additions and 138 deletions

View File

@@ -154,7 +154,7 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("tx_status = ?", types.TxStatusTypeSent)
db = db.Where("tx_status in (?)", []types.TxStatusType{types.TxStatusTypeSent, types.TxStatusTypeFailedRelayed, types.TxStatusTypeRelayTxReverted})
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)

View File

@@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
elapsed := time.Since(begin)
sql, rowsAffected := fc()
g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}
// InitDB init the db handler

View File

@@ -1,10 +1,7 @@
#![allow(static_mut_refs)]
mod euclid;
mod euclidv2;
use anyhow::{bail, Result};
use euclid::EuclidVerifier;
use euclidv2::EuclidV2Verifier;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
@@ -42,26 +39,14 @@ pub struct VerifierConfig {
type HardForkName = String;
struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
pub fn init(config: VerifierConfig) {
let verifier = EuclidVerifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_LOW
.set(VerifierPair(
"euclid".to_string(),
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_HIGH
.set(VerifierPair(
"euclidV2".to_string(),
config.high_version_circuit.fork_name,
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
@@ -70,12 +55,6 @@ pub fn init(config: VerifierConfig) {
pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
unsafe {
if let Some(verifier) = VERIFIER_LOW.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
if let Some(verifier) = VERIFIER_HIGH.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());

View File

@@ -1,65 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidVerifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifierEuclidV1,
}
impl EuclidVerifier {
pub fn new(assets_dir: &str) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifierEuclidV1::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}
}
impl ProofVerifier for EuclidVerifier {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
}
fn dump_vk(&self, file: &Path) {
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}
}

View File

@@ -11,7 +11,6 @@ import (
)
const (
EuclidFork = "euclid"
EuclidV2Fork = "euclidV2"
EuclidV2ForkNameForProver = "euclidv2"
@@ -46,7 +45,7 @@ const (
// ChunkTaskDetail is a type containing ChunkTask detail for chunk task.
type ChunkTaskDetail struct {
// use one of the string of EuclidFork / EuclidV2Fork
// use one of the string of "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
BlockHashes []common.Hash `json:"block_hashes"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
@@ -97,7 +96,7 @@ func (e *Byte48) UnmarshalJSON(input []byte) error {
// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
// use one of the string of EuclidFork / EuclidV2Fork
// use one of the string of "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
@@ -110,7 +109,7 @@ type BatchTaskDetail struct {
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
// use one of the string of EuclidFork / EuclidV2Fork
// use one of the string of "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.15"
var tag = "v4.5.20"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -9,7 +9,6 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
@@ -34,7 +33,6 @@ func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *Logi
var highHardForks []string
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
highHardForks = append(highHardForks, message.EuclidFork, message.EuclidV2Fork)
proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
return &LoginLogic{

View File

@@ -79,10 +79,6 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
OpenVMVkMap: make(map[string]struct{}),
}
if err := v.loadOpenVMVks(message.EuclidFork); err != nil {
return nil, err
}
if err := v.loadOpenVMVks(message.EuclidV2Fork); err != nil {
return nil, err
}

View File

@@ -179,13 +179,13 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return
}
hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil)
txHash, _, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil)
if err != nil {
log.Error("Failed to send gas oracle update tx to layer2", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)
return
}
err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, txHash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return
@@ -195,7 +195,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
r.lastBlobBaseFee = blobBaseFee
r.metrics.rollupL1RelayerLatestBaseFee.Set(float64(r.lastBaseFee))
r.metrics.rollupL1RelayerLatestBlobBaseFee.Set(float64(r.lastBlobBaseFee))
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee)
log.Info("Update l1 base fee", "txHash", txHash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee)
}
}
}

View File

@@ -104,10 +104,15 @@ type StrategyParams struct {
}
// bestParams maps the supported batch-submission timeout windows (in seconds)
// to their best strategy rules. Keys cover the 2h/5h/12h windows plus the same
// windows +20 minutes, to account for the time currently needed to create a
// batch (time is measured from block creation, not batch creation).
var bestParams = map[uint64]StrategyParams{
	7200:  {BaselineType: PctMin, BaselineParam: 0.10, Gamma: 0.4, Beta: 8, RelaxType: Exponential}, // 2h
	8400:  {BaselineType: PctMin, BaselineParam: 0.10, Gamma: 0.4, Beta: 8, RelaxType: Exponential}, // 2h20m
	18000: {BaselineType: PctMin, BaselineParam: 0.30, Gamma: 0.6, Beta: 20, RelaxType: Sigmoid},    // 5h
	19200: {BaselineType: PctMin, BaselineParam: 0.30, Gamma: 0.6, Beta: 20, RelaxType: Sigmoid},    // 5h20m
	// NOTE(review): was 42800, which matches no advertised window (12*3600 = 43200);
	// with the strict lookup in NewLayer2Relayer a configured 12h timeout would be rejected.
	43200: {BaselineType: PctMin, BaselineParam: 0.50, Gamma: 0.5, Beta: 20, RelaxType: Sigmoid}, // 12h
	44400: {BaselineType: PctMin, BaselineParam: 0.50, Gamma: 0.5, Beta: 20, RelaxType: Sigmoid}, // 12h20m
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
@@ -147,6 +152,11 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
return nil, fmt.Errorf("invalid service type for l2_relayer: %v", serviceType)
}
strategy, ok := bestParams[uint64(cfg.BatchSubmission.TimeoutSec)]
if !ok {
return nil, fmt.Errorf("invalid timeout for batch submission: %v", cfg.BatchSubmission.TimeoutSec)
}
layer2Relayer := &Layer2Relayer{
ctx: ctx,
db: db,
@@ -164,7 +174,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
l1RollupABI: bridgeAbi.ScrollChainABI,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
batchStrategy: bestParams[uint64(cfg.BatchSubmission.TimeoutSec)],
batchStrategy: strategy,
cfg: cfg,
chainCfg: chainCfg,
}
@@ -271,11 +281,11 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
}
// submit genesis batch to L1 rollup contract
txHash, err := r.commitSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, calldata, nil)
txHash, _, err := r.commitSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, calldata, nil)
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash.String(), "batchHash", batchHash)
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash)
// wait for confirmation
// we assume that no other transactions are sent before initializeGenesis completes
@@ -326,6 +336,8 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// if backlog outgrow max size, forcesubmit enough oldest batches
backlogCount, err := r.batchOrm.GetFailedAndPendingBatchesCount(r.ctx)
r.metrics.rollupL2RelayerBacklogCounts.Set(float64(backlogCount))
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
@@ -333,9 +345,15 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
var forceSubmit bool
oldestBatchTimestamp := dbBatches[0].CreatedAt
startChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, dbBatches[0].StartChunkIndex)
if err != nil {
log.Error("failed to get first chunk", "err", err, "batch index", dbBatches[0].Index, "chunk index", dbBatches[0].StartChunkIndex)
return
}
oldestBlockTimestamp := time.Unix(int64(startChunk.StartBlockTime), 0)
// if the batch with the oldest index is too old, we force submit all batches that we have so far in the next step
if r.cfg.BatchSubmission.TimeoutSec > 0 && time.Since(oldestBatchTimestamp) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
if r.cfg.BatchSubmission.TimeoutSec > 0 && time.Since(oldestBlockTimestamp) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
forceSubmit = true
}
@@ -346,10 +364,12 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
if !forceSubmit {
// check if we should skip submitting the batch based on the fee target
skip, err := r.skipSubmitByFee(oldestBatchTimestamp)
skip, err := r.skipSubmitByFee(oldestBlockTimestamp, r.metrics)
// return if not hitting target price
if skip {
log.Debug("Skipping batch submission", "reason", err)
log.Debug("Skipping batch submission", "first batch index", dbBatches[0].Index, "backlog count", backlogCount, "reason", err)
log.Debug("first batch index", dbBatches[0].Index)
log.Debug("backlog count", backlogCount)
return
}
if err != nil {
@@ -432,7 +452,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
}
if forceSubmit {
log.Info("Forcing submission of batches due to timeout", "batch index", batchesToSubmit[0].Batch.Index, "created at", batchesToSubmit[0].Batch.CreatedAt)
log.Info("Forcing submission of batches due to timeout", "batch index", batchesToSubmit[0].Batch.Index, "first block created at", oldestBlockTimestamp)
}
// We have at least 1 batch to commit
@@ -457,7 +477,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
txHash, err := r.commitSender.SendTransaction(r.contextIDFromBatches(batchesToSubmit), &r.cfg.RollupContractAddress, calldata, blobs)
txHash, blobBaseFee, err := r.commitSender.SendTransaction(r.contextIDFromBatches(batchesToSubmit), &r.cfg.RollupContractAddress, calldata, blobs)
if err != nil {
if errors.Is(err, sender.ErrTooManyPendingBlobTxs) {
r.metrics.rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal.Inc()
@@ -497,6 +517,8 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
r.metrics.rollupL2RelayerCommitThroughput.Add(float64(totalGasUsed))
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Add(float64(len(batchesToSubmit)))
r.metrics.rollupL2RelayerProcessBatchesPerTxCount.Set(float64(len(batchesToSubmit)))
r.metrics.rollupL2RelayerCommitLatency.Set(time.Since(oldestBlockTimestamp).Seconds())
r.metrics.rollupL2RelayerCommitPrice.Set(float64(blobBaseFee))
log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
}
@@ -680,7 +702,7 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
return fmt.Errorf("unsupported codec version in finalizeBundle, bundle index: %v, version: %d", bundle.Index, bundle.CodecVersion)
}
txHash, err := r.finalizeSender.SendTransaction("finalizeBundle-"+bundle.Hash, &r.cfg.RollupContractAddress, calldata, nil)
txHash, _, err := r.finalizeSender.SendTransaction("finalizeBundle-"+bundle.Hash, &r.cfg.RollupContractAddress, calldata, nil)
if err != nil {
log.Error("finalizeBundle in layer1 failed", "with proof", withProof, "index", bundle.Index,
"start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex,
@@ -1079,7 +1101,7 @@ func calculateTargetPrice(windowSec uint64, strategy StrategyParams, firstTime t
// skipSubmitByFee returns (true, nil) when submission should be skipped right now
// because the blob fee is above target and the timeout window hasn't yet elapsed.
// Otherwise it returns (false, err).
func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time) (bool, error) {
func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time, metrics *l2RelayerMetrics) (bool, error) {
windowSec := uint64(r.cfg.BatchSubmission.TimeoutSec)
hist, err := r.fetchBlobFeeHistory(windowSec)
@@ -1094,6 +1116,11 @@ func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time) (bool, error) {
target := calculateTargetPrice(windowSec, r.batchStrategy, oldest, hist)
current := hist[len(hist)-1]
currentFloat, _ := current.Float64()
targetFloat, _ := target.Float64()
metrics.rollupL2RelayerCurrentBlobPrice.Set(currentFloat)
metrics.rollupL2RelayerTargetBlobPrice.Set(targetFloat)
// if current fee > target and still inside the timeout window, skip
if current.Cmp(target) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
return true, fmt.Errorf(

View File

@@ -26,6 +26,12 @@ type l2RelayerMetrics struct {
rollupL2RelayerCommitBlockHeight prometheus.Gauge
rollupL2RelayerCommitThroughput prometheus.Counter
rollupL2RelayerCurrentBlobPrice prometheus.Gauge
rollupL2RelayerTargetBlobPrice prometheus.Gauge
rollupL2RelayerCommitLatency prometheus.Gauge
rollupL2RelayerBacklogCounts prometheus.Gauge
rollupL2RelayerCommitPrice prometheus.Gauge
}
var (
@@ -104,6 +110,26 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
Name: "rollup_l2_relayer_commit_throughput",
Help: "The cumulative gas used in blocks committed by the L2 relayer",
}),
rollupL2RelayerTargetBlobPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_target_blob_price",
Help: "The target blob price for the L2 relayer's submission strategy",
}),
rollupL2RelayerCurrentBlobPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_current_blob_price",
Help: "The current blob price",
}),
rollupL2RelayerCommitLatency: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_commit_latency",
Help: "The latency of the commit measured from oldest blocktime",
}),
rollupL2RelayerBacklogCounts: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_backlog_counts",
Help: "The number of pending batches in the backlog",
}),
rollupL2RelayerCommitPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_commit_price",
Help: "The commit price for the L2 relayer's submission strategy",
}),
}
})
return l2RelayerMetric

View File

@@ -171,7 +171,7 @@ func (s *Sender) getFeeData(target *common.Address, data []byte, sidecar *gethTy
}
// SendTransaction send a signed L2tL1 transaction.
func (s *Sender) SendTransaction(contextID string, target *common.Address, data []byte, blobs []*kzg4844.Blob) (common.Hash, error) {
func (s *Sender) SendTransaction(contextID string, target *common.Address, data []byte, blobs []*kzg4844.Blob) (common.Hash, uint64, error) {
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
var (
feeData *FeeData
@@ -190,37 +190,37 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
numPendingTransactions, err = s.pendingTransactionOrm.GetCountPendingTransactionsBySenderType(s.ctx, s.senderType)
if err != nil {
log.Error("failed to count pending transactions", "err: %w", err)
return common.Hash{}, fmt.Errorf("failed to count pending transactions, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to count pending transactions, err: %w", err)
}
if numPendingTransactions >= s.config.MaxPendingBlobTxs {
return common.Hash{}, ErrTooManyPendingBlobTxs
return common.Hash{}, 0, ErrTooManyPendingBlobTxs
}
}
sidecar, err = makeSidecar(blobs)
if err != nil {
log.Error("failed to make sidecar for blob transaction", "error", err)
return common.Hash{}, fmt.Errorf("failed to make sidecar for blob transaction, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to make sidecar for blob transaction, err: %w", err)
}
}
blockNumber, baseFee, blobBaseFee, err := s.getBlockNumberAndBaseFeeAndBlobFee(s.ctx)
if err != nil {
log.Error("failed to get block number and base fee", "error", err)
return common.Hash{}, fmt.Errorf("failed to get block number and base fee, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to get block number and base fee, err: %w", err)
}
if feeData, err = s.getFeeData(target, data, sidecar, baseFee, blobBaseFee); err != nil {
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
log.Error("failed to get fee data", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to get fee data, err: %w", err)
}
signedTx, err := s.createTx(feeData, target, data, sidecar, s.transactionSigner.GetNonce())
if err != nil {
s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc()
log.Error("failed to create signed tx (non-resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
return common.Hash{}, fmt.Errorf("failed to create signed transaction, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to create signed transaction, err: %w", err)
}
// Insert the transaction into the pending transaction table.
@@ -228,14 +228,14 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
// This case will be handled by the checkPendingTransaction function.
if err = s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, contextID, s.getSenderMeta(), signedTx, blockNumber); err != nil {
log.Error("failed to insert transaction", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
return common.Hash{}, fmt.Errorf("failed to insert transaction, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to insert transaction, err: %w", err)
}
if err := s.client.SendTransaction(s.ctx, signedTx); err != nil {
// Delete the transaction from the pending transaction table if it fails to send.
if updateErr := s.pendingTransactionOrm.DeleteTransactionByTxHash(s.ctx, signedTx.Hash()); updateErr != nil {
log.Error("failed to delete transaction", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", updateErr)
return common.Hash{}, fmt.Errorf("failed to delete transaction, err: %w", updateErr)
return common.Hash{}, 0, fmt.Errorf("failed to delete transaction, err: %w", updateErr)
}
log.Error("failed to send tx", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", err)
@@ -244,12 +244,12 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
if strings.Contains(err.Error(), "nonce too low") {
s.resetNonce(context.Background())
}
return common.Hash{}, fmt.Errorf("failed to send transaction, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to send transaction, err: %w", err)
}
s.transactionSigner.SetNonce(signedTx.Nonce() + 1)
return signedTx.Hash(), nil
return signedTx.Hash(), blobBaseFee, nil
}
func (s *Sender) createTx(feeData *FeeData, target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, nonce uint64) (*gethTypes.Transaction, error) {

View File

@@ -187,7 +187,7 @@ func testSendAndRetrieveTransaction(t *testing.T) {
if txBlob[i] != nil {
blobs = []*kzg4844.Blob{txBlob[i]}
}
hash, err := s.SendTransaction("0", &common.Address{}, nil, blobs)
hash, _, err := s.SendTransaction("0", &common.Address{}, nil, blobs)
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
assert.NoError(t, err)
@@ -545,10 +545,10 @@ func testResubmitNonceGappedTransaction(t *testing.T) {
if txBlob[i] != nil {
blobs = []*kzg4844.Blob{txBlob[i]}
}
_, err = s.SendTransaction("test-1", &common.Address{}, nil, blobs)
_, _, err = s.SendTransaction("test-1", &common.Address{}, nil, blobs)
assert.NoError(t, err)
_, err = s.SendTransaction("test-2", &common.Address{}, nil, blobs)
_, _, err = s.SendTransaction("test-2", &common.Address{}, nil, blobs)
assert.NoError(t, err)
s.checkPendingTransaction()
@@ -589,7 +589,7 @@ func testCheckPendingTransactionTxConfirmed(t *testing.T) {
return nil
})
_, err = s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
_, _, err = s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
@@ -631,7 +631,7 @@ func testCheckPendingTransactionResubmitTxConfirmed(t *testing.T) {
return nil
})
originTxHash, err := s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
originTxHash, _, err := s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
@@ -691,7 +691,7 @@ func testCheckPendingTransactionReplacedTxConfirmed(t *testing.T) {
return nil
})
txHash, err := s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
txHash, _, err := s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
@@ -761,7 +761,7 @@ func testCheckPendingTransactionTxMultipleTimesWithOnlyOneTxPending(t *testing.T
return nil
})
_, err = s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
_, _, err = s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
@@ -835,7 +835,7 @@ func testBlobTransactionWithBlobhashOpContractCall(t *testing.T) {
assert.NoError(t, err)
defer s.Stop()
_, err = s.SendTransaction("0", &testContractsAddress, data, blobs)
_, _, err = s.SendTransaction("0", &testContractsAddress, data, blobs)
assert.NoError(t, err)
var txHash common.Hash
@@ -893,10 +893,10 @@ func testSendBlobCarryingTxOverLimit(t *testing.T) {
assert.NoError(t, err)
for i := 0; i < int(cfgCopy.MaxPendingBlobTxs); i++ {
_, err = s.SendTransaction("0", &common.Address{}, nil, randBlobs(1))
_, _, err = s.SendTransaction("0", &common.Address{}, nil, randBlobs(1))
assert.NoError(t, err)
}
_, err = s.SendTransaction("0", &common.Address{}, nil, randBlobs(1))
_, _, err = s.SendTransaction("0", &common.Address{}, nil, randBlobs(1))
assert.ErrorIs(t, err, ErrTooManyPendingBlobTxs)
s.Stop()
}