perf(relayer): fix submission strategy logs, use block time for the submission strategy, and log metrics (#1663)

Co-authored-by: ranchalp <ranchalp@users.noreply.github.com>
Co-authored-by: Jonas Theis <4181434+jonastheis@users.noreply.github.com>
Alejandro Ranchal-Pedrosa
2025-05-22 14:38:10 +02:00
committed by GitHub
parent fa9fab6e98
commit ae34020c34
4 changed files with 45 additions and 8 deletions

View File

@@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
 func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
     elapsed := time.Since(begin)
     sql, rowsAffected := fc()
-    g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
+    g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
 }

 // InitDB init the db handler
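The only change in this file demotes gorm's per-query output from Debug to Trace on the embedded geth-style logger, so SQL statements stay out of normal debug logs. For context, a self-contained sketch of the bridge; only the Trace method mirrors the code in this commit, and the package name, struct layout, and import paths are assumptions:

// Hypothetical sketch of a gorm-to-geth logging bridge.
package database

import (
    "context"
    "time"

    "github.com/scroll-tech/go-ethereum/log"
    "gorm.io/gorm/utils"
)

type gormLogger struct {
    gethLogger log.Logger
}

// Trace is invoked by gorm after every statement. Logging it at Trace level
// (instead of Debug) keeps per-query SQL out of normal debug output.
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
    elapsed := time.Since(begin)
    sql, rowsAffected := fc()
    g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}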

View File

@@ -5,7 +5,7 @@ import (
     "runtime/debug"
 )

-var tag = "v4.5.15"
+var tag = "v4.5.16"

 var commit = func() string {
     if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -326,6 +326,8 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
     // if backlog outgrow max size, forcesubmit enough oldest batches
     backlogCount, err := r.batchOrm.GetFailedAndPendingBatchesCount(r.ctx)
+    r.metrics.rollupL2RelayerBacklogCounts.Set(float64(backlogCount))
+
     if err != nil {
         log.Error("Failed to fetch pending L2 batches", "err", err)
         return
@@ -333,9 +335,15 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
     var forceSubmit bool
-    oldestBatchTimestamp := dbBatches[0].CreatedAt
+    startChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, dbBatches[0].StartChunkIndex)
+    oldestBlockTimestamp := time.Unix(int64(startChunk.StartBlockTime), 0)
+    if err != nil {
+        log.Error("failed to get first chunk", "err", err, "batch index", dbBatches[0].Index, "chunk index", dbBatches[0].StartChunkIndex)
+        return
+    }
+
     // if the batch with the oldest index is too old, we force submit all batches that we have so far in the next step
-    if r.cfg.BatchSubmission.TimeoutSec > 0 && time.Since(oldestBatchTimestamp) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
+    if r.cfg.BatchSubmission.TimeoutSec > 0 && time.Since(oldestBlockTimestamp) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
         forceSubmit = true
     }
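The core of the change: the force-submit timeout is now measured from the timestamp of the batch's first L2 block (taken from its start chunk) rather than from the batch row's CreatedAt, so time already spent producing blocks counts toward the submission deadline. A minimal, runnable sketch of that decision; the field and parameter names are illustrative, mirroring the diff:

package main

import (
    "fmt"
    "time"
)

// shouldForceSubmit reports whether the oldest pending batch has exceeded the
// submission timeout, measured from its first block's timestamp (seconds since
// the Unix epoch) rather than from when the batch row was inserted into the DB.
func shouldForceSubmit(startBlockTime uint64, timeoutSec int64, now time.Time) bool {
    if timeoutSec <= 0 {
        return false // timeout disabled in config
    }
    oldestBlockTimestamp := time.Unix(int64(startBlockTime), 0)
    return now.Sub(oldestBlockTimestamp) > time.Duration(timeoutSec)*time.Second
}

func main() {
    // First block was produced two hours ago, timeout is one hour: force submit.
    start := uint64(time.Now().Add(-2 * time.Hour).Unix())
    fmt.Println(shouldForceSubmit(start, 3600, time.Now())) // true
}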
@@ -346,10 +354,12 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
     if !forceSubmit {
         // check if we should skip submitting the batch based on the fee target
-        skip, err := r.skipSubmitByFee(oldestBatchTimestamp)
+        skip, err := r.skipSubmitByFee(oldestBlockTimestamp, r.metrics)
         // return if not hitting target price
         if skip {
-            log.Debug("Skipping batch submission", "reason", err)
+            log.Debug("Skipping batch submission", "first batch index", dbBatches[0].Index, "backlog count", backlogCount, "reason", err)
+            log.Debug("first batch index", dbBatches[0].Index)
+            log.Debug("backlog count", backlogCount)
             return
         }
         if err != nil {
@@ -432,7 +442,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
     }

     if forceSubmit {
-        log.Info("Forcing submission of batches due to timeout", "batch index", batchesToSubmit[0].Batch.Index, "created at", batchesToSubmit[0].Batch.CreatedAt)
+        log.Info("Forcing submission of batches due to timeout", "batch index", batchesToSubmit[0].Batch.Index, "first block created at", oldestBlockTimestamp)
     }

     // We have at least 1 batch to commit
@@ -497,6 +507,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
     r.metrics.rollupL2RelayerCommitThroughput.Add(float64(totalGasUsed))
     r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Add(float64(len(batchesToSubmit)))
     r.metrics.rollupL2RelayerProcessBatchesPerTxCount.Set(float64(len(batchesToSubmit)))
+    r.metrics.rollupL2RelayerCommitLatency.Set(time.Since(oldestBlockTimestamp).Seconds())

     log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
 }
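The new commit-latency gauge records how long the oldest block in the submitted batch waited before the commitBatches transaction went out. A tiny hedged sketch of that measurement; the package name and helper are illustrative, prometheus.Gauge is the standard client_golang interface:

package relayer

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

// recordCommitLatency mirrors the new metric update: latency is the wall-clock
// time from the first block in the committed batch until submission to L1.
func recordCommitLatency(gauge prometheus.Gauge, oldestBlockTimestamp time.Time) {
    gauge.Set(time.Since(oldestBlockTimestamp).Seconds())
}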
@@ -1079,7 +1090,7 @@ func calculateTargetPrice(windowSec uint64, strategy StrategyParams, firstTime t
 // skipSubmitByFee returns (true, nil) when submission should be skipped right now
 // because the blobfee is above target and the timeout window hasnt yet elapsed.
 // Otherwise returns (false, err)
-func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time) (bool, error) {
+func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time, metrics *l2RelayerMetrics) (bool, error) {
     windowSec := uint64(r.cfg.BatchSubmission.TimeoutSec)

     hist, err := r.fetchBlobFeeHistory(windowSec)
@@ -1094,6 +1105,11 @@ func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time) (bool, error) {
     target := calculateTargetPrice(windowSec, r.batchStrategy, oldest, hist)
     current := hist[len(hist)-1]

+    currentFloat, _ := current.Float64()
+    targetFloat, _ := target.Float64()
+    metrics.rollupL2RelayerCurrentBlobPrice.Set(currentFloat)
+    metrics.rollupL2RelayerTargetBlobPrice.Set(targetFloat)
+
     // if current fee > target and still inside the timeout window, skip
     if current.Cmp(target) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
         return true, fmt.Errorf(
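The strategy now also exports the comparison it makes: the current and target blob prices are converted to float64 and published as gauges, so the skip/submit decision can be watched on a dashboard. A hedged sketch of that conversion, assuming the fee values are held as *big.Int (as go-ethereum reports blob base fees); (*big.Int).Float64 needs Go 1.21+, and the rounding indicator is deliberately ignored since the gauges exist for observability only:

package main

import (
    "math/big"

    "github.com/prometheus/client_golang/prometheus"
)

// exportBlobPrices mirrors the new gauge updates in skipSubmitByFee.
func exportBlobPrices(current, target *big.Int, currentGauge, targetGauge prometheus.Gauge) {
    currentFloat, _ := current.Float64() // rounding accuracy ignored; monitoring only
    targetFloat, _ := target.Float64()
    currentGauge.Set(currentFloat)
    targetGauge.Set(targetFloat)
}

func main() {
    // Unregistered gauges are fine for a standalone example; fee values are made up.
    cur := prometheus.NewGauge(prometheus.GaugeOpts{Name: "rollup_l2_relayer_current_blob_price"})
    tgt := prometheus.NewGauge(prometheus.GaugeOpts{Name: "rollup_l2_relayer_target_blob_price"})
    exportBlobPrices(big.NewInt(3_000_000_000), big.NewInt(2_500_000_000), cur, tgt)
}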

View File

@@ -26,6 +26,11 @@ type l2RelayerMetrics struct {
     rollupL2RelayerCommitBlockHeight                 prometheus.Gauge
     rollupL2RelayerCommitThroughput                  prometheus.Counter
+    rollupL2RelayerCurrentBlobPrice                  prometheus.Gauge
+    rollupL2RelayerTargetBlobPrice                   prometheus.Gauge
+    rollupL2RelayerCommitLatency                     prometheus.Gauge
+    rollupL2RelayerBacklogCounts                     prometheus.Gauge
 }

 var (
@@ -104,6 +109,22 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
                 Name: "rollup_l2_relayer_commit_throughput",
                 Help: "The cumulative gas used in blocks committed by the L2 relayer",
             }),
+            rollupL2RelayerTargetBlobPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+                Name: "rollup_l2_relayer_target_blob_price",
+                Help: "The target blob price for the L2 relayer's submission strategy",
+            }),
+            rollupL2RelayerCurrentBlobPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+                Name: "rollup_l2_relayer_current_blob_price",
+                Help: "The current blob price",
+            }),
+            rollupL2RelayerCommitLatency: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+                Name: "rollup_l2_relayer_commit_latency",
+                Help: "The latency of the commit measured from oldest blocktime",
+            }),
+            rollupL2RelayerBacklogCounts: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+                Name: "rollup_l2_relayer_backlog_counts",
+                Help: "The number of pending batches in the backlog",
+            }),
         }
     })
     return l2RelayerMetric
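For completeness, a short sketch of how gauges registered this way surface to Prometheus: promauto.With(reg) attaches them to the given registry, and serving that registry over HTTP exposes them in the text format under the names added above. The port and handler wiring here are assumptions, not taken from this repo:

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()

    backlog := promauto.With(reg).NewGauge(prometheus.GaugeOpts{
        Name: "rollup_l2_relayer_backlog_counts",
        Help: "The number of pending batches in the backlog",
    })
    backlog.Set(12)

    // GET /metrics then returns lines such as:
    //   # HELP rollup_l2_relayer_backlog_counts The number of pending batches in the backlog
    //   # TYPE rollup_l2_relayer_backlog_counts gauge
    //   rollup_l2_relayer_backlog_counts 12
    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
    log.Fatal(http.ListenAndServe(":9100", nil))
}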