coordinator: update Shomei frontend at LATEST finalization (#831)

* coordinators: add a finalization monitor on LATEST to update the Shomei frontend faster.

* coordinators: fix spotless

* coordinators: adapt local configs so they do not try to update disabled services in the docker local stack
Fluent Crafter authored 2025-04-02 09:57:53 +01:00, committed by GitHub
commit 811743bb5a (parent 2bb28285fe)
8 changed files with 105 additions and 37 deletions
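
Why querying at LATEST makes the frontend update faster: the monitor reads the rollup contract's finalization state from L1, and the block tag it queries decides how stale that view can be. A rough, illustrative comparison in Kotlin, assuming 12-second L1 slots and roughly two epochs to finality (these numbers and the helper name worstCaseUpdateDelaySeconds are assumptions for illustration, not taken from this repository):

// Worst-case extra delay, in seconds, before a new finalization reaches the Shomei frontend.
// Assumptions (illustrative only): 12 s L1 slots, ~2 epochs (64 slots) for an L1 block to finalize.
fun worstCaseUpdateDelaySeconds(queryBlockTag: String, pollingIntervalSeconds: Long = 12): Long {
  val finalityLagSeconds = when (queryBlockTag) {
    "LATEST" -> 0L            // the finalizing tx is visible as soon as it is included in a block
    else -> 64L * 12L         // "FINALIZED": wait for that L1 block to finalize ("SAFE" sits in between)
  }
  return finalityLagSeconds + pollingIntervalSeconds
}

fun main() {
  println(worstCaseUpdateDelaySeconds("LATEST"))     // ~12 s
  println(worstCaseUpdateDelaySeconds("FINALIZED"))  // ~780 s, i.e. about 13 minutes
}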

View File

@@ -91,6 +91,10 @@ request-retry.failures-warning-threshold=2
endpoints=["http://shomei-frontend:8888/"]
request-retry.backoff-delay="PT1S"
request-retry.failures-warning-threshold=2
l1-query-block-tag="LATEST"
l1-polling-interval="PT12S"
l1-request-retry.backoff-delay="PT1S"
l1-request-retry.failures-warning-threshold=2
[api]
observability_port=9545
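
The four new l1-* keys populate the optional fields added to Type2StateProofProviderConfig later in this commit. As a sketch of how the [type2-state-proof-provider] section above would land in Kotlin once parsed (coordinator-side imports omitted; the mapping itself is an illustration, the field names come from the config diff below):

import java.net.URI
import java.time.Duration

// Approximate in-code equivalent of the docker config section above.
val type2StateProofProvider = Type2StateProofProviderConfig(
  endpoints = listOf(URI.create("http://shomei-frontend:8888/").toURL()),
  requestRetry = RequestRetryConfigTomlFriendly(
    backoffDelay = Duration.parse("PT1S"),          // request-retry.backoff-delay
    failuresWarningThreshold = 2                    // request-retry.failures-warning-threshold
  ),
  l1QueryBlockTag = BlockParameter.Tag.LATEST,      // l1-query-block-tag="LATEST"
  l1PollingInterval = Duration.parse("PT12S"),      // l1-polling-interval="PT12S"
  l1RequestRetry = RequestRetryConfigTomlFriendly(
    backoffDelay = Duration.parse("PT1S"),          // l1-request-retry.backoff-delay
    failuresWarningThreshold = 2                    // l1-request-retry.failures-warning-threshold
  )
)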

View File

@@ -23,3 +23,11 @@ endpoints=["http://127.0.0.1:8745"]
[traces.conflation-v2]
endpoints=["http://127.0.0.1:8745"]
[type2-state-proof-provider]
endpoints=[]
[l2-network-gas-pricing.json-rpc-pricing-propagation]
disabled=true
geth-gas-price-update-recipients=[]
besu-gas-price-update-recipients=["http://127.0.0.1:9045"]
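
This local config leaves the type 2 state proof provider without endpoints on purpose: with the guard added in the coordinator (see the Kotlin diff below), an absent section or an empty endpoint list means the Shomei finalization monitor is never started, so the local stack no longer retries RPC calls against containers that are not running. The check boils down to roughly this (hypothetical helper name, logic mirrored from the diff):

// Mirrors the guard in setupL1FinalizationMonitorForShomeiFrontend:
// no config or no endpoints -> the setup returns the no-op DisabledLongRunningService.
fun shomeiFrontendUpdatesEnabled(config: Type2StateProofProviderConfig?): Boolean =
  config != null && config.endpoints.isNotEmpty()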

View File

@@ -68,3 +68,6 @@ endpoints = ["http://127.0.0.1:8889/"]
[conflation]
#switch-block-number=20
[type2-state-proof-provider]
endpoints=[]

View File

@@ -40,6 +40,7 @@ import net.consensys.linea.web3j.SmartContractErrors
import net.consensys.linea.web3j.Web3jBlobExtended
import net.consensys.zkevm.LongRunningService
import net.consensys.zkevm.coordinator.app.config.CoordinatorConfig
import net.consensys.zkevm.coordinator.app.config.Type2StateProofProviderConfig
import net.consensys.zkevm.coordinator.blockcreation.BatchesRepoBasedLastProvenBlockNumberProvider
import net.consensys.zkevm.coordinator.blockcreation.BlockCreationMonitor
import net.consensys.zkevm.coordinator.blockcreation.GethCliqueSafeBlockProvider
@@ -227,6 +228,14 @@ class L1DependentApp(
)
}
private val l1FinalizationHandlerForShomeiRpc: LongRunningService = setupL1FinalizationMonitorForShomeiFrontend(
type2StateProofProviderConfig = configs.type2StateProofProvider,
httpJsonRpcClientFactory = httpJsonRpcClientFactory,
lineaRollupClient = lineaRollupClient,
l2Web3jClient = l2Web3jClient,
vertx = vertx
)
private val gasPriceCapProvider =
if (configs.l1DynamicGasPriceCapService.enabled) {
val feeHistoryPercentileWindowInBlocks =
@@ -875,20 +884,6 @@ class L1DependentApp(
)
}
private val finalizedBlockNotifier = run {
val log = LogManager.getLogger("clients.ForkChoiceUpdaterShomeiClient")
val type2StateProofProviderClients = configs.type2StateProofProvider.endpoints.map {
ShomeiClient(
vertx = vertx,
rpcClient = httpJsonRpcClientFactory.create(it, log = log),
retryConfig = configs.type2StateProofProvider.requestRetryConfig,
log = log
)
}
ForkChoiceUpdaterImpl(type2StateProofProviderClients)
}
private val lastProvenBlockNumberProvider = run {
val lastProvenConsecutiveBatchBlockNumberProvider = BatchesRepoBasedLastProvenBlockNumberProvider(
lastProcessedBlockNumber.toLong(),
@@ -1023,29 +1018,23 @@ class L1DependentApp(
)
}
private val blockFinalizationHandlerMap = mapOf(
"finalized records cleanup" to RecordsCleanupFinalizationHandler(
batchesRepository = batchesRepository,
blobsRepository = blobsRepository,
aggregationsRepository = aggregationsRepository
),
"type 2 state proof provider finalization updates" to FinalizationHandler {
finalizedBlockNotifier.updateFinalizedBlock(
BlockNumberAndHash(it.blockNumber, it.blockHash.toArray())
)
},
"last_proven_block_provider" to FinalizationHandler { update: FinalizationMonitor.FinalizationUpdate ->
lastProvenBlockNumberProvider.updateLatestL1FinalizedBlock(update.blockNumber.toLong())
},
"highest_accepted_finalization_on_l1" to FinalizationHandler { update: FinalizationMonitor.FinalizationUpdate ->
highestAcceptedFinalizationTracker(update.blockNumber)
}
)
init {
blockFinalizationHandlerMap.forEach { (handlerName, handler) ->
l1FinalizationMonitor.addFinalizationHandler(handlerName, handler)
}
mapOf(
"last_proven_block_provider" to FinalizationHandler { update: FinalizationMonitor.FinalizationUpdate ->
lastProvenBlockNumberProvider.updateLatestL1FinalizedBlock(update.blockNumber.toLong())
},
"finalized records cleanup" to RecordsCleanupFinalizationHandler(
batchesRepository = batchesRepository,
blobsRepository = blobsRepository,
aggregationsRepository = aggregationsRepository
),
"highest_accepted_finalization_on_l1" to FinalizationHandler { update: FinalizationMonitor.FinalizationUpdate ->
highestAcceptedFinalizationTracker(update.blockNumber)
}
)
.forEach { (handlerName, handler) ->
l1FinalizationMonitor.addFinalizationHandler(handlerName, handler)
}
}
override fun start(): CompletableFuture<Unit> {
@@ -1057,6 +1046,7 @@ class L1DependentApp(
aggregationsRepository = aggregationsRepository
)
.thenCompose { l1FinalizationMonitor.start() }
.thenCompose { l1FinalizationHandlerForShomeiRpc.start() }
.thenCompose { blobSubmissionCoordinator.start() }
.thenCompose { aggregationFinalizationCoordinator.start() }
.thenCompose { proofAggregationCoordinatorService.start() }
@@ -1075,6 +1065,7 @@ class L1DependentApp(
override fun stop(): CompletableFuture<Unit> {
return SafeFuture.allOf(
l1FinalizationMonitor.stop(),
l1FinalizationHandlerForShomeiRpc.stop(),
blobSubmissionCoordinator.stop(),
aggregationFinalizationCoordinator.stop(),
proofAggregationCoordinatorService.stop(),
@@ -1144,6 +1135,52 @@ class L1DependentApp(
false -> TracesCountersV1.EMPTY_TRACES_COUNT
}
}
fun setupL1FinalizationMonitorForShomeiFrontend(
type2StateProofProviderConfig: Type2StateProofProviderConfig?,
httpJsonRpcClientFactory: VertxHttpJsonRpcClientFactory,
lineaRollupClient: LineaRollupSmartContractClientReadOnly,
l2Web3jClient: Web3j,
vertx: Vertx
): LongRunningService {
if (type2StateProofProviderConfig == null || type2StateProofProviderConfig.endpoints.isEmpty()) {
return DisabledLongRunningService
}
val finalizedBlockNotifier = run {
val log = LogManager.getLogger("clients.ForkChoiceUpdaterShomeiClient")
val type2StateProofProviderClients = type2StateProofProviderConfig.endpoints.map {
ShomeiClient(
vertx = vertx,
rpcClient = httpJsonRpcClientFactory.create(it, log = log),
retryConfig = type2StateProofProviderConfig.requestRetryConfig,
log = log
)
}
ForkChoiceUpdaterImpl(type2StateProofProviderClients)
}
val l1FinalizationMonitor =
FinalizationMonitorImpl(
config =
FinalizationMonitorImpl.Config(
pollingInterval = type2StateProofProviderConfig.l1PollingInterval.toKotlinDuration(),
l1QueryBlockTag = type2StateProofProviderConfig.l1QueryBlockTag
),
contract = lineaRollupClient,
l2Client = l2Web3jClient,
vertx = vertx
)
l1FinalizationMonitor.addFinalizationHandler("type 2 state proof provider finalization updates", {
finalizedBlockNotifier.updateFinalizedBlock(
BlockNumberAndHash(it.blockNumber, it.blockHash.toArray())
)
})
return l1FinalizationMonitor
}
}
}
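
DisabledLongRunningService is referenced above but not shown in this diff. Assuming LongRunningService declares only the start()/stop() pair used in this file, a no-op implementation would look roughly like the following sketch (hypothetical body, not the repository's actual source):

import java.util.concurrent.CompletableFuture
import net.consensys.zkevm.LongRunningService

// No-op service returned when the Shomei frontend update is disabled:
// start() and stop() complete immediately and nothing is ever polled.
object DisabledLongRunningService : LongRunningService {
  override fun start(): CompletableFuture<Unit> = CompletableFuture.completedFuture(Unit)
  override fun stop(): CompletableFuture<Unit> = CompletableFuture.completedFuture(Unit)
}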

View File

@@ -496,6 +496,12 @@ data class GasPriceCapTimeOfDayMultipliersConfig(val gasPriceCapTimeOfDayMultipl
data class Type2StateProofProviderConfig(
val endpoints: List<URL>,
val l1QueryBlockTag: BlockParameter.Tag = BlockParameter.Tag.LATEST,
val l1PollingInterval: Duration = Duration.ofSeconds(12),
val l1RequestRetry: RequestRetryConfigTomlFriendly = RequestRetryConfigTomlFriendly(
backoffDelay = Duration.ofSeconds(1),
failuresWarningThreshold = 3
),
override val requestRetry: RequestRetryConfigTomlFriendly
) : RequestRetryConfigurable
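
Every new field has a default, so existing TOML files without the l1-* keys keep parsing exactly as before; only endpoints and the request-retry block remain mandatory. A sketch of both construction styles (assuming, as the updated test below does, that RequestRetryConfigTomlFriendly can be built from just these two arguments; coordinator-side imports omitted):

import java.net.URI
import java.time.Duration

// Minimal construction: l1QueryBlockTag / l1PollingInterval / l1RequestRetry fall back to
// LATEST, PT12S and (PT1S, threshold 3) respectively.
val minimal = Type2StateProofProviderConfig(
  endpoints = listOf(URI.create("http://shomei-frontend:8888/").toURL()),
  requestRetry = RequestRetryConfigTomlFriendly(
    backoffDelay = Duration.ofSeconds(1),
    failuresWarningThreshold = 2
  )
)

// Fully specified construction matching the values asserted by the updated test.
val fromTest = minimal.copy(
  l1QueryBlockTag = BlockParameter.Tag.SAFE,
  l1PollingInterval = Duration.parse("PT6S"),
  l1RequestRetry = RequestRetryConfigTomlFriendly(
    backoffDelay = Duration.parse("PT0.5S"),
    failuresWarningThreshold = 20
  )
)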

View File

@@ -134,6 +134,12 @@ class CoordinatorConfigTest {
requestRetry = RequestRetryConfigTomlFriendly(
backoffDelay = Duration.parse("PT1S"),
failuresWarningThreshold = 2
),
l1QueryBlockTag = BlockParameter.Tag.SAFE,
l1PollingInterval = Duration.parse("PT6S"),
l1RequestRetry = RequestRetryConfigTomlFriendly(
backoffDelay = Duration.parse("PT0.5S"),
failuresWarningThreshold = 20
)
)
private val stateManagerConfig = StateManagerClientConfig(

View File

@@ -73,6 +73,10 @@ request-retry.failures-warning-threshold=2
endpoints=["http://shomei-frontend:8888/"]
request-retry.backoff-delay="PT1S"
request-retry.failures-warning-threshold=2
l1-query-block-tag="SAFE"
l1-polling-interval="PT6S"
l1-request-retry.backoff-delay="PT0.5S"
l1-request-retry.failures-warning-threshold=20
[api]
observability_port=9545

View File

@@ -287,7 +287,7 @@ services:
coordinator:
hostname: coordinator
container_name: coordinator
image: consensys/linea-coordinator:${COORDINATOR_TAG:-cd7228e}
image: consensys/linea-coordinator:${COORDINATOR_TAG:-29db47d}
platform: linux/amd64
profiles: [ "l2", "debug" ]
depends_on: