Compare commits

..

7 Commits

Author SHA1 Message Date
colin
2b2cc62efe fix(coordinator): add metric roller_proofs_generated_failed_time (#419) 2023-04-12 18:27:55 +08:00
colin
807b7c7f33 refactor(coordinator): adjust logs for Loki query (#417)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-04-11 16:38:52 +08:00
HAOYUatHZ
454f032c9f doc(test): add testing doc (#412) 2023-04-11 11:05:33 +08:00
maskpp
d1c4fa716d fix(test): Clean the exited container by --rm after container stopped. (#416) 2023-04-10 19:18:59 +08:00
Lawliet-Chan
de1e40d19c fix(libzkp): load_params and seed in zk (#415) 2023-04-10 15:41:16 +08:00
Ahmed Castro
76b5a6c751 Small typo fix on documentation comments (#411)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-09 15:34:27 +08:00
colin
bad77eac2f feat(coordinator): prover monitoring (#392)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-04-07 09:06:58 +08:00
15 changed files with 210 additions and 61 deletions

View File

@@ -1,3 +1,5 @@
# Scroll Monorepo
[![Contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml) [![Bridge](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml) [![Coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml) [![Database](https://github.com/scroll-tech/scroll/actions/workflows/database.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/database.yml) [![Common](https://github.com/scroll-tech/scroll/actions/workflows/common.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/common.yml) [![Roller](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml)
For a more comprehensive doc, see [`docs/`](./docs).

View File

@@ -23,11 +23,11 @@ import (
var (
bridgeL2BatchesGasOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/gas/over/threshold/total", metrics.ScrollRegistry)
bridgeL2BatchesTxsOverThresholdTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/txs/over/threshold/total", metrics.ScrollRegistry)
bridgeL2BatchesCommitTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/commit/total", metrics.ScrollRegistry)
bridgeL2BatchesBlocksCreatedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/blocks/created/total", metrics.ScrollRegistry)
bridgeL2BatchesCommitsSentTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/commits/sent/total", metrics.ScrollRegistry)
bridgeL2BatchesCreatedRateMeter = geth_metrics.NewRegisteredMeter("bridge/l2/batches/blocks/created/rate", metrics.ScrollRegistry)
bridgeL2BatchesTxsCreatedRateMeter = geth_metrics.NewRegisteredMeter("bridge/l2/batches/txs/created/rate", metrics.ScrollRegistry)
bridgeL2BatchesGasCreatedRateMeter = geth_metrics.NewRegisteredMeter("bridge/l2/batches/gas/created/rate", metrics.ScrollRegistry)
bridgeL2BatchesTxsCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/txs/created/per/batch", metrics.ScrollRegistry)
bridgeL2BatchesGasCreatedPerBatchGauge = geth_metrics.NewRegisteredGauge("bridge/l2/batches/gas/created/per/batch", metrics.ScrollRegistry)
)
// AddBatchInfoToDB inserts the batch information to the BlockBatch table and updates the batch_hash
@@ -257,7 +257,7 @@ func (p *BatchProposer) TryCommitBatches() {
log.Error("SendCommitTx failed", "error", err)
} else {
// pop the processed batches from the buffer
bridgeL2BatchesCommitTotalCounter.Inc(1)
bridgeL2BatchesCommitsSentTotalCounter.Inc(1)
p.batchDataBuffer = p.batchDataBuffer[index:]
}
}
@@ -273,9 +273,9 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
} else {
bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(blocks[0].GasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(1)
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
}
return true
}
@@ -286,9 +286,9 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
if err := p.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
} else {
bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(blocks[0].GasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(1)
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blocks[0].TxNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blocks[0].GasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
}
return true
}
@@ -316,9 +316,9 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
if err := p.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
} else {
bridgeL2BatchesTxsCreatedRateMeter.Mark(int64(txNum))
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(gasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(int64(len(blocks)))
bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(txNum))
bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(gasUsed))
bridgeL2BatchesBlocksCreatedTotalCounter.Inc(int64(len(blocks)))
}
return true

View File

@@ -28,8 +28,8 @@ import (
// Metrics
var (
bridgeL2MsgsSyncHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2TracesFetchedHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/traces/fetched/height", metrics.ScrollRegistry)
bridgeL2TracesFetchedGapGauge = geth_metrics.NewRegisteredGauge("bridge/l2/traces/fetched/gap", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = geth_metrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsSentEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL2MsgsAppendEventsTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry)
@@ -162,8 +162,8 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(ctx context.Context, bloc
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
bridgeL2TracesFetchedHeightGauge.Update(int64(to))
bridgeL2TracesFetchedGapGauge.Update(int64(blockHeight - to))
bridgeL2BlocksFetchedHeightGauge.Update(int64(to))
bridgeL2BlocksFetchedGapGauge.Update(int64(blockHeight - to))
}
}

View File

@@ -80,7 +80,7 @@ func (b *App) RunDBApp(t *testing.T, option, keyword string) {
app.RunApp(nil)
}
// Free clear all running images
// Free clear all running images, double check and recycle docker container.
func (b *App) Free() {
if b.l1gethImg != nil {
_ = b.l1gethImg.Stop()

View File

@@ -82,7 +82,7 @@ func (i *ImgDB) Endpoint() string {
}
func (i *ImgDB) prepare() []string {
cmd := []string{"docker", "run", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
cmd := []string{"docker", "run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
envs := []string{
"-e", "POSTGRES_PASSWORD=" + i.password,
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),

View File

@@ -125,7 +125,7 @@ func (i *ImgGeth) Stop() error {
}
func (i *ImgGeth) prepare() []string {
cmds := []string{"docker", "run", "--name", i.name}
cmds := []string{"docker", "run", "--rm", "--name", i.name}
var ports []string
if i.httpPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)

View File

@@ -3,7 +3,7 @@ use libc::c_char;
use std::cell::OnceCell;
use types::eth::BlockTrace;
use zkevm::circuit::AGG_DEGREE;
use zkevm::utils::{load_or_create_params, load_or_create_seed};
use zkevm::utils::{load_params, load_seed};
use zkevm::{circuit::DEGREE, prover::Prover};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -15,9 +15,9 @@ pub unsafe extern "C" fn init_prover(params_path: *const c_char, seed_path: *con
let params_path = c_char_to_str(params_path);
let seed_path = c_char_to_str(seed_path);
let params = load_or_create_params(params_path, *DEGREE).unwrap();
let agg_params = load_or_create_params(params_path, *AGG_DEGREE).unwrap();
let seed = load_or_create_seed(seed_path).unwrap();
let params = load_params(params_path, *DEGREE).unwrap();
let agg_params = load_params(params_path, *AGG_DEGREE).unwrap();
let seed = load_seed(seed_path).unwrap();
let p = Prover::from_params_and_seed(params, agg_params, seed);
PROVER.set(p).unwrap();
}

View File

@@ -4,7 +4,7 @@ use std::fs::File;
use std::io::Read;
use zkevm::circuit::{AGG_DEGREE, DEGREE};
use zkevm::prover::AggCircuitProof;
use zkevm::utils::load_or_create_params;
use zkevm::utils::load_params;
use zkevm::verifier::Verifier;
static mut VERIFIER: Option<&Verifier> = None;
@@ -20,8 +20,8 @@ pub unsafe extern "C" fn init_verifier(params_path: *const c_char, agg_vk_path:
let mut agg_vk = vec![];
f.read_to_end(&mut agg_vk).unwrap();
let params = load_or_create_params(params_path, *DEGREE).unwrap();
let agg_params = load_or_create_params(params_path, *AGG_DEGREE).unwrap();
let params = load_params(params_path, *DEGREE).unwrap();
let agg_params = load_params(params_path, *AGG_DEGREE).unwrap();
let v = Box::new(Verifier::from_params(params, agg_params, Some(agg_vk)));
VERIFIER = Some(Box::leak(v))

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v3.0.6"
var tag = "v3.0.10"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -43,7 +43,7 @@ interface IScrollMessenger {
*****************************/
/// @notice Send cross chain message from L1 to L2 or L2 to L1.
/// @param target The address of account who recieve the message.
/// @param target The address of account who receive the message.
/// @param value The amount of ether passed when call target contract.
/// @param message The content of the message.
/// @param gasLimit Gas limit required to complete the message relay on corresponding chain.
@@ -55,7 +55,7 @@ interface IScrollMessenger {
) external payable;
/// @notice Send cross chain message from L1 to L2 or L2 to L1.
/// @param target The address of account who recieve the message.
/// @param target The address of account who receive the message.
/// @param value The amount of ether passed when call target contract.
/// @param message The content of the message.
/// @param gasLimit Gas limit required to complete the message relay on corresponding chain.

View File

@@ -117,6 +117,8 @@ func (m *Manager) SubmitProof(proof *message.ProofMsg) (bool, error) {
return false, fmt.Errorf("the roller or session id doesn't exist, pubkey: %s, ID: %s", pubkey, proof.ID)
}
m.updateMetricRollerProofsLastFinishedTimestampGauge(pubkey)
err := m.handleZkProof(pubkey, proof.ProofDetail)
if err != nil {
return false, err

View File

@@ -30,10 +30,19 @@ import (
)
var (
coordinatorSessionsTimeoutTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/timeout/total", metrics.ScrollRegistry)
coordinatorProofsReceivedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/proofs/received/total", metrics.ScrollRegistry)
coordinatorProofsVerifiedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/proofs/verified/total", metrics.ScrollRegistry)
coordinatorProofsVerifiedFailedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/proofs/verified/failed/total", metrics.ScrollRegistry)
// proofs
coordinatorProofsReceivedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/proofs/received/total", metrics.ScrollRegistry)
coordinatorProofsVerifiedSuccessTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/verified/success/time", metrics.ScrollRegistry)
coordinatorProofsVerifiedFailedTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/verified/failed/time", metrics.ScrollRegistry)
coordinatorProofsGeneratedFailedTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/generated/failed/time", metrics.ScrollRegistry)
// sessions
coordinatorSessionsSuccessTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/success/total", metrics.ScrollRegistry)
coordinatorSessionsTimeoutTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/timeout/total", metrics.ScrollRegistry)
coordinatorSessionsFailedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/failed/total", metrics.ScrollRegistry)
coordinatorSessionsActiveNumberGauge = geth_metrics.NewRegisteredCounter("coordinator/sessions/active/number", metrics.ScrollRegistry)
)
const (
@@ -242,7 +251,8 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
if !ok {
return fmt.Errorf("proof generation session for id %v does not existID", msg.ID)
}
proofTimeSec := uint64(time.Since(time.Unix(sess.info.StartTimestamp, 0)).Seconds())
proofTime := time.Since(time.Unix(sess.info.StartTimestamp, 0))
proofTimeSec := uint64(proofTime.Seconds())
// Ensure this roller is eligible to participate in the session.
roller, ok := sess.info.Rollers[pk]
@@ -267,6 +277,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
"proof id", msg.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof time", proofTimeSec,
)
defer func() {
@@ -287,11 +298,14 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
}()
if msg.Status != message.StatusOk {
log.Error(
"Roller failed to generate proof",
"msg.ID", msg.ID,
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
m.updateMetricRollerProofsGeneratedFailedTimeTimer(roller.PublicKey, proofTime)
log.Info(
"proof generated by roller failed",
"proof id", msg.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof time", proofTimeSec,
"error", msg.Error,
)
return nil
@@ -309,15 +323,14 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
coordinatorProofsReceivedTotalCounter.Inc(1)
var err error
success, err = m.verifyProof(msg.Proof)
if err != nil {
var verifyErr error
success, verifyErr = m.verifyProof(msg.Proof)
if verifyErr != nil {
// TODO: this is only a temp workaround for testnet, we should return err in real cases
success = false
log.Error("Failed to verify zk proof", "proof id", msg.ID, "error", err)
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof time", proofTimeSec, "error", verifyErr)
// TODO: Roller needs to be slashed if proof is invalid.
} else {
log.Info("Verify zk proof successfully", "verification result", success, "proof id", msg.ID)
}
if success {
@@ -329,15 +342,24 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
"error", dbErr)
return dbErr
}
coordinatorProofsVerifiedTotalCounter.Inc(1)
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(roller.PublicKey, proofTime)
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof time", proofTimeSec)
} else {
coordinatorProofsVerifiedFailedTotalCounter.Inc(1)
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
m.updateMetricRollerProofsVerifiedFailedTimeTimer(roller.PublicKey, proofTime)
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof time", proofTimeSec, "error", verifyErr)
}
return nil
}
// CollectProofs collects proofs corresponding to a proof generation session.
func (m *Manager) CollectProofs(sess *session) {
coordinatorSessionsActiveNumberGauge.Inc(1)
defer coordinatorSessionsActiveNumberGauge.Dec(1)
for {
select {
//Execute after timeout, set in config.json. Consider all rollers failed.
@@ -371,6 +393,7 @@ func (m *Manager) CollectProofs(sess *session) {
}
delete(m.sessions, sess.info.ID)
m.mu.Unlock()
coordinatorSessionsTimeoutTotalCounter.Inc(1)
return
//Execute after one of the roller finishes sending proof, return early if all rollers had sent results.
@@ -381,6 +404,7 @@ func (m *Manager) CollectProofs(sess *session) {
if err := m.orm.UpdateProvingStatus(ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update proving_status as failed", "msg.ID", ret.id, "error", err)
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}
if err := m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "pk", ret.pk, "error", err)
@@ -400,6 +424,8 @@ func (m *Manager) CollectProofs(sess *session) {
}
delete(m.sessions, sess.info.ID)
m.mu.Unlock()
coordinatorSessionsSuccessTotalCounter.Inc(1)
return
}
m.mu.Unlock()
@@ -517,6 +543,7 @@ func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSessio
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskId)
continue
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
rollers[roller.PublicKey] = &types.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: types.RollerAssigned}
}
// No roller assigned.
@@ -545,17 +572,18 @@ func (m *Manager) StartProofGenerationSession(task *types.BlockBatch, prevSessio
sess.info.Attempts += prevSession.info.Attempts
}
for _, roller := range sess.info.Rollers {
log.Info(
"assigned proof to roller",
"session id", sess.info.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof status", roller.Status)
}
// Store session info.
if err = m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "error", err)
for _, roller := range sess.info.Rollers {
log.Error(
"restore roller info for session",
"session id", sess.info.ID,
"roller name", roller.Name,
"public key", roller.PublicKey,
"proof status", roller.Status)
}
log.Error("db set session info fail", "session id", sess.info.ID, "error", err)
return false
}

View File

@@ -0,0 +1,71 @@
package coordinator
import (
"time"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
)
type rollerMetrics struct {
rollerProofsVerifiedSuccessTimeTimer geth_metrics.Timer
rollerProofsVerifiedFailedTimeTimer geth_metrics.Timer
rollerProofsGeneratedFailedTimeTimer geth_metrics.Timer
rollerProofsLastAssignedTimestampGauge geth_metrics.Gauge
rollerProofsLastFinishedTimestampGauge geth_metrics.Gauge
}
func (m *Manager) updateMetricRollerProofsLastFinishedTimestampGauge(pk string) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
} else {
log.Error("rollerProofsLastFinishedTimestampGauge is nil", "roller pk", pk)
}
}
}
func (m *Manager) updateMetricRollerProofsLastAssignedTimestampGauge(pk string) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
} else {
log.Error("rollerProofsLastAssignedTimestampGauge is nil", "roller pk", pk)
}
}
}
func (m *Manager) updateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsVerifiedSuccessTimeTimer.Update(d)
} else {
log.Error("rollerProofsVerifiedSuccessTimeTimer is nil", "roller pk", pk)
}
}
}
func (m *Manager) updateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsVerifiedFailedTimeTimer.Update(d)
} else {
log.Error("rollerProofsVerifiedFailedTimeTimer is nil", "roller pk", pk)
}
}
}
func (m *Manager) updateMetricRollerProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).rollerMetrics
if rMs != nil {
rMs.rollerProofsGeneratedFailedTimeTimer.Update(d)
} else {
log.Error("rollerProofsGeneratedFailedTimeTimer is nil", "roller pk", pk)
}
}
}

View File

@@ -9,8 +9,10 @@ import (
cmap "github.com/orcaman/concurrent-map"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"scroll-tech/common/message"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
)
@@ -30,6 +32,8 @@ type rollerNode struct {
// Time of message creation
registerTime time.Time
*rollerMetrics
}
func (r *rollerNode) sendTask(id string, traces []*geth_types.BlockTrace) bool {
@@ -64,12 +68,20 @@ func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *m
node, ok := m.rollerPool.Get(pubkey)
if !ok {
taskIDs := m.reloadRollerAssignedTasks(pubkey)
rMs := &rollerMetrics{
rollerProofsVerifiedSuccessTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/success/time/%s", pubkey), metrics.ScrollRegistry),
rollerProofsVerifiedFailedTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/failed/time/%s", pubkey), metrics.ScrollRegistry),
rollerProofsGeneratedFailedTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/generated/failed/time/%s", pubkey), metrics.ScrollRegistry),
rollerProofsLastAssignedTimestampGauge: geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/assigned/timestamp/%s", pubkey), metrics.ScrollRegistry),
rollerProofsLastFinishedTimestampGauge: geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/finished/timestamp/%s", pubkey), metrics.ScrollRegistry),
}
node = &rollerNode{
Name: identity.Name,
Version: identity.Version,
PublicKey: pubkey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
Name: identity.Name,
Version: identity.Version,
PublicKey: pubkey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
rollerMetrics: rMs,
}
m.rollerPool.Set(pubkey, node)
}

34
docs/tests.md Normal file
View File

@@ -0,0 +1,34 @@
# Tests
## bridge & coordinator
### Prerequisite
+ rust
+ go1.18
+ docker
To run tests for bridge & coordinator, you would need to build some required docker images first.
```bash
make dev_docker # under repo root directory
```
### Run the tests
```bash
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
```
You can also run some related tests (they depend on bridge & coordinator) using
```bash
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
## Contracts
You can find the unit tests in [`<REPO_DIR>/contracts/src/test/`](../contracts/src/test/), and integration tests in [`<REPO_DIR>/contracts/integration-test/`](../contracts/integration-test/).