Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-06 20:13:59 -05:00)
Add log capitalization analyzer and apply changes (#15452)
* Add log capitalization analyzer and apply fixes across codebase

  Implements a new nogo analyzer to enforce proper log message capitalization and applies the fixes to all affected log statements throughout the beacon chain, validator, and supporting components.

  Co-Authored-By: Claude <noreply@anthropic.com>

* Radek's feedback

---------

Co-authored-by: Claude <noreply@anthropic.com>
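The analyzer inspects the literal message passed to a logging call and reports any message that begins with a lowercase letter. A minimal sketch of the before/after pattern applied throughout this diff (the surrounding function is illustrative only; the log lines are taken from the changes below):

```go
package example

import log "github.com/sirupsen/logrus"

func saveHeadExample(err error) {
	// Before: flagged by the logcapitalization analyzer (message starts lowercase).
	log.WithError(err).Error("could not save head")

	// After: accepted (message starts with a capital letter).
	log.WithError(err).Error("Could not save head")
}
```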
@@ -194,6 +194,7 @@ nogo(
         "//tools/analyzers/gocognit:go_default_library",
         "//tools/analyzers/ineffassign:go_default_library",
         "//tools/analyzers/interfacechecker:go_default_library",
+        "//tools/analyzers/logcapitalization:go_default_library",
         "//tools/analyzers/logruswitherror:go_default_library",
         "//tools/analyzers/maligned:go_default_library",
         "//tools/analyzers/nop:go_default_library",
@@ -72,7 +72,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
 		log.WithFields(log.Fields{
 			"bodyBase64": "(nil value)",
 			"url":        r.URL.String(),
-		}).Info("builder http request")
+		}).Info("Builder http request")
 		return nil
 	}
 	t := io.TeeReader(r.Body, b)
@@ -89,7 +89,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
 	log.WithFields(log.Fields{
 		"bodyBase64": string(body),
 		"url":        r.URL.String(),
-	}).Info("builder http request")
+	}).Info("Builder http request")

 	return nil
 }
@@ -19,10 +19,10 @@ func RunEvery(ctx context.Context, period time.Duration, f func()) {
 	for {
 		select {
 		case <-ticker.C:
-			log.WithField("function", funcName).Trace("running")
+			log.WithField("function", funcName).Trace("Running")
 			f()
 		case <-ctx.Done():
-			log.WithField("function", funcName).Debug("context is closed, exiting")
+			log.WithField("function", funcName).Debug("Context is closed, exiting")
 			ticker.Stop()
 			return
 		}
@@ -141,7 +141,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
 	}

 	if err := s.saveHead(ctx, r, b, st); err != nil {
-		log.WithError(err).Error("could not save head after pruning invalid blocks")
+		log.WithError(err).Error("Could not save head after pruning invalid blocks")
 	}

 	log.WithFields(logrus.Fields{
@@ -76,14 +76,14 @@ func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fc
 	s.cfg.ForkChoiceStore.RLock()
 	defer s.cfg.ForkChoiceStore.RUnlock()
 	if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
-		log.WithError(err).Error("could not compute payload attributes")
+		log.WithError(err).Error("Could not compute payload attributes")
 		return
 	}
 	if fcuArgs.attributes.IsEmpty() {
 		return
 	}
 	if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
-		log.WithError(err).Error("could not update forkchoice with payload attributes for proposal")
+		log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
 	}
 }
@@ -99,7 +99,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
 	}

 	if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
-		log.WithError(err).Error("could not save head")
+		log.WithError(err).Error("Could not save head")
 	}

 	go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)
@@ -114,7 +114,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
 func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
 	headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
 	if err != nil {
-		log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
+		log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
 	}
 	currentSlot := s.CurrentSlot()
 	if proposingSlot == currentSlot {
@@ -135,7 +135,7 @@ func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitiv
 	secs, err := slots.SecondsSinceSlotStart(currentSlot,
 		uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
 	if err != nil {
-		log.WithError(err).Error("could not compute seconds since slot start")
+		log.WithError(err).Error("Could not compute seconds since slot start")
 	}
 	if secs >= doublylinkedtree.ProcessAttestationsThreshold {
 		log.WithFields(logrus.Fields{
@@ -98,7 +98,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
 	oldHeadRoot := bytesutil.ToBytes32(r)
 	isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
 	if err != nil {
-		log.WithError(err).Error("could not check if node is optimistically synced")
+		log.WithError(err).Error("Could not check if node is optimistically synced")
 	}
 	if headBlock.Block().ParentRoot() != oldHeadRoot {
 		// A chain re-org occurred, so we fire an event notifying the rest of the services.
@@ -111,11 +111,11 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
 		dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
 		oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
 		if err != nil {
-			log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
+			log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("Could not determine node weight")
 		}
 		newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
 		if err != nil {
-			log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
+			log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
 		}
 		log.WithFields(logrus.Fields{
 			"newSlot": fmt.Sprintf("%d", newHeadSlot),
@@ -329,7 +329,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
 	// The latest block header is from the previous epoch
 	r, err := st.LatestBlockHeader().HashTreeRoot()
 	if err != nil {
-		log.WithError(err).Error("could not update proposer index state-root map")
+		log.WithError(err).Error("Could not update proposer index state-root map")
 		return nil
 	}
 	// The proposer indices cache takes the target root for the previous
@@ -339,12 +339,12 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
 	}
 	target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
 	if err != nil {
-		log.WithError(err).Error("could not update proposer index state-root map")
+		log.WithError(err).Error("Could not update proposer index state-root map")
 		return nil
 	}
 	err = helpers.UpdateCachedCheckpointToStateRoot(st, &forkchoicetypes.Checkpoint{Epoch: e, Root: target})
 	if err != nil {
-		log.WithError(err).Error("could not update proposer index state-root map")
+		log.WithError(err).Error("Could not update proposer index state-root map")
 	}
 	return nil
 }
@@ -562,7 +562,7 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
 // If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
 func (s *Service) runLateBlockTasks() {
 	if err := s.waitForSync(); err != nil {
-		log.WithError(err).Error("failed to wait for initial sync")
+		log.WithError(err).Error("Failed to wait for initial sync")
 		return
 	}

@@ -927,10 +927,10 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	// blocks.
 	lastState.CopyAllTries()
 	if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
-		log.WithError(err).Debug("could not update next slot state cache")
+		log.WithError(err).Debug("Could not update next slot state cache")
 	}
 	if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
-		log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
+		log.WithError(err).Error("Could not update epoch boundary caches")
 	}
 	// return early if we already started building a block for the current
 	// head root
@@ -944,7 +944,7 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	if attribute.IsEmpty() {
 		headBlock, err := s.headBlock()
 		if err != nil {
-			log.WithError(err).WithField("head_root", headRoot).Error("unable to retrieve head block to fire payload attributes event")
+			log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
 		}
 		// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
 		// call notifyForkchoiceUpdate, so the event is fired here.
@@ -68,11 +68,11 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
 func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
 	receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
 	if err != nil {
-		log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
+		log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("Could not determine node weight")
 	}
 	headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
 	if err != nil {
-		log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
+		log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("Could not determine node weight")
 	}
 	log.WithFields(logrus.Fields{
 		"receivedRoot": fmt.Sprintf("%#x", blockRoot),
@@ -527,7 +527,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
 		// is meant to be asynchronous and run in the background rather than being
 		// tied to the execution of a block.
 		if err := s.cfg.StateGen.MigrateToCold(s.ctx, fRoot); err != nil {
-			log.WithError(err).Error("could not migrate to cold")
+			log.WithError(err).Error("Could not migrate to cold")
 		}
 	}()
 	return nil
@@ -616,7 +616,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
 	// Update deposit cache.
 	finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
 	if err != nil {
-		log.WithError(err).Error("could not fetch finalized state")
+		log.WithError(err).Error("Could not fetch finalized state")
 		return
 	}

@@ -634,7 +634,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
 	// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
 	eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
 	if err != nil {
-		log.WithError(err).Error("could not cast eth1 deposit index")
+		log.WithError(err).Error("Could not cast eth1 deposit index")
 		return
 	}
 	// The deposit index in the state is always the index of the next deposit
@@ -643,12 +643,12 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
 	finalizedEth1DepIdx := eth1DepositIndex - 1
 	if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx), common.Hash(finalizedState.Eth1Data().BlockHash),
 		0 /* Setting a zero value as we have no access to block height */); err != nil {
-		log.WithError(err).Error("could not insert finalized deposits")
+		log.WithError(err).Error("Could not insert finalized deposits")
 		return
 	}
 	// Deposit proofs are only used during state transition and can be safely removed to save space.
 	if err = s.cfg.DepositCache.PruneProofs(ctx, int64(finalizedEth1DepIdx)); err != nil {
-		log.WithError(err).Error("could not prune deposit proofs")
+		log.WithError(err).Error("Could not prune deposit proofs")
 	}
 	// Prune deposits which have already been finalized, the below method prunes all pending deposits (non-inclusive) up
 	// to the provided eth1 deposit index.
@@ -69,7 +69,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
 	go func() {
 		_, err := s.clockWaiter.WaitForClock(s.ctx)
 		if err != nil {
-			log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data")
+			log.WithError(err).Error("Failed to receive genesis data")
 			return
 		}
 		if s.genesisTime.IsZero() {
@@ -103,7 +103,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
 		} else {
 			s.cfg.ForkChoiceStore.Lock()
 			if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
-				log.WithError(err).Error("could not process new slot")
+				log.WithError(err).Error("Could not process new slot")
 			}
 			s.cfg.ForkChoiceStore.Unlock()

@@ -144,7 +144,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
 	log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
 	headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
 	if err != nil {
-		log.WithError(err).Error("could not get head block")
+		log.WithError(err).Error("Could not get head block")
 		return
 	}
 	newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -161,7 +161,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
 		return
 	}
 	if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
-		log.WithError(err).Error("could not update forkchoice")
+		log.WithError(err).Error("Could not update forkchoice")
 	}
 }
@@ -183,7 +183,7 @@ func (s *Service) updateCheckpoints(
 			return errors.Wrap(err, "could not get head state")
 		}
 		if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
-			log.WithError(err).Error("could not report epoch metrics")
+			log.WithError(err).Error("Could not report epoch metrics")
 		}
 	}
 	if err := s.updateJustificationOnBlock(ctx, preState, postState, cp.j); err != nil {
@@ -373,7 +373,7 @@ func (s *Service) startFromExecutionChain() error {
 			if e.Type == statefeed.ChainStarted {
 				data, ok := e.Data.(*statefeed.ChainStartedData)
 				if !ok {
-					log.Error("event data is not type *statefeed.ChainStartedData")
+					log.Error("Event data is not type *statefeed.ChainStartedData")
 					return
 				}
 				log.WithField("startTime", data.StartTime).Debug("Received chain start event")
@@ -410,7 +410,7 @@ func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Ti

 	vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
 	if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
-		log.WithError(err).Fatal("failed to initialize blockchain service from execution start event")
+		log.WithError(err).Fatal("Failed to initialize blockchain service from execution start event")
 	}
 }
@@ -38,7 +38,7 @@ func (s *Service) startupHeadRoot() [32]byte {
 	if headStr == "head" {
 		root, err := s.cfg.BeaconDB.HeadBlockRoot()
 		if err != nil {
-			log.WithError(err).Error("could not get head block root, starting with finalized block as head")
+			log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
 			return fRoot
 		}
 		log.Infof("Using Head root of %#x", root)
@@ -46,7 +46,7 @@ func (s *Service) startupHeadRoot() [32]byte {
 	}
 	root, err := bytesutil.DecodeHexWithLength(headStr, 32)
 	if err != nil {
-		log.WithError(err).Error("could not parse head root, starting with finalized block as head")
+		log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
 		return fRoot
 	}
 	return [32]byte(root)
@@ -64,16 +64,16 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
 	}
 	blk, err := s.cfg.BeaconDB.Block(s.ctx, headRoot)
 	if err != nil {
-		log.WithError(err).Error("could not get head block, starting with finalized block as head")
+		log.WithError(err).Error("Could not get head block, starting with finalized block as head")
 		return nil
 	}
 	if slots.ToEpoch(blk.Block().Slot()) < cp.Epoch {
-		log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("head block is older than finalized block, starting with finalized block as head")
+		log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("Head block is older than finalized block, starting with finalized block as head")
 		return nil
 	}
 	chain, err := s.buildForkchoiceChain(s.ctx, blk)
 	if err != nil {
-		log.WithError(err).Error("could not build forkchoice chain, starting with finalized block as head")
+		log.WithError(err).Error("Could not build forkchoice chain, starting with finalized block as head")
 		return nil
 	}
 	s.cfg.ForkChoiceStore.Lock()
@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
 		})
 		defer resetCfg()
 		require.Equal(t, service.startupHeadRoot(), gr)
-		require.LogsContain(t, hook, "could not get head block root, starting with finalized block as head")
+		require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
 	})

 	st, _ := util.DeterministicGenesisState(t, 64)
@@ -555,11 +555,11 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
 	ojc := &ethpb.Checkpoint{}
 	st, root, err := prepareForkchoiceState(ctx, slot, bytesutil.ToBytes32(s.Root), [32]byte{}, [32]byte{}, ojc, ojc)
 	if err != nil {
-		logrus.WithError(err).Error("could not update head")
+		logrus.WithError(err).Error("Could not update head")
 	}
 	err = s.ForkChoiceStore.InsertNode(ctx, st, root)
 	if err != nil {
-		logrus.WithError(err).Error("could not insert node to forkchoice")
+		logrus.WithError(err).Error("Could not insert node to forkchoice")
 	}
 }
beacon-chain/cache/sync_committee.go (vendored, 2 lines changed)
@@ -178,7 +178,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
 	s.lock.Lock()
 	defer s.lock.Unlock()
 	if clearCount != s.cleared.Load() {
-		log.Warn("cache rotated during async committee update operation - abandoning cache update")
+		log.Warn("Cache rotated during async committee update operation - abandoning cache update")
 		return nil
 	}

@@ -194,11 +194,11 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
 		if IsValidSwitchToCompoundingRequest(st, cr) {
 			srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
 			if !ok {
-				log.Error("failed to find source validator index")
+				log.Error("Failed to find source validator index")
 				continue
 			}
 			if err := SwitchToCompoundingValidator(st, srcIdx); err != nil {
-				log.WithError(err).Error("failed to switch to compounding validator")
+				log.WithError(err).Error("Failed to switch to compounding validator")
 			}
 			continue
 		}
@@ -280,7 +280,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
 		}
 		bal, err := st.PendingBalanceToWithdraw(srcIdx)
 		if err != nil {
-			log.WithError(err).Error("failed to fetch pending balance to withdraw")
+			log.WithError(err).Error("Failed to fetch pending balance to withdraw")
 			continue
 		}
 		if bal > 0 {
@@ -290,7 +290,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
 		// Initiate the exit of the source validator.
 		exitEpoch, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(srcV.EffectiveBalance))
 		if err != nil {
-			log.WithError(err).Error("failed to compute consolidation epoch")
+			log.WithError(err).Error("Failed to compute consolidation epoch")
 			continue
 		}
 		srcV.ExitEpoch = exitEpoch
@@ -12,7 +12,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/runtime/logging"
 	"github.com/OffchainLabs/prysm/v6/runtime/version"
 	"github.com/OffchainLabs/prysm/v6/time/slots"
-	errors "github.com/pkg/errors"
+	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
 )

@@ -122,7 +122,7 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
 			lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
 		}
 		log.WithFields(lf).WithFields(logging.BlockFieldsFromBlob(sidecars[0])).
-			Debug("invalid BlobSidecars received")
+			Debug("Invalid BlobSidecars received")
 	}
 	return errors.Wrapf(err, "invalid BlobSidecars received for block %#x", root)
 }
@@ -255,7 +255,7 @@ func pruneBefore(before primitives.Epoch, l fsLayout) (map[primitives.Epoch]*pru
 			}
 			continue
 		}
-		log.WithError(err).Error("encountered unhandled error during pruning")
+		log.WithError(err).Error("Encountered unhandled error during pruning")
 		return nil, errors.Wrap(errPruneFailed, err.Error())
 	}
 	if ident.epoch >= before {
@@ -620,7 +620,7 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
 		blobIndex := kzgIndexes[i]
 		proof, err := blocks.MerkleProofKZGCommitment(blockBody, blobIndex)
 		if err != nil {
-			log.WithError(err).WithField("index", blobIndex).Error("failed to get Merkle proof for KZG commitment")
+			log.WithError(err).WithField("index", blobIndex).Error("Failed to get Merkle proof for KZG commitment")
			continue
 		}
 		sidecar := &ethpb.BlobSidecar{
@@ -634,14 +634,14 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.

 		roBlob, err := blocks.NewROBlobWithRoot(sidecar, blockRoot)
 		if err != nil {
-			log.WithError(err).WithField("index", blobIndex).Error("failed to create RO blob with root")
+			log.WithError(err).WithField("index", blobIndex).Error("Failed to create RO blob with root")
 			continue
 		}

 		v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
 		verifiedBlob, err := v.VerifiedROBlob()
 		if err != nil {
-			log.WithError(err).WithField("index", blobIndex).Error("failed to verify RO blob")
+			log.WithError(err).WithField("index", blobIndex).Error("Failed to verify RO blob")
 			continue
 		}

@@ -248,14 +248,14 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
 	for i := range s.chainStartData.ChainstartDeposits {
 		proof, err := s.depositTrie.MerkleProof(i)
 		if err != nil {
-			log.WithError(err).Error("unable to generate deposit proof")
+			log.WithError(err).Error("Unable to generate deposit proof")
 		}
 		s.chainStartData.ChainstartDeposits[i].Proof = proof
 	}

 	root, err := s.depositTrie.HashTreeRoot()
 	if err != nil { // This should never happen.
-		log.WithError(err).Error("unable to determine root of deposit trie, aborting chain start")
+		log.WithError(err).Error("Unable to determine root of deposit trie, aborting chain start")
 		return
 	}
 	s.chainStartData.Eth1Data = &ethpb.Eth1Data{
@@ -127,7 +127,7 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
 	if err := f.updateCheckpoints(ctx, jc, fc); err != nil {
 		_, remErr := f.store.removeNode(ctx, node)
 		if remErr != nil {
-			log.WithError(remErr).Error("could not remove node")
+			log.WithError(remErr).Error("Could not remove node")
 		}
 		return errors.Wrap(err, "could not update checkpoints")
 	}
@@ -78,16 +78,16 @@ func (c *AttCaches) aggregateParallel(atts map[attestation.Id][]ethpb.Att, leftO
 	for as := range ch {
 		aggregated, err := attaggregation.AggregateDisjointOneBitAtts(as)
 		if err != nil {
-			log.WithError(err).Error("could not aggregate unaggregated attestations")
+			log.WithError(err).Error("Could not aggregate unaggregated attestations")
 			continue
 		}
 		if aggregated == nil {
-			log.Error("nil aggregated attestation")
+			log.Error("Nil aggregated attestation")
 			continue
 		}
 		if aggregated.IsAggregated() {
 			if err := c.SaveAggregatedAttestations([]ethpb.Att{aggregated}); err != nil {
-				log.WithError(err).Error("could not save aggregated attestation")
+				log.WithError(err).Error("Could not save aggregated attestation")
 				continue
 			}
 		} else {
@@ -214,13 +214,13 @@ func defaultAggregateTopicParams(activeValidators uint64) *pubsub.TopicScorePara
 	aggPerSlot := aggregatorsPerSlot(activeValidators)
 	firstMessageCap, err := decayLimit(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot*2/gossipSubD))
 	if err != nil {
-		log.WithError(err).Warn("skipping initializing topic scoring")
+		log.WithError(err).Warn("Skipping initializing topic scoring")
 		return nil
 	}
 	firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
 	meshThreshold, err := decayThreshold(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot)/dampeningFactor)
 	if err != nil {
-		log.WithError(err).Warn("skipping initializing topic scoring")
+		log.WithError(err).Warn("Skipping initializing topic scoring")
 		return nil
 	}
 	meshWeight := -scoreByWeight(aggregateWeight, meshThreshold)
@@ -256,13 +256,13 @@ func defaultSyncContributionTopicParams() *pubsub.TopicScoreParams {
 	aggPerSlot := params.BeaconConfig().SyncCommitteeSubnetCount * params.BeaconConfig().TargetAggregatorsPerSyncSubcommittee
 	firstMessageCap, err := decayLimit(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot*2/gossipSubD))
 	if err != nil {
-		log.WithError(err).Warn("skipping initializing topic scoring")
+		log.WithError(err).Warn("Skipping initializing topic scoring")
 		return nil
 	}
 	firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
 	meshThreshold, err := decayThreshold(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot)/dampeningFactor)
 	if err != nil {
-		log.WithError(err).Warn("skipping initializing topic scoring")
+		log.WithError(err).Warn("Skipping initializing topic scoring")
 		return nil
 	}
 	meshWeight := -scoreByWeight(syncContributionWeight, meshThreshold)
@@ -305,7 +305,7 @@ func defaultAggregateSubnetTopicParams(activeValidators uint64) *pubsub.TopicSco
 	// Determine the amount of validators expected in a subnet in a single slot.
 	numPerSlot := time.Duration(subnetWeight / uint64(params.BeaconConfig().SlotsPerEpoch))
 	if numPerSlot == 0 {
-		log.Warn("numPerSlot is 0, skipping initializing topic scoring")
+		log.Warn("Number per slot is 0, skipping initializing topic scoring")
 		return nil
 	}
 	comsPerSlot := committeeCountPerSlot(activeValidators)
@@ -318,20 +318,20 @@ func defaultAggregateSubnetTopicParams(activeValidators uint64) *pubsub.TopicSco
 	}
 	rate := numPerSlot * 2 / gossipSubD
 	if rate == 0 {
-		log.Warn("rate is 0, skipping initializing topic scoring")
+		log.Warn("Skipping initializing topic scoring because rate is 0")
 		return nil
 	}
 	// Determine expected first deliveries based on the message rate.
 	firstMessageCap, err := decayLimit(scoreDecay(firstDecay*oneEpochDuration()), float64(rate))
 	if err != nil {
-		log.WithError(err).Warn("skipping initializing topic scoring")
+		log.WithError(err).Warn("Skipping initializing topic scoring")
 		return nil
 	}
 	firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
 	// Determine expected mesh deliveries based on message rate applied with a dampening factor.
 	meshThreshold, err := decayThreshold(scoreDecay(meshDecay*oneEpochDuration()), float64(numPerSlot)/dampeningFactor)
 	if err != nil {
-		log.WithError(err).Warn("skipping initializing topic scoring")
+		log.WithError(err).Warn("Skipping initializing topic scoring")
 		return nil
 	}
 	meshWeight := -scoreByWeight(topicWeight, meshThreshold)
@@ -381,7 +381,7 @@ func defaultSyncSubnetTopicParams(activeValidators uint64) *pubsub.TopicScorePar

 	rate := subnetWeight * 2 / gossipSubD
 	if rate == 0 {
-		log.Warn("rate is 0, skipping initializing topic scoring")
+		log.Warn("Skipping initializing topic scoring because rate is 0")
 		return nil
 	}
 	// Determine expected first deliveries based on the message rate.
@@ -222,7 +222,7 @@ func (s *Service) Start() {
 	if len(s.cfg.StaticPeers) > 0 {
 		addrs, err := PeersFromStringAddrs(s.cfg.StaticPeers)
 		if err != nil {
-			log.WithError(err).Error("could not convert ENR to multiaddr")
+			log.WithError(err).Error("Could not convert ENR to multiaddr")
 		}
 		// Set trusted peers for those that are provided as static addresses.
 		pids := peerIdsFromMultiAddrs(addrs)
@@ -429,7 +429,7 @@ func (s *Service) awaitStateInitialized() {
 	}
 	clock, err := s.cfg.ClockWaiter.WaitForClock(s.ctx)
 	if err != nil {
-		log.WithError(err).Fatal("failed to receive initial genesis data")
+		log.WithError(err).Fatal("Failed to receive initial genesis data")
 	}
 	s.genesisTime = clock.GenesisTime()
 	gvr := clock.GenesisValidatorsRoot()
@@ -88,7 +88,7 @@ func createENR() *enr.Record {
 	}
 	db, err := enode.OpenDB("")
 	if err != nil {
-		log.Error("could not open node's peer database")
+		log.Error("Could not open node's peer database")
 	}
 	lNode := enode.NewLocalNode(db, key)
 	return lNode.Node().Record()
@@ -819,7 +819,7 @@ func (s *Server) PrepareBeaconProposer(w http.ResponseWriter, r *http.Request) {
 		if feeRecipient == primitives.ExecutionAddress([20]byte{}) {
 			feeRecipient = primitives.ExecutionAddress(params.BeaconConfig().DefaultFeeRecipient)
 			if feeRecipient == primitives.ExecutionAddress([20]byte{}) {
-				log.WithField("validatorIndex", validatorIndex).Warn("fee recipient is the burn address")
+				log.WithField("validatorIndex", validatorIndex).Warn("Fee recipient is the burn address")
 			}
 		}
 		val := cache.TrackedValidator{
@@ -136,7 +136,7 @@ func logFailedReorgAttempt(slot primitives.Slot, oldHeadRoot, headRoot [32]byte)
 		"slot":        slot,
 		"oldHeadRoot": fmt.Sprintf("%#x", oldHeadRoot),
 		"headRoot":    fmt.Sprintf("%#x", headRoot),
-	}).Warn("late block attempted reorg failed")
+	}).Warn("Late block attempted reorg failed")
 }

 func (vs *Server) getHeadNoReorg(ctx context.Context, slot primitives.Slot, parentRoot [32]byte) (state.BeaconState, error) {
@@ -430,7 +430,7 @@ func (vs *Server) PrepareBeaconProposer(
 		if feeRecipient == primitives.ExecutionAddress([20]byte{}) {
 			feeRecipient = primitives.ExecutionAddress(params.BeaconConfig().DefaultFeeRecipient)
 			if feeRecipient == primitives.ExecutionAddress([20]byte{}) {
-				log.WithField("validatorIndex", r.ValidatorIndex).Warn("fee recipient is the burn address")
+				log.WithField("validatorIndex", r.ValidatorIndex).Warn("Fee recipient is the burn address")
 			}
 		}
 		val := cache.TrackedValidator{
@@ -132,7 +132,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
 	if bid.Version() >= version.Deneb {
 		bidDeneb, ok := bid.(builder.BidDeneb)
 		if !ok {
-			log.Warnf("bid type %T does not implement builder.BidDeneb", bid)
+			log.Warnf("Bid type %T does not implement builder.BidDeneb", bid)
 			return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
 		} else {
 			builderKzgCommitments = bidDeneb.BlobKzgCommitments()
@@ -143,7 +143,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
 	if bid.Version() >= version.Electra {
 		bidElectra, ok := bid.(builder.BidElectra)
 		if !ok {
-			log.Warnf("bid type %T does not implement builder.BidElectra", bid)
+			log.Warnf("Bid type %T does not implement builder.BidElectra", bid)
 			return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
 		} else {
 			executionRequests = bidElectra.ExecutionRequests()
@@ -89,7 +89,7 @@ func (vs *Server) deposits(
 	}

 	if !vs.Eth1InfoFetcher.ExecutionClientConnected() {
-		log.Warn("not connected to eth1 node, skip pending deposit insertion")
+		log.Warn("Not connected to eth1 node, skip pending deposit insertion")
 		return []*ethpb.Deposit{}, nil
 	}

@@ -113,7 +113,7 @@ func (vs *Server) deposits(
 	// If there are no pending deposits, exit early.
 	allPendingContainers := vs.PendingDepositsFetcher.PendingContainers(ctx, canonicalEth1DataHeight)
 	if len(allPendingContainers) == 0 {
-		log.Debug("no pending deposits for inclusion in block")
+		log.Debug("No pending deposits for inclusion in block")
 		return []*ethpb.Deposit{}, nil
 	}

@@ -80,7 +80,7 @@ func (vs *Server) getLocalPayloadFromEngine(

 	val, tracked := vs.TrackedValidatorsCache.Validator(proposerId)
 	if !tracked {
-		logrus.WithFields(logFields).Warn("could not find tracked proposer index")
+		logrus.WithFields(logFields).Warn("Could not find tracked proposer index")
 	}
 	setFeeRecipientIfBurnAddress(&val)

@@ -3271,7 +3271,7 @@ func TestProposer_GetParentHeadState(t *testing.T) {
 		require.NoError(t, err)
 		require.Equal(t, [32]byte(str), [32]byte(headStr))
 		require.NotEqual(t, [32]byte(str), [32]byte(genesisStr))
-		require.LogsContain(t, hook, "late block attempted reorg failed")
+		require.LogsContain(t, hook, "Late block attempted reorg failed")
 	})
 }

@@ -247,7 +247,7 @@ func GetChunkFromDatabase(

 func closeDB(d *slasherkv.Store) {
 	if err := d.Close(); err != nil {
-		log.WithError(err).Error("could not close database")
+		log.WithError(err).Error("Could not close database")
 	}
 }

@@ -151,7 +151,7 @@ func (rs *stateReplayer) ReplayToSlot(ctx context.Context, replayTo primitives.S
 			"startSlot": s.Slot(),
 			"endSlot":   replayTo,
 			"diff":      replayTo - s.Slot(),
-		}).Debug("calling process_slots on remaining slots")
+		}).Debug("Calling process_slots on remaining slots")

 		// err will be handled after the bookend log
 		s, err = ReplayProcessSlots(ctx, s, replayTo)
@@ -161,7 +161,7 @@ func (rs *stateReplayer) ReplayToSlot(ctx context.Context, replayTo primitives.S
 	duration := time.Since(start)
 	log.WithFields(logrus.Fields{
 		"duration": duration,
-	}).Debug("time spent in process_slots")
+	}).Debug("Time spent in process_slots")
 	replayToSlotSummary.Observe(float64(duration.Milliseconds()))

 	return s, nil
@@ -59,7 +59,7 @@ func hashValidatorHelper(validators []*ethpb.Validator, roots [][32]byte, j int,
 	for i := 0; i < groupSize; i++ {
 		fRoots, err := ValidatorFieldRoots(validators[j*groupSize+i])
 		if err != nil {
-			logrus.WithError(err).Error("could not get validator field roots")
+			logrus.WithError(err).Error("Could not get validator field roots")
 			return
 		}
 		for k, root := range fRoots {
@@ -365,7 +365,7 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
 	f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
 	robs, err := sortedBlockWithVerifiedBlobSlice(blocks)
 	if err != nil {
-		log.WithField("peer", p).WithError(err).Debug("invalid BeaconBlocksByRange response")
+		log.WithField("peer", p).WithError(err).Debug("Invalid BeaconBlocksByRange response")
 		continue
 	}
 	if len(features.Get().BlacklistedRoots) > 0 {
@@ -106,7 +106,7 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
 	blocksFetcher := cfg.blocksFetcher
 	if blocksFetcher == nil {
 		if cfg.bs == nil {
-			log.Warn("rpc fetcher starting without blob availability cache, duplicate blobs may be requested.")
+			log.Warn("Rpc fetcher starting without blob availability cache, duplicate blobs may be requested.")
 		}
 		blocksFetcher = newBlocksFetcher(ctx, &blocksFetcherConfig{
 			ctxMap: cfg.ctxMap,
@@ -153,7 +153,7 @@ func (s *Service) processFetchedData(ctx context.Context, data *blocksQueueFetch
 func (s *Service) processFetchedDataRegSync(ctx context.Context, data *blocksQueueFetchedData) (uint64, error) {
 	bwb, err := validUnprocessed(ctx, data.bwb, s.cfg.Chain.HeadSlot(), s.isProcessedBlock)
 	if err != nil {
-		log.WithError(err).Debug("batch did not contain a valid sequence of unprocessed blocks")
+		log.WithError(err).Debug("Batch did not contain a valid sequence of unprocessed blocks")
 		return 0, err
 	}
 	if len(bwb) == 0 {
@@ -130,7 +130,7 @@ func (s *Service) Start() {
 	log.Info("Waiting for state to be initialized")
 	clock, err := s.cfg.ClockWaiter.WaitForClock(s.ctx)
 	if err != nil {
-		log.WithError(err).Error("initial-sync failed to receive startup event")
+		log.WithError(err).Error("Initial-sync failed to receive startup event")
 		return
 	}
 	s.clock = clock
@@ -138,7 +138,7 @@ func (s *Service) Start() {
 	ctxMap, err := sync.ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
 	if err != nil {
 		log.WithField("genesisValidatorRoot", clock.GenesisValidatorsRoot()).
-			WithError(err).Error("unable to initialize context version map using genesis validator")
+			WithError(err).Error("Unable to initialize context version map using genesis validator")
 		return
 	}
 	s.ctxMap = ctxMap
@@ -231,7 +231,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
 			t.Fatalf("Test should have exited by now, timed out")
 		}
 		assert.LogsContain(t, hook, "Waiting for state to be initialized")
-		assert.LogsContain(t, hook, "initial-sync failed to receive startup event")
+		assert.LogsContain(t, hook, "Initial-sync failed to receive startup event")
 		assert.LogsDoNotContain(t, hook, "Subscription to state notifier failed")
 	})

@@ -54,7 +54,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
 			"endSlot": rp.end,
 			"size":    rp.size,
 			"current": s.cfg.clock.CurrentSlot(),
-		}).Debug("error in validating range availability")
+		}).Debug("Error in validating range availability")
 		s.writeErrorResponseToStream(responseCodeResourceUnavailable, p2ptypes.ErrResourceUnavailable.Error(), stream)
 		tracing.AnnotateError(span, err)
 		return nil
@@ -78,7 +78,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
 	defer ticker.Stop()
 	batcher, err := newBlockRangeBatcher(rp, s.cfg.beaconDB, s.rateLimiter, s.cfg.chain.IsCanonical, ticker)
 	if err != nil {
-		log.WithError(err).Info("error in BlocksByRange batch")
+		log.WithError(err).Info("Error in BlocksByRange batch")
 		s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
 		tracing.AnnotateError(span, err)
 		return err
@@ -92,7 +92,7 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
 		if sc.Slot() < minReqSlot {
 			s.writeErrorResponseToStream(responseCodeResourceUnavailable, types.ErrBlobLTMinRequest.Error(), stream)
 			log.WithError(types.ErrBlobLTMinRequest).
-				Debugf("requested blob for block %#x before minimum_request_epoch", blobIdents[i].BlockRoot)
+				Debugf("Requested blob for block %#x before minimum_request_epoch", blobIdents[i].BlockRoot)
 			return types.ErrBlobLTMinRequest
 		}

@@ -110,7 +110,7 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
 	}

 	if err := batch.error(); err != nil {
-		log.WithError(err).Debug("error in DataColumnSidecarsByRange batch")
+		log.WithError(err).Debug("Error in DataColumnSidecarsByRange batch")

 		// If we hit a rate limit, the error response has already been written, and the stream is already closed.
 		if !errors.Is(err, p2ptypes.ErrRateLimited) {
@@ -146,7 +146,7 @@ func (bv *ROBlobVerifier) NotFromFutureSlot() (err error) {
 	earliestStart := bv.clock.SlotStart(bv.blob.Slot()).Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration())
 	// If the system time is still before earliestStart, we consider the blob from a future slot and return an error.
 	if bv.clock.Now().Before(earliestStart) {
-		log.WithFields(logging.BlobFields(bv.blob)).Debug("sidecar slot is too far in the future")
+		log.WithFields(logging.BlobFields(bv.blob)).Debug("Sidecar slot is too far in the future")
 		return errFromFutureSlot
 	}
 	return nil
@@ -163,7 +163,7 @@ func (bv *ROBlobVerifier) SlotAboveFinalized() (err error) {
 		return errors.Wrapf(errSlotNotAfterFinalized, "error computing epoch start slot for finalized checkpoint (%d) %s", fcp.Epoch, err.Error())
 	}
 	if bv.blob.Slot() <= fSlot {
-		log.WithFields(logging.BlobFields(bv.blob)).Debug("sidecar slot is not after finalized checkpoint")
+		log.WithFields(logging.BlobFields(bv.blob)).Debug("Sidecar slot is not after finalized checkpoint")
 		return errSlotNotAfterFinalized
 	}
 	return nil
@@ -180,7 +180,7 @@ func (bv *ROBlobVerifier) ValidProposerSignature(ctx context.Context) (err error
 	if seen {
 		blobVerificationProposerSignatureCache.WithLabelValues("hit-valid").Inc()
 		if err != nil {
-			log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("reusing failed proposer signature validation from cache")
+			log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("Reusing failed proposer signature validation from cache")
 			blobVerificationProposerSignatureCache.WithLabelValues("hit-invalid").Inc()
 			return ErrInvalidProposerSignature
 		}
@@ -191,12 +191,12 @@ func (bv *ROBlobVerifier) ValidProposerSignature(ctx context.Context) (err error
 	// Retrieve the parent state to fallback to full verification.
 	parent, err := bv.parentState(ctx)
 	if err != nil {
-		log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("could not replay parent state for blob signature verification")
+		log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("Could not replay parent state for blob signature verification")
 		return ErrInvalidProposerSignature
 	}
 	// Full verification, which will subsequently be cached for anything sharing the signature cache.
 	if err = bv.sc.VerifySignature(sd, parent); err != nil {
-		log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("signature verification failed")
+		log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("Signature verification failed")
 		return ErrInvalidProposerSignature
 	}
 	return nil
@@ -213,7 +213,7 @@ func (bv *ROBlobVerifier) SidecarParentSeen(parentSeen func([32]byte) bool) (err
 	if bv.fc.HasNode(bv.blob.ParentRoot()) {
 		return nil
 	}
-	log.WithFields(logging.BlobFields(bv.blob)).Debug("parent root has not been seen")
+	log.WithFields(logging.BlobFields(bv.blob)).Debug("Parent root has not been seen")
 	return errSidecarParentNotSeen
 }

@@ -222,7 +222,7 @@ func (bv *ROBlobVerifier) SidecarParentSeen(parentSeen func([32]byte) bool) (err
 func (bv *ROBlobVerifier) SidecarParentValid(badParent func([32]byte) bool) (err error) {
 	defer bv.recordResult(RequireSidecarParentValid, &err)
 	if badParent != nil && badParent(bv.blob.ParentRoot()) {
-		log.WithFields(logging.BlobFields(bv.blob)).Debug("parent root is invalid")
+		log.WithFields(logging.BlobFields(bv.blob)).Debug("Parent root is invalid")
 		return errSidecarParentInvalid
 	}
 	return nil
@@ -234,7 +234,7 @@ func (bv *ROBlobVerifier) SidecarParentSlotLower() (err error) {
 	defer bv.recordResult(RequireSidecarParentSlotLower, &err)
 	parentSlot, err := bv.fc.Slot(bv.blob.ParentRoot())
 	if err != nil {
-		return errors.Wrap(errSlotNotAfterParent, "parent root not in forkchoice")
+		return errors.Wrap(errSlotNotAfterParent, "Parent root not in forkchoice")
 	}
 	if parentSlot >= bv.blob.Slot() {
 		return errSlotNotAfterParent
@@ -248,7 +248,7 @@ func (bv *ROBlobVerifier) SidecarParentSlotLower() (err error) {
 func (bv *ROBlobVerifier) SidecarDescendsFromFinalized() (err error) {
 	defer bv.recordResult(RequireSidecarDescendsFromFinalized, &err)
 	if !bv.fc.HasNode(bv.blob.ParentRoot()) {
-		log.WithFields(logging.BlobFields(bv.blob)).Debug("parent root not in forkchoice")
+		log.WithFields(logging.BlobFields(bv.blob)).Debug("Parent root not in forkchoice")
 		return errSidecarNotFinalizedDescendent
 	}
 	return nil
@@ -259,7 +259,7 @@ func (bv *ROBlobVerifier) SidecarDescendsFromFinalized() (err error) {
 func (bv *ROBlobVerifier) SidecarInclusionProven() (err error) {
 	defer bv.recordResult(RequireSidecarInclusionProven, &err)
 	if err = blocks.VerifyKZGInclusionProof(bv.blob); err != nil {
-		log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("sidecar inclusion proof verification failed")
+		log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("Sidecar inclusion proof verification failed")
 		return ErrSidecarInclusionProofInvalid
 	}
 	return nil
@@ -271,7 +271,7 @@ func (bv *ROBlobVerifier) SidecarInclusionProven() (err error) {
 func (bv *ROBlobVerifier) SidecarKzgProofVerified() (err error) {
 	defer bv.recordResult(RequireSidecarKzgProofVerified, &err)
 	if err = bv.verifyBlobCommitment(bv.blob); err != nil {
-		log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("kzg commitment proof verification failed")
+		log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("Kzg commitment proof verification failed")
 		return ErrSidecarKzgProofInvalid
 	}
 	return nil
@@ -297,19 +297,19 @@ func (bv *ROBlobVerifier) SidecarProposerExpected(ctx context.Context) (err erro
 	if !cached {
 		pst, err := bv.parentState(ctx)
 		if err != nil {
-			log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("state replay to parent_root failed")
+			log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("State replay to parent_root failed")
 			return errSidecarUnexpectedProposer
 		}
 		idx, err = bv.pc.ComputeProposer(ctx, bv.blob.ParentRoot(), bv.blob.Slot(), pst)
 		if err != nil {
-			log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("error computing proposer index from parent state")
+			log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("Error computing proposer index from parent state")
 			return errSidecarUnexpectedProposer
 		}
 	}
 	if idx != bv.blob.ProposerIndex() {
 		log.WithError(errSidecarUnexpectedProposer).
 			WithFields(logging.BlobFields(bv.blob)).WithField("expectedProposer", idx).
-			Debug("unexpected blob proposer")
+			Debug("Unexpected blob proposer")
 		return errSidecarUnexpectedProposer
 	}
 	return nil
@@ -88,7 +88,7 @@ func (c *sigCache) VerifySignature(sig signatureData, v validatorAtIndexer) (err
 		if err == nil {
 			c.Add(sig, true)
 		} else {
-			log.WithError(err).WithFields(sig.logFields()).Debug("caching failed signature verification result")
+			log.WithError(err).WithFields(sig.logFields()).Debug("Caching failed signature verification result")
 			c.Add(sig, false)
 		}
 	}()
@@ -134,7 +134,7 @@ func (c *sigCache) SignatureVerified(sig signatureData) (bool, error) {
 	}
 	verified, ok := val.(bool)
 	if !ok {
-		log.WithFields(sig.logFields()).Debug("ignoring invalid value found in signature cache")
+		log.WithFields(sig.logFields()).Debug("Ignoring invalid value found in signature cache")
 		// This shouldn't happen, and if it does, the caller should treat it as a cache miss and run verification
 		// again to correctly populate the cache key.
 		return false, nil
changelog/tt_chicken.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+### Added
+
+- Add log capitalization analyzer and apply changes across codebase.
@@ -31,7 +31,7 @@ func BeaconNodeOptions(c *cli.Context) ([]node.Option, error) {
 	statePath := c.Path(StatePath.Name)
 	remoteURL := c.String(BeaconAPIURL.Name)
 	if remoteURL == "" && c.String(checkpoint.RemoteURL.Name) != "" {
-		log.Infof("using checkpoint sync url %s for value in --%s flag", c.String(checkpoint.RemoteURL.Name), BeaconAPIURL.Name)
+		log.Infof("Using checkpoint sync url %s for value in --%s flag", c.String(checkpoint.RemoteURL.Name), BeaconAPIURL.Name)
 		remoteURL = c.String(checkpoint.RemoteURL.Name)
 	}
 	if remoteURL != "" {
@@ -135,7 +135,7 @@ func run(ctx *cli.Context) error {
 		}
 		err = upd.Update(r)
 		if err != nil {
-			log.WithError(err).Error("client-stats collector error")
+			log.WithError(err).Error("Client stats collector error")
 			continue
 		}
 	}
@@ -67,13 +67,13 @@ func cliActionDownload(_ *cli.Context) error {
 	if err != nil {
 		return err
 	}
-	log.Printf("saved ssz-encoded block to %s", blockPath)
+	log.Printf("Saved ssz-encoded block to %s", blockPath)

 	statePath, err := od.SaveState(cwd)
 	if err != nil {
 		return err
 	}
-	log.Printf("saved ssz-encoded state to %s", statePath)
+	log.Printf("Saved ssz-encoded state to %s", statePath)

 	return nil
 }
@@ -233,7 +233,7 @@ func generateGenesis(ctx context.Context) (state.BeaconState, error) {
 		if err != nil {
 			return nil, err
 		}
-		log.Printf("reading deposits from JSON at %s", expanded)
+		log.Printf("Reading deposits from JSON at %s", expanded)
 		b, err := os.ReadFile(expanded) // #nosec G304
 		if err != nil {
 			return nil, err
@@ -44,7 +44,7 @@ func WriteSsz(w http.ResponseWriter, respSsz []byte) {
 	w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
 	w.Header().Set("Content-Type", api.OctetStreamMediaType)
 	if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(respSsz))); err != nil {
-		log.WithError(err).Error("could not write response message")
+		log.WithError(err).Error("Could not write response message")
 	}
 }

@@ -223,5 +223,13 @@
       ".*/main\.go": "main methods are OK",
       "external/.*": "Third party code"
     }
   },
+  "logcapitalization": {
+    "exclude_files": {
+      "external/.*": "Third party code",
+      "rules_go_work-.*": "Third party code",
+      ".*/.*_test\.go": "Test logs can be less formal",
+      "tools/analyzers/logcapitalization/testdata/.*": "Analyzer testdata has to break rules"
+    }
+  }
 }
@@ -177,7 +177,7 @@ func (m *Miner) initAttempt(ctx context.Context, attempt int) (*os.File, error)
 	if err = helpers.WaitForTextInFile(minerLog, "Started P2P networking"); err != nil {
 		kerr := runCmd.Process.Kill()
 		if kerr != nil {
-			log.WithError(kerr).Error("error sending kill to failed miner command process")
+			log.WithError(kerr).Error("Error sending kill to failed miner command process")
 		}
 		return nil, fmt.Errorf("P2P log not found, this means the eth1 chain had issues starting: %w", err)
 	}
@@ -194,7 +194,7 @@ func (m *Miner) Start(ctx context.Context) error {
 	for attempt := 0; attempt < 3; attempt++ {
 		minerLog, retryErr = m.initAttempt(ctx, attempt)
 		if retryErr == nil {
-			log.Infof("miner started after %d retries", attempt)
+			log.Infof("Miner started after %d retries", attempt)
 			break
 		}
 	}
@@ -236,7 +236,7 @@ func (m *Miner) Start(ctx context.Context) error {
 	}
 	dCount, err := depositContractCaller.GetDepositCount(&bind.CallOpts{})
 	if err != nil {
-		log.Error("failed to call get_deposit_count method of deposit contract")
+		log.Error("Failed to call get_deposit_count method of deposit contract")
 		return err
 	}
 	log.Infof("deposit contract count=%d", dCount)
@@ -135,13 +135,13 @@ func (node *Node) Start(ctx context.Context) error {
 		if err = helpers.WaitForTextInFile(errLog, "Node revalidated"); err != nil {
 			kerr := runCmd.Process.Kill()
 			if kerr != nil {
-				log.WithError(kerr).Error("error sending kill to failed node command process")
+				log.WithError(kerr).Error("Error sending kill to failed node command process")
 			}
 			retryErr = fmt.Errorf("the first node revalidated log not found, this means the eth1 chain had issues starting: %w", err)
 			continue
 		}
 		node.cmd = runCmd
-		log.Infof("eth1 node started after %d retries", retries)
+		log.Infof("Eth1 node started after %d retries", retries)
 		break
 	}
 	if retryErr != nil {
@@ -210,6 +210,6 @@ func doSSZPOSTRequest(template, requestPath string, beaconNodeIdx int, postObj i

 func closeBody(body io.Closer) {
 	if err := body.Close(); err != nil {
-		log.WithError(err).Error("could not close response body")
+		log.WithError(err).Error("Could not close response body")
 	}
 }
@@ -107,7 +107,7 @@ func feeRecipientIsPresent(_ *types.EvaluationContext, conns ...*grpc.ClientConn
 			continue
 		}
 		if len(payload.FeeRecipient) == 0 || hexutil.Encode(payload.FeeRecipient) == params.BeaconConfig().EthBurnAddressHex {
-			log.WithField("proposerIndex", bb.ProposerIndex).WithField("slot", bb.Slot).Error("fee recipient eval bug")
+			log.WithField("proposerIndex", bb.ProposerIndex).WithField("slot", bb.Slot).Error("Fee recipient eval bug")
 			return errors.New("fee recipient is not set")
 		}

@@ -138,7 +138,7 @@ func feeRecipientIsPresent(_ *types.EvaluationContext, conns ...*grpc.ClientConn
 				WithField("slot", bb.Slot).
 				WithField("proposerIndex", bb.ProposerIndex).
 				WithField("feeRecipient", fr.Hex()).
-				Warn("unknown key observed, not a deterministically generated key")
+				Warn("Unknown key observed, not a deterministically generated key")
 			return errors.New("unknown key observed, not a deterministically generated key")
 		}

tools/analyzers/logcapitalization/BUILD.bazel (new file, 26 lines)
@@ -0,0 +1,26 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["analyzer.go"],
    importpath = "github.com/OffchainLabs/prysm/v6/tools/analyzers/logcapitalization",
    visibility = ["//visibility:public"],
    deps = [
        "@org_golang_x_tools//go/analysis:go_default_library",
        "@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
        "@org_golang_x_tools//go/ast/inspector:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["analyzer_test.go"],
    data = glob(["testdata/**"]) + [
        "@go_sdk//:files",
    ],
    deps = [
        ":go_default_library",
        "//build/bazel:go_default_library",
        "@org_golang_x_tools//go/analysis/analysistest:go_default_library",
    ],
)
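
Assuming the target names declared above, the analyzer's unit test can presumably be run in isolation with: bazel test //tools/analyzers/logcapitalization:go_default_test
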
tools/analyzers/logcapitalization/analyzer.go (new file, 333 lines)
@@ -0,0 +1,333 @@
// Package logcapitalization implements a static analyzer to ensure all log messages
// start with a capitalized letter for consistent log formatting.
package logcapitalization

import (
	"errors"
	"go/ast"
	"go/token"
	"strconv"
	"strings"
	"unicode"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/passes/inspect"
	"golang.org/x/tools/go/ast/inspector"
)

// Doc explaining the tool.
const Doc = "Tool to enforce that all log messages start with a capitalized letter"

var errLogNotCapitalized = errors.New("log message should start with a capitalized letter for consistent formatting")

// Analyzer runs static analysis.
var Analyzer = &analysis.Analyzer{
	Name:     "logcapitalization",
	Doc:      Doc,
	Requires: []*analysis.Analyzer{inspect.Analyzer},
	Run:      run,
}

func run(pass *analysis.Pass) (interface{}, error) {
	inspection, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
	if !ok {
		return nil, errors.New("analyzer is not type *inspector.Inspector")
	}

	nodeFilter := []ast.Node{
		(*ast.CallExpr)(nil),
		(*ast.File)(nil),
	}

	// Track imports that might be used for logging
	hasLogImport := false
	logPackageAliases := make(map[string]bool)

	// Common logging functions that output messages
	logFunctions := []string{
		// logrus
		"Info", "Infof", "InfoWithFields",
		"Debug", "Debugf", "DebugWithFields",
		"Warn", "Warnf", "WarnWithFields",
		"Error", "ErrorWithFields",
		"Fatal", "Fatalf", "FatalWithFields",
		"Panic", "Panicf", "PanicWithFields",
		"Print", "Printf", "Println",
		"Log", "Logf",
		// standard log
		"Print", "Printf", "Println",
		"Fatal", "Fatalf", "Fatalln",
		"Panic", "Panicf", "Panicln",
		// fmt excluded - often used for user prompts, not logging
	}

	inspection.Preorder(nodeFilter, func(node ast.Node) {
		switch stmt := node.(type) {
		case *ast.File:
			// Reset per file
			hasLogImport = false
			logPackageAliases = make(map[string]bool)

			// Check imports for logging packages
			for _, imp := range stmt.Imports {
				if imp.Path != nil {
					path := strings.Trim(imp.Path.Value, "\"")
					if isLoggingPackage(path) {
						hasLogImport = true

						// Track package alias
						if imp.Name != nil {
							logPackageAliases[imp.Name.Name] = true
						} else {
							// Default package name from path
							parts := strings.Split(path, "/")
							if len(parts) > 0 {
								logPackageAliases[parts[len(parts)-1]] = true
							}
						}
					}
				}
			}

		case *ast.CallExpr:
			if !hasLogImport {
				return
			}

			// Check if this is a logging function call
			if !isLoggingCall(stmt, logFunctions, logPackageAliases) {
				return
			}

			// Check the first argument (message) for capitalization
			if len(stmt.Args) > 0 {
				firstArg := stmt.Args[0]

				// Check if it's a format function (like Printf, Infof)
				if isFormatFunction(stmt) {
					checkFormatStringCapitalization(firstArg, pass, node)
				} else {
					checkMessageCapitalization(firstArg, pass, node)
				}
			}
		}
	})

	return nil, nil
}

// isLoggingPackage checks if the import path is a logging package
func isLoggingPackage(path string) bool {
	loggingPaths := []string{
		"github.com/sirupsen/logrus",
		"log",
		"github.com/rs/zerolog",
		"go.uber.org/zap",
		"github.com/golang/glog",
		"k8s.io/klog",
	}

	for _, logPath := range loggingPaths {
		if strings.Contains(path, logPath) {
			return true
		}
	}
	return false
}

// isLoggingCall checks if the call expression is a logging function
func isLoggingCall(call *ast.CallExpr, logFunctions []string, aliases map[string]bool) bool {
	var functionName string
	var packageName string

	switch fun := call.Fun.(type) {
	case *ast.Ident:
		// Direct function call
		functionName = fun.Name
	case *ast.SelectorExpr:
		// Package.Function call
		functionName = fun.Sel.Name
		if ident, ok := fun.X.(*ast.Ident); ok {
			packageName = ident.Name
		}
	default:
		return false
	}

	// Check if it's a logging function
	for _, logFunc := range logFunctions {
		if functionName == logFunc {
			// If no package specified, could be a logging call
			if packageName == "" {
				return true
			}
			// Check if package is a known logging package alias
			if aliases[packageName] {
				return true
			}
			// Check for common logging package names
			if isCommonLogPackage(packageName) {
				return true
			}
		}
	}

	return false
}

// isCommonLogPackage checks for common logging package names
func isCommonLogPackage(pkg string) bool {
	common := []string{"log", "logrus", "zerolog", "zap", "glog", "klog"}
	for _, c := range common {
		if pkg == c {
			return true
		}
	}
	return false
}

// isFormatFunction checks if this is a format function (ending with 'f')
func isFormatFunction(call *ast.CallExpr) bool {
	switch fun := call.Fun.(type) {
	case *ast.Ident:
		return strings.HasSuffix(fun.Name, "f")
	case *ast.SelectorExpr:
		return strings.HasSuffix(fun.Sel.Name, "f")
	}
	return false
}

// checkFormatStringCapitalization checks if format strings start with capital letter
func checkFormatStringCapitalization(expr ast.Expr, pass *analysis.Pass, node ast.Node) {
	if basicLit, ok := expr.(*ast.BasicLit); ok && basicLit.Kind == token.STRING {
		if len(basicLit.Value) >= 3 { // At least quotes + one character
			unquoted, err := strconv.Unquote(basicLit.Value)
			if err != nil {
				return
			}

			if !isCapitalized(unquoted) {
				pass.Reportf(expr.Pos(),
					"%s: format string should start with a capital letter (found: %q)",
					errLogNotCapitalized.Error(),
					getFirstWord(unquoted))
			}
		}
	}
}

// checkMessageCapitalization checks if message strings start with capital letter
func checkMessageCapitalization(expr ast.Expr, pass *analysis.Pass, node ast.Node) {
	switch e := expr.(type) {
	case *ast.BasicLit:
		if e.Kind == token.STRING && len(e.Value) >= 3 {
			unquoted, err := strconv.Unquote(e.Value)
			if err != nil {
				return
			}

			if !isCapitalized(unquoted) {
				pass.Reportf(expr.Pos(),
					"%s: log message should start with a capital letter (found: %q)",
					errLogNotCapitalized.Error(),
					getFirstWord(unquoted))
			}
		}
	case *ast.BinaryExpr:
		// For string concatenation, check the first part
		if e.Op == token.ADD {
			checkMessageCapitalization(e.X, pass, node)
		}
	}
}

// isCapitalized checks if a string starts with a capital letter
func isCapitalized(s string) bool {
	if len(s) == 0 {
		return true // Empty strings are OK
	}

	// Skip leading whitespace
	trimmed := strings.TrimLeft(s, " \t\n\r")
	if len(trimmed) == 0 {
		return true // Only whitespace is OK
	}

	// Get the first character
	firstRune := []rune(trimmed)[0]

	// Check for special cases that are acceptable
	if isAcceptableStart(firstRune, trimmed) {
		return true
	}

	// Must be uppercase letter
	return unicode.IsUpper(firstRune)
}

// isAcceptableStart checks for acceptable ways to start log messages
func isAcceptableStart(firstRune rune, s string) bool {
	// Numbers are OK
	if unicode.IsDigit(firstRune) {
		return true
	}

	// Special characters that are OK to start with
	acceptableChars := []rune{'%', '$', '/', '\\', '[', '(', '{', '"', '\'', '`', '-'}
	for _, char := range acceptableChars {
		if firstRune == char {
			return true
		}
	}

	// URLs/paths are OK
	if strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://") || strings.HasPrefix(s, "file://") {
		return true
	}

	// Command line flags are OK (--flag, -flag)
	if strings.HasPrefix(s, "--") || (strings.HasPrefix(s, "-") && len(s) > 1 && unicode.IsLetter([]rune(s)[1])) {
		return true
	}

	// Configuration keys or technical terms in lowercase are sometimes OK
	if strings.Contains(s, "=") || strings.Contains(s, ":") {
		// Looks like a key=value or key: value format
		return true
	}

	// Technical keywords that are acceptable in lowercase
	technicalKeywords := []string{"gRPC"}

	// Check if the string starts with any technical keyword
	lowerS := strings.ToLower(s)
	for _, keyword := range technicalKeywords {
		if strings.HasPrefix(lowerS, strings.ToLower(keyword)) {
			return true
		}
	}

	return false
}

// getFirstWord extracts the first few characters for error reporting
func getFirstWord(s string) string {
	trimmed := strings.TrimLeft(s, " \t\n\r")
	if len(trimmed) == 0 {
		return s
	}

	words := strings.Fields(trimmed)
	if len(words) > 0 {
		if len(words[0]) > 20 {
			return words[0][:20] + "..."
		}
		return words[0]
	}

	// Fallback to first 20 characters
	if len(trimmed) > 20 {
		return trimmed[:20] + "..."
	}
	return trimmed
}
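
Since the analyzer is exposed as a standard analysis.Analyzer, it could also be driven outside of nogo for ad-hoc checks. A minimal standalone-driver sketch, assuming only the import path declared in the BUILD file above (this driver is not part of the commit):

package main

import (
	"golang.org/x/tools/go/analysis/singlechecker"

	"github.com/OffchainLabs/prysm/v6/tools/analyzers/logcapitalization"
)

// main runs the logcapitalization analyzer over the packages named on the
// command line, e.g. go run . ./beacon-chain/... ./validator/...
func main() {
	singlechecker.Main(logcapitalization.Analyzer)
}
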
tools/analyzers/logcapitalization/analyzer_test.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package logcapitalization_test

import (
	"testing"

	"golang.org/x/tools/go/analysis/analysistest"

	"github.com/OffchainLabs/prysm/v6/build/bazel"
	"github.com/OffchainLabs/prysm/v6/tools/analyzers/logcapitalization"
)

func init() {
	if bazel.BuiltWithBazel() {
		bazel.SetGoEnv()
	}
}

func TestAnalyzer(t *testing.T) {
	testdata := analysistest.TestData()
	analysistest.RunWithSuggestedFixes(t, testdata, logcapitalization.Analyzer, "a")
}
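
The analysistest harness above compiles the package under testdata/src/a and checks the analyzer's reported diagnostics against the // want "..." comments in that file, shown next.
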
tools/analyzers/logcapitalization/testdata/src/a/a.go (new file, vendored, 65 lines)
@@ -0,0 +1,65 @@
package testdata

import (
	logrus "log" // Use standard log package as alias to simulate logrus
)

func BadCapitalization() {
	// These should trigger the analyzer
	logrus.Print("hello world")           // want "log message should start with a capital letter"
	logrus.Printf("starting the process") // want "format string should start with a capital letter"

	// Simulating logrus-style calls
	Info("connection failed")               // want "log message should start with a capital letter"
	Infof("failed to process %d blocks", 5) // want "format string should start with a capital letter"
	Error("low disk space")                 // want "log message should start with a capital letter"
	Debug("processing attestation")         // want "log message should start with a capital letter"

	// More examples
	Warn("validator not found") // want "log message should start with a capital letter"
}

func GoodCapitalization() {
	// These should NOT trigger the analyzer
	logrus.Print("Hello world")
	logrus.Printf("Starting the beacon chain process")

	// Simulating logrus-style calls with proper capitalization
	Info("Connection established successfully")
	Infof("Processing %d blocks in epoch %d", 5, 100)
	Error("Connection failed with timeout")
	Errorf("Failed to process %d blocks", 5)
	Warn("Low disk space detected")
	Debug("Processing attestation for validator")

	// Fun blockchain-specific examples with proper capitalization
	Info("Validator activated successfully")
	Info("New block mined with hash 0x123abc")
	Info("Checkpoint finalized at epoch 50000")
	Info("Sync committee duties assigned")
	Info("Fork choice updated to new head")

	// Acceptable edge cases - these should NOT trigger
	Info("404 validator not found")                      // Numbers are OK
	Info("/eth/v1/beacon/blocks endpoint")               // Paths are OK
	Info("config=mainnet")                               // Config format is OK
	Info("https://beacon-node.example.com")              // URLs are OK
	Infof("%s network started", "mainnet")               // Format specifiers are OK
	Debug("--weak-subjectivity-checkpoint not provided") // CLI flags are OK
	Debug("-v flag enabled")                             // Single dash flags are OK
	Info("--datadir=/tmp/beacon")                        // Flags with values are OK

	// Empty or whitespace
	Info("")  // Empty is OK
	Info(" ") // Just whitespace is OK
}

// Mock logrus-style functions for testing
func Info(msg string) { logrus.Print(msg) }
func Infof(format string, args ...any) { logrus.Printf(format, args...) }
func Error(msg string) { logrus.Print(msg) }
func Errorf(format string, args ...any) { logrus.Printf(format, args...) }
func Warn(msg string) { logrus.Print(msg) }
func Warnf(format string, args ...any) { logrus.Printf(format, args...) }
func Debug(msg string) { logrus.Print(msg) }
func Debugf(format string, args ...any) { logrus.Printf(format, args...) }
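
For reference, given the Reportf format strings in analyzer.go, a call such as logrus.Print("hello world") above would be reported roughly as: log message should start with a capitalized letter for consistent formatting: log message should start with a capital letter (found: "hello").
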
@@ -87,7 +87,7 @@ func main() {
// check if the database file is present.
dbNameWithPath := filepath.Join(*datadir, *dbName)
if _, err := os.Stat(dbNameWithPath); os.IsNotExist(err) {
log.WithError(err).WithField("path", dbNameWithPath).Fatal("could not locate database file")
log.WithError(err).WithField("path", dbNameWithPath).Fatal("Could not locate database file")
}

switch *command {
@@ -104,7 +104,7 @@ func main() {
case "migration-check":
destDbNameWithPath := filepath.Join(*destDatadir, *dbName)
if _, err := os.Stat(destDbNameWithPath); os.IsNotExist(err) {
log.WithError(err).WithField("path", destDbNameWithPath).Fatal("could not locate database file")
log.WithError(err).WithField("path", destDbNameWithPath).Fatal("Could not locate database file")
}
switch *migrationName {
case "validator-entries":
@@ -133,14 +133,14 @@ func printBucketContents(dbNameWithPath string, rowLimit uint64, bucketName stri
dbDirectory := filepath.Dir(dbNameWithPath)
db, openErr := kv.NewKVStore(context.Background(), dbDirectory)
if openErr != nil {
log.WithError(openErr).Fatal("could not open db")
log.WithError(openErr).Fatal("Could not open db")
}

// don't forget to close it when ejecting out of this function.
defer func() {
closeErr := db.Close()
if closeErr != nil {
log.WithError(closeErr).Fatal("could not close db")
log.WithError(closeErr).Fatal("Could not close db")
}
}()

@@ -166,14 +166,14 @@ func readBucketStat(dbNameWithPath string, statsC chan<- *bucketStat) {
// open the raw database file. If the file is busy, then exit.
db, openErr := bolt.Open(dbNameWithPath, 0600, &bolt.Options{Timeout: 1 * time.Second})
if openErr != nil {
log.WithError(openErr).Fatal("could not open db to show bucket stats")
log.WithError(openErr).Fatal("Could not open db to show bucket stats")
}

// make sure we close the database before ejecting out of this function.
defer func() {
closeErr := db.Close()
if closeErr != nil {
log.WithError(closeErr).Fatalf("could not close db after showing bucket stats")
log.WithError(closeErr).Fatalf("Could not close db after showing bucket stats")
}
}()

@@ -188,7 +188,7 @@ func readBucketStat(dbNameWithPath string, statsC chan<- *bucketStat) {
return nil
})
}); viewErr1 != nil {
log.WithError(viewErr1).Fatal("could not read buckets from db while getting list of buckets")
log.WithError(viewErr1).Fatal("Could not read buckets from db while getting list of buckets")
}

// for every bucket, calculate the stats and send it for printing.
@@ -258,7 +258,7 @@ func readStates(ctx context.Context, db *kv.Store, stateC chan<- *modifiedState,
for rowCount, key := range keys {
st, stateErr := db.State(ctx, bytesutil.ToBytes32(key))
if stateErr != nil {
log.WithError(stateErr).Errorf("could not get state for key : %s", hexutils.BytesToHex(key))
log.WithError(stateErr).Errorf("Could not get state for key : %s", hexutils.BytesToHex(key))
continue
}
mst := &modifiedState{
@@ -282,7 +282,7 @@ func readStateSummary(ctx context.Context, db *kv.Store, stateSummaryC chan<- *m
for rowCount, key := range keys {
ss, ssErr := db.StateSummary(ctx, bytesutil.ToBytes32(key))
if ssErr != nil {
log.WithError(ssErr).Errorf("could not get state summary for key : %s", hexutils.BytesToHex(key))
log.WithError(ssErr).Errorf("Could not get state summary for key : %s", hexutils.BytesToHex(key))
continue
}
mst := &modifiedStateSummary{
@@ -377,14 +377,14 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) {
destStateKeys, _ := keysOfBucket(destDbNameWithPath, []byte("state"), MaxUint64)

if len(destStateKeys) < len(sourceStateKeys) {
log.Fatalf("destination keys are lesser then source keys (%d/%d)", len(sourceStateKeys), len(destStateKeys))
log.Fatalf("Destination keys are lesser then source keys (%d/%d)", len(sourceStateKeys), len(destStateKeys))
}

// create the source and destination KV stores.
sourceDbDirectory := filepath.Dir(dbNameWithPath)
sourceDB, openErr := kv.NewKVStore(context.Background(), sourceDbDirectory)
if openErr != nil {
log.WithError(openErr).Fatal("could not open sourceDB")
log.WithError(openErr).Fatal("Could not open sourceDB")
}

destinationDbDirectory := filepath.Dir(destDbNameWithPath)
@@ -394,7 +394,7 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) {
// if you want to avoid this then we should pass the metric name when opening the DB which touches
// too many places.
if openErr.Error() != "duplicate metrics collector registration attempted" {
log.WithError(openErr).Fatalf("could not open sourceDB")
log.WithError(openErr).Fatalf("Could not open sourceDB")
}
}

@@ -402,13 +402,13 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) {
defer func() {
closeErr := sourceDB.Close()
if closeErr != nil {
log.WithError(closeErr).Fatal("could not close sourceDB")
log.WithError(closeErr).Fatal("Could not close sourceDB")
}
}()
defer func() {
closeErr := destDB.Close()
if closeErr != nil {
log.WithError(closeErr).Fatal("could not close sourceDB")
log.WithError(closeErr).Fatal("Could not close sourceDB")
}
}()

@@ -417,11 +417,11 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) {
for rowCount, key := range sourceStateKeys[910:] {
sourceState, stateErr := sourceDB.State(ctx, bytesutil.ToBytes32(key))
if stateErr != nil {
log.WithError(stateErr).WithField("key", hexutils.BytesToHex(key)).Fatalf("could not get from source db, the state for key")
log.WithError(stateErr).WithField("key", hexutils.BytesToHex(key)).Fatalf("Could not get from source db, the state for key")
}
destinationState, stateErr := destDB.State(ctx, bytesutil.ToBytes32(key))
if stateErr != nil {
log.WithError(stateErr).WithField("key", hexutils.BytesToHex(key)).Fatalf("could not get from destination db, the state for key")
log.WithError(stateErr).WithField("key", hexutils.BytesToHex(key)).Fatalf("Could not get from destination db, the state for key")
}
if destinationState == nil {
log.Infof("could not find state in migrated DB: index = %d, slot = %d, epoch = %d, numOfValidators = %d, key = %s",
@@ -435,11 +435,11 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) {
}
sourceStateHash, err := sourceState.HashTreeRoot(ctx)
if err != nil {
log.WithError(err).Fatal("could not find hash of source state")
log.WithError(err).Fatal("Could not find hash of source state")
}
destinationStateHash, err := destinationState.HashTreeRoot(ctx)
if err != nil {
log.WithError(err).Fatal("could not find hash of destination state")
log.WithError(err).Fatal("Could not find hash of destination state")
}
if !bytes.Equal(sourceStateHash[:], destinationStateHash[:]) {
log.Fatalf("state mismatch : key = %s", hexutils.BytesToHex(key))
@@ -452,14 +452,14 @@ func keysOfBucket(dbNameWithPath string, bucketName []byte, rowLimit uint64) ([]
// open the raw database file. If the file is busy, then exit.
db, openErr := bolt.Open(dbNameWithPath, 0600, &bolt.Options{Timeout: 1 * time.Second})
if openErr != nil {
log.WithError(openErr).Fatal("could not open db while getting keys of a bucket")
log.WithError(openErr).Fatal("Could not open db while getting keys of a bucket")
}

// make sure we close the database before ejecting out of this function.
defer func() {
closeErr := db.Close()
if closeErr != nil {
log.WithError(closeErr).Fatal("could not close db while getting keys of a bucket")
log.WithError(closeErr).Fatal("Could not close db while getting keys of a bucket")
}
}()

@@ -484,7 +484,7 @@ func keysOfBucket(dbNameWithPath string, bucketName []byte, rowLimit uint64) ([]
}
return nil
}); viewErr != nil {
log.WithError(viewErr).Fatal("could not read keys of bucket from db")
log.WithError(viewErr).Fatal("Could not read keys of bucket from db")
}
return keys, sizes
}

@@ -49,7 +49,7 @@ func main() {
for _, endpt := range endpts {
conn, err := grpc.Dial(endpt, grpc.WithInsecure())
if err != nil {
log.WithError(err).Fatal("fail to dial")
log.WithError(err).Fatal("Fail to dial")
}
clients[endpt] = pb.NewBeaconChainClient(conn)
}

@@ -18,7 +18,7 @@ import (

func mergeProfiles(p, merge *cover.Profile) {
if p.Mode != merge.Mode {
log.Fatalf("cannot merge profiles with different modes")
log.Fatalf("Cannot merge profiles with different modes")
}
// Since the blocks are sorted, we can keep track of where the last block
// was inserted and only look at the blocks after that as targets for merge
@@ -107,7 +107,7 @@ func main() {
for _, file := range flag.Args() {
profiles, err := cover.ParseProfiles(file)
if err != nil {
log.WithError(err).Fatal("failed to parse profiles")
log.WithError(err).Fatal("Failed to parse profiles")
}
for _, p := range profiles {
merged = addProfile(merged, p)

@@ -393,7 +393,7 @@ func benchmarkHash(sszPath string, sszType string) {
runtime.ReadMemStats(stat)
root, err := stateTrieState.HashTreeRoot(context.Background())
if err != nil {
log.Fatal("couldn't hash")
log.Fatal("Couldn't hash")
}
newStat := &runtime.MemStats{}
runtime.ReadMemStats(newStat)

@@ -37,7 +37,7 @@ func (c *grpcNodeClient) Peers(ctx context.Context, in *empty.Empty) (*ethpb.Pee
func (c *grpcNodeClient) IsHealthy(ctx context.Context) bool {
_, err := c.nodeClient.GetHealth(ctx, &ethpb.HealthRequest{})
if err != nil {
log.WithError(err).Error("failed to get health of node")
log.WithError(err).Error("Failed to get health of node")
return false
}
return true

@@ -33,7 +33,7 @@ func (c *grpcValidatorClient) Duties(ctx context.Context, in *ethpb.DutiesReques
dutiesResponse, err := c.beaconNodeValidatorClient.GetDutiesV2(ctx, in)
if err != nil {
if status.Code(err) == codes.Unimplemented {
log.Warn("beaconNodeValidatorClient.GetDutiesV2() returned status code unavailable, falling back to GetDuties")
log.Warn("GetDutiesV2 returned status code unavailable, falling back to GetDuties")
return c.getDuties(ctx, in)
}
return nil, errors.Wrap(

@@ -275,7 +275,7 @@ func (*FakeValidator) HasProposerSettings() bool {
func (fv *FakeValidator) PushProposerSettings(ctx context.Context, _ primitives.Slot, _ bool) error {
time.Sleep(fv.ProposerSettingWait)
if errors.Is(ctx.Err(), context.DeadlineExceeded) {
log.Error("deadline exceeded")
log.Error("Deadline exceeded")
// can't return error as it will trigger a log.fatal
return nil
}

@@ -565,7 +565,7 @@ func (v *validator) UpdateDuties(ctx context.Context) error {
v.dutiesLock.Lock()
v.duties = nil // Clear assignments so we know to retry the request.
v.dutiesLock.Unlock()
log.WithError(err).Error("error getting validator duties")
log.WithError(err).Error("Error getting validator duties")
return err
}

@@ -1141,7 +1141,7 @@ func (v *validator) PushProposerSettings(ctx context.Context, slot primitives.Sl
if len(signedRegReqs) > 0 {
go func() {
if err := SubmitValidatorRegistrations(ctx, v.validatorClient, signedRegReqs, v.validatorsRegBatchSize); err != nil {
log.WithError(errors.Wrap(ErrBuilderValidatorRegistration, err.Error())).Warn("failed to register validator on builder")
log.WithError(errors.Wrap(ErrBuilderValidatorRegistration, err.Error())).Warn("Failed to register validator on builder")
}
}()
}

@@ -76,7 +76,7 @@ func (km *Keymanager) ImportKeystores(
}
}
if len(importedKeys) == 0 {
log.Warn("no keys were imported")
log.Warn("No keys were imported")
return statuses, nil
}
// 2) Update copied keystore with new keys,clear duplicates in existing set

@@ -243,7 +243,7 @@ func TestLocalKeymanager_ImportKeystores(t *testing.T) {
fmt.Sprintf("incorrect password for key 0x%s", keystores[1].Pubkey),
statuses[1].Message,
)
require.LogsContain(t, hook, "no keys were imported")
require.LogsContain(t, hook, "No keys were imported")
})
t.Run("file write fails during import", func(t *testing.T) {
wallet.HasWriteFileError = true

@@ -174,7 +174,7 @@ func (client *ApiClient) doRequest(ctx context.Context, httpMethod, fullPath str
"status": resp.StatusCode,
"request": string(requestDump),
"response": string(responseDump),
}).Error("web3signer request failed")
}).Error("Web3signer request failed")
}
if resp.StatusCode == http.StatusInternalServerError {
err = fmt.Errorf("internal Web3Signer server error, Signing Request URL: %v Status: %v", fullPath, resp.StatusCode)
@@ -217,6 +217,6 @@ func unmarshalSignatureResponse(responseBody io.ReadCloser) (bls.Signature, erro
// closeBody a utility method to wrap an error for closing
func closeBody(body io.Closer) {
if err := body.Close(); err != nil {
log.WithError(err).Error("could not close response body")
log.WithError(err).Error("Could not close response body")
}
}

@@ -225,7 +225,7 @@ func (c *ValidatorClient) getLegacyDatabaseLocation(

func getWallet(cliCtx *cli.Context) (*wallet.Wallet, error) {
if cliCtx.IsSet(flags.InteropNumValidators.Name) {
log.Info("no wallet required for interop validation")
log.Info("No wallet required for interop validation")
return nil, nil
}
if cliCtx.IsSet(flags.Web3SignerURLFlag.Name) {