Unify log fields (#13654)

* unify fields

* fix tests

Radosław Kapka
2024-02-22 23:40:36 +01:00
committed by GitHub
parent 7a9608ea20
commit 0b261cba5e
45 changed files with 190 additions and 190 deletions
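For context, the rename standardizes every structured log key on lowerCamelCase (previously a mix of snake_case, space-separated, PascalCase, and flag-style keys such as "--wallet-dir"). A minimal sketch of the target style, assuming a package-level logrus entry named log with a per-package prefix field, as Prysm's packages use; the field values below are illustrative:

```go
package main

import "github.com/sirupsen/logrus"

// log mimics Prysm's per-package entry; the "prefix" value is illustrative.
var log = logrus.WithField("prefix", "example")

func main() {
	// Before this commit: keys like "block_root", "ValidatorIndex", "deposit root".
	// After: one lowerCamelCase convention for every structured field.
	log.WithFields(logrus.Fields{
		"blockRoot":      "0x68656c6c6f2d",
		"validatorIndex": 12,
		"slot":           1,
	}).Info("Processed example event")
}
```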

View File

@@ -108,10 +108,10 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
}
log.
-WithField("block_slot", b.Block().Slot()).
+WithField("blockSlot", b.Block().Slot()).
-WithField("state_slot", s.Slot()).
+WithField("stateSlot", s.Slot()).
-WithField("state_root", hexutil.Encode(sr[:])).
+WithField("stateRoot", hexutil.Encode(sr[:])).
-WithField("block_root", hexutil.Encode(br[:])).
+WithField("blockRoot", hexutil.Encode(br[:])).
Info("Downloaded checkpoint sync state and block.")
return &OriginData{
st: s,

View File

@@ -310,8 +310,8 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
for _, failure := range errorJson.Failures {
w := request[failure.Index].Message
log.WithFields(log.Fields{
-"validator_index": w.ValidatorIndex,
+"validatorIndex": w.ValidatorIndex,
-"withdrawal_address": w.ToExecutionAddress,
+"withdrawalAddress": w.ToExecutionAddress,
}).Error(failure.Message)
}
return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)

View File

@@ -57,8 +57,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
b := bytes.NewBuffer(nil)
if r.Body == nil {
log.WithFields(log.Fields{
-"body-base64": "(nil value)",
+"bodyBase64": "(nil value)",
"url": r.URL.String(),
}).Info("builder http request")
return nil
}
@@ -74,8 +74,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
}
r.Body = io.NopCloser(b)
log.WithFields(log.Fields{
-"body-base64": string(body),
+"bodyBase64": string(body),
"url": r.URL.String(),
}).Info("builder http request")
return nil

View File

@@ -419,7 +419,7 @@ func (s *Service) startFromExecutionChain() error {
log.Error("event data is not type *statefeed.ChainStartedData")
return
}
-log.WithField("starttime", data.StartTime).Debug("Received chain start event")
+log.WithField("startTime", data.StartTime).Debug("Received chain start event")
s.onExecutionChainStart(s.ctx, data.StartTime)
return
}

View File

@@ -74,10 +74,10 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
"index": index,
-"deposit root": hex.EncodeToString(depositRoot[:]),
+"depositRoot": hex.EncodeToString(depositRoot[:]),
}).Warn("Ignoring nil deposit insertion")
return errors.New("nil deposit inserted into the cache")
}

View File

@@ -33,10 +33,10 @@ func (c *Cache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum ui
}
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
"index": index,
-"deposit root": hex.EncodeToString(depositRoot[:]),
+"depositRoot": hex.EncodeToString(depositRoot[:]),
}).Warn("Ignoring nil deposit insertion")
return errors.New("nil deposit inserted into the cache")
}

View File

@@ -269,7 +269,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
}
log.WithFields(logrus.Fields{
-"ChainStartTime": chainStartTime,
+"chainStartTime": chainStartTime,
}).Info("Minimum number of validators reached for beacon-chain to start")
s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.ChainStarted,

View File

@@ -639,7 +639,7 @@ func (s *Service) logTillChainStart(ctx context.Context) {
}
fields := logrus.Fields{
-"Additional validators needed": valNeeded,
+"additionalValidatorsNeeded": valNeeded,
}
if secondsLeft > 0 {
fields["Generating genesis state in"] = time.Duration(secondsLeft) * time.Second

View File

@@ -44,11 +44,11 @@ func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.A
// logMessageTimelyFlagsForIndex returns the log message with performance info for the attestation (head, source, target)
func logMessageTimelyFlagsForIndex(idx primitives.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
return logrus.Fields{
-"ValidatorIndex": idx,
+"validatorIndex": idx,
-"Slot": data.Slot,
+"slot": data.Slot,
-"Source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
+"source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
-"Target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
+"target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
-"Head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
+"head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
}
}
@@ -146,12 +146,12 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
aggregatedPerf.totalCorrectTarget++
}
}
-logFields["CorrectHead"] = latestPerf.timelyHead
+logFields["correctHead"] = latestPerf.timelyHead
-logFields["CorrectSource"] = latestPerf.timelySource
+logFields["correctSource"] = latestPerf.timelySource
-logFields["CorrectTarget"] = latestPerf.timelyTarget
+logFields["correctTarget"] = latestPerf.timelyTarget
-logFields["InclusionSlot"] = latestPerf.inclusionSlot
+logFields["inclusionSlot"] = latestPerf.inclusionSlot
-logFields["NewBalance"] = balance
+logFields["newBalance"] = balance
-logFields["BalanceChange"] = balanceChg
+logFields["balanceChange"] = balanceChg
s.latestPerformance[primitives.ValidatorIndex(idx)] = latestPerf
s.aggregatedPerformance[primitives.ValidatorIndex(idx)] = aggregatedPerf
@@ -167,7 +167,7 @@ func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
-log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
+log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping unaggregated attestation due to state not found in cache")
return
}
@@ -190,13 +190,13 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
defer s.Unlock()
if s.trackedIndex(att.AggregatorIndex) {
log.WithFields(logrus.Fields{
-"AggregatorIndex": att.AggregatorIndex,
+"aggregatorIndex": att.AggregatorIndex,
-"Slot": att.Aggregate.Data.Slot,
+"slot": att.Aggregate.Data.Slot,
-"BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
+"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.BeaconBlockRoot)),
-"SourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
+"sourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Source.Root)),
-"TargetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
+"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Target.Root)),
}).Info("Processed attestation aggregation")
aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
@@ -209,7 +209,7 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
-log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
+log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping aggregated attestation due to state not found in cache")
return
}

View File

@@ -55,8 +55,8 @@ func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
s.processIncludedAttestation(context.Background(), state, att)
-wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
+wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
-wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
+wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
@@ -124,8 +124,8 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
}
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processUnaggregatedAttestation(context.Background(), att)
-wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
+wanted1 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
-wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
+wanted2 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
@@ -162,7 +162,7 @@ func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
},
}
s.processAggregatedAttestation(ctx, att)
-require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x000000000000 Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
+require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x000000000000 prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
require.LogsContain(t, hook, "Skipping aggregated attestation due to state not found in cache")
logrus.SetLevel(logrus.InfoLevel)
}
@@ -200,9 +200,9 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) {
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processAggregatedAttestation(ctx, att)
-require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x68656c6c6f2d Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
+require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x68656c6c6f2d prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
-require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
+require.LogsContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2")
-require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
+require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12")
}
func TestProcessAttestations(t *testing.T) {
@@ -240,8 +240,8 @@ func TestProcessAttestations(t *testing.T) {
wrappedBlock, err := blocks.NewBeaconBlock(block)
require.NoError(t, err)
s.processAttestations(ctx, state, wrappedBlock)
-wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
+wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
-wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
+wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
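The expected strings in these tests change their field order as well as their casing. Logrus's text formatter sorts field keys lexicographically by default, and uppercase keys sort before lowercase ones, so renaming e.g. ValidatorIndex to validatorIndex also moves it after prefix in the rendered line. A small hedged sketch of that effect (the values are made up):

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	logger.SetOutput(os.Stdout)
	logger.SetFormatter(&logrus.TextFormatter{DisableColors: true})

	// With the default (sorted) text formatter, keys render alphabetically:
	// msg="Attestation included" prefix=monitor slot=1 validatorIndex=2
	// With the old PascalCase keys, prefix sorted last instead.
	logger.WithFields(logrus.Fields{
		"prefix":         "monitor",
		"validatorIndex": 2,
		"slot":           1,
	}).Info("Attestation included")
}
```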

View File

@@ -39,7 +39,7 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
}
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
-log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
+log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping block collection due to state not found in cache")
return
}
@@ -90,13 +90,13 @@ func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, b
parentRoot := blk.ParentRoot()
log.WithFields(logrus.Fields{
-"ProposerIndex": blk.ProposerIndex(),
+"proposerIndex": blk.ProposerIndex(),
-"Slot": blk.Slot(),
+"slot": blk.Slot(),
-"Version": blk.Version(),
+"version": blk.Version(),
-"ParentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
+"parentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
-"BlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
+"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
-"NewBalance": balance,
+"newBalance": balance,
-"BalanceChange": balanceChg,
+"balanceChange": balanceChg,
}).Info("Proposed beacon block was included")
}
}
@@ -109,11 +109,11 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
idx := slashing.Header_1.Header.ProposerIndex
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
-"ProposerIndex": idx,
+"proposerIndex": idx,
-"Slot": blk.Slot(),
+"slot": blk.Slot(),
-"SlashingSlot": slashing.Header_1.Header.Slot,
+"slashingSlot": slashing.Header_1.Header.Slot,
-"BodyRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
+"bodyRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
-"BodyRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
+"bodyRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
}).Info("Proposer slashing was included")
}
}
@@ -122,16 +122,16 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
for _, idx := range blocks.SlashableAttesterIndices(slashing) {
if s.trackedIndex(primitives.ValidatorIndex(idx)) {
log.WithFields(logrus.Fields{
-"AttesterIndex": idx,
+"attesterIndex": idx,
-"BlockInclusionSlot": blk.Slot(),
+"blockInclusionSlot": blk.Slot(),
-"AttestationSlot1": slashing.Attestation_1.Data.Slot,
+"attestationSlot1": slashing.Attestation_1.Data.Slot,
-"BeaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
+"beaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
-"SourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
+"sourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
-"TargetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
+"targetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
-"AttestationSlot2": slashing.Attestation_2.Data.Slot,
+"attestationSlot2": slashing.Attestation_2.Data.Slot,
-"BeaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
+"beaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
-"SourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
+"sourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
-"TargetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
+"targetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
}).Info("Attester slashing was included")
}
}
@@ -159,19 +159,19 @@ func (s *Service) logAggregatedPerformance() {
percentCorrectTarget := float64(p.totalCorrectTarget) / float64(p.totalAttestedCount)
log.WithFields(logrus.Fields{
-"ValidatorIndex": idx,
+"validatorIndex": idx,
-"StartEpoch": p.startEpoch,
+"startEpoch": p.startEpoch,
-"StartBalance": p.startBalance,
+"startBalance": p.startBalance,
-"TotalRequested": p.totalRequestedCount,
+"totalRequested": p.totalRequestedCount,
-"AttestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
+"attestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
-"BalanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
+"balanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
-"CorrectlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
+"correctlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
-"CorrectlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
+"correctlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
-"CorrectlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
+"correctlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
-"AverageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
+"averageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
-"TotalProposedBlocks": p.totalProposedCount,
+"totalProposedBlocks": p.totalProposedCount,
-"TotalAggregations": p.totalAggregations,
+"totalAggregations": p.totalAggregations,
-"TotalSyncContributions": p.totalSyncCommitteeContributions,
+"totalSyncContributions": p.totalSyncCommitteeContributions,
}).Info("Aggregated performance since launch")
}
}

View File

@@ -44,7 +44,7 @@ func TestProcessSlashings(t *testing.T) {
},
},
},
-wantedErr: "\"Proposer slashing was included\" BodyRoot1= BodyRoot2= ProposerIndex=2",
+wantedErr: "\"Proposer slashing was included\" bodyRoot1= bodyRoot2= prefix=monitor proposerIndex=2",
},
{
name: "Proposer slashing an untracked index",
@@ -89,8 +89,8 @@ func TestProcessSlashings(t *testing.T) {
},
},
},
-wantedErr: "\"Attester slashing was included\" AttestationSlot1=0 AttestationSlot2=0 AttesterIndex=1 " +
+wantedErr: "\"Attester slashing was included\" attestationSlot1=0 attestationSlot2=0 attesterIndex=1 " +
-"BeaconBlockRoot1=0x000000000000 BeaconBlockRoot2=0x000000000000 BlockInclusionSlot=0 SourceEpoch1=1 SourceEpoch2=0 TargetEpoch1=0 TargetEpoch2=0",
+"beaconBlockRoot1=0x000000000000 beaconBlockRoot2=0x000000000000 blockInclusionSlot=0 prefix=monitor sourceEpoch1=1 sourceEpoch2=0 targetEpoch1=0 targetEpoch2=0",
},
{
name: "Attester slashing untracked index",
@@ -150,7 +150,7 @@ func TestProcessProposedBlock(t *testing.T) {
StateRoot: bytesutil.PadTo([]byte("state-world"), 32),
Body: &ethpb.BeaconBlockBody{},
},
-wantedErr: "\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=0x68656c6c6f2d NewBalance=32000000000 ParentRoot=0x68656c6c6f2d ProposerIndex=12 Slot=6 Version=0 prefix=monitor",
+wantedErr: "\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=0x68656c6c6f2d newBalance=32000000000 parentRoot=0x68656c6c6f2d prefix=monitor proposerIndex=12 slot=6 version=0",
},
{
name: "Block proposed by untracked validator",
@@ -225,10 +225,10 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
root, err := b.GetBlock().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
-wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
+wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=%#x newBalance=32000000000 parentRoot=0xf732eaeb7fae prefix=monitor proposerIndex=15 slot=1 version=1", bytesutil.Trunc(root[:]))
-wanted2 := fmt.Sprintf("\"Proposer slashing was included\" BodyRoot1=0x000100000000 BodyRoot2=0x000200000000 ProposerIndex=%d SlashingSlot=0 Slot=1 prefix=monitor", idx)
+wanted2 := fmt.Sprintf("\"Proposer slashing was included\" bodyRoot1=0x000100000000 bodyRoot2=0x000200000000 prefix=monitor proposerIndex=%d slashingSlot=0 slot=1", idx)
-wanted3 := "\"Sync committee contribution included\" BalanceChange=0 ContribCount=3 ExpectedContribCount=3 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor"
+wanted3 := "\"Sync committee contribution included\" balanceChange=0 contribCount=3 expectedContribCount=3 newBalance=32000000000 prefix=monitor validatorIndex=1"
-wanted4 := "\"Sync committee contribution included\" BalanceChange=0 ContribCount=1 ExpectedContribCount=1 NewBalance=32000000000 ValidatorIndex=2 prefix=monitor"
+wanted4 := "\"Sync committee contribution included\" balanceChange=0 contribCount=1 expectedContribCount=1 newBalance=32000000000 prefix=monitor validatorIndex=2"
wrapped, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
s.processBlock(ctx, wrapped)
@@ -278,10 +278,10 @@ func TestLogAggregatedPerformance(t *testing.T) {
}
s.logAggregatedPerformance()
-wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" +
+wanted := "\"Aggregated performance since launch\" attestationInclusion=\"80.00%\"" +
-" AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " +
+" averageInclusionDistance=1.2 balanceChangePct=\"0.95%\" correctlyVotedHeadPct=\"66.67%\" " +
-"CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " +
+"correctlyVotedSourcePct=\"91.67%\" correctlyVotedTargetPct=\"100.00%\" prefix=monitor startBalance=31700000000 " +
-"StartEpoch=0 TotalAggregations=0 TotalProposedBlocks=1 TotalRequested=15 TotalSyncContributions=0 " +
+"startEpoch=0 totalAggregations=0 totalProposedBlocks=1 totalRequested=15 totalSyncContributions=0 " +
-"ValidatorIndex=1 prefix=monitor"
+"validatorIndex=1"
require.LogsContain(t, hook, wanted)
}

View File

@@ -14,8 +14,8 @@ func (s *Service) processExitsFromBlock(blk interfaces.ReadOnlyBeaconBlock) {
idx := exit.Exit.ValidatorIndex
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
-"ValidatorIndex": idx,
+"validatorIndex": idx,
-"Slot": blk.Slot(),
+"slot": blk.Slot(),
}).Info("Voluntary exit was included")
}
}
@@ -28,7 +28,7 @@ func (s *Service) processExit(exit *ethpb.SignedVoluntaryExit) {
defer s.RUnlock()
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
-"ValidatorIndex": idx,
+"validatorIndex": idx,
}).Info("Voluntary exit was processed")
}
}

View File

@@ -43,7 +43,7 @@ func TestProcessExitsFromBlockTrackedIndices(t *testing.T) {
wb, err := blocks.NewBeaconBlock(block)
require.NoError(t, err)
s.processExitsFromBlock(wb)
-require.LogsContain(t, hook, "\"Voluntary exit was included\" Slot=0 ValidatorIndex=2")
+require.LogsContain(t, hook, "\"Voluntary exit was included\" prefix=monitor slot=0 validatorIndex=2")
}
func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) {
@@ -99,7 +99,7 @@ func TestProcessExitP2PTrackedIndices(t *testing.T) {
Signature: make([]byte, 96),
}
s.processExit(exit)
-require.LogsContain(t, hook, "\"Voluntary exit was processed\" ValidatorIndex=1")
+require.LogsContain(t, hook, "\"Voluntary exit was processed\" prefix=monitor validatorIndex=1")
}
func TestProcessExitP2PUntrackedIndices(t *testing.T) {

View File

@@ -21,7 +21,7 @@ func (s *Service) processSyncCommitteeContribution(contribution *ethpb.SignedCon
aggPerf.totalSyncCommitteeAggregations++
s.aggregatedPerformance[idx] = aggPerf
-log.WithField("ValidatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
+log.WithField("validatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
}
}
@@ -69,11 +69,11 @@ func (s *Service) processSyncAggregate(state state.BeaconState, blk interfaces.R
fmt.Sprintf("%d", validatorIdx)).Add(float64(contrib))
log.WithFields(logrus.Fields{
-"ValidatorIndex": validatorIdx,
+"validatorIndex": validatorIdx,
-"ExpectedContribCount": len(committeeIndices),
+"expectedContribCount": len(committeeIndices),
-"ContribCount": contrib,
+"contribCount": contrib,
-"NewBalance": balance,
+"newBalance": balance,
-"BalanceChange": balanceChg,
+"balanceChange": balanceChg,
}).Info("Sync committee contribution included")
}
}

View File

@@ -22,8 +22,8 @@ func TestProcessSyncCommitteeContribution(t *testing.T) {
}
s.processSyncCommitteeContribution(contrib)
-require.LogsContain(t, hook, "\"Sync committee aggregation processed\" ValidatorIndex=1")
+require.LogsContain(t, hook, "\"Sync committee aggregation processed\" prefix=monitor validatorIndex=1")
-require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
+require.LogsDoNotContain(t, hook, "validatorIndex=2")
}
func TestProcessSyncAggregate(t *testing.T) {
@@ -53,7 +53,7 @@ func TestProcessSyncAggregate(t *testing.T) {
require.NoError(t, err)
s.processSyncAggregate(beaconState, wrappedBlock)
-require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=0 ContribCount=1 ExpectedContribCount=4 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor")
+require.LogsContain(t, hook, "\"Sync committee contribution included\" balanceChange=0 contribCount=1 expectedContribCount=4 newBalance=32000000000 prefix=monitor validatorIndex=1")
-require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=100000000 ContribCount=2 ExpectedContribCount=2 NewBalance=32000000000 ValidatorIndex=12 prefix=monitor")
+require.LogsContain(t, hook, "\"Sync committee contribution included\" balanceChange=100000000 contribCount=2 expectedContribCount=2 newBalance=32000000000 prefix=monitor validatorIndex=12")
-require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
+require.LogsDoNotContain(t, hook, "validatorIndex=2")
}

View File

@@ -111,7 +111,7 @@ func (s *Service) Start() {
sort.Slice(tracked, func(i, j int) bool { return tracked[i] < tracked[j] })
log.WithFields(logrus.Fields{
-"ValidatorIndices": tracked,
+"validatorIndices": tracked,
}).Info("Starting service")
go s.run()
@@ -134,7 +134,7 @@ func (s *Service) run() {
}
epoch := slots.ToEpoch(st.Slot())
-log.WithField("Epoch", epoch).Info("Synced to head epoch, starting reporting performance")
+log.WithField("epoch", epoch).Info("Synced to head epoch, starting reporting performance")
s.Lock()
s.initializePerformanceStructures(st, epoch)
@@ -157,7 +157,7 @@ func (s *Service) initializePerformanceStructures(state state.BeaconState, epoch
for idx := range s.TrackedValidators {
balance, err := state.BalanceAtIndex(idx)
if err != nil {
-log.WithError(err).WithField("ValidatorIndex", idx).Error(
+log.WithError(err).WithField("validatorIndex", idx).Error(
"Could not fetch starting balance, skipping aggregated logs.")
balance = 0
}
@@ -276,7 +276,7 @@ func (s *Service) updateSyncCommitteeTrackedVals(state state.BeaconState) {
for idx := range s.TrackedValidators {
syncIdx, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, idx)
if err != nil {
-log.WithError(err).WithField("ValidatorIndex", idx).Error(
+log.WithError(err).WithField("validatorIndex", idx).Error(
"Sync committee assignments will not be reported")
delete(s.trackedSyncCommitteeIndices, idx)
} else if len(syncIdx) == 0 {

View File

@@ -148,7 +148,7 @@ func TestStart(t *testing.T) {
// wait for Logrus
time.Sleep(1000 * time.Millisecond)
require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
-require.LogsContain(t, hook, "\"Starting service\" ValidatorIndices=\"[1 2 12 15]\"")
+require.LogsContain(t, hook, "\"Starting service\" prefix=monitor validatorIndices=\"[1 2 12 15]\"")
s.Lock()
require.Equal(t, s.isLogging, true, "monitor is not running")
s.Unlock()
@@ -237,7 +237,7 @@ func TestMonitorRoutine(t *testing.T) {
// Wait for Logrus
time.Sleep(1000 * time.Millisecond)
-wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
+wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=%#x newBalance=32000000000 parentRoot=0xf732eaeb7fae prefix=monitor proposerIndex=15 slot=1 version=1", bytesutil.Trunc(root[:]))
require.LogsContain(t, hook, wanted1)
} }

View File

@@ -151,19 +151,19 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
if cliCtx.IsSet(flags.TerminalTotalDifficultyOverride.Name) {
c := params.BeaconConfig()
c.TerminalTotalDifficulty = cliCtx.String(flags.TerminalTotalDifficultyOverride.Name)
-log.WithField("terminal block difficult", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
+log.WithField("terminalBlockDifficulty", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
params.OverrideBeaconConfig(c)
}
if cliCtx.IsSet(flags.TerminalBlockHashOverride.Name) {
c := params.BeaconConfig()
c.TerminalBlockHash = common.HexToHash(cliCtx.String(flags.TerminalBlockHashOverride.Name))
-log.WithField("terminal block hash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
+log.WithField("terminalBlockHash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
params.OverrideBeaconConfig(c)
}
if cliCtx.IsSet(flags.TerminalBlockHashActivationEpochOverride.Name) {
c := params.BeaconConfig()
c.TerminalBlockHashActivationEpoch = primitives.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name))
-log.WithField("terminal block hash activation epoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
+log.WithField("terminalBlockHashActivationEpoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
params.OverrideBeaconConfig(c)
}

View File

@@ -426,7 +426,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
-log.WithField("database-path", dbPath).Info("Checking DB")
+log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := kv.NewKVStore(b.ctx, dbPath)
if err != nil {
@@ -529,7 +529,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
-log.WithField("database-path", dbPath).Info("Checking DB")
+log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {

View File

@@ -42,7 +42,7 @@ func (s *Service) prepareForkChoiceAtts() {
switch slotInterval.Interval {
case 0:
duration := time.Since(t)
-log.WithField("Duration", duration).Debug("Aggregated unaggregated attestations")
+log.WithField("duration", duration).Debug("Aggregated unaggregated attestations")
batchForkChoiceAttsT1.Observe(float64(duration.Milliseconds()))
case 1:
batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds()))

View File

@@ -238,7 +238,7 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
m, err := p.BlobStorage.Indices(bytesutil.ToBytes32(root))
if err != nil {
log.WithFields(log.Fields{
-"block root": hexutil.Encode(root),
+"blockRoot": hexutil.Encode(root),
}).Error(errors.Wrapf(err, "could not retrieve blob indices for root %#x", root))
return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob indices for root %#x", root), Reason: core.Internal}
}
@@ -254,8 +254,8 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
vblob, err := p.BlobStorage.Get(bytesutil.ToBytes32(root), index)
if err != nil {
log.WithFields(log.Fields{
-"block root": hexutil.Encode(root),
+"blockRoot": hexutil.Encode(root),
-"blob index": index,
+"blobIndex": index,
}).Error(errors.Wrapf(err, "could not retrieve blob for block root %#x at index %d", root, index))
return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index), Reason: core.Internal}
}

View File

@@ -146,8 +146,8 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1
if shouldRebuildTrie(canonicalEth1Data.DepositCount, uint64(len(upToEth1DataDeposits))) {
log.WithFields(logrus.Fields{
-"unfinalized deposits": len(upToEth1DataDeposits),
+"unfinalizedDeposits": len(upToEth1DataDeposits),
-"total deposit count": canonicalEth1Data.DepositCount,
+"totalDepositCount": canonicalEth1Data.DepositCount,
}).Warn("Too many unfinalized deposits, building a deposit trie from scratch.")
return vs.rebuildDepositTrie(ctx, canonicalEth1Data, canonicalEth1DataHeight)
}
} }

View File

@@ -193,7 +193,7 @@ func (vs *Server) WaitForChainStart(_ *emptypb.Empty, stream ethpb.BeaconNodeVal
if err != nil {
return status.Error(codes.Canceled, "Context canceled")
}
-log.WithField("starttime", clock.GenesisTime()).Debug("Received chain started event")
+log.WithField("startTime", clock.GenesisTime()).Debug("Received chain started event")
log.Debug("Sending genesis time notification to connected validator clients")
gvr := clock.GenesisValidatorsRoot()
res := &ethpb.ChainStartResponse{

View File

@@ -196,7 +196,7 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
if (headCurrentParticipation[valIndex] != 0) || (headPreviousParticipation[valIndex] != 0) ||
(prevCurrentParticipation[valIndex] != 0) || (prevPreviousParticipation[valIndex] != 0) {
-log.WithField("ValidatorIndex", valIndex).Infof("Participation flag found")
+log.WithField("validatorIndex", valIndex).Infof("Participation flag found")
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,

View File

@@ -436,8 +436,8 @@ func verifyAndPopulateBlobs(bwb []blocks2.BlockWithROBlobs, blobs []blocks.ROBlo
if err != nil {
if errors.Is(err, consensus_types.ErrUnsupportedField) {
log.
-WithField("block_slot", block.Slot()).
+WithField("blockSlot", block.Slot()).
-WithField("retention_start", blobWindowStart).
+WithField("retentionStart", blobWindowStart).
Warn("block with slot within blob retention period has version which does not support commitments")
continue
}

View File

@@ -99,7 +99,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
// Skip blocks that are already being processed.
if s.cfg.chain.BlockBeingSynced(blkRoot) {
-log.WithField("BlockRoot", fmt.Sprintf("%#x", blkRoot)).Info("Skipping pending block already being processed")
+log.WithField("blockRoot", fmt.Sprintf("%#x", blkRoot)).Info("Skipping pending block already being processed")
continue
}

View File

@@ -210,5 +210,5 @@ func (l *limiter) retrieveCollector(topic string) (*leakybucket.Collector, error
}
func (_ *limiter) topicLogger(topic string) *logrus.Entry {
-return log.WithField("rate limiter", topic)
+return log.WithField("rateLimiter", topic)
}

View File

@@ -128,7 +128,7 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
defer func() {
if r := recover(); r != nil {
log.WithField("error", r).
-WithField("recovered_at", "registerRPC").
+WithField("recoveredAt", "registerRPC").
WithField("stack", string(debug.Stack())).
Error("Panic occurred")
}

View File

@@ -106,7 +106,7 @@ func (s *Service) sendGoodByeMessage(ctx context.Context, code p2ptypes.RPCGoodb
}
defer closeStream(stream, log)
-log := log.WithField("Reason", goodbyeMessage(code))
+log := log.WithField("reason", goodbyeMessage(code))
log.WithField("peer", stream.Conn().RemotePeer()).Trace("Sending Goodbye message to peer")
// Wait up to the response timeout for the peer to receive the goodbye

View File

@@ -295,11 +295,11 @@ func (s *Service) waitForChainStart() {
}
s.cfg.clock = clock
startTime := clock.GenesisTime()
-log.WithField("starttime", startTime).Debug("Received state initialized event")
+log.WithField("startTime", startTime).Debug("Received state initialized event")
ctxMap, err := ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
if err != nil {
-log.WithError(err).WithField("genesis_validator_root", clock.GenesisValidatorsRoot()).
+log.WithError(err).WithField("genesisValidatorRoot", clock.GenesisValidatorsRoot()).
Error("sync service failed to initialize context version map")
return
}
@@ -311,7 +311,7 @@ func (s *Service) waitForChainStart() {
if startTime.After(prysmTime.Now()) {
time.Sleep(prysmTime.Until(startTime))
}
-log.WithField("starttime", startTime).Debug("Chain started in sync service")
+log.WithField("startTime", startTime).Debug("Chain started in sync service")
s.markForChainStart()
} }

View File

@@ -202,7 +202,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
if r := recover(); r != nil {
tracing.AnnotateError(span, fmt.Errorf("panic occurred: %v", r))
log.WithField("error", r).
-WithField("recovered_at", "subscribeWithBase").
+WithField("recoveredAt", "subscribeWithBase").
WithField("stack", string(debug.Stack())).
Error("Panic occurred")
}
@@ -290,9 +290,9 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
fields := logrus.Fields{
"topic": topic,
"multiaddress": multiAddr(pid, s.cfg.p2p.Peers()),
-"peer id": pid.String(),
+"peerID": pid.String(),
"agent": agentString(pid, s.cfg.p2p.Host()),
-"gossip score": s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid),
+"gossipScore": s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid),
}
if features.Get().EnableFullSSZDataLogging {
fields["message"] = hexutil.Encode(msg.Data)
@@ -305,9 +305,9 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
log.WithError(err).WithFields(logrus.Fields{
"topic": topic,
"multiaddress": multiAddr(pid, s.cfg.p2p.Peers()),
-"peer id": pid.String(),
+"peerID": pid.String(),
"agent": agentString(pid, s.cfg.p2p.Host()),
-"gossip score": s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid),
+"gossipScore": s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid),
}).Debugf("Gossip message was ignored")
}
messageIgnoredValidationCounter.WithLabelValues(topic).Inc()

View File

@@ -310,7 +310,7 @@ func (bv *ROBlobVerifier) SidecarProposerExpected(ctx context.Context) (err erro
}
if idx != bv.blob.ProposerIndex() {
log.WithError(ErrSidecarUnexpectedProposer).
-WithFields(logging.BlobFields(bv.blob)).WithField("expected_proposer", idx).
+WithFields(logging.BlobFields(bv.blob)).WithField("expectedProposer", idx).
Debug("unexpected blob proposer")
return ErrSidecarUnexpectedProposer
}

View File

@@ -53,11 +53,11 @@ type SignatureData struct {
func (d SignatureData) logFields() log.Fields {
return log.Fields{
"root": fmt.Sprintf("%#x", d.Root),
-"parent_root": fmt.Sprintf("%#x", d.Parent),
+"parentRoot": fmt.Sprintf("%#x", d.Parent),
"signature": fmt.Sprintf("%#x", d.Signature),
"proposer": d.Proposer,
"slot": d.Slot,
}
}

View File

@@ -144,8 +144,8 @@ func checkIfWithdrawsAreInPool(ctx context.Context, client *beacon.Client, reque
if len(requestMap) != 0 {
for key, address := range requestMap {
log.WithFields(log.Fields{
-"validator_index": key,
+"validatorIndex": key,
-"execution_address:": address,
+"executionAddress:": address,
}).Warn("Set withdrawal address message not found in the node's operations pool.")
}
log.Warn("Please check before resubmitting. Set withdrawal address messages that were not found in the pool may have been already included into a block.")

View File

@@ -11,12 +11,12 @@ import (
// which can be passed to log.WithFields.
func BlobFields(blob blocks.ROBlob) logrus.Fields {
return logrus.Fields{
"slot": blob.Slot(),
-"proposer_index": blob.ProposerIndex(),
+"proposerIndex": blob.ProposerIndex(),
-"block_root": fmt.Sprintf("%#x", blob.BlockRoot()),
+"blockRoot": fmt.Sprintf("%#x", blob.BlockRoot()),
-"parent_root": fmt.Sprintf("%#x", blob.ParentRoot()),
+"parentRoot": fmt.Sprintf("%#x", blob.ParentRoot()),
-"kzg_commitment": fmt.Sprintf("%#x", blob.KzgCommitment),
+"kzgCommitment": fmt.Sprintf("%#x", blob.KzgCommitment),
"index": blob.Index,
}
}
@@ -24,9 +24,9 @@ func BlobFields(blob blocks.ROBlob) logrus.Fields {
// all other sidecars for the block.
func BlockFieldsFromBlob(blob blocks.ROBlob) logrus.Fields {
return logrus.Fields{
"slot": blob.Slot(),
-"proposer_index": blob.ProposerIndex(),
+"proposerIndex": blob.ProposerIndex(),
-"block_root": fmt.Sprintf("%#x", blob.BlockRoot()),
+"blockRoot": fmt.Sprintf("%#x", blob.BlockRoot()),
-"parent_root": fmt.Sprintf("%#x", blob.ParentRoot()),
+"parentRoot": fmt.Sprintf("%#x", blob.ParentRoot()),
}
}
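Helpers like BlobFields above keep the unified keys in one place; call sites attach the whole map at once, as the blob-verifier hunk earlier does with logging.BlobFields(bv.blob). A hedged sketch of that pattern with a hypothetical stand-in helper (the real one takes a blocks.ROBlob):

```go
package main

import "github.com/sirupsen/logrus"

// exampleBlobFields is a hypothetical stand-in for logging.BlobFields:
// a helper returning a logrus.Fields map with the unified camelCase keys.
func exampleBlobFields(slot, proposerIndex uint64, blockRoot string) logrus.Fields {
	return logrus.Fields{
		"slot":          slot,
		"proposerIndex": proposerIndex,
		"blockRoot":     blockRoot,
	}
}

func main() {
	logrus.SetLevel(logrus.DebugLevel)
	// Call sites attach the whole map at once instead of repeating keys.
	logrus.WithFields(exampleBlobFields(1, 2, "0x68656c6c6f2d")).Debug("example blob log")
}
```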

View File

@@ -185,9 +185,9 @@ func (node *BeaconNode) saveGenesis(ctx context.Context) (string, error) {
if err != nil {
return "", err
}
-log.WithField("fork_version", g.Fork().CurrentVersion).
+log.WithField("forkVersion", g.Fork().CurrentVersion).
-WithField("latest_block_header.root", fmt.Sprintf("%#x", lbhr)).
+WithField("latestBlockHeaderRoot", fmt.Sprintf("%#x", lbhr)).
-WithField("state_root", fmt.Sprintf("%#x", root)).
+WithField("stateRoot", fmt.Sprintf("%#x", root)).
Infof("BeaconState info")
genesisBytes, err := g.MarshalSSZ()

View File

@@ -290,9 +290,9 @@ func (node *LighthouseBeaconNode) saveGenesis(ctx context.Context, testNetDir st
if err != nil {
return err
}
-log.WithField("fork_version", g.Fork().CurrentVersion).
+log.WithField("forkVersion", g.Fork().CurrentVersion).
-WithField("latest_block_header.root", fmt.Sprintf("%#x", lbhr)).
+WithField("latestBlockHeaderRoot", fmt.Sprintf("%#x", lbhr)).
-WithField("state_root", fmt.Sprintf("%#x", root)).
+WithField("stateRoot", fmt.Sprintf("%#x", root)).
Infof("BeaconState info")
genesisBytes, err := g.MarshalSSZ()

View File

@@ -103,7 +103,7 @@ func feeRecipientIsPresent(_ *types.EvaluationContext, conns ...*grpc.ClientConn
continue
}
if len(payload.FeeRecipient) == 0 || hexutil.Encode(payload.FeeRecipient) == params.BeaconConfig().EthBurnAddressHex {
-log.WithField("proposer_index", bb.ProposerIndex).WithField("slot", bb.Slot).Error("fee recipient eval bug")
+log.WithField("proposerIndex", bb.ProposerIndex).WithField("slot", bb.Slot).Error("fee recipient eval bug")
return errors.New("fee recipient is not set")
}
@@ -132,8 +132,8 @@ func feeRecipientIsPresent(_ *types.EvaluationContext, conns ...*grpc.ClientConn
if !knownKey {
log.WithField("pubkey", pk).
WithField("slot", bb.Slot).
-WithField("proposer_index", bb.ProposerIndex).
+WithField("proposerIndex", bb.ProposerIndex).
-WithField("fee_recipient", fr.Hex()).
+WithField("feeRecipient", fr.Hex()).
Warn("unknown key observed, not a deterministically generated key")
return errors.New("unknown key observed, not a deterministically generated key")
}

View File

@@ -123,21 +123,21 @@ func compareHeads(clients map[string]pb.BeaconChainClient) {
func logHead(endpt string, head *pb.ChainHead) {
log.WithFields(
logrus.Fields{
-"HeadSlot": head.HeadSlot,
+"headSlot": head.HeadSlot,
-"HeadRoot": hex.EncodeToString(head.HeadBlockRoot),
+"headRoot": hex.EncodeToString(head.HeadBlockRoot),
-"JustifiedEpoch": head.JustifiedEpoch,
+"justifiedEpoch": head.JustifiedEpoch,
-"JustifiedRoot": hex.EncodeToString(head.JustifiedBlockRoot),
+"justifiedRoot": hex.EncodeToString(head.JustifiedBlockRoot),
-"FinalizedEpoch": head.FinalizedEpoch,
+"finalizedEpoch": head.FinalizedEpoch,
-"FinalizedRoot": hex.EncodeToString(head.FinalizedBlockRoot),
+"finalizedRoot": hex.EncodeToString(head.FinalizedBlockRoot),
}).Info("Head from beacon node ", endpt)
}
func logParticipation(endpt string, p *pb.ValidatorParticipation) {
log.WithFields(
logrus.Fields{
-"VotedEther": p.VotedEther,
+"votedEther": p.VotedEther,
-"TotalEther": p.EligibleEther,
+"totalEther": p.EligibleEther,
-"ParticipationRate": p.GlobalParticipationRate,
+"participationRate": p.GlobalParticipationRate,
}).Info("Participation rate from beacon node ", endpt)
}

View File

@@ -83,7 +83,7 @@ func zipKeystoresToOutputDir(keystoresToBackup []*keymanager.Keystore, outputDir
}
}
log.WithField(
-"backup-path", archivePath,
+"backupPath", archivePath,
).Infof("Successfully backed up %d accounts", len(keystoresToBackup))
return nil
}

View File

@@ -247,7 +247,7 @@ func OpenOrCreateNewWallet(cliCtx *cli.Context) (*Wallet, error) {
if err := w.SaveWallet(); err != nil {
return nil, errors.Wrap(err, "could not save wallet to disk")
}
-log.WithField("wallet-path", walletDir).Info(
+log.WithField("walletPath", walletDir).Info(
"Successfully created new wallet",
)
return w, nil

View File

@@ -36,7 +36,7 @@ func (acm *CLIManager) WalletCreate(ctx context.Context) (*wallet.Wallet, error)
if err != nil {
return nil, err
}
-log.WithField("--wallet-dir", acm.walletDir).Info(
+log.WithField("walletDir", acm.walletDir).Info(
"Successfully created wallet with ability to import keystores",
)
case keymanager.Derived:
@@ -50,7 +50,7 @@ func (acm *CLIManager) WalletCreate(ctx context.Context) (*wallet.Wallet, error)
); err != nil {
return nil, errors.Wrap(err, "could not initialize wallet")
}
-log.WithField("--wallet-dir", acm.walletDir).Info(
+log.WithField("walletDir", acm.walletDir).Info(
"Successfully created HD wallet from mnemonic and regenerated accounts",
)
case keymanager.Web3Signer:

View File

@@ -48,7 +48,7 @@ func (acm *CLIManager) WalletRecover(ctx context.Context) (*wallet.Wallet, error
if err := km.RecoverAccountsFromMnemonic(ctx, acm.mnemonic, acm.mnemonicLanguage, acm.mnemonic25thWord, acm.numAccounts); err != nil {
return nil, err
}
-log.WithField("wallet-path", w.AccountsDir()).Infof(
+log.WithField("walletPath", w.AccountsDir()).Infof(
"Successfully recovered HD wallet with %d accounts. Please use `accounts list` to view details for your accounts",
acm.numAccounts,
)

View File

@@ -271,8 +271,8 @@ func (c *ValidatorClient) initializeFromCLI(cliCtx *cli.Context, router *mux.Rou
c.wallet = w
// TODO(#9883) - Remove this when we have a better way to handle this.
log.WithFields(logrus.Fields{
"wallet": w.AccountsDir(),
-"keymanager-kind": w.KeymanagerKind().String(),
+"keymanagerKind": w.KeymanagerKind().String(),
}).Info("Opened validator wallet")
}
}