From 9d375969d19e70b0075acced92ee4613788ae56e Mon Sep 17 00:00:00 2001 From: Preston Van Loon Date: Fri, 5 Aug 2022 05:52:02 -0500 Subject: [PATCH] Enforce log.WithError(err) static analysis and fix all violations (#11163) * Use log.WithError static analysis from #11143 and fix all violations * Fix another log violation after pulling from develop * Update beacon-chain/sync/pending_blocks_queue.go Co-authored-by: Potuz * @potuz feedback * Copy paste fail * fix tests Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com> Co-authored-by: Potuz --- BUILD.bazel | 1 + beacon-chain/blockchain/metrics.go | 2 +- beacon-chain/blockchain/receive_block.go | 4 +- beacon-chain/blockchain/service.go | 12 +++--- .../cache/depositcache/pending_deposits.go | 4 +- beacon-chain/core/blocks/deposit.go | 2 +- beacon-chain/core/helpers/sync_committee.go | 4 +- beacon-chain/deterministic-genesis/service.go | 16 ++++---- beacon-chain/execution/deposit_test.go | 3 +- beacon-chain/execution/log_processing.go | 4 +- beacon-chain/execution/rpc_connection.go | 4 +- beacon-chain/execution/service.go | 2 +- beacon-chain/node/node.go | 2 +- beacon-chain/p2p/addr_factory.go | 2 +- beacon-chain/p2p/broadcaster.go | 2 +- beacon-chain/p2p/discovery.go | 6 +-- beacon-chain/p2p/gossip_scoring_params.go | 12 +++--- beacon-chain/p2p/log.go | 2 +- beacon-chain/p2p/options.go | 8 ++-- beacon-chain/p2p/service.go | 4 +- beacon-chain/p2p/utils.go | 2 +- beacon-chain/p2p/watch_peers.go | 2 +- .../rpc/prysm/v1alpha1/beacon/attestations.go | 2 +- .../v1alpha1/validator/proposer_deposits.go | 4 +- beacon-chain/rpc/service.go | 4 +- beacon-chain/sync/pending_blocks_queue.go | 6 +-- beacon-chain/sync/rpc.go | 4 +- beacon-chain/sync/rpc_goodbye.go | 2 +- beacon-chain/sync/rpc_status.go | 4 +- cmd/beacon-chain/db/db.go | 2 +- cmd/client-stats/main.go | 4 +- cmd/validator/accounts/accounts.go | 10 ++--- cmd/validator/db/db.go | 6 +-- cmd/validator/wallet/wallet.go | 6 +-- cmd/validator/web/web.go | 2 +- monitoring/prometheus/service.go | 4 +- nogo_config.json | 5 +++ runtime/debug/debug.go | 10 ++--- runtime/prereqs/prereq.go | 2 +- tools/benchmark-files-gen/BUILD.bazel | 1 + tools/benchmark-files-gen/main.go | 8 ++-- tools/bootnode/bootnode.go | 2 +- tools/enr-calculator/main.go | 6 +-- tools/exploredb/main.go | 40 +++++++++---------- tools/forkchecker/forkchecker.go | 2 +- tools/genesis-state-gen/BUILD.bazel | 1 + tools/genesis-state-gen/main.go | 24 +++++------ tools/gocovmerge/BUILD.bazel | 5 ++- tools/gocovmerge/main.go | 4 +- tools/interop/convert-keys/BUILD.bazel | 1 + tools/interop/convert-keys/main.go | 12 +++--- validator/client/aggregate.go | 10 ++--- validator/client/runner.go | 34 ++++++++-------- validator/client/service.go | 6 +-- validator/client/sync_committee.go | 14 +++---- validator/client/sync_committee_test.go | 3 +- .../remote-web3signer/internal/client.go | 2 +- validator/keymanager/remote/keymanager.go | 2 +- validator/rpc/intercepter.go | 6 ++- validator/rpc/server.go | 6 +-- validator/rpc/standard_api.go | 2 +- 61 files changed, 193 insertions(+), 175 deletions(-) diff --git a/BUILD.bazel b/BUILD.bazel index 25783b4568..77647da762 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -122,6 +122,7 @@ nogo( "//tools/analyzers/gocognit:go_default_library", "//tools/analyzers/ineffassign:go_default_library", "//tools/analyzers/interfacechecker:go_default_library", + "//tools/analyzers/logruswitherror:go_default_library", "//tools/analyzers/maligned:go_default_library", 
"//tools/analyzers/nop:go_default_library", "//tools/analyzers/properpermissions:go_default_library", diff --git a/beacon-chain/blockchain/metrics.go b/beacon-chain/blockchain/metrics.go index 1c96a44b61..76cb8be51d 100644 --- a/beacon-chain/blockchain/metrics.go +++ b/beacon-chain/blockchain/metrics.go @@ -198,7 +198,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt for i, validator := range postState.Validators() { bal, err := postState.BalanceAtIndex(types.ValidatorIndex(i)) if err != nil { - log.Errorf("Could not load validator balance: %v", err) + log.WithError(err).Error("Could not load validator balance") continue } if validator.Slashed { diff --git a/beacon-chain/blockchain/receive_block.go b/beacon-chain/blockchain/receive_block.go index 3dcdb1ac38..c4ff16d860 100644 --- a/beacon-chain/blockchain/receive_block.go +++ b/beacon-chain/blockchain/receive_block.go @@ -128,7 +128,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig // log.Fatalf will prevent defer from being called span.End() // Exit run time if the node failed to verify weak subjectivity checkpoint. - log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err) + log.WithError(err).Fatal("Could not verify weak subjectivity checkpoint") } return nil @@ -152,7 +152,7 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error { // Add block attestations to the fork choice pool to compute head. if err := s.cfg.AttPool.SaveBlockAttestations(b.Body().Attestations()); err != nil { - log.Errorf("Could not save block attestations for fork choice: %v", err) + log.WithError(err).Error("Could not save block attestations for fork choice") return nil } // Mark block exits as seen so we don't include same ones in future blocks. diff --git a/beacon-chain/blockchain/service.go b/beacon-chain/blockchain/service.go index 1c4a444e39..f15cbb941a 100644 --- a/beacon-chain/blockchain/service.go +++ b/beacon-chain/blockchain/service.go @@ -395,12 +395,12 @@ func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Ti preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState() initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data()) if err != nil { - log.Fatalf("Could not initialize beacon chain: %v", err) + log.WithError(err).Fatal("Could not initialize beacon chain") } // We start a counter to genesis, if needed. 
gRoot, err := initializedState.HashTreeRoot(s.ctx) if err != nil { - log.Fatalf("Could not hash tree root genesis state: %v", err) + log.WithError(err).Fatal("Could not hash tree root genesis state") } go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot) @@ -473,7 +473,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState) if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil { - log.Fatalf("Could not process genesis block for fork choice: %v", err) + log.WithError(err).Fatal("Could not process genesis block for fork choice") } s.cfg.ForkChoiceStore.SetOriginRoot(genesisBlkRoot) // Set genesis as fully validated @@ -483,7 +483,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix())) if err := s.setHead(genesisBlkRoot, genesisBlk, genesisState); err != nil { - log.Fatalf("Could not set head: %v", err) + log.WithError(err).Fatal("Could not set head") } return nil } @@ -508,11 +508,11 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d gState, err := db.GenesisState(ctx) if err != nil { - log.Fatalf("Could not retrieve genesis state: %v", err) + log.WithError(err).Fatal("Could not retrieve genesis state") } gRoot, err := gState.HashTreeRoot(ctx) if err != nil { - log.Fatalf("Could not hash tree root genesis state: %v", err) + log.WithError(err).Fatal("Could not hash tree root genesis state") } go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot) } diff --git a/beacon-chain/cache/depositcache/pending_deposits.go b/beacon-chain/cache/depositcache/pending_deposits.go index b383840d38..a5bde0d2b2 100644 --- a/beacon-chain/cache/depositcache/pending_deposits.go +++ b/beacon-chain/cache/depositcache/pending_deposits.go @@ -100,7 +100,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos depRoot, err := hash.HashProto(d) if err != nil { - log.Errorf("Could not remove deposit %v", err) + log.WithError(err).Error("Could not remove deposit") return } @@ -111,7 +111,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos for i, ctnr := range dc.pendingDeposits { h, err := hash.HashProto(ctnr.Deposit) if err != nil { - log.Errorf("Could not hash deposit %v", err) + log.WithError(err).Error("Could not hash deposit") continue } if h == depRoot { diff --git a/beacon-chain/core/blocks/deposit.go b/beacon-chain/core/blocks/deposit.go index 4ca7f68ddf..88b889adff 100644 --- a/beacon-chain/core/blocks/deposit.go +++ b/beacon-chain/core/blocks/deposit.go @@ -176,7 +176,7 @@ func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verif } if err := verifyDepositDataSigningRoot(deposit.Data, domain); err != nil { // Ignore this error as in the spec pseudo code. - log.Debugf("Skipping deposit: could not verify deposit data signature: %v", err) + log.WithError(err).Debug("Skipping deposit: could not verify deposit data signature") return beaconState, newValidator, nil } } diff --git a/beacon-chain/core/helpers/sync_committee.go b/beacon-chain/core/helpers/sync_committee.go index 4744c5525f..aeb47bcfbb 100644 --- a/beacon-chain/core/helpers/sync_committee.go +++ b/beacon-chain/core/helpers/sync_committee.go @@ -46,7 +46,7 @@ func IsCurrentPeriodSyncCommittee( // Fill in the cache on miss. 
go func() { if err := syncCommitteeCache.UpdatePositionsInCommittee(root, st); err != nil { - log.Errorf("Could not fill sync committee cache on miss: %v", err) + log.WithError(err).Error("Could not fill sync committee cache on miss") } }() @@ -110,7 +110,7 @@ func CurrentPeriodSyncSubcommitteeIndices( // Fill in the cache on miss. go func() { if err := syncCommitteeCache.UpdatePositionsInCommittee(root, st); err != nil { - log.Errorf("Could not fill sync committee cache on miss: %v", err) + log.WithError(err).Error("Could not fill sync committee cache on miss") } }() diff --git a/beacon-chain/deterministic-genesis/service.go b/beacon-chain/deterministic-genesis/service.go index d1ff093ed7..67c5be77de 100644 --- a/beacon-chain/deterministic-genesis/service.go +++ b/beacon-chain/deterministic-genesis/service.go @@ -63,18 +63,18 @@ func (s *Service) Start() { if s.cfg.GenesisPath != "" { data, err := os.ReadFile(s.cfg.GenesisPath) if err != nil { - log.Fatalf("Could not read pre-loaded state: %v", err) + log.WithError(err).Fatal("Could not read pre-loaded state") } genesisState := &ethpb.BeaconState{} if err := genesisState.UnmarshalSSZ(data); err != nil { - log.Fatalf("Could not unmarshal pre-loaded state: %v", err) + log.WithError(err).Fatal("Could not unmarshal pre-loaded state") } genesisTrie, err := v1.InitializeFromProto(genesisState) if err != nil { - log.Fatalf("Could not get state trie: %v", err) + log.WithError(err).Fatal("Could not get state trie") } if err := s.saveGenesisState(s.ctx, genesisTrie); err != nil { - log.Fatalf("Could not save interop genesis state %v", err) + log.WithError(err).Fatal("Could not save interop genesis state") } return } @@ -82,11 +82,11 @@ func (s *Service) Start() { // Save genesis state in db genesisState, _, err := interop.GenerateGenesisState(s.ctx, s.cfg.GenesisTime, s.cfg.NumValidators) if err != nil { - log.Fatalf("Could not generate interop genesis state: %v", err) + log.WithError(err).Fatal("Could not generate interop genesis state") } genesisTrie, err := v1.InitializeFromProto(genesisState) if err != nil { - log.Fatalf("Could not get state trie: %v", err) + log.WithError(err).Fatal("Could not get state trie") } if s.cfg.GenesisTime == 0 { // Generated genesis time; fetch it @@ -94,12 +94,12 @@ func (s *Service) Start() { } gRoot, err := genesisTrie.HashTreeRoot(s.ctx) if err != nil { - log.Fatalf("Could not hash tree root genesis state: %v", err) + log.WithError(err).Fatal("Could not hash tree root genesis state") } go slots.CountdownToGenesis(s.ctx, time.Unix(int64(s.cfg.GenesisTime), 0), s.cfg.NumValidators, gRoot) if err := s.saveGenesisState(s.ctx, genesisTrie); err != nil { - log.Fatalf("Could not save interop genesis state %v", err) + log.WithError(err).Fatal("Could not save interop genesis state") } } diff --git a/beacon-chain/execution/deposit_test.go b/beacon-chain/execution/deposit_test.go index 0b3786ad2a..7af0336a51 100644 --- a/beacon-chain/execution/deposit_test.go +++ b/beacon-chain/execution/deposit_test.go @@ -190,7 +190,8 @@ func TestProcessDeposit_InvalidSignature(t *testing.T) { err = web3Service.processDeposit(context.Background(), eth1Data, deposits[0]) require.NoError(t, err) - require.LogsContain(t, hook, "could not verify deposit data signature: could not convert bytes to signature") + require.LogsContain(t, hook, "could not verify deposit data signature") + require.LogsContain(t, hook, "could not convert bytes to signature") } func TestProcessDeposit_UnableToVerify(t *testing.T) { diff --git 
a/beacon-chain/execution/log_processing.go b/beacon-chain/execution/log_processing.go index d120f357db..5bc12458fd 100644 --- a/beacon-chain/execution/log_processing.go +++ b/beacon-chain/execution/log_processing.go @@ -184,7 +184,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethtypes.Lo DepositCount: uint64(len(s.chainStartData.ChainstartDeposits)), } if err := s.processDeposit(ctx, eth1Data, deposit); err != nil { - log.Errorf("Invalid deposit processed: %v", err) + log.WithError(err).Error("Invalid deposit processed") validData = false } } else { @@ -236,7 +236,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte, for i := range s.chainStartData.ChainstartDeposits { proof, err := s.depositTrie.MerkleProof(i) if err != nil { - log.Errorf("unable to generate deposit proof %v", err) + log.WithError(err).Error("unable to generate deposit proof") } s.chainStartData.ChainstartDeposits[i].Proof = proof } diff --git a/beacon-chain/execution/rpc_connection.go b/beacon-chain/execution/rpc_connection.go index 559135538b..2eb10bd582 100644 --- a/beacon-chain/execution/rpc_connection.go +++ b/beacon-chain/execution/rpc_connection.go @@ -51,7 +51,7 @@ func (s *Service) pollConnectionStatus(ctx context.Context) { logCounter := 0 errorLogger := func(err error, msg string) { if logCounter > logThreshold { - log.Errorf("%s: %v", msg, err) + log.WithError(err).Error(msg) logCounter = 0 } logCounter++ @@ -114,7 +114,7 @@ func (s *Service) checkDefaultEndpoint(ctx context.Context) { currClient := s.rpcClient if err := s.setupExecutionClientConnections(ctx, primaryEndpoint); err != nil { - log.Debugf("Primary endpoint not ready: %v", err) + log.WithError(err).Debug("Primary endpoint not ready") return } // Close previous client, if connection was successful. 
diff --git a/beacon-chain/execution/service.go b/beacon-chain/execution/service.go index 16723401fe..9dc78d1276 100644 --- a/beacon-chain/execution/service.go +++ b/beacon-chain/execution/service.go @@ -552,7 +552,7 @@ func (s *Service) initPOWService() { logCounter := 0 errorLogger := func(err error, msg string) { if logCounter > logThreshold { - log.Errorf("%s: %v", msg, err) + log.WithError(err).Error(msg) logCounter = 0 } logCounter++ diff --git a/beacon-chain/node/node.go b/beacon-chain/node/node.go index 25560637d9..b4c6b08ff0 100644 --- a/beacon-chain/node/node.go +++ b/beacon-chain/node/node.go @@ -345,7 +345,7 @@ func (b *BeaconNode) Close() { log.Info("Stopping beacon node") b.services.StopAll() if err := b.db.Close(); err != nil { - log.Errorf("Failed to close database: %v", err) + log.WithError(err).Error("Failed to close database") } b.collector.unregister() b.cancel() diff --git a/beacon-chain/p2p/addr_factory.go b/beacon-chain/p2p/addr_factory.go index e60cc2bcad..2535b1a6ad 100644 --- a/beacon-chain/p2p/addr_factory.go +++ b/beacon-chain/p2p/addr_factory.go @@ -23,7 +23,7 @@ func withRelayAddrs(relay string) config.AddrsFactory { } relayAddr, err := ma.NewMultiaddr(relay + "/p2p-circuit" + a.String()) if err != nil { - log.Errorf("Failed to create multiaddress for relay node: %v", err) + log.WithError(err).Error("Failed to create multiaddress for relay node") } else { relayAddrs = append(relayAddrs, relayAddr) } diff --git a/beacon-chain/p2p/broadcaster.go b/beacon-chain/p2p/broadcaster.go index 4915b31d3a..d617aa950a 100644 --- a/beacon-chain/p2p/broadcaster.go +++ b/beacon-chain/p2p/broadcaster.go @@ -186,7 +186,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs // In the event our sync message is outdated and beyond the // acceptable threshold, we exit early and do not broadcast it. if err := altair.ValidateSyncMessageTime(sMsg.Slot, s.genesisTime, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil { - log.Warnf("Sync Committee Message is too old to broadcast, discarding it. 
%v", err) + log.WithError(err).Warn("Sync Committee Message is too old to broadcast, discarding it") return } diff --git a/beacon-chain/p2p/discovery.go b/beacon-chain/p2p/discovery.go index d77edce4a1..d42a1439c3 100644 --- a/beacon-chain/p2p/discovery.go +++ b/beacon-chain/p2p/discovery.go @@ -49,7 +49,7 @@ func (s *Service) RefreshENR() { } currentBitV, err := attBitvector(s.dv5Listener.Self().Record()) if err != nil { - log.Errorf("Could not retrieve att bitfield: %v", err) + log.WithError(err).Error("Could not retrieve att bitfield") return } // Compare current epoch with our fork epochs @@ -67,7 +67,7 @@ func (s *Service) RefreshENR() { } currentBitS, err := syncBitvector(s.dv5Listener.Self().Record()) if err != nil { - log.Errorf("Could not retrieve sync bitfield: %v", err) + log.WithError(err).Error("Could not retrieve sync bitfield") return } if bytes.Equal(bitV, currentBitV) && bytes.Equal(bitS, currentBitS) && @@ -356,7 +356,7 @@ func parseGenericAddrs(addrs []string) (enodeString, multiAddrString []string) { multiAddrString = append(multiAddrString, addr) continue } - log.Errorf("Invalid address of %s provided: %v", addr, err) + log.WithError(err).Errorf("Invalid address of %s provided", addr) } return enodeString, multiAddrString } diff --git a/beacon-chain/p2p/gossip_scoring_params.go b/beacon-chain/p2p/gossip_scoring_params.go index 8ce3e692f3..743c32a292 100644 --- a/beacon-chain/p2p/gossip_scoring_params.go +++ b/beacon-chain/p2p/gossip_scoring_params.go @@ -196,13 +196,13 @@ func defaultAggregateTopicParams(activeValidators uint64) *pubsub.TopicScorePara aggPerSlot := aggregatorsPerSlot(activeValidators) firstMessageCap, err := decayLimit(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot*2/gossipSubD)) if err != nil { - log.Warnf("skipping initializing topic scoring: %v", err) + log.WithError(err).Warn("skipping initializing topic scoring") return nil } firstMessageWeight := maxFirstDeliveryScore / firstMessageCap meshThreshold, err := decayThreshold(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot)/dampeningFactor) if err != nil { - log.Warnf("skipping initializing topic scoring: %v", err) + log.WithError(err).Warn("skipping initializing topic scoring") return nil } meshWeight := -scoreByWeight(aggregateWeight, meshThreshold) @@ -238,13 +238,13 @@ func defaultSyncContributionTopicParams() *pubsub.TopicScoreParams { aggPerSlot := params.BeaconConfig().SyncCommitteeSubnetCount * params.BeaconConfig().TargetAggregatorsPerSyncSubcommittee firstMessageCap, err := decayLimit(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot*2/gossipSubD)) if err != nil { - log.Warnf("skipping initializing topic scoring: %v", err) + log.WithError(err).Warn("skipping initializing topic scoring") return nil } firstMessageWeight := maxFirstDeliveryScore / firstMessageCap meshThreshold, err := decayThreshold(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot)/dampeningFactor) if err != nil { - log.Warnf("skipping initializing topic scoring: %v", err) + log.WithError(err).Warn("skipping initializing topic scoring") return nil } meshWeight := -scoreByWeight(syncContributionWeight, meshThreshold) @@ -306,14 +306,14 @@ func defaultAggregateSubnetTopicParams(activeValidators uint64) *pubsub.TopicSco // Determine expected first deliveries based on the message rate. 
firstMessageCap, err := decayLimit(scoreDecay(firstDecay*oneEpochDuration()), float64(rate)) if err != nil { - log.Warnf("skipping initializing topic scoring: %v", err) + log.WithError(err).Warn("skipping initializing topic scoring") return nil } firstMessageWeight := maxFirstDeliveryScore / firstMessageCap // Determine expected mesh deliveries based on message rate applied with a dampening factor. meshThreshold, err := decayThreshold(scoreDecay(meshDecay*oneEpochDuration()), float64(numPerSlot)/dampeningFactor) if err != nil { - log.Warnf("skipping initializing topic scoring: %v", err) + log.WithError(err).Warn("skipping initializing topic scoring") return nil } meshWeight := -scoreByWeight(topicWeight, meshThreshold) diff --git a/beacon-chain/p2p/log.go b/beacon-chain/p2p/log.go index 8ca52b080c..b29ab5f16c 100644 --- a/beacon-chain/p2p/log.go +++ b/beacon-chain/p2p/log.go @@ -31,7 +31,7 @@ func logExternalIPAddr(id peer.ID, addr string, port uint) { if addr != "" { multiAddr, err := multiAddressBuilder(addr, port) if err != nil { - log.Errorf("Could not create multiaddress: %v", err) + log.WithError(err).Error("Could not create multiaddress") return } log.WithField( diff --git a/beacon-chain/p2p/options.go b/beacon-chain/p2p/options.go index 54edb82958..d31c1e2d26 100644 --- a/beacon-chain/p2p/options.go +++ b/beacon-chain/p2p/options.go @@ -21,7 +21,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt cfg := s.cfg listen, err := multiAddressBuilder(ip.String(), cfg.TCPPort) if err != nil { - log.Fatalf("Failed to p2p listen: %v", err) + log.WithError(err).Fatal("Failed to p2p listen") } if cfg.LocalIP != "" { if net.ParseIP(cfg.LocalIP) == nil { @@ -29,16 +29,16 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt } listen, err = multiAddressBuilder(cfg.LocalIP, cfg.TCPPort) if err != nil { - log.Fatalf("Failed to p2p listen: %v", err) + log.WithError(err).Fatal("Failed to p2p listen") } } ifaceKey, err := ecdsaprysm.ConvertToInterfacePrivkey(priKey) if err != nil { - log.Fatalf("Failed to retrieve private key: %v", err) + log.WithError(err).Fatal("Failed to retrieve private key") } id, err := peer.IDFromPublicKey(ifaceKey.GetPublic()) if err != nil { - log.Fatalf("Failed to retrieve peer id: %v", err) + log.WithError(err).Fatal("Failed to retrieve peer id") } log.Infof("Running node with peer id of %s ", id.String()) diff --git a/beacon-chain/p2p/service.go b/beacon-chain/p2p/service.go index c7cda92c25..7a2aa8eb8d 100644 --- a/beacon-chain/p2p/service.go +++ b/beacon-chain/p2p/service.go @@ -226,7 +226,7 @@ func (s *Service) Start() { if len(s.cfg.StaticPeers) > 0 { addrs, err := peersFromStringAddrs(s.cfg.StaticPeers) if err != nil { - log.Errorf("Could not connect to static peer: %v", err) + log.WithError(err).Error("Could not connect to static peer") } s.connectWithAllPeers(addrs) } @@ -437,7 +437,7 @@ func (s *Service) awaitStateInitialized() { func (s *Service) connectWithAllPeers(multiAddrs []multiaddr.Multiaddr) { addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...) 
if err != nil { - log.Errorf("Could not convert to peer address info's from multiaddresses: %v", err) + log.WithError(err).Error("Could not convert to peer address info's from multiaddresses") return } for _, info := range addrInfos { diff --git a/beacon-chain/p2p/utils.go b/beacon-chain/p2p/utils.go index 3c21c1396a..db327b75ec 100644 --- a/beacon-chain/p2p/utils.go +++ b/beacon-chain/p2p/utils.go @@ -133,7 +133,7 @@ func metaDataFromConfig(cfg *Config) (metadata.Metadata, error) { func ipAddr() net.IP { ip, err := network.ExternalIP() if err != nil { - log.Fatalf("Could not get IPv4 address: %v", err) + log.WithError(err).Fatal("Could not get IPv4 address") } return net.ParseIP(ip) } diff --git a/beacon-chain/p2p/watch_peers.go b/beacon-chain/p2p/watch_peers.go index 1cb783c941..ff7fcebede 100644 --- a/beacon-chain/p2p/watch_peers.go +++ b/beacon-chain/p2p/watch_peers.go @@ -19,7 +19,7 @@ func ensurePeerConnections(ctx context.Context, h host.Host, peers ...string) { } peerInfo, err := MakePeer(p) if err != nil { - log.Errorf("Could not make peer: %v", err) + log.WithError(err).Error("Could not make peer") continue } diff --git a/beacon-chain/rpc/prysm/v1alpha1/beacon/attestations.go b/beacon-chain/rpc/prysm/v1alpha1/beacon/attestations.go index 83ffb8533a..e08dce07a4 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/beacon/attestations.go +++ b/beacon-chain/rpc/prysm/v1alpha1/beacon/attestations.go @@ -365,7 +365,7 @@ func (bs *Server) collectReceivedAttestations(ctx context.Context) { case att := <-bs.ReceivedAttestationsBuffer: attDataRoot, err := att.Data.HashTreeRoot() if err != nil { - log.Errorf("Could not hash tree root attestation data: %v", err) + log.WithError(err).Error("Could not hash tree root attestation data") continue } attsByRoot[attDataRoot] = append(attsByRoot[attDataRoot], att) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go index bfad5ff9a5..3fc7e0d102 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go @@ -160,7 +160,7 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1 valid, err := validateDepositTrie(depositTrie, canonicalEth1Data) // Log a warning here, as the cached trie is invalid. if !valid { - log.Warnf("Cached deposit trie is invalid, rebuilding it now: %v", err) + log.WithError(err).Warn("Cached deposit trie is invalid, rebuilding it now") return vs.rebuildDepositTrie(ctx, canonicalEth1Data, canonicalEth1DataHeight) } @@ -190,7 +190,7 @@ func (vs *Server) rebuildDepositTrie(ctx context.Context, canonicalEth1Data *eth valid, err := validateDepositTrie(depositTrie, canonicalEth1Data) // Log an error here, as even with rebuilding the trie, it is still invalid. 
if !valid { - log.Errorf("Rebuilt deposit trie is invalid: %v", err) + log.WithError(err).Error("Rebuilt deposit trie is invalid") } return depositTrie, nil } diff --git a/beacon-chain/rpc/service.go b/beacon-chain/rpc/service.go index 6ac5fc81e0..098e81a6fa 100644 --- a/beacon-chain/rpc/service.go +++ b/beacon-chain/rpc/service.go @@ -132,7 +132,7 @@ func NewService(ctx context.Context, cfg *Config) *Service { address := fmt.Sprintf("%s:%s", s.cfg.Host, s.cfg.Port) lis, err := net.Listen("tcp", address) if err != nil { - log.Errorf("Could not listen to port in Start() %s: %v", address, err) + log.WithError(err).Errorf("Could not listen to port in Start() %s", address) } s.listener = lis log.WithField("address", address).Info("gRPC server listening on port") @@ -364,7 +364,7 @@ func (s *Service) Start() { go func() { if s.listener != nil { if err := s.grpcServer.Serve(s.listener); err != nil { - log.Errorf("Could not serve gRPC: %v", err) + log.WithError(err).Errorf("Could not serve gRPC") } } }() diff --git a/beacon-chain/sync/pending_blocks_queue.go b/beacon-chain/sync/pending_blocks_queue.go index 554dc05d84..7eb583cc40 100644 --- a/beacon-chain/sync/pending_blocks_queue.go +++ b/beacon-chain/sync/pending_blocks_queue.go @@ -150,7 +150,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error { switch { case errors.Is(ErrOptimisticParent, err): // Ok to continue process block with parent that is an optimistic candidate. case err != nil: - log.Debugf("Could not validate block from slot %d: %v", b.Block().Slot(), err) + log.WithError(err).WithField("slot", b.Block().Slot()).Debug("Could not validate block") s.setBadBlock(ctx, blkRoot) tracing.AnnotateError(span, err) span.End() @@ -167,7 +167,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error { s.setBadBlock(ctx, blkRoot) } } - log.Debugf("Could not process block from slot %d: %v", b.Block().Slot(), err) + log.WithError(err).WithField("slot", b.Block().Slot()).Debug("Could not process block") // In the next iteration of the queue, this block will be removed from // the pending queue as it has been marked as a 'bad' block. @@ -258,7 +258,7 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra } if err := s.sendRecentBeaconBlocksRequest(ctx, &req, pid); err != nil { tracing.AnnotateError(span, err) - log.Debugf("Could not send recent block request: %v", err) + log.WithError(err).Debug("Could not send recent block request") } newRoots := make([][32]byte, 0, len(roots)) s.pendingQueueLock.RLock() diff --git a/beacon-chain/sync/rpc.go b/beacon-chain/sync/rpc.go index b4751ccbf1..1f3a326f27 100644 --- a/beacon-chain/sync/rpc.go +++ b/beacon-chain/sync/rpc.go @@ -138,13 +138,13 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) { // Check before hand that peer is valid. if s.cfg.p2p.Peers().IsBad(stream.Conn().RemotePeer()) { if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeBanned, stream.Conn().RemotePeer()); err != nil { - log.Debugf("Could not disconnect from peer: %v", err) + log.WithError(err).Debug("Could not disconnect from peer") } return } // Validate request according to peer limits. 
if err := s.rateLimiter.validateRawRpcRequest(stream); err != nil { - log.Debugf("Could not validate rpc request from peer: %v", err) + log.WithError(err).Debug("Could not validate rpc request from peer") return } s.rateLimiter.addRawStream(stream) diff --git a/beacon-chain/sync/rpc_goodbye.go b/beacon-chain/sync/rpc_goodbye.go index d0bff0c235..9b04224a7e 100644 --- a/beacon-chain/sync/rpc_goodbye.go +++ b/beacon-chain/sync/rpc_goodbye.go @@ -64,7 +64,7 @@ func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID) { goodbyeCode = p2ptypes.GoodbyeCodeBanned } if err := s.sendGoodByeAndDisconnect(ctx, goodbyeCode, id); err != nil { - log.Debugf("Error when disconnecting with bad peer: %v", err) + log.WithError(err).Debug("Error when disconnecting with bad peer") } } diff --git a/beacon-chain/sync/rpc_status.go b/beacon-chain/sync/rpc_status.go index ca704efc3f..de180088a2 100644 --- a/beacon-chain/sync/rpc_status.go +++ b/beacon-chain/sync/rpc_status.go @@ -40,7 +40,7 @@ func (s *Service) maintainPeerStatuses() { if s.cfg.p2p.Host().Network().Connectedness(id) != network.Connected { s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnecting) if err := s.cfg.p2p.Disconnect(id); err != nil { - log.Debugf("Error when disconnecting with peer: %v", err) + log.WithError(err).Debug("Error when disconnecting with peer") } s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnected) return @@ -100,7 +100,7 @@ func (s *Service) resyncIfBehind() { numberOfTimesResyncedCounter.Inc() s.clearPendingSlots() if err := s.cfg.initialSync.Resync(); err != nil { - log.Errorf("Could not resync chain: %v", err) + log.WithError(err).Errorf("Could not resync chain") } } } diff --git a/cmd/beacon-chain/db/db.go b/cmd/beacon-chain/db/db.go index 50348cabf7..35fd8438b0 100644 --- a/cmd/beacon-chain/db/db.go +++ b/cmd/beacon-chain/db/db.go @@ -26,7 +26,7 @@ var Commands = &cli.Command{ Before: tos.VerifyTosAcceptedOrPrompt, Action: func(cliCtx *cli.Context) error { if err := beacondb.Restore(cliCtx); err != nil { - log.Fatalf("Could not restore database: %v", err) + log.WithError(err).Fatal("Could not restore database") } return nil }, diff --git a/cmd/client-stats/main.go b/cmd/client-stats/main.go index f7a1aad8c4..6a11e5f494 100644 --- a/cmd/client-stats/main.go +++ b/cmd/client-stats/main.go @@ -130,12 +130,12 @@ func run(ctx *cli.Context) error { for _, s := range scrapers { r, err := s.Scrape() if err != nil { - log.Errorf("Scraper error: %s", err) + log.WithError(err).Error("Scraper error") continue } err = upd.Update(r) if err != nil { - log.Errorf("client-stats collector error: %s", err) + log.WithError(err).Error("client-stats collector error") continue } } diff --git a/cmd/validator/accounts/accounts.go b/cmd/validator/accounts/accounts.go index 5df0f3de48..e4ebd24355 100644 --- a/cmd/validator/accounts/accounts.go +++ b/cmd/validator/accounts/accounts.go @@ -43,7 +43,7 @@ var Commands = &cli.Command{ }, Action: func(cliCtx *cli.Context) error { if err := accountsDelete(cliCtx); err != nil { - log.Fatalf("Could not delete account: %v", err) + log.WithError(err).Fatal("Could not delete account") } return nil }, @@ -80,7 +80,7 @@ var Commands = &cli.Command{ }, Action: func(cliCtx *cli.Context) error { if err := accountsList(cliCtx); err != nil { - log.Fatalf("Could not list accounts: %v", err) + log.WithError(err).Fatal("Could not list accounts") } return nil }, @@ -114,7 +114,7 @@ var Commands = &cli.Command{ }, Action: func(cliCtx *cli.Context) error { if err := accountsBackup(cliCtx); 
err != nil { - log.Fatalf("Could not backup accounts: %v", err) + log.WithError(err).Fatal("Could not backup accounts") } return nil }, @@ -145,7 +145,7 @@ var Commands = &cli.Command{ }, Action: func(cliCtx *cli.Context) error { if err := accountsImport(cliCtx); err != nil { - log.Fatalf("Could not import accounts: %v", err) + log.WithError(err).Fatal("Could not import accounts") } return nil }, @@ -182,7 +182,7 @@ var Commands = &cli.Command{ }, Action: func(cliCtx *cli.Context) error { if err := accountsExit(cliCtx, os.Stdin); err != nil { - log.Fatalf("Could not perform voluntary exit: %v", err) + log.WithError(err).Fatal("Could not perform voluntary exit") } return nil }, diff --git a/cmd/validator/db/db.go b/cmd/validator/db/db.go index 7bfad0a51b..d7af9a7d83 100644 --- a/cmd/validator/db/db.go +++ b/cmd/validator/db/db.go @@ -26,7 +26,7 @@ var Commands = &cli.Command{ Before: tos.VerifyTosAcceptedOrPrompt, Action: func(cliCtx *cli.Context) error { if err := validatordb.Restore(cliCtx); err != nil { - log.Fatalf("Could not restore database: %v", err) + log.WithError(err).Fatal("Could not restore database") } return nil }, @@ -45,7 +45,7 @@ var Commands = &cli.Command{ Before: tos.VerifyTosAcceptedOrPrompt, Action: func(cliCtx *cli.Context) error { if err := validatordb.MigrateUp(cliCtx); err != nil { - log.Fatalf("Could not run database migrations: %v", err) + log.WithError(err).Fatal("Could not run database migrations") } return nil }, @@ -59,7 +59,7 @@ var Commands = &cli.Command{ Before: tos.VerifyTosAcceptedOrPrompt, Action: func(cliCtx *cli.Context) error { if err := validatordb.MigrateDown(cliCtx); err != nil { - log.Fatalf("Could not run database migrations: %v", err) + log.WithError(err).Fatal("Could not run database migrations") } return nil }, diff --git a/cmd/validator/wallet/wallet.go b/cmd/validator/wallet/wallet.go index d84d3298c3..bded440d6d 100644 --- a/cmd/validator/wallet/wallet.go +++ b/cmd/validator/wallet/wallet.go @@ -50,7 +50,7 @@ var Commands = &cli.Command{ return err } if _, err := accounts.CreateAndSaveWalletCli(cliCtx); err != nil { - log.Fatalf("Could not create a wallet: %v", err) + log.WithError(err).Fatal("Could not create a wallet") } return nil }, @@ -83,7 +83,7 @@ var Commands = &cli.Command{ }, Action: func(cliCtx *cli.Context) error { if err := remoteWalletEdit(cliCtx); err != nil { - log.Fatalf("Could not edit wallet configuration: %v", err) + log.WithError(err).Fatal("Could not edit wallet configuration") } return nil }, @@ -115,7 +115,7 @@ var Commands = &cli.Command{ return err } if err := accounts.RecoverWalletCli(cliCtx); err != nil { - log.Fatalf("Could not recover wallet: %v", err) + log.WithError(err).Fatal("Could not recover wallet") } return nil }, diff --git a/cmd/validator/web/web.go b/cmd/validator/web/web.go index 0d9540f0a9..04aa12a038 100644 --- a/cmd/validator/web/web.go +++ b/cmd/validator/web/web.go @@ -44,7 +44,7 @@ var Commands = &cli.Command{ gatewayPort := cliCtx.Int(flags.GRPCGatewayPort.Name) validatorWebAddr := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort) if err := rpc.CreateAuthToken(walletDirPath, validatorWebAddr); err != nil { - log.Fatalf("Could not create web auth token: %v", err) + log.WithError(err).Fatal("Could not create web auth token") } return nil }, diff --git a/monitoring/prometheus/service.go b/monitoring/prometheus/service.go index 478f8597a3..5b456dcade 100644 --- a/monitoring/prometheus/service.go +++ b/monitoring/prometheus/service.go @@ -109,7 +109,7 @@ func (s *Service) healthzHandler(w 
http.ResponseWriter, r *http.Request) { } if err := writeResponse(w, r, response); err != nil { - log.Errorf("Error writing response: %v", err) + log.WithError(err).Error("Error writing response") } } @@ -139,7 +139,7 @@ func (s *Service) Start() { log.WithField("address", s.server.Addr).Debug("Starting prometheus service") err := s.server.ListenAndServe() if err != nil && err != http.ErrServerClosed { - log.Errorf("Could not listen to host:port :%s: %v", s.server.Addr, err) + log.WithError(err).Errorf("Could not listen to host:port :%s", s.server.Addr) s.failStatus = err } } diff --git a/nogo_config.json b/nogo_config.json index b4261401e0..f4c7fe52cc 100644 --- a/nogo_config.json +++ b/nogo_config.json @@ -92,6 +92,11 @@ ".*/.*_test\\.go": "Tests are OK to ignore this check for" } }, + "logruswitherror": { + "exclude_files": { + "external/.*": "Third party code" + } + }, "cryptorand": { "only_files": { "beacon-chain/.*": "", diff --git a/runtime/debug/debug.go b/runtime/debug/debug.go index 528faa43d0..368f358516 100644 --- a/runtime/debug/debug.go +++ b/runtime/debug/debug.go @@ -141,7 +141,7 @@ func (h *HandlerT) StartCPUProfile(file string) error { } if err := pprof.StartCPUProfile(f); err != nil { if err := f.Close(); err != nil { - log.Errorf("Failed to close file: %v", err) + log.WithError(err).Error("Failed to close file") } return err } @@ -191,7 +191,7 @@ func (h *HandlerT) StartGoTrace(file string) error { } if err := trace.Start(f); err != nil { if err := f.Close(); err != nil { - log.Errorf("Failed to close file: %v", err) + log.WithError(err).Error("Failed to close file") } return err } @@ -270,7 +270,7 @@ func (*HandlerT) WriteMemProfile(file string) error { func (*HandlerT) Stacks() string { buf := new(bytes.Buffer) if err := pprof.Lookup("goroutine").WriteTo(buf, 2); err != nil { - log.Errorf("Failed to write pprof goroutine stacks: %v", err) + log.WithError(err).Error("Failed to write pprof goroutine stacks") } return buf.String() } @@ -365,12 +365,12 @@ func startPProf(address string) { func Exit(ctx *cli.Context) { if traceFile := ctx.String(TraceFlag.Name); traceFile != "" { if err := Handler.StopGoTrace(); err != nil { - log.Errorf("Failed to stop go tracing: %v", err) + log.WithError(err).Error("Failed to stop go tracing") } } if cpuFile := ctx.String(CPUProfileFlag.Name); cpuFile != "" { if err := Handler.StopCPUProfile(); err != nil { - log.Errorf("Failed to stop CPU profiling: %v", err) + log.WithError(err).Error("Failed to stop CPU profiling") } } } diff --git a/runtime/prereqs/prereq.go b/runtime/prereqs/prereq.go index e8622888f6..b50d01430e 100644 --- a/runtime/prereqs/prereq.go +++ b/runtime/prereqs/prereq.go @@ -98,7 +98,7 @@ func meetsMinPlatformReqs(ctx context.Context) (bool, error) { func WarnIfPlatformNotSupported(ctx context.Context) { supported, err := meetsMinPlatformReqs(ctx) if err != nil { - log.Warnf("Failed to detect host platform: %v", err) + log.WithError(err).Warn("Failed to detect host platform") return } if !supported { diff --git a/tools/benchmark-files-gen/BUILD.bazel b/tools/benchmark-files-gen/BUILD.bazel index aa99381ac8..7f27067007 100644 --- a/tools/benchmark-files-gen/BUILD.bazel +++ b/tools/benchmark-files-gen/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "//testing/benchmark:go_default_library", "//testing/util:go_default_library", "@com_github_pkg_errors//:go_default_library", + "@com_github_sirupsen_logrus//:go_default_library", ], ) diff --git a/tools/benchmark-files-gen/main.go b/tools/benchmark-files-gen/main.go index 
d68ba2769a..b8727396e7 100644 --- a/tools/benchmark-files-gen/main.go +++ b/tools/benchmark-files-gen/main.go @@ -3,7 +3,6 @@ package main import ( "context" "flag" - "log" "os" "path" @@ -22,6 +21,7 @@ import ( "github.com/prysmaticlabs/prysm/runtime/interop" "github.com/prysmaticlabs/prysm/testing/benchmark" "github.com/prysmaticlabs/prysm/testing/util" + log "github.com/sirupsen/logrus" ) var ( @@ -60,15 +60,15 @@ func main() { log.Println("Generating genesis state") // Generating this for the 2 following states. if err := generateGenesisBeaconState(); err != nil { - log.Fatalf("Could not generate genesis state: %v", err) + log.WithError(err).Fatal("Could not generate genesis state") } log.Println("Generating full block and state after 1 skipped epoch") if err := generateMarshalledFullStateAndBlock(); err != nil { - log.Fatalf("Could not generate full state and block: %v", err) + log.WithError(err).Fatal("Could not generate full state and block") } log.Println("Generating state after 2 fully attested epochs") if err := generate2FullEpochState(); err != nil { - log.Fatalf("Could not generate 2 full epoch state: %v", err) + log.WithError(err).Fatal("Could not generate 2 full epoch state") } // Removing the genesis state SSZ since its 10MB large and no longer needed. if err := os.Remove(path.Join(*outputDir, benchmark.GenesisFileName)); err != nil { diff --git a/tools/bootnode/bootnode.go b/tools/bootnode/bootnode.go index 2ef9df1e40..2899ce177d 100644 --- a/tools/bootnode/bootnode.go +++ b/tools/bootnode/bootnode.go @@ -115,7 +115,7 @@ func main() { mux.HandleFunc("/p2p", handler.httpHandler) if err := http.ListenAndServe(fmt.Sprintf(":%d", *metricsPort), mux); err != nil { - log.Fatalf("Failed to start server %v", err) + log.WithError(err).Fatal("Failed to start server") } // Update metrics once per slot. diff --git a/tools/enr-calculator/main.go b/tools/enr-calculator/main.go index fb4cdce0a1..16f65ea4d3 100644 --- a/tools/enr-calculator/main.go +++ b/tools/enr-calculator/main.go @@ -44,17 +44,17 @@ func main() { } if net.ParseIP(*ipAddr).To4() == nil { - log.Fatalf("Invalid ipv4 address given: %v\n", err) + log.WithField("address", *ipAddr).Fatal("Invalid ipv4 address given") } if *udpPort == 0 { - log.Fatalf("Invalid udp port given: %v\n", err) + log.WithField("port", *udpPort).Fatal("Invalid udp port given") return } db, err := enode.OpenDB("") if err != nil { - log.Fatalf("Could not open node's peer database: %v\n", err) + log.WithError(err).Fatal("Could not open node's peer database") return } defer db.Close() diff --git a/tools/exploredb/main.go b/tools/exploredb/main.go index 8537b903c3..79febe140d 100644 --- a/tools/exploredb/main.go +++ b/tools/exploredb/main.go @@ -87,7 +87,7 @@ func main() { // check if the database file is present. 
dbNameWithPath := filepath.Join(*datadir, *dbName) if _, err := os.Stat(dbNameWithPath); os.IsNotExist(err) { - log.Fatalf("could not locate database file : %s, %v", dbNameWithPath, err) + log.WithError(err).WithField("path", dbNameWithPath).Fatal("could not locate database file") } switch *command { @@ -104,7 +104,7 @@ func main() { case "migration-check": destDbNameWithPath := filepath.Join(*destDatadir, *dbName) if _, err := os.Stat(destDbNameWithPath); os.IsNotExist(err) { - log.Fatalf("could not locate destination database file : %s, %v", destDbNameWithPath, err) + log.WithError(err).WithField("path", destDbNameWithPath).Fatal("could not locate database file") } switch *migrationName { case "validator-entries": @@ -133,14 +133,14 @@ func printBucketContents(dbNameWithPath string, rowLimit uint64, bucketName stri dbDirectory := filepath.Dir(dbNameWithPath) db, openErr := kv.NewKVStore(context.Background(), dbDirectory, &kv.Config{}) if openErr != nil { - log.Fatalf("could not open db, %v", openErr) + log.WithError(openErr).Fatal("could not open db") } // don't forget to close it when ejecting out of this function. defer func() { closeErr := db.Close() if closeErr != nil { - log.Fatalf("could not close db, %v", closeErr) + log.WithError(closeErr).Fatal("could not close db") } }() @@ -166,14 +166,14 @@ func readBucketStat(dbNameWithPath string, statsC chan<- *bucketStat) { // open the raw database file. If the file is busy, then exit. db, openErr := bolt.Open(dbNameWithPath, 0600, &bolt.Options{Timeout: 1 * time.Second}) if openErr != nil { - log.Fatalf("could not open db to show bucket stats, %v", openErr) + log.WithError(openErr).Fatal("could not open db to show bucket stats") } // make sure we close the database before ejecting out of this function. defer func() { closeErr := db.Close() if closeErr != nil { - log.Fatalf("could not close db after showing bucket stats, %v", closeErr) + log.WithError(closeErr).Fatalf("could not close db after showing bucket stats") } }() @@ -185,7 +185,7 @@ func readBucketStat(dbNameWithPath string, statsC chan<- *bucketStat) { return nil }) }); viewErr1 != nil { - log.Fatalf("could not read buckets from db while getting list of buckets: %v", viewErr1) + log.WithError(viewErr1).Fatal("could not read buckets from db while getting list of buckets") } // for every bucket, calculate the stats and send it for printing. @@ -381,7 +381,7 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) { sourceDbDirectory := filepath.Dir(dbNameWithPath) sourceDB, openErr := kv.NewKVStore(context.Background(), sourceDbDirectory, &kv.Config{}) if openErr != nil { - log.Fatalf("could not open sourceDB: %v", openErr) + log.WithError(openErr).Fatal("could not open sourceDB") } destinationDbDirectory := filepath.Dir(destDbNameWithPath) @@ -391,7 +391,7 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) { // if you want to avoid this then we should pass the metric name when opening the DB which touches // too many places. 
if openErr.Error() != "duplicate metrics collector registration attempted" { - log.Fatalf("could not open sourceDB, %v", openErr) + log.WithError(openErr).Fatalf("could not open sourceDB") } } @@ -399,13 +399,13 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) { defer func() { closeErr := sourceDB.Close() if closeErr != nil { - log.Fatalf("could not close sourceDB: %v", closeErr) + log.WithError(closeErr).Fatal("could not close sourceDB") } }() defer func() { closeErr := destDB.Close() if closeErr != nil { - log.Fatalf("could not close sourceDB: %v", closeErr) + log.WithError(closeErr).Fatal("could not close sourceDB") } }() @@ -414,11 +414,11 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) { for rowCount, key := range sourceStateKeys[910:] { sourceState, stateErr := sourceDB.State(ctx, bytesutil.ToBytes32(key)) if stateErr != nil { - log.Fatalf("could not get from source db, the state for key : %s, %v", hexutils.BytesToHex(key), stateErr) + log.WithError(stateErr).WithField("key", hexutils.BytesToHex(key)).Fatalf("could not get from source db, the state for key") } destinationState, stateErr := destDB.State(ctx, bytesutil.ToBytes32(key)) if stateErr != nil { - log.Fatalf("could not get destination db, the state for key : %s, %v", hexutils.BytesToHex(key), stateErr) + log.WithError(stateErr).WithField("key", hexutils.BytesToHex(key)).Fatalf("could not get from destination db, the state for key") } if destinationState == nil { log.Infof("could not find state in migrated DB: index = %d, slot = %d, epoch = %d, numOfValidators = %d, key = %s", @@ -432,13 +432,13 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) { } sourceStateHash, err := sourceState.HashTreeRoot(ctx) if err != nil { - log.Fatalf("could not find hash of source state: %v", err) + log.WithError(err).Fatal("could not find hash of source state") } - destinationSatteHash, err := destinationState.HashTreeRoot(ctx) + destinationStateHash, err := destinationState.HashTreeRoot(ctx) if err != nil { - log.Fatalf("could not find hash of destination state: %v", err) + log.WithError(err).Fatal("could not find hash of destination state") } - if !bytes.Equal(sourceStateHash[:], destinationSatteHash[:]) { + if !bytes.Equal(sourceStateHash[:], destinationStateHash[:]) { log.Fatalf("state mismatch : key = %s", hexutils.BytesToHex(key)) } } @@ -449,14 +449,14 @@ func keysOfBucket(dbNameWithPath string, bucketName []byte, rowLimit uint64) ([] // open the raw database file. If the file is busy, then exit. db, openErr := bolt.Open(dbNameWithPath, 0600, &bolt.Options{Timeout: 1 * time.Second}) if openErr != nil { - log.Fatalf("could not open db while getting keys of a bucket, %v", openErr) + log.WithError(openErr).Fatal("could not open db while getting keys of a bucket") } // make sure we close the database before ejecting out of this function. 
defer func() { closeErr := db.Close() if closeErr != nil { - log.Fatalf("could not close db while getting keys of a bucket, %v", closeErr) + log.WithError(closeErr).Fatal("could not close db while getting keys of a bucket") } }() @@ -481,7 +481,7 @@ func keysOfBucket(dbNameWithPath string, bucketName []byte, rowLimit uint64) ([] } return nil }); viewErr != nil { - log.Fatalf("could not read keys of bucket from db: %v", viewErr) + log.WithError(viewErr).Fatal("could not read keys of bucket from db") } return keys, sizes } diff --git a/tools/forkchecker/forkchecker.go b/tools/forkchecker/forkchecker.go index 2d7e735c6d..370506f452 100644 --- a/tools/forkchecker/forkchecker.go +++ b/tools/forkchecker/forkchecker.go @@ -49,7 +49,7 @@ func main() { for _, endpt := range endpts { conn, err := grpc.Dial(endpt, grpc.WithInsecure()) if err != nil { - log.Fatalf("fail to dial: %v", err) + log.WithError(err).Fatal("fail to dial") } clients[endpt] = pb.NewBeaconChainClient(conn) } diff --git a/tools/genesis-state-gen/BUILD.bazel b/tools/genesis-state-gen/BUILD.bazel index ea545f2bd3..639bdab9c3 100644 --- a/tools/genesis-state-gen/BUILD.bazel +++ b/tools/genesis-state-gen/BUILD.bazel @@ -15,6 +15,7 @@ go_library( "//proto/prysm/v1alpha1:go_default_library", "//runtime/interop:go_default_library", "@com_github_ghodss_yaml//:go_default_library", + "@com_github_sirupsen_logrus//:go_default_library", ], ) diff --git a/tools/genesis-state-gen/main.go b/tools/genesis-state-gen/main.go index 2eabfbcf38..7ae636298c 100644 --- a/tools/genesis-state-gen/main.go +++ b/tools/genesis-state-gen/main.go @@ -6,7 +6,6 @@ import ( "encoding/json" "flag" "io" - "log" "os" "strings" @@ -15,6 +14,7 @@ import ( "github.com/prysmaticlabs/prysm/io/file" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/runtime/interop" + log "github.com/sirupsen/logrus" ) // DepositDataJSON representing a json object of hex string and uint64 values for @@ -70,23 +70,23 @@ func main() { inputFile := *depositJSONFile expanded, err := file.ExpandPath(inputFile) if err != nil { - log.Printf("Could not expand file path %s: %v", inputFile, err) + log.WithError(err).Printf("Could not expand file path %s", inputFile) return } inputJSON, err := os.Open(expanded) // #nosec G304 if err != nil { - log.Printf("Could not open JSON file for reading: %v", err) + log.WithError(err).Print("Could not open JSON file for reading") return } defer func() { if err := inputJSON.Close(); err != nil { - log.Printf("Could not close file %s: %v", inputFile, err) + log.WithError(err).Printf("Could not close file %s", inputFile) } }() log.Printf("Generating genesis state from input JSON deposit data %s", inputFile) genesisState, err = genesisStateFromJSONValidators(inputJSON, *genesisTime) if err != nil { - log.Printf("Could not generate genesis beacon state: %v", err) + log.WithError(err).Print("Could not generate genesis beacon state") return } } else { @@ -97,7 +97,7 @@ func main() { // If no JSON input is specified, we create the state deterministically from interop keys. 
genesisState, _, err = interop.GenerateGenesisState(context.Background(), *genesisTime, uint64(*numValidators)) if err != nil { - log.Printf("Could not generate genesis beacon state: %v", err) + log.WithError(err).Print("Could not generate genesis beacon state") return } } @@ -105,11 +105,11 @@ func main() { if *sszOutputFile != "" { encodedState, err := genesisState.MarshalSSZ() if err != nil { - log.Printf("Could not ssz marshal the genesis beacon state: %v", err) + log.WithError(err).Print("Could not ssz marshal the genesis beacon state") return } if err := file.WriteFile(*sszOutputFile, encodedState); err != nil { - log.Printf("Could not write encoded genesis beacon state to file: %v", err) + log.WithError(err).Print("Could not write encoded genesis beacon state to file") return } log.Printf("Done writing to %s", *sszOutputFile) @@ -117,11 +117,11 @@ func main() { if *yamlOutputFile != "" { encodedState, err := yaml.Marshal(genesisState) if err != nil { - log.Printf("Could not yaml marshal the genesis beacon state: %v", err) + log.WithError(err).Print("Could not yaml marshal the genesis beacon state") return } if err := file.WriteFile(*yamlOutputFile, encodedState); err != nil { - log.Printf("Could not write encoded genesis beacon state to file: %v", err) + log.WithError(err).Print("Could not write encoded genesis beacon state to file") return } log.Printf("Done writing to %s", *yamlOutputFile) @@ -129,11 +129,11 @@ func main() { if *jsonOutputFile != "" { encodedState, err := json.Marshal(genesisState) if err != nil { - log.Printf("Could not json marshal the genesis beacon state: %v", err) + log.WithError(err).Print("Could not json marshal the genesis beacon state") return } if err := file.WriteFile(*jsonOutputFile, encodedState); err != nil { - log.Printf("Could not write encoded genesis beacon state to file: %v", err) + log.WithError(err).Print("Could not write encoded genesis beacon state to file") return } log.Printf("Done writing to %s", *jsonOutputFile) diff --git a/tools/gocovmerge/BUILD.bazel b/tools/gocovmerge/BUILD.bazel index 4221ddcb65..cf1736e3b4 100644 --- a/tools/gocovmerge/BUILD.bazel +++ b/tools/gocovmerge/BUILD.bazel @@ -6,7 +6,10 @@ go_library( srcs = ["main.go"], importpath = "github.com/prysmaticlabs/prysm/tools/gocovmerge", visibility = ["//visibility:private"], - deps = ["@org_golang_x_tools//cover:go_default_library"], + deps = [ + "@com_github_sirupsen_logrus//:go_default_library", + "@org_golang_x_tools//cover:go_default_library", + ], ) go_binary( diff --git a/tools/gocovmerge/main.go b/tools/gocovmerge/main.go index a79a8d8273..022520beae 100644 --- a/tools/gocovmerge/main.go +++ b/tools/gocovmerge/main.go @@ -8,10 +8,10 @@ import ( "flag" "fmt" "io" - "log" "os" "sort" + log "github.com/sirupsen/logrus" "golang.org/x/tools/cover" ) @@ -127,7 +127,7 @@ func main() { for _, file := range flag.Args() { profiles, err := cover.ParseProfiles(file) if err != nil { - log.Fatalf("failed to parse profiles: %v", err) + log.WithError(err).Fatal("failed to parse profiles") } for _, p := range profiles { diff --git a/tools/interop/convert-keys/BUILD.bazel b/tools/interop/convert-keys/BUILD.bazel index e7b5ef065e..47352cc63a 100644 --- a/tools/interop/convert-keys/BUILD.bazel +++ b/tools/interop/convert-keys/BUILD.bazel @@ -8,6 +8,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//tools/unencrypted-keys-gen/keygen:go_default_library", + "@com_github_sirupsen_logrus//:go_default_library", "@in_gopkg_yaml_v2//:go_default_library", ], ) diff --git 
a/tools/interop/convert-keys/main.go b/tools/interop/convert-keys/main.go index 79dd9b0bb1..bb4b88041b 100644 --- a/tools/interop/convert-keys/main.go +++ b/tools/interop/convert-keys/main.go @@ -7,10 +7,10 @@ package main import ( "encoding/hex" "fmt" - "log" "os" "github.com/prysmaticlabs/prysm/tools/unencrypted-keys-gen/keygen" + log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" ) @@ -32,18 +32,18 @@ func main() { in, err := os.ReadFile(inFile) // #nosec G304 if err != nil { - log.Fatalf("Failed to read file %s: %v", inFile, err) + log.WithError(err).Fatalf("Failed to read file %s", inFile) } data := make(KeyPairs, 0) if err := yaml.UnmarshalStrict(in, &data); err != nil { - log.Fatalf("Failed to unmarshal yaml: %v", err) + log.WithError(err).Fatal("Failed to unmarshal yaml") } out := &keygen.UnencryptedKeysContainer{} for _, key := range data { pk, err := hex.DecodeString(key.Priv[2:]) if err != nil { - log.Fatalf("Failed to decode hex string %s: %v", key.Priv, err) + log.WithError(err).Fatalf("Failed to decode hex string %s", key.Priv) } out.Keys = append(out.Keys, &keygen.UnencryptedKeys{ @@ -54,7 +54,7 @@ func main() { outFile, err := os.Create(os.Args[2]) if err != nil { - log.Fatalf("Failed to create file at %s: %v", os.Args[2], err) + log.WithError(err).Fatalf("Failed to create file at %s", os.Args[2]) } cleanup := func() { if err := outFile.Close(); err != nil { @@ -65,7 +65,7 @@ func main() { if err := keygen.SaveUnencryptedKeysToFile(outFile, out); err != nil { // log.Fatalf will prevent defer from being called cleanup() - log.Fatalf("Failed to save %v", err) + log.WithError(err).Fatal("Failed to save") } log.Printf("Wrote %s\n", os.Args[2]) } diff --git a/validator/client/aggregate.go b/validator/client/aggregate.go index f8489ad82a..af00c094ec 100644 --- a/validator/client/aggregate.go +++ b/validator/client/aggregate.go @@ -33,7 +33,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot types.Slot duty, err := v.duty(pubKey) if err != nil { - log.Errorf("Could not fetch validator assignment: %v", err) + log.WithError(err).Error("Could not fetch validator assignment") if v.emitAccountMetrics { ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() } @@ -52,7 +52,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot types.Slot slotSig, err := v.signSlotWithSelectionProof(ctx, pubKey, slot) if err != nil { - log.Errorf("Could not sign slot: %v", err) + log.WithError(err).Error("Could not sign slot") if v.emitAccountMetrics { ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() } @@ -86,7 +86,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot types.Slot sig, err := v.aggregateAndProofSig(ctx, pubKey, res.AggregateAndProof, slot) if err != nil { - log.Errorf("Could not sign aggregate and proof: %v", err) + log.WithError(err).Error("Could not sign aggregate and proof") return } _, err = v.validatorClient.SubmitSignedAggregateSelectionProof(ctx, &ethpb.SignedAggregateSubmitRequest{ @@ -96,7 +96,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot types.Slot }, }) if err != nil { - log.Errorf("Could not submit signed aggregate and proof to beacon node: %v", err) + log.WithError(err).Error("Could not submit signed aggregate and proof to beacon node") if v.emitAccountMetrics { ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() } @@ -104,7 +104,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot types.Slot } if err := v.addIndicesToLog(duty); err != nil { - log.Errorf("Could not add 
aggregator indices to logs: %v", err) + log.WithError(err).Error("Could not add aggregator indices to logs") if v.emitAccountMetrics { ValidatorAggFailVec.WithLabelValues(fmtKey).Inc() } diff --git a/validator/client/runner.go b/validator/client/runner.go index 818ea00e47..3792fdfb20 100644 --- a/validator/client/runner.go +++ b/validator/client/runner.go @@ -51,15 +51,15 @@ func run(ctx context.Context, v iface.Validator) { accountsChangedChan := make(chan [][fieldparams.BLSPubkeyLength]byte, 1) km, err := v.Keymanager() if err != nil { - log.Fatalf("Could not get keymanager: %v", err) + log.WithError(err).Fatal("Could not get keymanager") } sub := km.SubscribeAccountChanges(accountsChangedChan) // Set properties on the beacon node like the fee recipient for validators that are being used & active. if err := v.PushProposerSettings(ctx, km); err != nil { if errors.Is(err, ErrBuilderValidatorRegistration) { - log.Warnf("Push proposer settings error, %v", err) + log.WithError(err).Warn("Push proposer settings error") } else { - log.Fatalf("Failed to update proposer settings: %v", err) // allow fatal. skipcq + log.WithError(err).Fatal("Failed to update proposer settings") // allow fatal. skipcq } } for { @@ -89,7 +89,7 @@ func run(ctx context.Context, v iface.Validator) { log.Info("No active keys found. Waiting for activation...") err := v.WaitForActivation(ctx, accountsChangedChan) if err != nil { - log.Fatalf("Could not wait for validator activation: %v", err) + log.WithError(err).Fatal("Could not wait for validator activation") } } case slot := <-v.NextSlot(): @@ -122,7 +122,7 @@ func run(ctx context.Context, v iface.Validator) { go func() { //deadline set for next epoch rounded up if err := v.PushProposerSettings(ctx, km); err != nil { - log.Warnf("Failed to update proposer settings: %v", err) + log.WithError(err).Warn("Failed to update proposer settings") } }() } @@ -173,52 +173,52 @@ func waitForActivation(ctx context.Context, v iface.Validator) (types.Slot, erro } err := v.WaitForChainStart(ctx) if isConnectionError(err) { - log.Warnf("Could not determine if beacon chain started: %v", err) + log.WithError(err).Warn("Could not determine if beacon chain started") continue } if err != nil { - log.Fatalf("Could not determine if beacon chain started: %v", err) + log.WithError(err).Fatal("Could not determine if beacon chain started") } err = v.WaitForKeymanagerInitialization(ctx) if err != nil { - // log.Fatalf will prevent defer from being called + // log.Fatal will prevent defer from being called v.Done() - log.Fatalf("Wallet is not ready: %v", err) + log.WithError(err).Fatal("Wallet is not ready") } err = v.WaitForSync(ctx) if isConnectionError(err) { - log.Warnf("Could not determine if beacon chain started: %v", err) + log.WithError(err).Warn("Could not determine if beacon chain started") continue } if err != nil { - log.Fatalf("Could not determine if beacon node synced: %v", err) + log.WithError(err).Fatal("Could not determine if beacon node synced") } err = v.WaitForActivation(ctx, nil /* accountsChangedChan */) if isConnectionError(err) { - log.Warnf("Could not wait for validator activation: %v", err) + log.WithError(err).Warn("Could not wait for validator activation") continue } if err != nil { - log.Fatalf("Could not wait for validator activation: %v", err) + log.WithError(err).Fatal("Could not wait for validator activation") } headSlot, err = v.CanonicalHeadSlot(ctx) if isConnectionError(err) { - log.Warnf("Could not get current canonical head slot: %v", err) + 
log.WithError(err).Warn("Could not get current canonical head slot") continue } if err != nil { - log.Fatalf("Could not get current canonical head slot: %v", err) + log.WithError(err).Fatal("Could not get current canonical head slot") } err = v.CheckDoppelGanger(ctx) if isConnectionError(err) { - log.Warnf("Could not wait for checking doppelganger: %v", err) + log.WithError(err).Warn("Could not wait for checking doppelganger") continue } if err != nil { - log.Fatalf("Could not succeed with doppelganger check: %v", err) + log.WithError(err).Fatal("Could not succeed with doppelganger check") } break } diff --git a/validator/client/service.go b/validator/client/service.go index c2f10b92f5..207ed1446c 100644 --- a/validator/client/service.go +++ b/validator/client/service.go @@ -166,7 +166,7 @@ func (v *ValidatorService) Start() { sPubKeys, err := v.db.EIPImportBlacklistedPublicKeys(v.ctx) if err != nil { - log.Errorf("Could not read slashable public keys from disk: %v", err) + log.WithError(err).Error("Could not read slashable public keys from disk") return } slashablePublicKeys := make(map[[fieldparams.BLSPubkeyLength]byte]bool) @@ -176,7 +176,7 @@ func (v *ValidatorService) Start() { graffitiOrderedIndex, err := v.db.GraffitiOrderedIndex(v.ctx, v.graffitiStruct.Hash) if err != nil { - log.Errorf("Could not read graffiti ordered index from disk: %v", err) + log.WithError(err).Error("Could not read graffiti ordered index from disk") return } @@ -264,7 +264,7 @@ func ConstructDialOptions( if withCert != "" { creds, err := credentials.NewClientTLSFromFile(withCert, "") if err != nil { - log.Errorf("Could not get valid credentials: %v", err) + log.WithError(err).Error("Could not get valid credentials") return nil } transportSecurity = grpc.WithTransportCredentials(creds) diff --git a/validator/client/sync_committee.go b/validator/client/sync_committee.go index 7d5791072f..ee107a9001 100644 --- a/validator/client/sync_committee.go +++ b/validator/client/sync_committee.go @@ -99,7 +99,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t duty, err := v.duty(pubKey) if err != nil { - log.Errorf("Could not fetch validator assignment: %v", err) + log.WithError(err).Error("Could not fetch validator assignment") return } @@ -108,7 +108,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t Slot: slot, }) if err != nil { - log.Errorf("Could not get sync subcommittee index: %v", err) + log.WithError(err).Error("Could not get sync subcommittee index") return } if len(indexRes.Indices) == 0 { @@ -118,7 +118,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t selectionProofs, err := v.selectionProofs(ctx, slot, pubKey, indexRes) if err != nil { - log.Errorf("Could not get selection proofs: %v", err) + log.WithError(err).Error("Could not get selection proofs") return } @@ -127,7 +127,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t for i, comIdx := range indexRes.Indices { isAggregator, err := altair.IsSyncCommitteeAggregator(selectionProofs[i]) if err != nil { - log.Errorf("Could check in aggregator: %v", err) + log.WithError(err).Error("Could check in aggregator") return } if !isAggregator { @@ -141,7 +141,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t SubnetId: subnet, }) if err != nil { - log.Errorf("Could not get sync committee contribution: %v", err) + log.WithError(err).Error("Could not get sync committee contribution") return } if 
contribution.AggregationBits.Count() == 0 { @@ -160,7 +160,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t } sig, err := v.signContributionAndProof(ctx, pubKey, contributionAndProof, slot) if err != nil { - log.Errorf("Could not sign contribution and proof: %v", err) + log.WithError(err).Error("Could not sign contribution and proof") return } @@ -168,7 +168,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t Message: contributionAndProof, Signature: sig, }); err != nil { - log.Errorf("Could not submit signed contribution and proof: %v", err) + log.WithError(err).Error("Could not submit signed contribution and proof") return } diff --git a/validator/client/sync_committee_test.go b/validator/client/sync_committee_test.go index 1e1de7e351..065ed29699 100644 --- a/validator/client/sync_committee_test.go +++ b/validator/client/sync_committee_test.go @@ -259,7 +259,8 @@ func TestSubmitSignedContributionAndProof_BadDomain(t *testing.T) { }, errors.New("bad domain response")) validator.SubmitSignedContributionAndProof(context.Background(), 1, pubKey) - require.LogsContain(t, hook, "Could not get selection proofs: bad domain response") + require.LogsContain(t, hook, "Could not get selection proofs") + require.LogsContain(t, hook, "bad domain response") } func TestSubmitSignedContributionAndProof_CouldNotGetContribution(t *testing.T) { diff --git a/validator/keymanager/remote-web3signer/internal/client.go b/validator/keymanager/remote-web3signer/internal/client.go index e76d9922eb..5a13f98191 100644 --- a/validator/keymanager/remote-web3signer/internal/client.go +++ b/validator/keymanager/remote-web3signer/internal/client.go @@ -218,6 +218,6 @@ func unmarshalSignatureResponse(responseBody io.ReadCloser) (bls.Signature, erro // closeBody a utility method to wrap an error for closing func closeBody(body io.Closer) { if err := body.Close(); err != nil { - log.Errorf("could not close response body: %v", err) + log.WithError(err).Error("could not close response body") } } diff --git a/validator/keymanager/remote/keymanager.go b/validator/keymanager/remote/keymanager.go index e33a6ae3d4..c5e5dbe71b 100644 --- a/validator/keymanager/remote/keymanager.go +++ b/validator/keymanager/remote/keymanager.go @@ -149,7 +149,7 @@ func UnmarshalOptionsFile(r io.ReadCloser) (*KeymanagerOpts, error) { } defer func() { if err := r.Close(); err != nil { - log.Errorf("Could not close keymanager config file: %v", err) + log.WithError(err).Error("Could not close keymanager config file") } }() opts := &KeymanagerOpts{ diff --git a/validator/rpc/intercepter.go b/validator/rpc/intercepter.go index e6aae0204d..47e30bc0a3 100644 --- a/validator/rpc/intercepter.go +++ b/validator/rpc/intercepter.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/golang-jwt/jwt/v4" + "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" @@ -24,7 +25,10 @@ func (s *Server) JWTInterceptor() grpc.UnaryServerInterceptor { return nil, err } h, err := handler(ctx, req) - log.Debugf("Request - Method: %s, Error: %v\n", info.FullMethod, err) + log.WithError(err).WithFields(logrus.Fields{ + "FullMethod": info.FullMethod, + "Server": info.Server, + }).Debug("Request handled") return h, err } } diff --git a/validator/rpc/server.go b/validator/rpc/server.go index c75d9b0192..d7de92d034 100644 --- a/validator/rpc/server.go +++ b/validator/rpc/server.go @@ -132,7 +132,7 @@ func (s *Server) Start() { address := 
fmt.Sprintf("%s:%s", s.host, s.port) lis, err := net.Listen("tcp", address) if err != nil { - log.Errorf("Could not listen to port in Start() %s: %v", address, err) + log.WithError(err).Errorf("Could not listen to port in Start() %s", address) } s.listener = lis @@ -182,7 +182,7 @@ func (s *Server) Start() { go func() { if s.listener != nil { if err := s.grpcServer.Serve(s.listener); err != nil { - log.Errorf("Could not serve: %v", err) + log.WithError(err).Error("Could not serve") } } }() @@ -190,7 +190,7 @@ func (s *Server) Start() { if s.walletDir != "" { token, err := s.initializeAuthToken(s.walletDir) if err != nil { - log.Errorf("Could not initialize web auth token: %v", err) + log.WithError(err).Error("Could not initialize web auth token") return } validatorWebAddr := fmt.Sprintf("%s:%d", s.validatorGatewayHost, s.validatorGatewayPort) diff --git a/validator/rpc/standard_api.go b/validator/rpc/standard_api.go index fe13bd8ce4..05b3246382 100644 --- a/validator/rpc/standard_api.go +++ b/validator/rpc/standard_api.go @@ -173,7 +173,7 @@ func (s *Server) DeleteKeystores( exportedHistory, err := s.slashingProtectionHistoryForDeletedKeys(ctx, req.Pubkeys, statuses) if err != nil { - log.Warnf("Could not get slashing protection history for deleted keys: %v", err) + log.WithError(err).Warn("Could not get slashing protection history for deleted keys") statuses := groupExportErrors(req, "Non duplicate keys that were existing were deleted, but could not export slashing protection history.") return ðpbservice.DeleteKeystoresResponse{Data: statuses}, nil }