Enforce log.WithError(err) static analysis and fix all violations (#11163)
* Use log.WithError static analysis from #11143 and fix all violations

* Fix another log violation after pulling from develop

* Update beacon-chain/sync/pending_blocks_queue.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* @potuz feedback

* Copy paste fail

* fix tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
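The change enforced throughout this diff moves the error out of the format string and into a structured logrus field. A minimal before/after sketch, assuming the standard sirupsen/logrus API (illustrative, not a line from this diff):

    import log "github.com/sirupsen/logrus"

    // Before: the error text is interpolated into the message, so every
    // occurrence yields a slightly different, harder-to-aggregate line.
    log.Errorf("Could not save block: %v", err)

    // After: the message stays constant and the error travels as a
    // structured "error" field that log tooling can index and group on.
    log.WithError(err).Error("Could not save block")

WithError also composes with other fields, e.g. log.WithError(err).WithField("slot", slot).Debug("Could not validate block"), which is the shape several sync hunks below adopt.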
@@ -122,6 +122,7 @@ nogo(
         "//tools/analyzers/gocognit:go_default_library",
         "//tools/analyzers/ineffassign:go_default_library",
         "//tools/analyzers/interfacechecker:go_default_library",
+        "//tools/analyzers/logruswitherror:go_default_library",
         "//tools/analyzers/maligned:go_default_library",
         "//tools/analyzers/nop:go_default_library",
         "//tools/analyzers/properpermissions:go_default_library",
@@ -198,7 +198,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
     for i, validator := range postState.Validators() {
         bal, err := postState.BalanceAtIndex(types.ValidatorIndex(i))
         if err != nil {
-            log.Errorf("Could not load validator balance: %v", err)
+            log.WithError(err).Error("Could not load validator balance")
             continue
         }
         if validator.Slashed {
@@ -128,7 +128,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
         // log.Fatalf will prevent defer from being called
         span.End()
         // Exit run time if the node failed to verify weak subjectivity checkpoint.
-        log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err)
+        log.WithError(err).Fatal("Could not verify weak subjectivity checkpoint")
     }

     return nil
@@ -152,7 +152,7 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {

     // Add block attestations to the fork choice pool to compute head.
     if err := s.cfg.AttPool.SaveBlockAttestations(b.Body().Attestations()); err != nil {
-        log.Errorf("Could not save block attestations for fork choice: %v", err)
+        log.WithError(err).Error("Could not save block attestations for fork choice")
         return nil
     }
     // Mark block exits as seen so we don't include same ones in future blocks.
@@ -395,12 +395,12 @@ func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Ti
     preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
     initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
     if err != nil {
-        log.Fatalf("Could not initialize beacon chain: %v", err)
+        log.WithError(err).Fatal("Could not initialize beacon chain")
     }
     // We start a counter to genesis, if needed.
     gRoot, err := initializedState.HashTreeRoot(s.ctx)
     if err != nil {
-        log.Fatalf("Could not hash tree root genesis state: %v", err)
+        log.WithError(err).Fatal("Could not hash tree root genesis state")
     }
     go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)

@@ -473,7 +473,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
     s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)

     if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
-        log.Fatalf("Could not process genesis block for fork choice: %v", err)
+        log.WithError(err).Fatal("Could not process genesis block for fork choice")
     }
     s.cfg.ForkChoiceStore.SetOriginRoot(genesisBlkRoot)
     // Set genesis as fully validated
@@ -483,7 +483,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
     s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))

     if err := s.setHead(genesisBlkRoot, genesisBlk, genesisState); err != nil {
-        log.Fatalf("Could not set head: %v", err)
+        log.WithError(err).Fatal("Could not set head")
     }
     return nil
 }
@@ -508,11 +508,11 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d

     gState, err := db.GenesisState(ctx)
     if err != nil {
-        log.Fatalf("Could not retrieve genesis state: %v", err)
+        log.WithError(err).Fatal("Could not retrieve genesis state")
     }
     gRoot, err := gState.HashTreeRoot(ctx)
     if err != nil {
-        log.Fatalf("Could not hash tree root genesis state: %v", err)
+        log.WithError(err).Fatal("Could not hash tree root genesis state")
     }
     go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
 }
@@ -100,7 +100,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos

     depRoot, err := hash.HashProto(d)
     if err != nil {
-        log.Errorf("Could not remove deposit %v", err)
+        log.WithError(err).Error("Could not remove deposit")
         return
     }

@@ -111,7 +111,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos
     for i, ctnr := range dc.pendingDeposits {
         h, err := hash.HashProto(ctnr.Deposit)
         if err != nil {
-            log.Errorf("Could not hash deposit %v", err)
+            log.WithError(err).Error("Could not hash deposit")
             continue
         }
         if h == depRoot {
@@ -176,7 +176,7 @@ func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verif
         }
         if err := verifyDepositDataSigningRoot(deposit.Data, domain); err != nil {
             // Ignore this error as in the spec pseudo code.
-            log.Debugf("Skipping deposit: could not verify deposit data signature: %v", err)
+            log.WithError(err).Debug("Skipping deposit: could not verify deposit data signature")
             return beaconState, newValidator, nil
         }
     }
@@ -46,7 +46,7 @@ func IsCurrentPeriodSyncCommittee(
     // Fill in the cache on miss.
     go func() {
         if err := syncCommitteeCache.UpdatePositionsInCommittee(root, st); err != nil {
-            log.Errorf("Could not fill sync committee cache on miss: %v", err)
+            log.WithError(err).Error("Could not fill sync committee cache on miss")
         }
     }()

@@ -110,7 +110,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
     // Fill in the cache on miss.
     go func() {
         if err := syncCommitteeCache.UpdatePositionsInCommittee(root, st); err != nil {
-            log.Errorf("Could not fill sync committee cache on miss: %v", err)
+            log.WithError(err).Error("Could not fill sync committee cache on miss")
         }
     }()

@@ -63,18 +63,18 @@ func (s *Service) Start() {
     if s.cfg.GenesisPath != "" {
         data, err := os.ReadFile(s.cfg.GenesisPath)
         if err != nil {
-            log.Fatalf("Could not read pre-loaded state: %v", err)
+            log.WithError(err).Fatal("Could not read pre-loaded state")
         }
         genesisState := &ethpb.BeaconState{}
         if err := genesisState.UnmarshalSSZ(data); err != nil {
-            log.Fatalf("Could not unmarshal pre-loaded state: %v", err)
+            log.WithError(err).Fatal("Could not unmarshal pre-loaded state")
         }
         genesisTrie, err := v1.InitializeFromProto(genesisState)
         if err != nil {
-            log.Fatalf("Could not get state trie: %v", err)
+            log.WithError(err).Fatal("Could not get state trie")
         }
         if err := s.saveGenesisState(s.ctx, genesisTrie); err != nil {
-            log.Fatalf("Could not save interop genesis state %v", err)
+            log.WithError(err).Fatal("Could not save interop genesis state")
         }
         return
     }
@@ -82,11 +82,11 @@ func (s *Service) Start() {
     // Save genesis state in db
     genesisState, _, err := interop.GenerateGenesisState(s.ctx, s.cfg.GenesisTime, s.cfg.NumValidators)
     if err != nil {
-        log.Fatalf("Could not generate interop genesis state: %v", err)
+        log.WithError(err).Fatal("Could not generate interop genesis state")
     }
     genesisTrie, err := v1.InitializeFromProto(genesisState)
     if err != nil {
-        log.Fatalf("Could not get state trie: %v", err)
+        log.WithError(err).Fatal("Could not get state trie")
     }
     if s.cfg.GenesisTime == 0 {
         // Generated genesis time; fetch it
@@ -94,12 +94,12 @@ func (s *Service) Start() {
     }
     gRoot, err := genesisTrie.HashTreeRoot(s.ctx)
     if err != nil {
-        log.Fatalf("Could not hash tree root genesis state: %v", err)
+        log.WithError(err).Fatal("Could not hash tree root genesis state")
     }
     go slots.CountdownToGenesis(s.ctx, time.Unix(int64(s.cfg.GenesisTime), 0), s.cfg.NumValidators, gRoot)

     if err := s.saveGenesisState(s.ctx, genesisTrie); err != nil {
-        log.Fatalf("Could not save interop genesis state %v", err)
+        log.WithError(err).Fatal("Could not save interop genesis state")
     }
 }

@@ -190,7 +190,8 @@ func TestProcessDeposit_InvalidSignature(t *testing.T) {
     err = web3Service.processDeposit(context.Background(), eth1Data, deposits[0])
     require.NoError(t, err)

-    require.LogsContain(t, hook, "could not verify deposit data signature: could not convert bytes to signature")
+    require.LogsContain(t, hook, "could not verify deposit data signature")
+    require.LogsContain(t, hook, "could not convert bytes to signature")
 }

 func TestProcessDeposit_UnableToVerify(t *testing.T) {
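With WithError, the error string no longer appears inline in the message, so the test's single combined LogsContain assertion is split in two: one check for the constant message, one for the text of the attached error field.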
@@ -184,7 +184,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethtypes.Lo
             DepositCount: uint64(len(s.chainStartData.ChainstartDeposits)),
         }
         if err := s.processDeposit(ctx, eth1Data, deposit); err != nil {
-            log.Errorf("Invalid deposit processed: %v", err)
+            log.WithError(err).Error("Invalid deposit processed")
             validData = false
         }
     } else {
@@ -236,7 +236,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
     for i := range s.chainStartData.ChainstartDeposits {
         proof, err := s.depositTrie.MerkleProof(i)
         if err != nil {
-            log.Errorf("unable to generate deposit proof %v", err)
+            log.WithError(err).Error("unable to generate deposit proof")
         }
         s.chainStartData.ChainstartDeposits[i].Proof = proof
     }
@@ -51,7 +51,7 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
     logCounter := 0
     errorLogger := func(err error, msg string) {
         if logCounter > logThreshold {
-            log.Errorf("%s: %v", msg, err)
+            log.WithError(err).Error(msg)
             logCounter = 0
         }
         logCounter++
@@ -114,7 +114,7 @@ func (s *Service) checkDefaultEndpoint(ctx context.Context) {

     currClient := s.rpcClient
     if err := s.setupExecutionClientConnections(ctx, primaryEndpoint); err != nil {
-        log.Debugf("Primary endpoint not ready: %v", err)
+        log.WithError(err).Debug("Primary endpoint not ready")
         return
     }
     // Close previous client, if connection was successful.
@@ -552,7 +552,7 @@ func (s *Service) initPOWService() {
     logCounter := 0
     errorLogger := func(err error, msg string) {
         if logCounter > logThreshold {
-            log.Errorf("%s: %v", msg, err)
+            log.WithError(err).Error(msg)
             logCounter = 0
         }
         logCounter++
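The errorLogger closures in the two hunks above throttle repeated connection errors. Since the closure already receives the error and message as separate arguments, the fix is a one-line swap to log.WithError(err).Error(msg) with no change to the throttling logic; a call site might look like (illustrative, assumed message): errorLogger(err, "Could not connect to execution client endpoint").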
@@ -345,7 +345,7 @@ func (b *BeaconNode) Close() {
     log.Info("Stopping beacon node")
     b.services.StopAll()
     if err := b.db.Close(); err != nil {
-        log.Errorf("Failed to close database: %v", err)
+        log.WithError(err).Error("Failed to close database")
     }
     b.collector.unregister()
     b.cancel()
@@ -23,7 +23,7 @@ func withRelayAddrs(relay string) config.AddrsFactory {
         }
         relayAddr, err := ma.NewMultiaddr(relay + "/p2p-circuit" + a.String())
         if err != nil {
-            log.Errorf("Failed to create multiaddress for relay node: %v", err)
+            log.WithError(err).Error("Failed to create multiaddress for relay node")
         } else {
             relayAddrs = append(relayAddrs, relayAddr)
         }
@@ -186,7 +186,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
     // In the event our sync message is outdated and beyond the
     // acceptable threshold, we exit early and do not broadcast it.
     if err := altair.ValidateSyncMessageTime(sMsg.Slot, s.genesisTime, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
-        log.Warnf("Sync Committee Message is too old to broadcast, discarding it. %v", err)
+        log.WithError(err).Warn("Sync Committee Message is too old to broadcast, discarding it")
         return
     }

@@ -49,7 +49,7 @@ func (s *Service) RefreshENR() {
     }
     currentBitV, err := attBitvector(s.dv5Listener.Self().Record())
     if err != nil {
-        log.Errorf("Could not retrieve att bitfield: %v", err)
+        log.WithError(err).Error("Could not retrieve att bitfield")
         return
     }
     // Compare current epoch with our fork epochs
@@ -67,7 +67,7 @@ func (s *Service) RefreshENR() {
     }
     currentBitS, err := syncBitvector(s.dv5Listener.Self().Record())
     if err != nil {
-        log.Errorf("Could not retrieve sync bitfield: %v", err)
+        log.WithError(err).Error("Could not retrieve sync bitfield")
         return
     }
     if bytes.Equal(bitV, currentBitV) && bytes.Equal(bitS, currentBitS) &&
@@ -356,7 +356,7 @@ func parseGenericAddrs(addrs []string) (enodeString, multiAddrString []string) {
             multiAddrString = append(multiAddrString, addr)
             continue
         }
-        log.Errorf("Invalid address of %s provided: %v", addr, err)
+        log.WithError(err).Errorf("Invalid address of %s provided", addr)
     }
     return enodeString, multiAddrString
 }
@@ -196,13 +196,13 @@ func defaultAggregateTopicParams(activeValidators uint64) *pubsub.TopicScorePara
     aggPerSlot := aggregatorsPerSlot(activeValidators)
     firstMessageCap, err := decayLimit(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot*2/gossipSubD))
     if err != nil {
-        log.Warnf("skipping initializing topic scoring: %v", err)
+        log.WithError(err).Warn("skipping initializing topic scoring")
         return nil
     }
     firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
     meshThreshold, err := decayThreshold(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot)/dampeningFactor)
     if err != nil {
-        log.Warnf("skipping initializing topic scoring: %v", err)
+        log.WithError(err).Warn("skipping initializing topic scoring")
         return nil
     }
     meshWeight := -scoreByWeight(aggregateWeight, meshThreshold)
@@ -238,13 +238,13 @@ func defaultSyncContributionTopicParams() *pubsub.TopicScoreParams {
     aggPerSlot := params.BeaconConfig().SyncCommitteeSubnetCount * params.BeaconConfig().TargetAggregatorsPerSyncSubcommittee
     firstMessageCap, err := decayLimit(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot*2/gossipSubD))
     if err != nil {
-        log.Warnf("skipping initializing topic scoring: %v", err)
+        log.WithError(err).Warn("skipping initializing topic scoring")
         return nil
     }
     firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
     meshThreshold, err := decayThreshold(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot)/dampeningFactor)
     if err != nil {
-        log.Warnf("skipping initializing topic scoring: %v", err)
+        log.WithError(err).Warn("skipping initializing topic scoring")
         return nil
     }
     meshWeight := -scoreByWeight(syncContributionWeight, meshThreshold)
@@ -306,14 +306,14 @@ func defaultAggregateSubnetTopicParams(activeValidators uint64) *pubsub.TopicSco
     // Determine expected first deliveries based on the message rate.
     firstMessageCap, err := decayLimit(scoreDecay(firstDecay*oneEpochDuration()), float64(rate))
     if err != nil {
-        log.Warnf("skipping initializing topic scoring: %v", err)
+        log.WithError(err).Warn("skipping initializing topic scoring")
         return nil
     }
     firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
     // Determine expected mesh deliveries based on message rate applied with a dampening factor.
     meshThreshold, err := decayThreshold(scoreDecay(meshDecay*oneEpochDuration()), float64(numPerSlot)/dampeningFactor)
     if err != nil {
-        log.Warnf("skipping initializing topic scoring: %v", err)
+        log.WithError(err).Warn("skipping initializing topic scoring")
         return nil
     }
     meshWeight := -scoreByWeight(topicWeight, meshThreshold)
@@ -31,7 +31,7 @@ func logExternalIPAddr(id peer.ID, addr string, port uint) {
     if addr != "" {
         multiAddr, err := multiAddressBuilder(addr, port)
         if err != nil {
-            log.Errorf("Could not create multiaddress: %v", err)
+            log.WithError(err).Error("Could not create multiaddress")
             return
         }
         log.WithField(
@@ -21,7 +21,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
     cfg := s.cfg
     listen, err := multiAddressBuilder(ip.String(), cfg.TCPPort)
     if err != nil {
-        log.Fatalf("Failed to p2p listen: %v", err)
+        log.WithError(err).Fatal("Failed to p2p listen")
     }
     if cfg.LocalIP != "" {
         if net.ParseIP(cfg.LocalIP) == nil {
@@ -29,16 +29,16 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
         }
         listen, err = multiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
         if err != nil {
-            log.Fatalf("Failed to p2p listen: %v", err)
+            log.WithError(err).Fatal("Failed to p2p listen")
         }
     }
     ifaceKey, err := ecdsaprysm.ConvertToInterfacePrivkey(priKey)
     if err != nil {
-        log.Fatalf("Failed to retrieve private key: %v", err)
+        log.WithError(err).Fatal("Failed to retrieve private key")
     }
     id, err := peer.IDFromPublicKey(ifaceKey.GetPublic())
     if err != nil {
-        log.Fatalf("Failed to retrieve peer id: %v", err)
+        log.WithError(err).Fatal("Failed to retrieve peer id")
     }
     log.Infof("Running node with peer id of %s ", id.String())

@@ -226,7 +226,7 @@ func (s *Service) Start() {
     if len(s.cfg.StaticPeers) > 0 {
         addrs, err := peersFromStringAddrs(s.cfg.StaticPeers)
         if err != nil {
-            log.Errorf("Could not connect to static peer: %v", err)
+            log.WithError(err).Error("Could not connect to static peer")
         }
         s.connectWithAllPeers(addrs)
     }
@@ -437,7 +437,7 @@ func (s *Service) awaitStateInitialized() {
 func (s *Service) connectWithAllPeers(multiAddrs []multiaddr.Multiaddr) {
     addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
     if err != nil {
-        log.Errorf("Could not convert to peer address info's from multiaddresses: %v", err)
+        log.WithError(err).Error("Could not convert to peer address info's from multiaddresses")
         return
     }
     for _, info := range addrInfos {
@@ -133,7 +133,7 @@ func metaDataFromConfig(cfg *Config) (metadata.Metadata, error) {
 func ipAddr() net.IP {
     ip, err := network.ExternalIP()
     if err != nil {
-        log.Fatalf("Could not get IPv4 address: %v", err)
+        log.WithError(err).Fatal("Could not get IPv4 address")
     }
     return net.ParseIP(ip)
 }
@@ -19,7 +19,7 @@ func ensurePeerConnections(ctx context.Context, h host.Host, peers ...string) {
     }
     peerInfo, err := MakePeer(p)
     if err != nil {
-        log.Errorf("Could not make peer: %v", err)
+        log.WithError(err).Error("Could not make peer")
         continue
     }

@@ -365,7 +365,7 @@ func (bs *Server) collectReceivedAttestations(ctx context.Context) {
     case att := <-bs.ReceivedAttestationsBuffer:
         attDataRoot, err := att.Data.HashTreeRoot()
         if err != nil {
-            log.Errorf("Could not hash tree root attestation data: %v", err)
+            log.WithError(err).Error("Could not hash tree root attestation data")
             continue
         }
         attsByRoot[attDataRoot] = append(attsByRoot[attDataRoot], att)
@@ -160,7 +160,7 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1
     valid, err := validateDepositTrie(depositTrie, canonicalEth1Data)
     // Log a warning here, as the cached trie is invalid.
     if !valid {
-        log.Warnf("Cached deposit trie is invalid, rebuilding it now: %v", err)
+        log.WithError(err).Warn("Cached deposit trie is invalid, rebuilding it now")
         return vs.rebuildDepositTrie(ctx, canonicalEth1Data, canonicalEth1DataHeight)
     }

@@ -190,7 +190,7 @@ func (vs *Server) rebuildDepositTrie(ctx context.Context, canonicalEth1Data *eth
     valid, err := validateDepositTrie(depositTrie, canonicalEth1Data)
     // Log an error here, as even with rebuilding the trie, it is still invalid.
     if !valid {
-        log.Errorf("Rebuilt deposit trie is invalid: %v", err)
+        log.WithError(err).Error("Rebuilt deposit trie is invalid")
     }
     return depositTrie, nil
 }
@@ -132,7 +132,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
     address := fmt.Sprintf("%s:%s", s.cfg.Host, s.cfg.Port)
     lis, err := net.Listen("tcp", address)
     if err != nil {
-        log.Errorf("Could not listen to port in Start() %s: %v", address, err)
+        log.WithError(err).Errorf("Could not listen to port in Start() %s", address)
     }
     s.listener = lis
     log.WithField("address", address).Info("gRPC server listening on port")
@@ -364,7 +364,7 @@ func (s *Service) Start() {
     go func() {
         if s.listener != nil {
             if err := s.grpcServer.Serve(s.listener); err != nil {
-                log.Errorf("Could not serve gRPC: %v", err)
+                log.WithError(err).Errorf("Could not serve gRPC")
             }
         }
     }()
@@ -150,7 +150,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
         switch {
         case errors.Is(ErrOptimisticParent, err): // Ok to continue process block with parent that is an optimistic candidate.
         case err != nil:
-            log.Debugf("Could not validate block from slot %d: %v", b.Block().Slot(), err)
+            log.WithError(err).WithField("slot", b.Block().Slot()).Debug("Could not validate block")
             s.setBadBlock(ctx, blkRoot)
             tracing.AnnotateError(span, err)
             span.End()
@@ -167,7 +167,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
                 s.setBadBlock(ctx, blkRoot)
             }
         }
-        log.Debugf("Could not process block from slot %d: %v", b.Block().Slot(), err)
+        log.WithError(err).WithField("slot", b.Block().Slot()).Debug("Could not process block")

         // In the next iteration of the queue, this block will be removed from
         // the pending queue as it has been marked as a 'bad' block.
@@ -258,7 +258,7 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
     }
     if err := s.sendRecentBeaconBlocksRequest(ctx, &req, pid); err != nil {
         tracing.AnnotateError(span, err)
-        log.Debugf("Could not send recent block request: %v", err)
+        log.WithError(err).Debug("Could not send recent block request")
     }
     newRoots := make([][32]byte, 0, len(roots))
     s.pendingQueueLock.RLock()
@@ -138,13 +138,13 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
         // Check before hand that peer is valid.
         if s.cfg.p2p.Peers().IsBad(stream.Conn().RemotePeer()) {
             if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeBanned, stream.Conn().RemotePeer()); err != nil {
-                log.Debugf("Could not disconnect from peer: %v", err)
+                log.WithError(err).Debug("Could not disconnect from peer")
             }
             return
         }
         // Validate request according to peer limits.
         if err := s.rateLimiter.validateRawRpcRequest(stream); err != nil {
-            log.Debugf("Could not validate rpc request from peer: %v", err)
+            log.WithError(err).Debug("Could not validate rpc request from peer")
             return
         }
         s.rateLimiter.addRawStream(stream)
@@ -64,7 +64,7 @@ func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID) {
         goodbyeCode = p2ptypes.GoodbyeCodeBanned
     }
     if err := s.sendGoodByeAndDisconnect(ctx, goodbyeCode, id); err != nil {
-        log.Debugf("Error when disconnecting with bad peer: %v", err)
+        log.WithError(err).Debug("Error when disconnecting with bad peer")
    }
 }

@@ -40,7 +40,7 @@ func (s *Service) maintainPeerStatuses() {
     if s.cfg.p2p.Host().Network().Connectedness(id) != network.Connected {
         s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnecting)
         if err := s.cfg.p2p.Disconnect(id); err != nil {
-            log.Debugf("Error when disconnecting with peer: %v", err)
+            log.WithError(err).Debug("Error when disconnecting with peer")
         }
         s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnected)
         return
@@ -100,7 +100,7 @@ func (s *Service) resyncIfBehind() {
         numberOfTimesResyncedCounter.Inc()
         s.clearPendingSlots()
         if err := s.cfg.initialSync.Resync(); err != nil {
-            log.Errorf("Could not resync chain: %v", err)
+            log.WithError(err).Errorf("Could not resync chain")
         }
     }
 }
@@ -26,7 +26,7 @@ var Commands = &cli.Command{
     Before: tos.VerifyTosAcceptedOrPrompt,
     Action: func(cliCtx *cli.Context) error {
         if err := beacondb.Restore(cliCtx); err != nil {
-            log.Fatalf("Could not restore database: %v", err)
+            log.WithError(err).Fatal("Could not restore database")
         }
         return nil
     },
@@ -130,12 +130,12 @@ func run(ctx *cli.Context) error {
     for _, s := range scrapers {
         r, err := s.Scrape()
         if err != nil {
-            log.Errorf("Scraper error: %s", err)
+            log.WithError(err).Error("Scraper error")
             continue
         }
         err = upd.Update(r)
         if err != nil {
-            log.Errorf("client-stats collector error: %s", err)
+            log.WithError(err).Error("client-stats collector error")
             continue
         }
     }
@@ -43,7 +43,7 @@ var Commands = &cli.Command{
     },
     Action: func(cliCtx *cli.Context) error {
         if err := accountsDelete(cliCtx); err != nil {
-            log.Fatalf("Could not delete account: %v", err)
+            log.WithError(err).Fatal("Could not delete account")
         }
         return nil
     },
@@ -80,7 +80,7 @@ var Commands = &cli.Command{
     },
     Action: func(cliCtx *cli.Context) error {
         if err := accountsList(cliCtx); err != nil {
-            log.Fatalf("Could not list accounts: %v", err)
+            log.WithError(err).Fatal("Could not list accounts")
         }
         return nil
     },
@@ -114,7 +114,7 @@ var Commands = &cli.Command{
     },
     Action: func(cliCtx *cli.Context) error {
         if err := accountsBackup(cliCtx); err != nil {
-            log.Fatalf("Could not backup accounts: %v", err)
+            log.WithError(err).Fatal("Could not backup accounts")
         }
         return nil
     },
@@ -145,7 +145,7 @@ var Commands = &cli.Command{
     },
     Action: func(cliCtx *cli.Context) error {
         if err := accountsImport(cliCtx); err != nil {
-            log.Fatalf("Could not import accounts: %v", err)
+            log.WithError(err).Fatal("Could not import accounts")
         }
         return nil
     },
@@ -182,7 +182,7 @@ var Commands = &cli.Command{
     },
     Action: func(cliCtx *cli.Context) error {
         if err := accountsExit(cliCtx, os.Stdin); err != nil {
-            log.Fatalf("Could not perform voluntary exit: %v", err)
+            log.WithError(err).Fatal("Could not perform voluntary exit")
         }
         return nil
     },
@@ -26,7 +26,7 @@ var Commands = &cli.Command{
     Before: tos.VerifyTosAcceptedOrPrompt,
     Action: func(cliCtx *cli.Context) error {
         if err := validatordb.Restore(cliCtx); err != nil {
-            log.Fatalf("Could not restore database: %v", err)
+            log.WithError(err).Fatal("Could not restore database")
         }
         return nil
     },
@@ -45,7 +45,7 @@ var Commands = &cli.Command{
     Before: tos.VerifyTosAcceptedOrPrompt,
     Action: func(cliCtx *cli.Context) error {
         if err := validatordb.MigrateUp(cliCtx); err != nil {
-            log.Fatalf("Could not run database migrations: %v", err)
+            log.WithError(err).Fatal("Could not run database migrations")
         }
         return nil
     },
@@ -59,7 +59,7 @@ var Commands = &cli.Command{
     Before: tos.VerifyTosAcceptedOrPrompt,
     Action: func(cliCtx *cli.Context) error {
         if err := validatordb.MigrateDown(cliCtx); err != nil {
-            log.Fatalf("Could not run database migrations: %v", err)
+            log.WithError(err).Fatal("Could not run database migrations")
         }
         return nil
     },
@@ -50,7 +50,7 @@ var Commands = &cli.Command{
             return err
         }
         if _, err := accounts.CreateAndSaveWalletCli(cliCtx); err != nil {
-            log.Fatalf("Could not create a wallet: %v", err)
+            log.WithError(err).Fatal("Could not create a wallet")
         }
         return nil
     },
@@ -83,7 +83,7 @@ var Commands = &cli.Command{
     },
     Action: func(cliCtx *cli.Context) error {
         if err := remoteWalletEdit(cliCtx); err != nil {
-            log.Fatalf("Could not edit wallet configuration: %v", err)
+            log.WithError(err).Fatal("Could not edit wallet configuration")
        }
         return nil
     },
@@ -115,7 +115,7 @@ var Commands = &cli.Command{
             return err
         }
         if err := accounts.RecoverWalletCli(cliCtx); err != nil {
-            log.Fatalf("Could not recover wallet: %v", err)
+            log.WithError(err).Fatal("Could not recover wallet")
         }
         return nil
     },
@@ -44,7 +44,7 @@ var Commands = &cli.Command{
     gatewayPort := cliCtx.Int(flags.GRPCGatewayPort.Name)
     validatorWebAddr := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
     if err := rpc.CreateAuthToken(walletDirPath, validatorWebAddr); err != nil {
-        log.Fatalf("Could not create web auth token: %v", err)
+        log.WithError(err).Fatal("Could not create web auth token")
     }
     return nil
 },
@@ -109,7 +109,7 @@ func (s *Service) healthzHandler(w http.ResponseWriter, r *http.Request) {
     }

     if err := writeResponse(w, r, response); err != nil {
-        log.Errorf("Error writing response: %v", err)
+        log.WithError(err).Error("Error writing response")
     }
 }

@@ -139,7 +139,7 @@ func (s *Service) Start() {
     log.WithField("address", s.server.Addr).Debug("Starting prometheus service")
     err := s.server.ListenAndServe()
     if err != nil && err != http.ErrServerClosed {
-        log.Errorf("Could not listen to host:port :%s: %v", s.server.Addr, err)
+        log.WithError(err).Errorf("Could not listen to host:port :%s", s.server.Addr)
         s.failStatus = err
     }
 }
@@ -92,6 +92,11 @@
       ".*/.*_test\\.go": "Tests are OK to ignore this check for"
     }
   },
+  "logruswitherror": {
+    "exclude_files": {
+      "external/.*": "Third party code"
+    }
+  },
   "cryptorand": {
     "only_files": {
       "beacon-chain/.*": "",
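The nogo config above wires in the new analyzer and excludes vendored third-party code from the check. For orientation, a minimal sketch of how such a check can be built on the standard golang.org/x/tools/go/analysis framework; the matching logic is an illustrative assumption, not the actual #11143 implementation (a real analyzer would also verify the receiver is a logrus logger):

    package logruswitherror

    import (
        "go/ast"
        "go/types"

        "golang.org/x/tools/go/analysis"
        "golang.org/x/tools/go/analysis/passes/inspect"
        "golang.org/x/tools/go/ast/inspector"
    )

    // Analyzer flags logrus-style format calls whose final argument is an
    // error, suggesting log.WithError(err) instead. Sketch only.
    var Analyzer = &analysis.Analyzer{
        Name:     "logruswitherror",
        Doc:      "enforce log.WithError(err) over formatting errors into messages",
        Requires: []*analysis.Analyzer{inspect.Analyzer},
        Run:      run,
    }

    var formatFuncs = map[string]bool{
        "Debugf": true, "Infof": true, "Warnf": true,
        "Errorf": true, "Fatalf": true,
    }

    func run(pass *analysis.Pass) (interface{}, error) {
        insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
        errType := types.Universe.Lookup("error").Type()
        insp.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
            call := n.(*ast.CallExpr)
            sel, ok := call.Fun.(*ast.SelectorExpr)
            if !ok || !formatFuncs[sel.Sel.Name] || len(call.Args) < 2 {
                return
            }
            // Report when the last format argument is typed as error.
            last := call.Args[len(call.Args)-1]
            if t := pass.TypesInfo.TypeOf(last); t != nil && types.Identical(t, errType) {
                pass.Reportf(call.Pos(), "use log.WithError(err).%s(...) rather than passing the error to %s",
                    sel.Sel.Name[:len(sel.Sel.Name)-1], sel.Sel.Name)
            }
        })
        return nil, nil
    }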
@@ -141,7 +141,7 @@ func (h *HandlerT) StartCPUProfile(file string) error {
     }
     if err := pprof.StartCPUProfile(f); err != nil {
         if err := f.Close(); err != nil {
-            log.Errorf("Failed to close file: %v", err)
+            log.WithError(err).Error("Failed to close file")
         }
         return err
     }
@@ -191,7 +191,7 @@ func (h *HandlerT) StartGoTrace(file string) error {
     }
     if err := trace.Start(f); err != nil {
         if err := f.Close(); err != nil {
-            log.Errorf("Failed to close file: %v", err)
+            log.WithError(err).Error("Failed to close file")
         }
         return err
     }
@@ -270,7 +270,7 @@ func (*HandlerT) WriteMemProfile(file string) error {
 func (*HandlerT) Stacks() string {
     buf := new(bytes.Buffer)
     if err := pprof.Lookup("goroutine").WriteTo(buf, 2); err != nil {
-        log.Errorf("Failed to write pprof goroutine stacks: %v", err)
+        log.WithError(err).Error("Failed to write pprof goroutine stacks")
     }
     return buf.String()
 }
@@ -365,12 +365,12 @@ func startPProf(address string) {
 func Exit(ctx *cli.Context) {
     if traceFile := ctx.String(TraceFlag.Name); traceFile != "" {
         if err := Handler.StopGoTrace(); err != nil {
-            log.Errorf("Failed to stop go tracing: %v", err)
+            log.WithError(err).Error("Failed to stop go tracing")
         }
     }
     if cpuFile := ctx.String(CPUProfileFlag.Name); cpuFile != "" {
         if err := Handler.StopCPUProfile(); err != nil {
-            log.Errorf("Failed to stop CPU profiling: %v", err)
+            log.WithError(err).Error("Failed to stop CPU profiling")
         }
     }
 }
@@ -98,7 +98,7 @@ func meetsMinPlatformReqs(ctx context.Context) (bool, error) {
 func WarnIfPlatformNotSupported(ctx context.Context) {
     supported, err := meetsMinPlatformReqs(ctx)
     if err != nil {
-        log.Warnf("Failed to detect host platform: %v", err)
+        log.WithError(err).Warn("Failed to detect host platform")
         return
     }
     if !supported {
@@ -23,6 +23,7 @@ go_library(
         "//testing/benchmark:go_default_library",
         "//testing/util:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
     ],
 )

@@ -3,7 +3,6 @@ package main
 import (
     "context"
     "flag"
-    "log"
     "os"
     "path"

@@ -22,6 +21,7 @@ import (
     "github.com/prysmaticlabs/prysm/runtime/interop"
     "github.com/prysmaticlabs/prysm/testing/benchmark"
     "github.com/prysmaticlabs/prysm/testing/util"
+    log "github.com/sirupsen/logrus"
 )

 var (
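The two import hunks above swap the standard library log package for logrus aliased as log. Existing call sites such as log.Println keep compiling unchanged, since logrus exposes the same Println/Fatal-style entry points, while the tool gains WithError for structured error reporting.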
@@ -60,15 +60,15 @@ func main() {
     log.Println("Generating genesis state")
     // Generating this for the 2 following states.
     if err := generateGenesisBeaconState(); err != nil {
-        log.Fatalf("Could not generate genesis state: %v", err)
+        log.WithError(err).Fatal("Could not generate genesis state")
     }
     log.Println("Generating full block and state after 1 skipped epoch")
     if err := generateMarshalledFullStateAndBlock(); err != nil {
-        log.Fatalf("Could not generate full state and block: %v", err)
+        log.WithError(err).Fatal("Could not generate full state and block")
     }
     log.Println("Generating state after 2 fully attested epochs")
     if err := generate2FullEpochState(); err != nil {
-        log.Fatalf("Could not generate 2 full epoch state: %v", err)
+        log.WithError(err).Fatal("Could not generate 2 full epoch state")
     }
     // Removing the genesis state SSZ since its 10MB large and no longer needed.
     if err := os.Remove(path.Join(*outputDir, benchmark.GenesisFileName)); err != nil {
@@ -115,7 +115,7 @@ func main() {
     mux.HandleFunc("/p2p", handler.httpHandler)

     if err := http.ListenAndServe(fmt.Sprintf(":%d", *metricsPort), mux); err != nil {
-        log.Fatalf("Failed to start server %v", err)
+        log.WithError(err).Fatal("Failed to start server")
     }

     // Update metrics once per slot.
@@ -44,17 +44,17 @@ func main() {
     }

     if net.ParseIP(*ipAddr).To4() == nil {
-        log.Fatalf("Invalid ipv4 address given: %v\n", err)
+        log.WithField("address", *ipAddr).Fatal("Invalid ipv4 address given")
     }

     if *udpPort == 0 {
-        log.Fatalf("Invalid udp port given: %v\n", err)
+        log.WithField("port", *udpPort).Fatal("Invalid udp port given")
         return
     }

     db, err := enode.OpenDB("")
     if err != nil {
-        log.Fatalf("Could not open node's peer database: %v\n", err)
+        log.WithError(err).Fatal("Could not open node's peer database")
         return
     }
     defer db.Close()
@@ -87,7 +87,7 @@ func main() {
     // check if the database file is present.
     dbNameWithPath := filepath.Join(*datadir, *dbName)
     if _, err := os.Stat(dbNameWithPath); os.IsNotExist(err) {
-        log.Fatalf("could not locate database file : %s, %v", dbNameWithPath, err)
+        log.WithError(err).WithField("path", dbNameWithPath).Fatal("could not locate database file")
     }

     switch *command {
@@ -104,7 +104,7 @@ func main() {
     case "migration-check":
         destDbNameWithPath := filepath.Join(*destDatadir, *dbName)
         if _, err := os.Stat(destDbNameWithPath); os.IsNotExist(err) {
-            log.Fatalf("could not locate destination database file : %s, %v", destDbNameWithPath, err)
+            log.WithError(err).WithField("path", destDbNameWithPath).Fatal("could not locate database file")
         }
         switch *migrationName {
         case "validator-entries":
@@ -133,14 +133,14 @@ func printBucketContents(dbNameWithPath string, rowLimit uint64, bucketName stri
 	dbDirectory := filepath.Dir(dbNameWithPath)
 	db, openErr := kv.NewKVStore(context.Background(), dbDirectory, &kv.Config{})
 	if openErr != nil {
-		log.Fatalf("could not open db, %v", openErr)
+		log.WithError(openErr).Fatal("could not open db")
 	}

 	// don't forget to close it when ejecting out of this function.
 	defer func() {
 		closeErr := db.Close()
 		if closeErr != nil {
-			log.Fatalf("could not close db, %v", closeErr)
+			log.WithError(closeErr).Fatal("could not close db")
 		}
 	}()

@@ -166,14 +166,14 @@ func readBucketStat(dbNameWithPath string, statsC chan<- *bucketStat) {
 	// open the raw database file. If the file is busy, then exit.
 	db, openErr := bolt.Open(dbNameWithPath, 0600, &bolt.Options{Timeout: 1 * time.Second})
 	if openErr != nil {
-		log.Fatalf("could not open db to show bucket stats, %v", openErr)
+		log.WithError(openErr).Fatal("could not open db to show bucket stats")
 	}

 	// make sure we close the database before ejecting out of this function.
 	defer func() {
 		closeErr := db.Close()
 		if closeErr != nil {
-			log.Fatalf("could not close db after showing bucket stats, %v", closeErr)
+			log.WithError(closeErr).Fatal("could not close db after showing bucket stats")
 		}
 	}()

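One subtlety behind the explicit close-and-check defers in these helpers, sketched below with an invented error: logrus Fatal logs the entry, runs any registered exit handlers, and then calls os.Exit(1), so deferred cleanup never runs once Fatal fires.

	package main

	import (
		"errors"

		log "github.com/sirupsen/logrus"
	)

	func main() {
		defer log.Info("closing db") // never printed: Fatal below exits first

		err := errors.New("timeout") // stand-in for a bolt.Open failure
		// Fatal behaves like log-then-os.Exit(1); defers are skipped,
		// which is why cleanup must happen before reaching this point.
		log.WithError(err).Fatal("could not open db to show bucket stats")
	}
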
@@ -185,7 +185,7 @@ func readBucketStat(dbNameWithPath string, statsC chan<- *bucketStat) {
 			return nil
 		})
 	}); viewErr1 != nil {
-		log.Fatalf("could not read buckets from db while getting list of buckets: %v", viewErr1)
+		log.WithError(viewErr1).Fatal("could not read buckets from db while getting list of buckets")
 	}

 	// for every bucket, calculate the stats and send it for printing.

@@ -381,7 +381,7 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) {
 	sourceDbDirectory := filepath.Dir(dbNameWithPath)
 	sourceDB, openErr := kv.NewKVStore(context.Background(), sourceDbDirectory, &kv.Config{})
 	if openErr != nil {
-		log.Fatalf("could not open sourceDB: %v", openErr)
+		log.WithError(openErr).Fatal("could not open sourceDB")
 	}

 	destinationDbDirectory := filepath.Dir(destDbNameWithPath)

@@ -391,7 +391,7 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) {
 		// if you want to avoid this then we should pass the metric name when opening the DB which touches
 		// too many places.
 		if openErr.Error() != "duplicate metrics collector registration attempted" {
-			log.Fatalf("could not open sourceDB, %v", openErr)
+			log.WithError(openErr).Fatal("could not open destDB")
 		}
 	}

@@ -399,13 +399,13 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) {
 	defer func() {
 		closeErr := sourceDB.Close()
 		if closeErr != nil {
-			log.Fatalf("could not close sourceDB: %v", closeErr)
+			log.WithError(closeErr).Fatal("could not close sourceDB")
 		}
 	}()
 	defer func() {
 		closeErr := destDB.Close()
 		if closeErr != nil {
-			log.Fatalf("could not close sourceDB: %v", closeErr)
+			log.WithError(closeErr).Fatal("could not close destDB")
 		}
 	}()

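A side note on the two stacked defers above: deferred calls run last-in, first-out, so destDB is closed before sourceDB. A self-contained illustration:

	package main

	import "fmt"

	func main() {
		defer fmt.Println("sourceDB closed") // registered first, runs last
		defer fmt.Println("destDB closed")   // registered second, runs first
		fmt.Println("migration check finished")
	}

	// Output:
	// migration check finished
	// destDB closed
	// sourceDB closed
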
@@ -414,11 +414,11 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) {
 	for rowCount, key := range sourceStateKeys[910:] {
 		sourceState, stateErr := sourceDB.State(ctx, bytesutil.ToBytes32(key))
 		if stateErr != nil {
-			log.Fatalf("could not get from source db, the state for key : %s, %v", hexutils.BytesToHex(key), stateErr)
+			log.WithError(stateErr).WithField("key", hexutils.BytesToHex(key)).Fatal("could not get state from source db")
 		}
 		destinationState, stateErr := destDB.State(ctx, bytesutil.ToBytes32(key))
 		if stateErr != nil {
-			log.Fatalf("could not get destination db, the state for key : %s, %v", hexutils.BytesToHex(key), stateErr)
+			log.WithError(stateErr).WithField("key", hexutils.BytesToHex(key)).Fatal("could not get state from destination db")
 		}
 		if destinationState == nil {
 			log.Infof("could not find state in migrated DB: index = %d, slot = %d, epoch = %d, numOfValidators = %d, key = %s",

@@ -432,13 +432,13 @@ func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) {
 		}
 		sourceStateHash, err := sourceState.HashTreeRoot(ctx)
 		if err != nil {
-			log.Fatalf("could not find hash of source state: %v", err)
+			log.WithError(err).Fatal("could not find hash of source state")
 		}
-		destinationSatteHash, err := destinationState.HashTreeRoot(ctx)
+		destinationStateHash, err := destinationState.HashTreeRoot(ctx)
 		if err != nil {
-			log.Fatalf("could not find hash of destination state: %v", err)
+			log.WithError(err).Fatal("could not find hash of destination state")
 		}
-		if !bytes.Equal(sourceStateHash[:], destinationSatteHash[:]) {
+		if !bytes.Equal(sourceStateHash[:], destinationStateHash[:]) {
 			log.Fatalf("state mismatch : key = %s", hexutils.BytesToHex(key))
 		}
 	}

@@ -449,14 +449,14 @@ func keysOfBucket(dbNameWithPath string, bucketName []byte, rowLimit uint64) ([]
 	// open the raw database file. If the file is busy, then exit.
 	db, openErr := bolt.Open(dbNameWithPath, 0600, &bolt.Options{Timeout: 1 * time.Second})
 	if openErr != nil {
-		log.Fatalf("could not open db while getting keys of a bucket, %v", openErr)
+		log.WithError(openErr).Fatal("could not open db while getting keys of a bucket")
 	}

 	// make sure we close the database before ejecting out of this function.
 	defer func() {
 		closeErr := db.Close()
 		if closeErr != nil {
-			log.Fatalf("could not close db while getting keys of a bucket, %v", closeErr)
+			log.WithError(closeErr).Fatal("could not close db while getting keys of a bucket")
 		}
 	}()

@@ -481,7 +481,7 @@ func keysOfBucket(dbNameWithPath string, bucketName []byte, rowLimit uint64) ([]
 		}
 		return nil
 	}); viewErr != nil {
-		log.Fatalf("could not read keys of bucket from db: %v", viewErr)
+		log.WithError(viewErr).Fatal("could not read keys of bucket from db")
 	}
 	return keys, sizes
 }

@@ -49,7 +49,7 @@ func main() {
 	for _, endpt := range endpts {
 		conn, err := grpc.Dial(endpt, grpc.WithInsecure())
 		if err != nil {
-			log.Fatalf("fail to dial: %v", err)
+			log.WithError(err).Fatal("fail to dial")
 		}
 		clients[endpt] = pb.NewBeaconChainClient(conn)
 	}

@@ -15,6 +15,7 @@ go_library(
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/interop:go_default_library",
         "@com_github_ghodss_yaml//:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
     ],
 )

@@ -6,7 +6,6 @@ import (
 	"encoding/json"
 	"flag"
 	"io"
-	"log"
 	"os"
 	"strings"

@@ -15,6 +14,7 @@ import (
 	"github.com/prysmaticlabs/prysm/io/file"
 	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/runtime/interop"
+	log "github.com/sirupsen/logrus"
 )

 // DepositDataJSON representing a json object of hex string and uint64 values for

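Aliasing the logrus import as log is what keeps the rest of this file compiling unchanged: logrus exports Print, Printf and Println (emitted at Info level), so only the error paths need rewriting to WithError. A minimal sketch; the file name in the message is illustrative:

	package main

	import (
		log "github.com/sirupsen/logrus" // drop-in alias for the stdlib "log" name
	)

	func main() {
		// A stdlib-style call site that works unchanged after the import swap.
		log.Printf("Generating genesis state from input JSON deposit data %s", "deposits.json")
	}
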
@@ -70,23 +70,23 @@ func main() {
 		inputFile := *depositJSONFile
 		expanded, err := file.ExpandPath(inputFile)
 		if err != nil {
-			log.Printf("Could not expand file path %s: %v", inputFile, err)
+			log.WithError(err).Printf("Could not expand file path %s", inputFile)
 			return
 		}
 		inputJSON, err := os.Open(expanded) // #nosec G304
 		if err != nil {
-			log.Printf("Could not open JSON file for reading: %v", err)
+			log.WithError(err).Print("Could not open JSON file for reading")
 			return
 		}
 		defer func() {
 			if err := inputJSON.Close(); err != nil {
-				log.Printf("Could not close file %s: %v", inputFile, err)
+				log.WithError(err).Printf("Could not close file %s", inputFile)
 			}
 		}()
 		log.Printf("Generating genesis state from input JSON deposit data %s", inputFile)
 		genesisState, err = genesisStateFromJSONValidators(inputJSON, *genesisTime)
 		if err != nil {
-			log.Printf("Could not generate genesis beacon state: %v", err)
+			log.WithError(err).Print("Could not generate genesis beacon state")
 			return
 		}
 	} else {

@@ -97,7 +97,7 @@ func main() {
 		// If no JSON input is specified, we create the state deterministically from interop keys.
 		genesisState, _, err = interop.GenerateGenesisState(context.Background(), *genesisTime, uint64(*numValidators))
 		if err != nil {
-			log.Printf("Could not generate genesis beacon state: %v", err)
+			log.WithError(err).Print("Could not generate genesis beacon state")
 			return
 		}
 	}

@@ -105,11 +105,11 @@ func main() {
 	if *sszOutputFile != "" {
 		encodedState, err := genesisState.MarshalSSZ()
 		if err != nil {
-			log.Printf("Could not ssz marshal the genesis beacon state: %v", err)
+			log.WithError(err).Print("Could not ssz marshal the genesis beacon state")
 			return
 		}
 		if err := file.WriteFile(*sszOutputFile, encodedState); err != nil {
-			log.Printf("Could not write encoded genesis beacon state to file: %v", err)
+			log.WithError(err).Print("Could not write encoded genesis beacon state to file")
 			return
 		}
 		log.Printf("Done writing to %s", *sszOutputFile)

@@ -117,11 +117,11 @@ func main() {
 	if *yamlOutputFile != "" {
 		encodedState, err := yaml.Marshal(genesisState)
 		if err != nil {
-			log.Printf("Could not yaml marshal the genesis beacon state: %v", err)
+			log.WithError(err).Print("Could not yaml marshal the genesis beacon state")
 			return
 		}
 		if err := file.WriteFile(*yamlOutputFile, encodedState); err != nil {
-			log.Printf("Could not write encoded genesis beacon state to file: %v", err)
+			log.WithError(err).Print("Could not write encoded genesis beacon state to file")
 			return
 		}
 		log.Printf("Done writing to %s", *yamlOutputFile)

@@ -129,11 +129,11 @@ func main() {
 	if *jsonOutputFile != "" {
 		encodedState, err := json.Marshal(genesisState)
 		if err != nil {
-			log.Printf("Could not json marshal the genesis beacon state: %v", err)
+			log.WithError(err).Print("Could not json marshal the genesis beacon state")
 			return
 		}
 		if err := file.WriteFile(*jsonOutputFile, encodedState); err != nil {
-			log.Printf("Could not write encoded genesis beacon state to file: %v", err)
+			log.WithError(err).Print("Could not write encoded genesis beacon state to file")
 			return
 		}
 		log.Printf("Done writing to %s", *jsonOutputFile)

@@ -6,7 +6,10 @@ go_library(
     srcs = ["main.go"],
     importpath = "github.com/prysmaticlabs/prysm/tools/gocovmerge",
     visibility = ["//visibility:private"],
-    deps = ["@org_golang_x_tools//cover:go_default_library"],
+    deps = [
+        "@com_github_sirupsen_logrus//:go_default_library",
+        "@org_golang_x_tools//cover:go_default_library",
+    ],
 )

 go_binary(

@@ -8,10 +8,10 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"log"
 	"os"
 	"sort"

+	log "github.com/sirupsen/logrus"
 	"golang.org/x/tools/cover"
 )

@@ -127,7 +127,7 @@ func main() {
 	for _, file := range flag.Args() {
 		profiles, err := cover.ParseProfiles(file)
 		if err != nil {
-			log.Fatalf("failed to parse profiles: %v", err)
+			log.WithError(err).Fatal("failed to parse profiles")
 		}
 		for _, p := range profiles {

@@ -8,6 +8,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//tools/unencrypted-keys-gen/keygen:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
         "@in_gopkg_yaml_v2//:go_default_library",
     ],
 )

@@ -7,10 +7,10 @@ package main
 import (
 	"encoding/hex"
 	"fmt"
-	"log"
 	"os"

 	"github.com/prysmaticlabs/prysm/tools/unencrypted-keys-gen/keygen"
+	log "github.com/sirupsen/logrus"
 	"gopkg.in/yaml.v2"
 )

@@ -32,18 +32,18 @@ func main() {

 	in, err := os.ReadFile(inFile) // #nosec G304
 	if err != nil {
-		log.Fatalf("Failed to read file %s: %v", inFile, err)
+		log.WithError(err).Fatalf("Failed to read file %s", inFile)
 	}
 	data := make(KeyPairs, 0)
 	if err := yaml.UnmarshalStrict(in, &data); err != nil {
-		log.Fatalf("Failed to unmarshal yaml: %v", err)
+		log.WithError(err).Fatal("Failed to unmarshal yaml")
 	}

 	out := &keygen.UnencryptedKeysContainer{}
 	for _, key := range data {
 		pk, err := hex.DecodeString(key.Priv[2:])
 		if err != nil {
-			log.Fatalf("Failed to decode hex string %s: %v", key.Priv, err)
+			log.WithError(err).Fatalf("Failed to decode hex string %s", key.Priv)
 		}

 		out.Keys = append(out.Keys, &keygen.UnencryptedKeys{

@@ -54,7 +54,7 @@ func main() {

 	outFile, err := os.Create(os.Args[2])
 	if err != nil {
-		log.Fatalf("Failed to create file at %s: %v", os.Args[2], err)
+		log.WithError(err).Fatalf("Failed to create file at %s", os.Args[2])
 	}
 	cleanup := func() {
 		if err := outFile.Close(); err != nil {

@@ -65,7 +65,7 @@ func main() {
 	if err := keygen.SaveUnencryptedKeysToFile(outFile, out); err != nil {
 		// log.Fatalf will prevent defer from being called
 		cleanup()
-		log.Fatalf("Failed to save %v", err)
+		log.WithError(err).Fatal("Failed to save")
 	}
 	log.Printf("Wrote %s\n", os.Args[2])
 }

@@ -33,7 +33,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot types.Slot

 	duty, err := v.duty(pubKey)
 	if err != nil {
-		log.Errorf("Could not fetch validator assignment: %v", err)
+		log.WithError(err).Error("Could not fetch validator assignment")
 		if v.emitAccountMetrics {
 			ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
 		}

@@ -52,7 +52,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot types.Slot

 	slotSig, err := v.signSlotWithSelectionProof(ctx, pubKey, slot)
 	if err != nil {
-		log.Errorf("Could not sign slot: %v", err)
+		log.WithError(err).Error("Could not sign slot")
 		if v.emitAccountMetrics {
 			ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
 		}

@@ -86,7 +86,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot types.Slot

 	sig, err := v.aggregateAndProofSig(ctx, pubKey, res.AggregateAndProof, slot)
 	if err != nil {
-		log.Errorf("Could not sign aggregate and proof: %v", err)
+		log.WithError(err).Error("Could not sign aggregate and proof")
 		return
 	}
 	_, err = v.validatorClient.SubmitSignedAggregateSelectionProof(ctx, &ethpb.SignedAggregateSubmitRequest{

@@ -96,7 +96,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot types.Slot
 		},
 	})
 	if err != nil {
-		log.Errorf("Could not submit signed aggregate and proof to beacon node: %v", err)
+		log.WithError(err).Error("Could not submit signed aggregate and proof to beacon node")
 		if v.emitAccountMetrics {
 			ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
 		}

@@ -104,7 +104,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot types.Slot
 	}

 	if err := v.addIndicesToLog(duty); err != nil {
-		log.Errorf("Could not add aggregator indices to logs: %v", err)
+		log.WithError(err).Error("Could not add aggregator indices to logs")
 		if v.emitAccountMetrics {
 			ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()
 		}

@@ -51,15 +51,15 @@ func run(ctx context.Context, v iface.Validator) {
 	accountsChangedChan := make(chan [][fieldparams.BLSPubkeyLength]byte, 1)
 	km, err := v.Keymanager()
 	if err != nil {
-		log.Fatalf("Could not get keymanager: %v", err)
+		log.WithError(err).Fatal("Could not get keymanager")
 	}
 	sub := km.SubscribeAccountChanges(accountsChangedChan)
 	// Set properties on the beacon node like the fee recipient for validators that are being used & active.
 	if err := v.PushProposerSettings(ctx, km); err != nil {
 		if errors.Is(err, ErrBuilderValidatorRegistration) {
-			log.Warnf("Push proposer settings error, %v", err)
+			log.WithError(err).Warn("Push proposer settings error")
 		} else {
-			log.Fatalf("Failed to update proposer settings: %v", err) // allow fatal. skipcq
+			log.WithError(err).Fatal("Failed to update proposer settings") // allow fatal. skipcq
 		}
 	}
 	for {

@@ -89,7 +89,7 @@ func run(ctx context.Context, v iface.Validator) {
 			log.Info("No active keys found. Waiting for activation...")
 			err := v.WaitForActivation(ctx, accountsChangedChan)
 			if err != nil {
-				log.Fatalf("Could not wait for validator activation: %v", err)
+				log.WithError(err).Fatal("Could not wait for validator activation")
 			}
 		}
 	case slot := <-v.NextSlot():

@@ -122,7 +122,7 @@ func run(ctx context.Context, v iface.Validator) {
 			go func() {
 				//deadline set for next epoch rounded up
 				if err := v.PushProposerSettings(ctx, km); err != nil {
-					log.Warnf("Failed to update proposer settings: %v", err)
+					log.WithError(err).Warn("Failed to update proposer settings")
 				}
 			}()
 		}

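The PushProposerSettings branch above separates a recoverable registration failure from everything else using errors.Is, which matches a sentinel anywhere in a wrapped error chain. A hedged sketch; only the sentinel's name comes from the diff, its definition and the wrapping are assumptions:

	package main

	import (
		"errors"
		"fmt"

		log "github.com/sirupsen/logrus"
	)

	// Assumed definition; only the identifier appears in the diff above.
	var ErrBuilderValidatorRegistration = errors.New("builder validator registration failed")

	func pushProposerSettings() error {
		// %w keeps the sentinel discoverable through the wrap.
		return fmt.Errorf("relay unavailable: %w", ErrBuilderValidatorRegistration)
	}

	func main() {
		if err := pushProposerSettings(); err != nil {
			if errors.Is(err, ErrBuilderValidatorRegistration) {
				log.WithError(err).Warn("Push proposer settings error") // recoverable
			} else {
				log.WithError(err).Fatal("Failed to update proposer settings")
			}
		}
	}
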
@@ -173,52 +173,52 @@ func waitForActivation(ctx context.Context, v iface.Validator) (types.Slot, erro
 		}
 		err := v.WaitForChainStart(ctx)
 		if isConnectionError(err) {
-			log.Warnf("Could not determine if beacon chain started: %v", err)
+			log.WithError(err).Warn("Could not determine if beacon chain started")
 			continue
 		}
 		if err != nil {
-			log.Fatalf("Could not determine if beacon chain started: %v", err)
+			log.WithError(err).Fatal("Could not determine if beacon chain started")
 		}

 		err = v.WaitForKeymanagerInitialization(ctx)
 		if err != nil {
-			// log.Fatalf will prevent defer from being called
+			// log.Fatal will prevent defer from being called
 			v.Done()
-			log.Fatalf("Wallet is not ready: %v", err)
+			log.WithError(err).Fatal("Wallet is not ready")
 		}

 		err = v.WaitForSync(ctx)
 		if isConnectionError(err) {
-			log.Warnf("Could not determine if beacon chain started: %v", err)
+			log.WithError(err).Warn("Could not determine if beacon node synced")
 			continue
 		}
 		if err != nil {
-			log.Fatalf("Could not determine if beacon node synced: %v", err)
+			log.WithError(err).Fatal("Could not determine if beacon node synced")
 		}
 		err = v.WaitForActivation(ctx, nil /* accountsChangedChan */)
 		if isConnectionError(err) {
-			log.Warnf("Could not wait for validator activation: %v", err)
+			log.WithError(err).Warn("Could not wait for validator activation")
 			continue
 		}
 		if err != nil {
-			log.Fatalf("Could not wait for validator activation: %v", err)
+			log.WithError(err).Fatal("Could not wait for validator activation")
 		}

 		headSlot, err = v.CanonicalHeadSlot(ctx)
 		if isConnectionError(err) {
-			log.Warnf("Could not get current canonical head slot: %v", err)
+			log.WithError(err).Warn("Could not get current canonical head slot")
 			continue
 		}
 		if err != nil {
-			log.Fatalf("Could not get current canonical head slot: %v", err)
+			log.WithError(err).Fatal("Could not get current canonical head slot")
 		}
 		err = v.CheckDoppelGanger(ctx)
 		if isConnectionError(err) {
-			log.Warnf("Could not wait for checking doppelganger: %v", err)
+			log.WithError(err).Warn("Could not wait for checking doppelganger")
 			continue
 		}
 		if err != nil {
-			log.Fatalf("Could not succeed with doppelganger check: %v", err)
+			log.WithError(err).Fatal("Could not succeed with doppelganger check")
 		}
 		break
 	}

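Every step in the loop above applies the same triage: connection-level failures are logged at Warn and retried via continue, anything else is treated as unrecoverable and exits through Fatal. A compressed, self-contained sketch of that shape; isConnectionError and the step function are stand-ins, not the validator's real helpers:

	package main

	import (
		"context"
		"errors"
		"syscall"

		log "github.com/sirupsen/logrus"
	)

	// Stand-in: treat a refused connection as transient.
	func isConnectionError(err error) bool {
		return errors.Is(err, syscall.ECONNREFUSED)
	}

	func waitForChainStart(ctx context.Context) error {
		return nil // pretend the beacon node answered on the first try
	}

	func main() {
		ctx := context.Background()
		for {
			err := waitForChainStart(ctx)
			if isConnectionError(err) {
				log.WithError(err).Warn("Could not determine if beacon chain started")
				continue // transient: retry the step
			}
			if err != nil {
				log.WithError(err).Fatal("Could not determine if beacon chain started")
			}
			break // success: move on to the next startup step
		}
	}
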
@@ -166,7 +166,7 @@ func (v *ValidatorService) Start() {

 	sPubKeys, err := v.db.EIPImportBlacklistedPublicKeys(v.ctx)
 	if err != nil {
-		log.Errorf("Could not read slashable public keys from disk: %v", err)
+		log.WithError(err).Error("Could not read slashable public keys from disk")
 		return
 	}
 	slashablePublicKeys := make(map[[fieldparams.BLSPubkeyLength]byte]bool)

@@ -176,7 +176,7 @@ func (v *ValidatorService) Start() {

 	graffitiOrderedIndex, err := v.db.GraffitiOrderedIndex(v.ctx, v.graffitiStruct.Hash)
 	if err != nil {
-		log.Errorf("Could not read graffiti ordered index from disk: %v", err)
+		log.WithError(err).Error("Could not read graffiti ordered index from disk")
 		return
 	}

@@ -264,7 +264,7 @@ func ConstructDialOptions(
 	if withCert != "" {
 		creds, err := credentials.NewClientTLSFromFile(withCert, "")
 		if err != nil {
-			log.Errorf("Could not get valid credentials: %v", err)
+			log.WithError(err).Error("Could not get valid credentials")
 			return nil
 		}
 		transportSecurity = grpc.WithTransportCredentials(creds)

@@ -99,7 +99,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t

 	duty, err := v.duty(pubKey)
 	if err != nil {
-		log.Errorf("Could not fetch validator assignment: %v", err)
+		log.WithError(err).Error("Could not fetch validator assignment")
 		return
 	}

@@ -108,7 +108,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t
 		Slot: slot,
 	})
 	if err != nil {
-		log.Errorf("Could not get sync subcommittee index: %v", err)
+		log.WithError(err).Error("Could not get sync subcommittee index")
 		return
 	}
 	if len(indexRes.Indices) == 0 {

@@ -118,7 +118,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t

 	selectionProofs, err := v.selectionProofs(ctx, slot, pubKey, indexRes)
 	if err != nil {
-		log.Errorf("Could not get selection proofs: %v", err)
+		log.WithError(err).Error("Could not get selection proofs")
 		return
 	}

@@ -127,7 +127,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t
 	for i, comIdx := range indexRes.Indices {
 		isAggregator, err := altair.IsSyncCommitteeAggregator(selectionProofs[i])
 		if err != nil {
-			log.Errorf("Could check in aggregator: %v", err)
+			log.WithError(err).Error("Could not check if validator is an aggregator")
 			return
 		}
 		if !isAggregator {

@@ -141,7 +141,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t
 			SubnetId: subnet,
 		})
 		if err != nil {
-			log.Errorf("Could not get sync committee contribution: %v", err)
+			log.WithError(err).Error("Could not get sync committee contribution")
 			return
 		}
 		if contribution.AggregationBits.Count() == 0 {

@@ -160,7 +160,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t
 		}
 		sig, err := v.signContributionAndProof(ctx, pubKey, contributionAndProof, slot)
 		if err != nil {
-			log.Errorf("Could not sign contribution and proof: %v", err)
+			log.WithError(err).Error("Could not sign contribution and proof")
 			return
 		}

@@ -168,7 +168,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot t
 			Message: contributionAndProof,
 			Signature: sig,
 		}); err != nil {
-			log.Errorf("Could not submit signed contribution and proof: %v", err)
+			log.WithError(err).Error("Could not submit signed contribution and proof")
 			return
 		}

@@ -259,7 +259,8 @@ func TestSubmitSignedContributionAndProof_BadDomain(t *testing.T) {
 	}, errors.New("bad domain response"))

 	validator.SubmitSignedContributionAndProof(context.Background(), 1, pubKey)
-	require.LogsContain(t, hook, "Could not get selection proofs: bad domain response")
+	require.LogsContain(t, hook, "Could not get selection proofs")
+	require.LogsContain(t, hook, "bad domain response")
 }

 func TestSubmitSignedContributionAndProof_CouldNotGetContribution(t *testing.T) {

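The assertion is split because WithError moves the error text out of the message: Message now holds only the constant string while the error sits under the logrus "error" data key, so the old combined substring never occurs in one place. A sketch of what a log hook records, written against the logrus test helpers rather than Prysm's require package:

	package example

	import (
		"errors"
		"strings"
		"testing"

		"github.com/sirupsen/logrus"
		"github.com/sirupsen/logrus/hooks/test"
	)

	func TestWithErrorKeepsMessageAndErrorSeparate(t *testing.T) {
		logger, hook := test.NewNullLogger()
		logger.WithError(errors.New("bad domain response")).Error("Could not get selection proofs")

		entry := hook.LastEntry()
		if entry.Message != "Could not get selection proofs" {
			t.Fatalf("unexpected message: %q", entry.Message)
		}
		errField, ok := entry.Data[logrus.ErrorKey].(error)
		if !ok || !strings.Contains(errField.Error(), "bad domain response") {
			t.Fatalf("error field missing or wrong: %v", entry.Data)
		}
	}
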
@@ -218,6 +218,6 @@ func unmarshalSignatureResponse(responseBody io.ReadCloser) (bls.Signature, erro
 // closeBody a utility method to wrap an error for closing
 func closeBody(body io.Closer) {
 	if err := body.Close(); err != nil {
-		log.Errorf("could not close response body: %v", err)
+		log.WithError(err).Error("could not close response body")
 	}
 }

@@ -149,7 +149,7 @@ func UnmarshalOptionsFile(r io.ReadCloser) (*KeymanagerOpts, error) {
 	}
 	defer func() {
 		if err := r.Close(); err != nil {
-			log.Errorf("Could not close keymanager config file: %v", err)
+			log.WithError(err).Error("Could not close keymanager config file")
 		}
 	}()
 	opts := &KeymanagerOpts{

@@ -6,6 +6,7 @@ import (
 	"strings"

 	"github.com/golang-jwt/jwt/v4"
+	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"

@@ -24,7 +25,10 @@ func (s *Server) JWTInterceptor() grpc.UnaryServerInterceptor {
 			return nil, err
 		}
 		h, err := handler(ctx, req)
-		log.Debugf("Request - Method: %s, Error: %v\n", info.FullMethod, err)
+		log.WithError(err).WithFields(logrus.Fields{
+			"FullMethod": info.FullMethod,
+			"Server":     info.Server,
+		}).Debug("Request handled")
 		return h, err
 	}
 }

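WithFields batches several key/value pairs into a single structured entry, replacing the old positional Debugf string; combined with WithError, one line carries the error, FullMethod and Server as separate fields. A minimal sketch with invented values:

	package main

	import (
		"errors"

		"github.com/sirupsen/logrus"
	)

	func main() {
		logrus.SetLevel(logrus.DebugLevel) // Debug entries are suppressed at the default Info level

		err := errors.New("rpc error: code = Unauthenticated") // stand-in handler error
		logrus.WithError(err).WithFields(logrus.Fields{
			"FullMethod": "/ethereum.validator.accounts.v2.Auth/Login", // invented method name
			"Server":     "validator",
		}).Debug("Request handled")
	}
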
@@ -132,7 +132,7 @@ func (s *Server) Start() {
 	address := fmt.Sprintf("%s:%s", s.host, s.port)
 	lis, err := net.Listen("tcp", address)
 	if err != nil {
-		log.Errorf("Could not listen to port in Start() %s: %v", address, err)
+		log.WithError(err).Errorf("Could not listen to port in Start() %s", address)
 	}
 	s.listener = lis

@@ -182,7 +182,7 @@ func (s *Server) Start() {
 	go func() {
 		if s.listener != nil {
 			if err := s.grpcServer.Serve(s.listener); err != nil {
-				log.Errorf("Could not serve: %v", err)
+				log.WithError(err).Error("Could not serve")
 			}
 		}
 	}()

@@ -190,7 +190,7 @@ func (s *Server) Start() {
 	if s.walletDir != "" {
 		token, err := s.initializeAuthToken(s.walletDir)
 		if err != nil {
-			log.Errorf("Could not initialize web auth token: %v", err)
+			log.WithError(err).Error("Could not initialize web auth token")
 			return
 		}
 		validatorWebAddr := fmt.Sprintf("%s:%d", s.validatorGatewayHost, s.validatorGatewayPort)

@@ -173,7 +173,7 @@ func (s *Server) DeleteKeystores(

 	exportedHistory, err := s.slashingProtectionHistoryForDeletedKeys(ctx, req.Pubkeys, statuses)
 	if err != nil {
-		log.Warnf("Could not get slashing protection history for deleted keys: %v", err)
+		log.WithError(err).Warn("Could not get slashing protection history for deleted keys")
 		statuses := groupExportErrors(req, "Non duplicate keys that were existing were deleted, but could not export slashing protection history.")
 		return &ethpbservice.DeleteKeystoresResponse{Data: statuses}, nil
 	}