Enforce error handling and checking type assertions (#5403)

* Enforce error handling and checking type assertions
* Reference issue #5404 in the TODO message
* doc description
* Merge branch 'master' into errcheck
* fix tests and address @nisdas feedback
* gaz
* fix docker image
Preston Van Loon
2020-04-12 21:11:09 -07:00 (committed by GitHub)
parent a85bf9305d
commit d5ddd012bc
88 changed files with 1088 additions and 169 deletions
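Note: the diff below applies two recurring patterns across the codebase: type assertions are switched to the comma-ok form, and previously discarded return values are checked, wrapped, or logged. A minimal sketch of both patterns, for orientation only; the types and functions here are hypothetical stand-ins, not taken from the diff:

// Illustrative sketch (not from this diff) of the two enforced patterns.
package example

import (
	"errors"
	"fmt"
)

type BlockProcessedData struct{ Slot uint64 }

// Before: data := event.(*BlockProcessedData) panics on an unexpected type.
// After: the comma-ok form turns a bad type into a handled error.
func processEvent(event interface{}) error {
	data, ok := event.(*BlockProcessedData)
	if !ok {
		return errors.New("event data is not type *BlockProcessedData")
	}
	fmt.Println("processed slot", data.Slot)
	return nil
}

// Before: cache.Put(key, value) silently dropped the returned error.
// After: every error is checked and wrapped or logged at the call site.
func store(cache interface{ Put(key string, value []byte) error }, key string, value []byte) error {
	if err := cache.Put(key, value); err != nil {
		return fmt.Errorf("could not put %q into cache: %w", key, err)
	}
	return nil
}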

View File

@@ -105,6 +105,7 @@ nogo(
         "@org_golang_x_tools//go/analysis/passes/asmdecl:go_tool_library",
         "//tools/analyzers/maligned:go_tool_library",
         "//tools/analyzers/roughtime:go_tool_library",
+        "//tools/analyzers/errcheck:go_tool_library",
    ],
 )
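The new analyzer is registered with Bazel's nogo alongside the existing maligned and roughtime checks. The analyzer source itself is not part of this excerpt; a minimal sketch of what a nogo-compatible checker along these lines could look like, with the package name and reporting logic assumed for illustration:

// Sketch of a go/analysis-based checker of the kind nogo loads as a
// go_tool_library. It only flags single-result type assertions.
package errcheck

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
)

var Analyzer = &analysis.Analyzer{
	Name: "errcheck",
	Doc:  "reports type assertions that do not use the comma-ok form",
	Run:  run,
}

func run(pass *analysis.Pass) (interface{}, error) {
	for _, file := range pass.Files {
		ast.Inspect(file, func(n ast.Node) bool {
			assign, ok := n.(*ast.AssignStmt)
			if !ok || len(assign.Lhs) != 1 || len(assign.Rhs) != 1 {
				return true
			}
			// x := y.(T) has a single LHS; x, ok := y.(T) does not trigger this.
			if ta, ok := assign.Rhs[0].(*ast.TypeAssertExpr); ok && ta.Type != nil {
				pass.Reportf(ta.Pos(), "type assertion should use the comma-ok form")
			}
			return true
		})
	}
	return nil, nil
}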

View File

@@ -147,7 +147,11 @@ func (s *Service) run(ctx context.Context) {
 		select {
 		case event := <-stateChannel:
 			if event.Type == statefeed.BlockProcessed {
-				data := event.Data.(*statefeed.BlockProcessedData)
+				data, ok := event.Data.(*statefeed.BlockProcessedData)
+				if !ok {
+					log.Error("Event feed data is not type *statefeed.BlockProcessedData")
+					continue
+				}
 				log.WithField("headRoot", fmt.Sprintf("%#x", data.BlockRoot)).Debug("Received block processed event")
 				headState, err := s.headFetcher.HeadState(ctx)
 				if err != nil {

View File

@@ -150,6 +150,8 @@ func TestStore_SaveCheckpointState(t *testing.T) {
 		JustificationBits:   []byte{0},
 		Slashings:           make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
 		FinalizedCheckpoint: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)},
+		Validators:          []*ethpb.Validator{{PublicKey: bytesutil.PadTo([]byte("foo"), 48)}},
+		Balances:            []uint64{0},
 	})
 	r := [32]byte{'g'}
 	if err := service.beaconDB.SaveState(ctx, s, r); err != nil {

View File

@@ -136,7 +136,9 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock)
 		// Prune proto array fork choice nodes, all nodes before finalized check point will
 		// be pruned.
-		s.forkChoiceStore.Prune(ctx, fRoot)
+		if err := s.forkChoiceStore.Prune(ctx, fRoot); err != nil {
+			return nil, errors.Wrap(err, "could not prune proto array fork choice nodes")
+		}
 		s.prevFinalizedCheckpt = s.finalizedCheckpt
 		s.finalizedCheckpt = postState.FinalizedCheckpoint()

View File

@@ -210,7 +210,11 @@ func (s *Service) Start() {
 		select {
 		case event := <-stateChannel:
 			if event.Type == statefeed.ChainStarted {
-				data := event.Data.(*statefeed.ChainStartedData)
+				data, ok := event.Data.(*statefeed.ChainStartedData)
+				if !ok {
+					log.Error("event data is not type *statefeed.ChainStartedData")
+					return
+				}
 				log.WithField("starttime", data.StartTime).Debug("Received chain start event")
 				s.processChainStartTime(ctx, data.StartTime)
 				return

View File

@@ -148,7 +148,10 @@ func (c *AttestationCache) Put(ctx context.Context, req *ethpb.AttestationDataRe
 }

 func wrapperToKey(i interface{}) (string, error) {
-	w := i.(*attestationReqResWrapper)
+	w, ok := i.(*attestationReqResWrapper)
+	if !ok {
+		return "", errors.New("key is not of type *attestationReqResWrapper")
+	}
 	if w == nil {
 		return "", errors.New("nil wrapper")
 	}

View File

@@ -14,8 +14,12 @@ var (
 // trim the FIFO queue to the maxSize.
 func trim(queue *cache.FIFO, maxSize int) {
 	for s := len(queue.ListKeys()); s > maxSize; s-- {
-		// #nosec G104 popProcessNoopFunc never returns an error
-		_, _ = queue.Pop(popProcessNoopFunc)
+		_, err := queue.Pop(popProcessNoopFunc)
+		if err != nil {
+			// popProcessNoopFunc never returns an error, but we handle this anyway to make linter
+			// happy.
+			return
+		}
 	}
 }

View File

@@ -125,7 +125,10 @@ func (c *Eth1DataVoteCache) IncrementEth1DataVote(eth1DataHash [32]byte) (uint64
 	eth1DataVoteCacheHit.Inc()
-	eInfo, _ := obj.(*Eth1DataVote)
+	eInfo, ok := obj.(*Eth1DataVote)
+	if !ok {
+		return 0, errors.New("cached value is not of type *Eth1DataVote")
+	}
 	eInfo.VoteCount++
 	if err := c.eth1DataVoteCache.Add(eInfo); err != nil {

View File

@@ -351,7 +351,11 @@ func unslashedAttestingIndices(state *stateTrie.BeaconState, atts []*pb.PendingA
 	sort.Slice(setIndices, func(i, j int) bool { return setIndices[i] < setIndices[j] })
 	// Remove the slashed validator indices.
 	for i := 0; i < len(setIndices); i++ {
-		if v, _ := state.ValidatorAtIndex(setIndices[i]); v != nil && v.Slashed {
+		v, err := state.ValidatorAtIndex(setIndices[i])
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to look up validator")
+		}
+		if v != nil && v.Slashed {
 			setIndices = append(setIndices[:i], setIndices[i+1:]...)
 		}
 	}

View File

@@ -3,6 +3,7 @@ package precompute
 import (
 	"context"

+	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
 	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
 	"github.com/prysmaticlabs/prysm/shared/params"
@@ -12,7 +13,7 @@ import (
 // New gets called at the beginning of process epoch cycle to return
 // pre computed instances of validators attesting records and total
 // balances attested in an epoch.
-func New(ctx context.Context, state *stateTrie.BeaconState) ([]*Validator, *Balance) {
+func New(ctx context.Context, state *stateTrie.BeaconState) ([]*Validator, *Balance, error) {
 	ctx, span := trace.StartSpan(ctx, "precomputeEpoch.New")
 	defer span.End()
 	vp := make([]*Validator, state.NumValidators())
@@ -21,7 +22,7 @@ func New(ctx context.Context, state *stateTrie.BeaconState) ([]*Validator, *Bala
 	currentEpoch := helpers.CurrentEpoch(state)
 	prevEpoch := helpers.PrevEpoch(state)
-	state.ReadFromEveryValidator(func(idx int, val *stateTrie.ReadOnlyValidator) error {
+	if err := state.ReadFromEveryValidator(func(idx int, val *stateTrie.ReadOnlyValidator) error {
 		// Was validator withdrawable or slashed
 		withdrawable := currentEpoch >= val.WithdrawableEpoch()
 		p := &Validator{
@@ -46,6 +47,8 @@ func New(ctx context.Context, state *stateTrie.BeaconState) ([]*Validator, *Bala
 		vp[idx] = p
 		return nil
-	})
-	return vp, bp
+	}); err != nil {
+		return nil, nil, errors.Wrap(err, "failed to initialize precompute")
+	}
+	return vp, bp, nil
 }

View File

@@ -31,7 +31,7 @@ func TestNew(t *testing.T) {
 		t.Fatal(err)
 	}
 	e := params.BeaconConfig().FarFutureEpoch
-	v, b := precompute.New(context.Background(), s)
+	v, b, _ := precompute.New(context.Background(), s)
 	if !reflect.DeepEqual(v[0], &precompute.Validator{IsSlashed: true, CurrentEpochEffectiveBalance: 100,
 		InclusionDistance: e, InclusionSlot: e}) {
 		t.Error("Incorrect validator 0 status")

View File

@@ -35,7 +35,7 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
 		t.Fatal(err)
 	}
-	vp, bp := New(context.Background(), state)
+	vp, bp, _ := New(context.Background(), state)
 	vp, bp, err = ProcessAttestations(context.Background(), state, vp, bp)
 	if err != nil {
 		t.Fatal(err)
@@ -88,7 +88,7 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
 		t.Fatal(err)
 	}
-	vp, bp := New(context.Background(), state)
+	vp, bp, _ := New(context.Background(), state)
 	vp, bp, err = ProcessAttestations(context.Background(), state, vp, bp)
 	if err != nil {
 		t.Fatal(err)
@@ -173,7 +173,7 @@ func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
 		t.Fatal(err)
 	}
-	vp, bp := New(context.Background(), state)
+	vp, bp, _ := New(context.Background(), state)
 	vp, bp, err = ProcessAttestations(context.Background(), state, vp, bp)
 	if err != nil {
 		t.Fatal(err)

View File

@@ -28,7 +28,7 @@ func runJustificationAndFinalizationTests(t *testing.T, config string) {
 func processJustificationAndFinalizationPrecomputeWrapper(t *testing.T, state *state.BeaconState) (*state.BeaconState, error) {
 	ctx := context.Background()
-	vp, bp := precompute.New(ctx, state)
+	vp, bp, _ := precompute.New(ctx, state)
 	_, bp, err := precompute.ProcessAttestations(ctx, state, vp, bp)
 	if err != nil {
 		t.Fatal(err)

View File

@@ -37,7 +37,7 @@ func processSlashingsWrapper(t *testing.T, state *beaconstate.BeaconState) (*bea
 func processSlashingsPrecomputeWrapper(t *testing.T, state *beaconstate.BeaconState) (*beaconstate.BeaconState, error) {
 	ctx := context.Background()
-	vp, bp := precompute.New(ctx, state)
+	vp, bp, _ := precompute.New(ctx, state)
 	_, bp, err := precompute.ProcessAttestations(ctx, state, vp, bp)
 	if err != nil {
 		t.Fatal(err)

View File

@@ -354,12 +354,14 @@ func ShuffledIndices(state *stateTrie.BeaconState, epoch uint64) ([]uint64, erro
 	}
 	indices := make([]uint64, 0, state.NumValidators())
-	state.ReadFromEveryValidator(func(idx int, val *stateTrie.ReadOnlyValidator) error {
+	if err := state.ReadFromEveryValidator(func(idx int, val *stateTrie.ReadOnlyValidator) error {
 		if IsActiveValidatorUsingTrie(val, epoch) {
 			indices = append(indices, uint64(idx))
 		}
 		return nil
-	})
+	}); err != nil {
+		return nil, err
+	}
 	return UnshuffleList(indices, seed)
 }

View File

@@ -43,12 +43,14 @@ func TotalBalance(state *stateTrie.BeaconState, indices []uint64) uint64 {
 //    return get_total_balance(state, set(get_active_validator_indices(state, get_current_epoch(state))))
 func TotalActiveBalance(state *stateTrie.BeaconState) (uint64, error) {
 	total := uint64(0)
-	state.ReadFromEveryValidator(func(idx int, val *stateTrie.ReadOnlyValidator) error {
+	if err := state.ReadFromEveryValidator(func(idx int, val *stateTrie.ReadOnlyValidator) error {
 		if IsActiveValidatorUsingTrie(val, SlotToEpoch(state.Slot())) {
 			total += val.EffectiveBalance()
 		}
 		return nil
-	})
+	}); err != nil {
+		return 0, err
+	}
 	return total, nil
 }

View File

@@ -74,12 +74,14 @@ func ActiveValidatorIndices(state *stateTrie.BeaconState, epoch uint64) ([]uint6
 		return activeIndices, nil
 	}
 	var indices []uint64
-	state.ReadFromEveryValidator(func(idx int, val *stateTrie.ReadOnlyValidator) error {
+	if err := state.ReadFromEveryValidator(func(idx int, val *stateTrie.ReadOnlyValidator) error {
 		if IsActiveValidatorUsingTrie(val, epoch) {
 			indices = append(indices, uint64(idx))
 		}
 		return nil
-	})
+	}); err != nil {
+		return nil, err
+	}
 	if err := UpdateCommitteeCache(state, epoch); err != nil {
 		return nil, errors.Wrap(err, "could not update committee cache")
@@ -92,12 +94,14 @@ func ActiveValidatorIndices(state *stateTrie.BeaconState, epoch uint64) ([]uint6
 // at the given epoch.
 func ActiveValidatorCount(state *stateTrie.BeaconState, epoch uint64) (uint64, error) {
 	count := uint64(0)
-	state.ReadFromEveryValidator(func(idx int, val *stateTrie.ReadOnlyValidator) error {
+	if err := state.ReadFromEveryValidator(func(idx int, val *stateTrie.ReadOnlyValidator) error {
 		if IsActiveValidatorUsingTrie(val, epoch) {
 			count++
 		}
 		return nil
-	})
+	}); err != nil {
+		return 0, err
+	}
 	return count, nil
 }

View File

@@ -35,6 +35,7 @@ go_library(
         "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
        "@com_github_prysmaticlabs_go_ssz//:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
 )

View File

@@ -21,6 +21,7 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/mathutil"
 	"github.com/prysmaticlabs/prysm/shared/params"
 	"github.com/prysmaticlabs/prysm/shared/traceutil"
+	"github.com/sirupsen/logrus"
 	"go.opencensus.io/trace"
 )
@@ -295,14 +296,21 @@ func ProcessSlots(ctx context.Context, state *stateTrie.BeaconState, slot uint64
 		} else if err != nil {
 			return nil, err
 		}
-		defer SkipSlotCache.MarkNotInProgress(key)
+		defer func() {
+			if err := SkipSlotCache.MarkNotInProgress(key); err != nil {
+				traceutil.AnnotateError(span, err)
+				logrus.WithError(err).Error("Failed to mark skip slot no longer in progress")
+			}
+		}()
 	for state.Slot() < slot {
 		if ctx.Err() != nil {
 			traceutil.AnnotateError(span, ctx.Err())
 			// Cache last best value.
 			if highestSlot < state.Slot() {
-				SkipSlotCache.Put(ctx, key, state)
+				if err := SkipSlotCache.Put(ctx, key, state); err != nil {
+					logrus.WithError(err).Error("Failed to put skip slot cache value")
+				}
 			}
 			return nil, ctx.Err()
 		}
@@ -318,11 +326,17 @@ func ProcessSlots(ctx context.Context, state *stateTrie.BeaconState, slot uint64
 				return nil, errors.Wrap(err, "could not process epoch with optimizations")
 			}
 		}
-		state.SetSlot(state.Slot() + 1)
+		if err := state.SetSlot(state.Slot() + 1); err != nil {
+			traceutil.AnnotateError(span, err)
+			return nil, errors.Wrap(err, "failed to increment state slot")
+		}
 	}
 	if highestSlot < state.Slot() {
-		SkipSlotCache.Put(ctx, key, state)
+		if err := SkipSlotCache.Put(ctx, key, state); err != nil {
+			logrus.WithError(err).Error("Failed to put skip slot cache value")
+			traceutil.AnnotateError(span, err)
+		}
 	}
 	return state, nil
@@ -605,8 +619,11 @@ func ProcessEpochPrecompute(ctx context.Context, state *stateTrie.BeaconState) (
 	if state == nil {
 		return nil, errors.New("nil state")
 	}
-	vp, bp := precompute.New(ctx, state)
-	vp, bp, err := precompute.ProcessAttestations(ctx, state, vp, bp)
+	vp, bp, err := precompute.New(ctx, state)
+	if err != nil {
+		return nil, err
+	}
+	vp, bp, err = precompute.ProcessAttestations(ctx, state, vp, bp)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -531,6 +531,7 @@ func TestProcessEpochPrecompute_CanProcess(t *testing.T) {
 		FinalizedCheckpoint:        &ethpb.Checkpoint{},
 		JustificationBits:          bitfield.Bitvector4{0x00},
 		CurrentJustifiedCheckpoint: &ethpb.Checkpoint{},
+		Validators:                 []*ethpb.Validator{},
 	}
 	s, err := beaconstate.InitializeFromProto(base)
 	if err != nil {

View File

@@ -54,8 +54,7 @@ func (k *Store) LastArchivedIndexRoot(ctx context.Context) [32]byte {
 	defer span.End()
 	var blockRoot []byte
-	// #nosec G104. Always returns nil.
-	k.db.View(func(tx *bolt.Tx) error {
+	if err := k.db.View(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(archivedIndexRootBucket)
 		lastArchivedIndex := bucket.Get(lastArchivedIndexKey)
 		if lastArchivedIndex == nil {
@@ -63,7 +62,9 @@ func (k *Store) LastArchivedIndexRoot(ctx context.Context) [32]byte {
 		}
 		blockRoot = bucket.Get(lastArchivedIndex)
 		return nil
-	})
+	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
+		panic(err)
+	}
 	return bytesutil.ToBytes32(blockRoot)
 }
@@ -75,12 +76,13 @@ func (k *Store) ArchivedPointRoot(ctx context.Context, index uint64) [32]byte {
 	defer span.End()
 	var blockRoot []byte
-	// #nosec G104. Always returns nil.
-	k.db.View(func(tx *bolt.Tx) error {
+	if err := k.db.View(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(archivedIndexRootBucket)
 		blockRoot = bucket.Get(bytesutil.Uint64ToBytes(index))
 		return nil
-	})
+	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
+		panic(err)
+	}
 	return bytesutil.ToBytes32(blockRoot)
 }
@@ -90,11 +92,12 @@ func (k *Store) HasArchivedPoint(ctx context.Context, index uint64) bool {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
 	defer span.End()
 	var exists bool
-	// #nosec G104. Always returns nil.
-	k.db.View(func(tx *bolt.Tx) error {
+	if err := k.db.View(func(tx *bolt.Tx) error {
 		iBucket := tx.Bucket(archivedIndexRootBucket)
 		exists = iBucket.Get(bytesutil.Uint64ToBytes(index)) != nil
 		return nil
-	})
+	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
+		panic(err)
+	}
 	return exists
 }

View File

@@ -83,12 +83,13 @@ func (k *Store) HasAttestation(ctx context.Context, attDataRoot [32]byte) bool {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasAttestation")
 	defer span.End()
 	exists := false
-	// #nosec G104. Always returns nil.
-	k.db.View(func(tx *bolt.Tx) error {
+	if err := k.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(attestationsBucket)
 		exists = bkt.Get(attDataRoot[:]) != nil
 		return nil
-	})
+	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
+		panic(err)
+	}
 	return exists
 }
@@ -281,19 +282,34 @@ func createAttestationIndicesFromFilters(f *filters.QueryFilter) (map[string][]b
 	for k, v := range f.Filters() {
 		switch k {
 		case filters.HeadBlockRoot:
-			headBlockRoot := v.([]byte)
+			headBlockRoot, ok := v.([]byte)
+			if !ok {
+				return nil, errors.New("headBlockRoot is not type []byte")
+			}
 			indicesByBucket[string(attestationHeadBlockRootBucket)] = headBlockRoot
 		case filters.SourceRoot:
-			sourceRoot := v.([]byte)
+			sourceRoot, ok := v.([]byte)
+			if !ok {
+				return nil, errors.New("sourceRoot is not type []byte")
+			}
 			indicesByBucket[string(attestationSourceRootIndicesBucket)] = sourceRoot
 		case filters.SourceEpoch:
-			sourceEpoch := v.(uint64)
+			sourceEpoch, ok := v.(uint64)
+			if !ok {
+				return nil, errors.New("sourceEpoch is not type uint64")
+			}
 			indicesByBucket[string(attestationSourceEpochIndicesBucket)] = bytesutil.Uint64ToBytes(sourceEpoch)
 		case filters.TargetEpoch:
-			targetEpoch := v.(uint64)
+			targetEpoch, ok := v.(uint64)
+			if !ok {
+				return nil, errors.New("targetEpoch is not type uint64")
+			}
 			indicesByBucket[string(attestationTargetEpochIndicesBucket)] = bytesutil.Uint64ToBytes(targetEpoch)
 		case filters.TargetRoot:
-			targetRoot := v.([]byte)
+			targetRoot, ok := v.([]byte)
+			if !ok {
+				return nil, errors.New("targetRoot is not type []byte")
+			}
 			indicesByBucket[string(attestationTargetRootIndicesBucket)] = targetRoot
 		default:
 			return nil, fmt.Errorf("filter criterion %v not supported for attestations", k)

View File

@@ -39,7 +39,11 @@ func (k *Store) Backup(ctx context.Context) error {
 	if err != nil {
 		panic(err)
 	}
-	defer copyDB.Close()
+	defer func() {
+		if err := copyDB.Close(); err != nil {
+			logrus.WithError(err).Error("Failed to close destination database")
+		}
+	}()
 	return k.db.View(func(tx *bolt.Tx) error {
 		return tx.ForEach(func(name []byte, b *bolt.Bucket) error {

View File

@@ -120,12 +120,13 @@ func (k *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
 		return true
 	}
 	exists := false
-	// #nosec G104. Always returns nil.
-	k.db.View(func(tx *bolt.Tx) error {
+	if err := k.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(blocksBucket)
 		exists = bkt.Get(blockRoot[:]) != nil
 		return nil
-	})
+	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
+		panic(err)
+	}
 	return exists
 }
@@ -590,7 +591,10 @@ func createBlockIndicesFromFilters(f *filters.QueryFilter) (map[string][]byte, e
 	for k, v := range f.Filters() {
 		switch k {
 		case filters.ParentRoot:
-			parentRoot := v.([]byte)
+			parentRoot, ok := v.([]byte)
+			if !ok {
+				return nil, errors.New("parent root is not []byte")
+			}
 			indicesByBucket[string(blockParentRootIndicesBucket)] = parentRoot
 		case filters.StartSlot:
 		case filters.EndSlot:

View File

@@ -20,12 +20,14 @@ func (kv *Store) ensureNewStateServiceCompatible(ctx context.Context) error {
 	}
 	var historicalStateDeleted bool
-	kv.db.View(func(tx *bolt.Tx) error {
+	if err := kv.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(newStateServiceCompatibleBucket)
 		v := bkt.Get(historicalStateDeletedKey)
 		historicalStateDeleted = len(v) == 1 && v[0] == 0x01
 		return nil
-	})
+	}); err != nil {
+		return err
+	}
 	regenHistoricalStatesConfirmed := false
 	var err error

View File

@@ -15,12 +15,13 @@ func (k *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.DepositContractAddress")
 	defer span.End()
 	var addr []byte
-	// #nosec G104. Always returns nil.
-	k.db.View(func(tx *bolt.Tx) error {
+	if err := k.db.View(func(tx *bolt.Tx) error {
 		chainInfo := tx.Bucket(chainMetadataBucket)
 		addr = chainInfo.Get(depositContractAddressKey)
 		return nil
-	})
+	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
+		panic(err)
+	}
 	return addr, nil
 }

View File

@@ -31,12 +31,13 @@ func (k *Store) HasVoluntaryExit(ctx context.Context, exitRoot [32]byte) bool {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasVoluntaryExit")
 	defer span.End()
 	exists := false
-	// #nosec G104. Always returns nil.
-	k.db.View(func(tx *bolt.Tx) error {
+	if err := k.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(voluntaryExitsBucket)
 		exists = bkt.Get(exitRoot[:]) != nil
 		return nil
-	})
+	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
+		panic(err)
+	}
 	return exists
 }

View File

@@ -148,7 +148,9 @@ func regenHistoricalStateProcessSlots(ctx context.Context, state *stateTrie.Beac
 				return nil, errors.Wrap(err, "could not process epoch with optimizations")
 			}
 		}
-		state.SetSlot(state.Slot() + 1)
+		if err := state.SetSlot(state.Slot() + 1); err != nil {
+			return nil, err
+		}
 	}
 	return state, nil
 }

View File

@@ -31,12 +31,13 @@ func (k *Store) HasProposerSlashing(ctx context.Context, slashingRoot [32]byte)
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasProposerSlashing")
 	defer span.End()
 	exists := false
-	// #nosec G104. Always returns nil.
-	k.db.View(func(tx *bolt.Tx) error {
+	if err := k.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(proposerSlashingsBucket)
 		exists = bkt.Get(slashingRoot[:]) != nil
 		return nil
-	})
+	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
+		panic(err)
+	}
 	return exists
 }
@@ -90,12 +91,13 @@ func (k *Store) HasAttesterSlashing(ctx context.Context, slashingRoot [32]byte)
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasAttesterSlashing")
 	defer span.End()
 	exists := false
-	// #nosec G104. Always returns nil.
-	k.db.View(func(tx *bolt.Tx) error {
+	if err := k.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(attesterSlashingsBucket)
 		exists = bkt.Get(slashingRoot[:]) != nil
 		return nil
-	})
+	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
+		panic(err)
+	}
 	return exists
 }

View File

@@ -163,12 +163,13 @@ func (k *Store) HasState(ctx context.Context, blockRoot [32]byte) bool {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasState")
 	defer span.End()
 	var exists bool
-	// #nosec G104. Always returns nil.
-	k.db.View(func(tx *bolt.Tx) error {
+	if err := k.db.View(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(stateBucket)
 		exists = bucket.Get(blockRoot[:]) != nil
 		return nil
-	})
+	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
+		panic(err)
+	}
 	return exists
 }

View File

@@ -67,11 +67,12 @@ func (k *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasStateSummary")
 	defer span.End()
 	var exists bool
-	// #nosec G104. Always returns nil.
-	k.db.View(func(tx *bolt.Tx) error {
+	if err := k.db.View(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(stateSummaryBucket)
 		exists = bucket.Get(blockRoot[:]) != nil
 		return nil
-	})
+	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
+		panic(err)
+	}
 	return exists
 }

View File

@@ -222,7 +222,9 @@ func (cm *BasicConnMgr) TrimOpenConns(ctx context.Context) {
 	for _, c := range cm.getConnsToClose(ctx) {
 		log.Info("closing conn: ", c.RemotePeer())
 		log.Event(ctx, "closeConn", c.RemotePeer())
-		c.Close()
+		if err := c.Close(); err != nil {
+			log.Errorf("Failed to close connection: %v", err)
+		}
 	}
 	cm.lastTrim = roughtime.Now()

View File

@@ -66,11 +66,14 @@ type Service struct {
 func NewService(cfg *Config) (*Service, error) {
 	var err error
 	ctx, cancel := context.WithCancel(context.Background())
-	cache, _ := ristretto.NewCache(&ristretto.Config{
+	cache, err := ristretto.NewCache(&ristretto.Config{
 		NumCounters: 1000,
 		MaxCost:     1000,
 		BufferItems: 64,
 	})
+	if err != nil {
+		return nil, err
+	}
 	s := &Service{
 		ctx: ctx,

View File

@@ -172,7 +172,9 @@ func (b *blockCache) AddBlock(blk *gethTypes.Block) error {
 func trim(queue *cache.FIFO, maxSize int) {
 	for s := len(queue.ListKeys()); s > maxSize; s-- {
 		// #nosec G104 popProcessNoopFunc never returns an error
-		_, _ = queue.Pop(popProcessNoopFunc)
+		if _, err := queue.Pop(popProcessNoopFunc); err != nil { // This never returns an error, but we'll handle anyway for sanity.
+			panic(err)
+		}
 	}
 }

View File

@@ -9,7 +9,9 @@ import (
 func (s *Service) processDeposit(eth1Data *ethpb.Eth1Data, deposit *ethpb.Deposit) error {
 	var err error
-	s.preGenesisState.SetEth1Data(eth1Data)
+	if err := s.preGenesisState.SetEth1Data(eth1Data); err != nil {
+		return err
+	}
 	s.preGenesisState, err = blocks.ProcessPreGenesisDeposit(context.Background(), s.preGenesisState, deposit)
 	return err
 }

View File

@@ -448,7 +448,10 @@ func (s *Service) checkHeaderRange(start uint64, end uint64,
 }

 func (s *Service) checkForChainstart(blockHash [32]byte, blockNumber *big.Int, blockTime uint64) {
-	valCount, _ := helpers.ActiveValidatorCount(s.preGenesisState, 0)
+	valCount, err := helpers.ActiveValidatorCount(s.preGenesisState, 0)
+	if err != nil {
+		log.WithError(err).Error("Could not determine active validator count from pref genesis state")
+	}
 	triggered := state.IsValidGenesisState(valCount, s.createGenesisTime(blockTime))
 	if triggered {
 		s.chainStartData.GenesisTime = s.createGenesisTime(blockTime)

View File

@@ -499,7 +499,9 @@ func (s *Service) batchRequestHeaders(startBlock uint64, endBlock uint64) ([]*ge
 	}
 	for _, h := range headers {
 		if h != nil {
-			s.blockCache.AddBlock(gethTypes.NewBlockWithHeader(h))
+			if err := s.blockCache.AddBlock(gethTypes.NewBlockWithHeader(h)); err != nil {
+				return nil, err
+			}
 		}
 	}
 	return headers, nil

View File

@@ -60,6 +60,7 @@ func TestServer_ListAssignments_NoResults(t *testing.T) {
 	st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
 		Slot:        0,
 		RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
+		Validators:  []*ethpb.Validator{},
 	})
 	if err != nil {
 		t.Fatal(err)

View File

@@ -25,7 +25,9 @@ func (bs *Server) SubmitProposerSlashing(
 		return nil, status.Errorf(codes.Internal, "Could not insert proposer slashing into pool: %v", err)
 	}
 	if featureconfig.Get().BroadcastSlashings {
-		bs.Broadcaster.Broadcast(ctx, req)
+		if err := bs.Broadcaster.Broadcast(ctx, req); err != nil {
+			return nil, err
+		}
 	}
 	return &ethpb.SubmitSlashingResponse{
 		SlashedIndices: []uint64{req.ProposerIndex},
@@ -47,7 +49,9 @@ func (bs *Server) SubmitAttesterSlashing(
 		return nil, status.Errorf(codes.Internal, "Could not insert attester slashing into pool: %v", err)
 	}
 	if featureconfig.Get().BroadcastSlashings {
-		bs.Broadcaster.Broadcast(ctx, req)
+		if err := bs.Broadcaster.Broadcast(ctx, req); err != nil {
+			return nil, err
+		}
 	}
 	slashedIndices := sliceutil.IntersectionUint64(req.Attestation_1.AttestingIndices, req.Attestation_2.AttestingIndices)
 	return &ethpb.SubmitSlashingResponse{

View File

@@ -2,6 +2,7 @@ package beacon
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"math/big"
@@ -331,7 +332,11 @@ func (is *infostream) generatePendingValidatorInfo(info *ethpb.ValidatorInfo) (*
 	is.eth1DepositsMutex.Lock()
 	if fetchedDeposit, exists := is.eth1Deposits.Get(key); exists {
 		eth1DepositCacheHits.Inc()
-		deposit = fetchedDeposit.(*eth1Deposit)
+		var ok bool
+		deposit, ok = fetchedDeposit.(*eth1Deposit)
+		if !ok {
+			return nil, errors.New("cached eth1 deposit is not type *eth1Deposit")
+		}
 	} else {
 		eth1DepositCacheMisses.Inc()
 		fetchedDeposit, eth1BlockNumber := is.depositFetcher.DepositByPubkey(is.ctx, info.PublicKey)
@@ -398,7 +403,10 @@ func (is *infostream) calculateActivationTimeForPendingValidators(res []*ethpb.V
 	// Loop over epochs, roughly simulating progression.
 	for curEpoch := epoch + 1; len(sortedIndices) > 0 && len(pendingValidators) > 0; curEpoch++ {
-		toProcess, _ := helpers.ValidatorChurnLimit(numAttestingValidators)
+		toProcess, err := helpers.ValidatorChurnLimit(numAttestingValidators)
+		if err != nil {
+			log.WithError(err).Error("Failed to determine validator churn limit")
+		}
 		if toProcess > uint64(len(sortedIndices)) {
 			toProcess = uint64(len(sortedIndices))
 		}
@@ -491,7 +499,11 @@ func (is *infostream) depositQueueTimestamp(eth1BlockNumber *big.Int) (uint64, e
 	is.eth1BlocktimesMutex.Lock()
 	if cachedTimestamp, exists := is.eth1Blocktimes.Get(key); exists {
 		eth1BlocktimeCacheHits.Inc()
-		blockTimestamp = cachedTimestamp.(uint64)
+		var ok bool
+		blockTimestamp, ok = cachedTimestamp.(uint64)
+		if !ok {
+			return 0, errors.New("cached timestamp is not type uint64")
+		}
 	} else {
 		eth1BlocktimeCacheMisses.Inc()
 		var err error

View File

@@ -324,7 +324,9 @@ func (s *Service) Stop() error {
 		log.Debug("Initiated graceful stop of gRPC server")
 	}
 	if s.slasherConn != nil {
-		s.slasherConn.Close()
+		if err := s.slasherConn.Close(); err != nil {
+			return err
+		}
 	}
 	return nil
 }

View File

@@ -2,6 +2,7 @@ package validator
 import (
 	"context"
+	"errors"
 	"time"
 	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
@@ -19,6 +20,7 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/params"
 	"github.com/prysmaticlabs/prysm/shared/roughtime"
 	"github.com/prysmaticlabs/prysm/shared/slotutil"
+	"github.com/prysmaticlabs/prysm/shared/traceutil"
 	"go.opencensus.io/trace"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -201,7 +203,7 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
 // waitToOneThird waits until one-third of the way through the slot
 // or the head slot equals to the input slot.
 func (vs *Server) waitToOneThird(ctx context.Context, slot uint64) {
-	_, span := trace.StartSpan(ctx, "validator.waitToOneThird")
+	ctx, span := trace.StartSpan(ctx, "validator.waitToOneThird")
 	defer span.End()
 	// Don't need to wait if current slot is greater than requested slot.
@@ -224,7 +226,13 @@ func (vs *Server) waitToOneThird(ctx context.Context, slot uint64) {
 		case event := <-stateChannel:
 			// Node processed a block, check if the processed block is the same as input slot.
 			if event.Type == statefeed.BlockProcessed {
-				d := event.Data.(*statefeed.BlockProcessedData)
+				d, ok := event.Data.(*statefeed.BlockProcessedData)
+				if !ok {
+					err := errors.New("event feed is not type *statefeed.BlockProcessedData")
+					traceutil.AnnotateError(span, err)
+					log.Error(err)
+					continue
+				}
 				if slot == d.Slot {
 					return
 				}

View File

@@ -325,7 +325,9 @@ func (vs *Server) canonicalEth1Data(ctx context.Context, beaconState *stateTrie.
 	var eth1BlockHash [32]byte
 	// Add in current vote, to get accurate vote tally
-	beaconState.AppendEth1DataVotes(currentVote)
+	if err := beaconState.AppendEth1DataVotes(currentVote); err != nil {
+		return nil, nil, errors.Wrap(err, "failed to append eth1 data votes to state")
+	}
 	hasSupport, err := blocks.Eth1DataHasEnoughSupport(beaconState, currentVote)
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "could not determine if current eth1data vote has enough support")

View File

@@ -2,6 +2,7 @@ package validator
 import (
 	"context"
+	"errors"
 	"time"
 	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
@@ -171,7 +172,10 @@ func (vs *Server) WaitForChainStart(req *ptypes.Empty, stream ethpb.BeaconNodeVa
 		select {
 		case event := <-stateChannel:
 			if event.Type == statefeed.ChainStarted {
-				data := event.Data.(*statefeed.ChainStartedData)
+				data, ok := event.Data.(*statefeed.ChainStartedData)
+				if !ok {
+					return errors.New("event data is not type *statefeed.ChainStartData")
+				}
 				log.WithField("starttime", data.StartTime).Debug("Received chain started event")
 				log.Info("Sending genesis time notification to connected validator clients")
 				res := &ethpb.ChainStartResponse{

View File

@@ -43,7 +43,11 @@ func (b *BeaconState) SetFork(val *pbp2p.Fork) error {
 	b.lock.Lock()
 	defer b.lock.Unlock()
-	b.state.Fork = proto.Clone(val).(*pbp2p.Fork)
+	fk, ok := proto.Clone(val).(*pbp2p.Fork)
+	if !ok {
+		return errors.New("proto.Clone did not return a fork proto")
+	}
+	b.state.Fork = fk
 	b.markFieldAsDirty(fork)
 	return nil
 }

View File

@@ -222,7 +222,9 @@ func processSlotsStateGen(ctx context.Context, state *stateTrie.BeaconState, slo
 				return nil, errors.Wrap(err, "could not process epoch with optimizations")
 			}
 		}
-		state.SetSlot(state.Slot() + 1)
+		if err := state.SetSlot(state.Slot() + 1); err != nil {
+			return nil, err
+		}
 	}
 	return state, nil

View File

@@ -21,12 +21,15 @@ var nocachedHasher *stateRootHasher
 var cachedHasher *stateRootHasher

 func init() {
-	rootsCache, _ := ristretto.NewCache(&ristretto.Config{
+	rootsCache, err := ristretto.NewCache(&ristretto.Config{
 		NumCounters: cacheSize, // number of keys to track frequency of (1M).
 		MaxCost:     1 << 22,   // maximum cost of cache (3MB).
 		// 100,000 roots will take up approximately 3 MB in memory.
 		BufferItems: 64, // number of keys per Get buffer.
 	})
+	if err != nil {
+		panic(err)
+	}
 	// Temporarily disable roots cache until cache issues can be resolved.
 	cachedHasher = &stateRootHasher{rootsCache: rootsCache}
 	nocachedHasher = &stateRootHasher{}

View File

@@ -17,11 +17,15 @@ func setRPCStreamDeadlines(stream network.Stream) {
 func setStreamReadDeadline(stream network.Stream, duration time.Duration) {
 	// libp2p uses the system clock time for determining the deadline so we use
 	// time.Now() instead of the synchronized roughtime.Now().
-	stream.SetReadDeadline(time.Now().Add(duration))
+	if err := stream.SetReadDeadline(time.Now().Add(duration)); err != nil {
+		log.WithError(err).Error("Failed to set stream deadline")
+	}
 }

 func setStreamWriteDeadline(stream network.Stream, duration time.Duration) {
 	// libp2p uses the system clock time for determining the deadline so we use
 	// time.Now() instead of the synchronized roughtime.Now().
-	stream.SetWriteDeadline(time.Now().Add(duration))
+	if err := stream.SetWriteDeadline(time.Now().Add(duration)); err != nil {
+		log.WithError(err).Error("Failed to set stream deadline")
+	}
 }

View File

@@ -312,7 +312,11 @@ func (s *Service) requestBlocks(ctx context.Context, req *p2ppb.BeaconBlocksByRa
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to send request to peer")
 	}
-	defer stream.Close()
+	defer func() {
+		if err := stream.Close(); err != nil {
+			log.WithError(err).Error("Failed to close stream")
+		}
+	}()
 	resp := make([]*eth.SignedBeaconBlock, 0, req.Count)
 	for {

View File

@@ -86,7 +86,11 @@ func (s *Service) Start() {
 		select {
 		case event := <-stateChannel:
 			if event.Type == statefeed.Initialized {
-				data := event.Data.(*statefeed.InitializedData)
+				data, ok := event.Data.(*statefeed.InitializedData)
+				if !ok {
+					log.Error("event data is not type *statefeed.InitializedData")
+					return
+				}
 				log.WithField("starttime", data.StartTime).Debug("Received state initialized event")
 				genesis = data.StartTime
 				genesisSet = true

View File

@@ -383,7 +383,11 @@ func (f *blocksFetcher) requestBlocks(
 	if err != nil {
 		return nil, err
 	}
-	defer stream.Close()
+	defer func() {
+		if err := stream.Close(); err != nil {
+			log.WithError(err).Error("Failed to close stream")
+		}
+	}()
 	resp := make([]*eth.SignedBeaconBlock, 0, req.Count)
 	for {

View File

@@ -25,9 +25,10 @@ const (
 )

 var (
-	errQueueCtxIsDone          = errors.New("queue's context is done, reinitialize")
-	errQueueTakesTooLongToStop = errors.New("queue takes too long to stop")
-	errNoEpochState            = errors.New("epoch state not found")
+	errQueueCtxIsDone             = errors.New("queue's context is done, reinitialize")
+	errQueueTakesTooLongToStop    = errors.New("queue takes too long to stop")
+	errNoEpochState               = errors.New("epoch state not found")
+	errInputNotFetchRequestParams = errors.New("input data is not type *fetchRequestParams")
 )

 // blocksProvider exposes enough methods for queue to fetch incoming blocks.
@@ -221,7 +222,10 @@ func (q *blocksQueue) loop() {
 // onScheduleEvent is an event called on newly arrived epochs. Transforms state to scheduled.
 func (q *blocksQueue) onScheduleEvent(ctx context.Context) eventHandlerFn {
 	return func(es *epochState, in interface{}) (stateID, error) {
-		data := in.(*fetchRequestParams)
+		data, ok := in.(*fetchRequestParams)
+		if !ok {
+			return 0, errInputNotFetchRequestParams
+		}
 		if err := q.blocksFetcher.scheduleRequest(ctx, data.start, data.count); err != nil {
 			return es.state, err
 		}
@@ -236,7 +240,10 @@ func (q *blocksQueue) onDataReceivedEvent(ctx context.Context) eventHandlerFn {
 			return es.state, ctx.Err()
 		}
-		response := in.(*fetchRequestResponse)
+		response, ok := in.(*fetchRequestResponse)
+		if !ok {
+			return 0, errInputNotFetchRequestParams
+		}
 		epoch := helpers.SlotToEpoch(response.start)
 		if response.err != nil {
 			// Current window is already too big, re-request previous epochs.
@@ -267,7 +274,10 @@ func (q *blocksQueue) onReadyToSendEvent(ctx context.Context) eventHandlerFn {
 			return es.state, ctx.Err()
 		}
-		data := in.(*fetchRequestParams)
+		data, ok := in.(*fetchRequestParams)
+		if !ok {
+			return 0, errInputNotFetchRequestParams
+		}
 		epoch := helpers.SlotToEpoch(data.start)
 		ind, ok := q.state.findEpochState(epoch)
 		if !ok {
@@ -317,7 +327,10 @@ func (q *blocksQueue) onExtendWindowEvent(ctx context.Context) eventHandlerFn {
 			return es.state, ctx.Err()
 		}
-		data := in.(*fetchRequestParams)
+		data, ok := in.(*fetchRequestParams)
+		if !ok {
+			return 0, errInputNotFetchRequestParams
+		}
 		epoch := helpers.SlotToEpoch(data.start)
 		if _, ok := q.state.findEpochState(epoch); !ok {
 			return es.state, errNoEpochState
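Declaring the failure as a package-level sentinel (errInputNotFetchRequestParams) keeps the four handlers uniform and lets callers and tests match on the exact failure. A small sketch of how such a sentinel could be checked; handleSchedule and fetchRequestParams here are simplified stand-ins, not the actual blocks_queue implementation:

// Sketch: matching on a sentinel error returned by an event handler.
package main

import (
	"errors"
	"fmt"
)

var errInputNotFetchRequestParams = errors.New("input data is not type *fetchRequestParams")

type fetchRequestParams struct{ start, count uint64 }

func handleSchedule(in interface{}) error {
	data, ok := in.(*fetchRequestParams)
	if !ok {
		return errInputNotFetchRequestParams
	}
	fmt.Println("scheduling", data.start, data.count)
	return nil
}

func main() {
	err := handleSchedule("not params")
	if errors.Is(err, errInputNotFetchRequestParams) {
		fmt.Println("rejected malformed event payload:", err)
	}
}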

View File

@@ -137,7 +137,11 @@ func (s *Service) requestBlocks(ctx context.Context, req *p2ppb.BeaconBlocksByRa
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to send request to peer")
 	}
-	defer stream.Close()
+	defer func() {
+		if err := stream.Close(); err != nil {
+			log.WithError(err).Error("Failed to close stream")
+		}
+	}()
 	resp := make([]*eth.SignedBeaconBlock, 0, req.Count)
 	for {

View File

@@ -86,7 +86,11 @@ func (s *Service) Start() {
 		select {
 		case event := <-stateChannel:
 			if event.Type == statefeed.Initialized {
-				data := event.Data.(*statefeed.InitializedData)
+				data, ok := event.Data.(*statefeed.InitializedData)
+				if !ok {
+					log.Error("Event feed data is not type *statefeed.InitializedData")
+					continue
+				}
 				log.WithField("starttime", data.StartTime).Debug("Received state initialized event")
 				genesis = data.StartTime
 				genesisSet = true

View File

@@ -6,6 +6,7 @@ import (
 	"sync"
 	"time"

+	"github.com/pkg/errors"
 	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/shared/bls"
@@ -125,7 +126,11 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
 			pid := pids[rand.Int()%len(pids)]
 			targetSlot := helpers.SlotToEpoch(attestations[0].Aggregate.Data.Target.Epoch)
 			for _, p := range pids {
-				if cs, _ := s.p2p.Peers().ChainState(p); cs != nil && cs.HeadSlot >= targetSlot {
+				cs, err := s.p2p.Peers().ChainState(p)
+				if err != nil {
+					return errors.Wrap(err, "could not get chain state for peer")
+				}
+				if cs != nil && cs.HeadSlot >= targetSlot {
 					pid = p
 					break
 				}

View File

@@ -28,7 +28,9 @@ func (r *Service) processPendingBlocksQueue() {
 	locker := new(sync.Mutex)
 	runutil.RunEvery(r.ctx, processPendingBlocksPeriod, func() {
 		locker.Lock()
-		r.processPendingBlocks(ctx)
+		if err := r.processPendingBlocks(ctx); err != nil {
+			log.WithError(err).Error("Failed to process pending blocks")
+		}
 		locker.Unlock()
 	})
 }
@@ -80,7 +82,11 @@ func (r *Service) processPendingBlocks(ctx context.Context) error {
 			// have a head slot newer than the block slot we are requesting.
 			pid := pids[rand.Int()%len(pids)]
 			for _, p := range pids {
-				if cs, _ := r.p2p.Peers().ChainState(p); cs != nil && cs.HeadSlot >= uint64(s) {
+				cs, err := r.p2p.Peers().ChainState(p)
+				if err != nil {
+					return errors.Wrap(err, "failed to read chain state for peer")
+				}
+				if cs != nil && cs.HeadSlot >= uint64(s) {
 					pid = p
 					break
 				}

View File

@@ -59,7 +59,11 @@ func (r *Service) registerRPC(topic string, base interface{}, handle rpcHandler)
 	r.p2p.SetStreamHandler(topic, func(stream network.Stream) {
 		ctx, cancel := context.WithTimeout(context.Background(), ttfbTimeout)
 		defer cancel()
-		defer stream.Close()
+		defer func() {
+			if err := stream.Close(); err != nil {
+				log.WithError(err).Error("Failed to close stream")
+			}
+		}()
 		ctx, span := trace.StartSpan(ctx, "sync.rpc")
 		defer span.End()
 		span.AddAttributes(trace.StringAttribute("topic", topic))

View File

@@ -17,13 +17,20 @@ import (
 func (r *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
 	ctx, span := trace.StartSpan(ctx, "sync.BeaconBlocksByRangeHandler")
 	defer span.End()
-	defer stream.Close()
+	defer func() {
+		if err := stream.Close(); err != nil {
+			log.WithError(err).Error("Failed to close stream")
+		}
+	}()
 	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
 	defer cancel()
 	setRPCStreamDeadlines(stream)
 	log := log.WithField("handler", "beacon_blocks_by_range")
-	m := msg.(*pb.BeaconBlocksByRangeRequest)
+	m, ok := msg.(*pb.BeaconBlocksByRangeRequest)
+	if !ok {
+		return errors.New("message is not type *pb.BeaconBlockByRangeRequest")
+	}
 	startSlot := m.StartSlot
 	endSlot := startSlot + (m.Step * (m.Count - 1))
@@ -42,7 +49,11 @@ func (r *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
 		r.p2p.Peers().IncrementBadResponses(stream.Conn().RemotePeer())
 		if r.p2p.Peers().IsBad(stream.Conn().RemotePeer()) {
 			log.Debug("Disconnecting bad peer")
-			defer r.p2p.Disconnect(stream.Conn().RemotePeer())
+			defer func() {
+				if err := r.p2p.Disconnect(stream.Conn().RemotePeer()); err != nil {
+					log.WithError(err).Error("Failed to disconnect peer")
+				}
+			}()
 		}
 		resp, err := r.generateErrorResponse(responseCodeInvalidRequest, rateLimitedError)
 		if err != nil {

View File

@@ -46,13 +46,20 @@ func (r *Service) sendRecentBeaconBlocksRequest(ctx context.Context, blockRoots
// beaconBlocksRootRPCHandler looks up the request blocks from the database from the given block roots. // beaconBlocksRootRPCHandler looks up the request blocks from the database from the given block roots.
func (r *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error { func (r *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
defer stream.Close() defer func() {
if err := stream.Close(); err != nil {
log.WithError(err).Error("Failed to close stream")
}
}()
ctx, cancel := context.WithTimeout(ctx, 5*time.Second) ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel() defer cancel()
setRPCStreamDeadlines(stream) setRPCStreamDeadlines(stream)
log := log.WithField("handler", "beacon_blocks_by_root") log := log.WithField("handler", "beacon_blocks_by_root")
blockRoots := msg.([][32]byte) blockRoots, ok := msg.([][32]byte)
if !ok {
return errors.New("message is not type [][32]byte")
}
if len(blockRoots) == 0 { if len(blockRoots) == 0 {
resp, err := r.generateErrorResponse(responseCodeInvalidRequest, "no block roots provided in request") resp, err := r.generateErrorResponse(responseCodeInvalidRequest, "no block roots provided in request")
if err != nil { if err != nil {
@@ -69,7 +76,11 @@ func (r *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{
r.p2p.Peers().IncrementBadResponses(stream.Conn().RemotePeer()) r.p2p.Peers().IncrementBadResponses(stream.Conn().RemotePeer())
if r.p2p.Peers().IsBad(stream.Conn().RemotePeer()) { if r.p2p.Peers().IsBad(stream.Conn().RemotePeer()) {
log.Debug("Disconnecting bad peer") log.Debug("Disconnecting bad peer")
defer r.p2p.Disconnect(stream.Conn().RemotePeer()) defer func() {
if err := r.p2p.Disconnect(stream.Conn().RemotePeer()); err != nil {
log.WithError(err).Error("Failed to disconnect peer")
}
}()
} }
resp, err := r.generateErrorResponse(responseCodeInvalidRequest, rateLimitedError) resp, err := r.generateErrorResponse(responseCodeInvalidRequest, rateLimitedError)
if err != nil { if err != nil {

View File

@@ -22,7 +22,11 @@ var goodByes = map[uint64]string{
// goodbyeRPCHandler reads the incoming goodbye rpc message from the peer. // goodbyeRPCHandler reads the incoming goodbye rpc message from the peer.
func (r *Service) goodbyeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error { func (r *Service) goodbyeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
defer stream.Close() defer func() {
if err := stream.Close(); err != nil {
log.WithError(err).Error("Failed to close stream")
}
}()
ctx, cancel := context.WithTimeout(ctx, 5*time.Second) ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel() defer cancel()
setRPCStreamDeadlines(stream) setRPCStreamDeadlines(stream)

View File

@@ -119,12 +119,19 @@ func (r *Service) removeDisconnectedPeerStatus(ctx context.Context, pid peer.ID)
// statusRPCHandler reads the incoming Status RPC from the peer and responds with our version of a status message. // statusRPCHandler reads the incoming Status RPC from the peer and responds with our version of a status message.
// This handler will disconnect any peer that does not match our fork version. // This handler will disconnect any peer that does not match our fork version.
func (r *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error { func (r *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
defer stream.Close() defer func() {
if err := stream.Close(); err != nil {
log.WithError(err).Error("Failed to close stream")
}
}()
ctx, cancel := context.WithTimeout(ctx, 5*time.Second) ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel() defer cancel()
setRPCStreamDeadlines(stream) setRPCStreamDeadlines(stream)
log := log.WithField("handler", "status") log := log.WithField("handler", "status")
m := msg.(*pb.Status) m, ok := msg.(*pb.Status)
if !ok {
return errors.New("message is not type *pb.Status")
}
if err := r.validateStatusMessage(m, stream); err != nil { if err := r.validateStatusMessage(m, stream); err != nil {
log.WithField("peer", stream.Conn().RemotePeer()).Debug("Invalid fork version from peer") log.WithField("peer", stream.Conn().RemotePeer()).Debug("Invalid fork version from peer")
@@ -139,7 +146,9 @@ func (r *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream
log.WithError(err).Debug("Failed to write to stream") log.WithError(err).Debug("Failed to write to stream")
} }
} }
stream.Close() // Close before disconnecting. if err := stream.Close(); err != nil { // Close before disconnecting.
log.WithError(err).Error("Failed to close stream")
}
// Add a short delay to allow the stream to flush before closing the connection. // Add a short delay to allow the stream to flush before closing the connection.
// There is still a chance that the peer won't receive the message. // There is still a chance that the peer won't receive the message.
time.Sleep(50 * time.Millisecond) time.Sleep(50 * time.Millisecond)

View File

@@ -49,7 +49,11 @@ func (r *Service) registerSubscribers() {
select { select {
case event := <-stateChannel: case event := <-stateChannel:
if event.Type == statefeed.Initialized { if event.Type == statefeed.Initialized {
data := event.Data.(*statefeed.InitializedData) data, ok := event.Data.(*statefeed.InitializedData)
if !ok {
log.Error("Event feed data is not type *statefeed.InitializedData")
return
}
log.WithField("starttime", data.StartTime).Debug("Received state initialized event") log.WithField("starttime", data.StartTime).Debug("Received state initialized event")
if data.StartTime.After(roughtime.Now()) { if data.StartTime.After(roughtime.Now()) {
stateSub.Unsubscribe() stateSub.Unsubscribe()
@@ -243,7 +247,9 @@ func (r *Service) subscribeDynamicWithSubnets(
} }
if !wanted && v != nil { if !wanted && v != nil {
v.Cancel() v.Cancel()
r.p2p.PubSub().UnregisterTopicValidator(fmt.Sprintf(topicFormat, k)) if err := r.p2p.PubSub().UnregisterTopicValidator(fmt.Sprintf(topicFormat, k)); err != nil {
log.WithError(err).Error("Failed to unregister topic validator")
}
delete(subscriptions, k) delete(subscriptions, k)
} }
} }
@@ -312,7 +318,9 @@ func (r *Service) subscribeDynamic(topicFormat string, determineSubsLen func() i
subscriptions, cancelSubs = subscriptions[:wantedSubs-1], subscriptions[wantedSubs:] subscriptions, cancelSubs = subscriptions[:wantedSubs-1], subscriptions[wantedSubs:]
for i, sub := range cancelSubs { for i, sub := range cancelSubs {
sub.Cancel() sub.Cancel()
r.p2p.PubSub().UnregisterTopicValidator(fmt.Sprintf(topicFormat, i+wantedSubs)) if err := r.p2p.PubSub().UnregisterTopicValidator(fmt.Sprintf(topicFormat, i+wantedSubs)); err != nil {
log.WithError(err).Error("Failed to unregister topic validator")
}
} }
} else if len(subscriptions) < wantedSubs { // Increase topics } else if len(subscriptions) < wantedSubs { // Increase topics
for i := len(subscriptions); i < wantedSubs; i++ { for i := len(subscriptions); i < wantedSubs; i++ {

View File

@@ -15,7 +15,10 @@ import (
) )
func (r *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message) error { func (r *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message) error {
signed := msg.(*ethpb.SignedBeaconBlock) signed, ok := msg.(*ethpb.SignedBeaconBlock)
if !ok {
return errors.New("message is not type *ethpb.SignedBeaconBlock")
}
if signed == nil || signed.Block == nil { if signed == nil || signed.Block == nil {
return errors.New("nil block") return errors.New("nil block")

View File

@@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache" "github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed" "github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
@@ -19,7 +20,11 @@ func (r *Service) committeeIndexBeaconAttestationSubscriber(ctx context.Context,
return fmt.Errorf("message was not type *eth.Attestation, type=%T", msg) return fmt.Errorf("message was not type *eth.Attestation, type=%T", msg)
} }
if exists, _ := r.attPool.HasAggregatedAttestation(a); exists { exists, err := r.attPool.HasAggregatedAttestation(a)
if err != nil {
return errors.Wrap(err, "failed to determine if attestation pool has this atttestation")
}
if exists {
return nil return nil
} }

View File

@@ -31,7 +31,10 @@ type TestAccount struct {
// Setup creates the simulated backend with the deposit contract deployed // Setup creates the simulated backend with the deposit contract deployed
func Setup() (*TestAccount, error) { func Setup() (*TestAccount, error) {
genesis := make(core.GenesisAlloc) genesis := make(core.GenesisAlloc)
privKey, _ := crypto.GenerateKey() privKey, err := crypto.GenerateKey()
if err != nil {
return nil, err
}
pubKeyECDSA, ok := privKey.Public().(*ecdsa.PublicKey) pubKeyECDSA, ok := privKey.Public().(*ecdsa.PublicKey)
if !ok { if !ok {
return nil, fmt.Errorf("error casting public key to ECDSA") return nil, fmt.Errorf("error casting public key to ECDSA")

View File

@@ -98,7 +98,10 @@ func StartValidators(
t.Fatal(err) t.Fatal(err)
} }
deposits, _, _ := testutil.DeterministicDepositsAndKeys(uint64(validatorNum)) deposits, _, err := testutil.DeterministicDepositsAndKeys(uint64(validatorNum))
if err != nil {
t.Fatal(err)
}
_, roots, err := testutil.DeterministicDepositTrie(len(deposits)) _, roots, err := testutil.DeterministicDepositTrie(len(deposits))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)

View File

@@ -75,5 +75,16 @@
"beacon-chain/sync/deadlines\\.go": "Libp2p uses time.Now and this file sets a time based deadline in such a way that roughtime cannot be used", "beacon-chain/sync/deadlines\\.go": "Libp2p uses time.Now and this file sets a time based deadline in such a way that roughtime cannot be used",
"validator/client/grpc_interceptor\\.go": "Uses time.Now() for gRPC duration logging" "validator/client/grpc_interceptor\\.go": "Uses time.Now() for gRPC duration logging"
} }
},
"errcheck": {
"exclude_files": {
"external/.*": "Third party code",
"rules_go_work-.*": "Third party code",
".*/.*_test\\.go": "TODO(5404): In a follow up PR",
"beacon-chain/p2p/testing/.*\\.go": "TODO(5404): In a follow up PR",
"shared/mock/.*\\.go": "Mocks are OK",
".*/.*mock\\.go": "Mocks are OK",
".*/testmain\\.go": "Test runner generated code"
}
} }
} }

View File

@@ -14,6 +14,7 @@ go_library(
"//shared/params:go_default_library", "//shared/params:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library", "@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_pkg_errors//:go_default_library", "@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@herumi_bls_eth_go_binary//:go_default_library", "@herumi_bls_eth_go_binary//:go_default_library",
], ],
) )

View File

@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/featureconfig" "github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil" "github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
) )
func init() { func init() {
@@ -212,8 +213,11 @@ func (s *Signature) VerifyAggregateCommon(pubKeys []*PublicKey, msg [32]byte, do
if len(pubKeys) == 0 { if len(pubKeys) == 0 {
return false return false
} }
//#nosec G104 aggregated, err := pubKeys[0].Copy()
aggregated, _ := pubKeys[0].Copy() if err != nil {
logrus.WithError(err).Error("Failed to copy public key")
return false
}
for i := 1; i < len(pubKeys); i++ { for i := 1; i < len(pubKeys); i++ {
aggregated.p.Add(pubKeys[i].p) aggregated.p.Add(pubKeys[i].p)

View File

@@ -280,7 +280,11 @@ func writeProfile(name, file string) error {
if err != nil { if err != nil {
return err return err
} }
defer f.Close() defer func() {
if err := f.Close(); err != nil {
log.WithError(err).Error("Failed to close pprof profile file.")
}
}()
return p.WriteTo(f, 0) return p.WriteTo(f, 0)
} }

View File

@@ -25,7 +25,10 @@ var sha256Pool = sync.Pool{New: func() interface{} {
// Hash defines a function that returns the sha256 checksum of the data passed in. // Hash defines a function that returns the sha256 checksum of the data passed in.
// https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#hash // https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#hash
func Hash(data []byte) [32]byte { func Hash(data []byte) [32]byte {
h := sha256Pool.Get().(hash.Hash) h, ok := sha256Pool.Get().(hash.Hash)
if !ok {
h = sha256.New()
}
defer sha256Pool.Put(h) defer sha256Pool.Put(h)
h.Reset() h.Reset()
@@ -49,8 +52,12 @@ func Hash(data []byte) [32]byte {
// Note: that this method is only more performant over // Note: that this method is only more performant over
// hashutil.Hash if the callback is used more than 5 times. // hashutil.Hash if the callback is used more than 5 times.
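// A typical caller (hypothetical names) obtains the closure once and reuses it:
//
//	hashFn := CustomSHA256Hasher()
//	for _, chunk := range chunks {
//		roots = append(roots, hashFn(chunk))
//	}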
func CustomSHA256Hasher() func([]byte) [32]byte { func CustomSHA256Hasher() func([]byte) [32]byte {
hasher := sha256Pool.Get().(hash.Hash) hasher, ok := sha256Pool.Get().(hash.Hash)
hasher.Reset() if !ok {
hasher = sha256.New()
} else {
hasher.Reset()
}
var hash [32]byte var hash [32]byte
return func(data []byte) [32]byte { return func(data []byte) [32]byte {
@@ -76,7 +83,10 @@ var keccak256Pool = sync.Pool{New: func() interface{} {
func HashKeccak256(data []byte) [32]byte { func HashKeccak256(data []byte) [32]byte {
var b [32]byte var b [32]byte
h := keccak256Pool.Get().(hash.Hash) h, ok := keccak256Pool.Get().(hash.Hash)
if !ok {
h = sha3.NewLegacyKeccak256()
}
defer keccak256Pool.Put(h) defer keccak256Pool.Put(h)
h.Reset() h.Reset()

View File

@@ -277,7 +277,10 @@ func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {
} else if cryptoJSON.KDF == "pbkdf2" { } else if cryptoJSON.KDF == "pbkdf2" {
c := ensureInt(cryptoJSON.KDFParams["c"]) c := ensureInt(cryptoJSON.KDFParams["c"])
prf := cryptoJSON.KDFParams["prf"].(string) prf, ok := cryptoJSON.KDFParams["prf"].(string)
if !ok {
return nil, errors.New("KDFParams are not type string")
}
if prf != "hmac-sha256" { if prf != "hmac-sha256" {
return nil, fmt.Errorf("unsupported PBKDF2 PRF: %s", prf) return nil, fmt.Errorf("unsupported PBKDF2 PRF: %s", prf)
} }

View File

@@ -33,7 +33,10 @@ func GetDoubleByteSlice(size int) [][]byte {
if rawObj == nil { if rawObj == nil {
return make([][]byte, size) return make([][]byte, size)
} }
byteSlice := rawObj.([][]byte) byteSlice, ok := rawObj.([][]byte)
if !ok {
return nil
}
if len(byteSlice) >= size { if len(byteSlice) >= size {
return byteSlice[:size] return byteSlice[:size]
} }
@@ -59,7 +62,10 @@ func GetBlockRootsTrie(size int) [][]*[32]byte {
if rawObj == nil { if rawObj == nil {
return make([][]*[32]byte, size) return make([][]*[32]byte, size)
} }
byteSlice := rawObj.([][]*[32]byte) byteSlice, ok := rawObj.([][]*[32]byte)
if !ok {
return nil
}
if len(byteSlice) >= size { if len(byteSlice) >= size {
return byteSlice[:size] return byteSlice[:size]
} }
@@ -85,7 +91,10 @@ func GetStateRootsTrie(size int) [][]*[32]byte {
if rawObj == nil { if rawObj == nil {
return make([][]*[32]byte, size) return make([][]*[32]byte, size)
} }
byteSlice := rawObj.([][]*[32]byte) byteSlice, ok := rawObj.([][]*[32]byte)
if !ok {
return nil
}
if len(byteSlice) >= size { if len(byteSlice) >= size {
return byteSlice[:size] return byteSlice[:size]
} }
@@ -111,7 +120,10 @@ func GetRandaoMixesTrie(size int) [][]*[32]byte {
if rawObj == nil { if rawObj == nil {
return make([][]*[32]byte, size) return make([][]*[32]byte, size)
} }
byteSlice := rawObj.([][]*[32]byte) byteSlice, ok := rawObj.([][]*[32]byte)
if !ok {
return nil
}
if len(byteSlice) >= size { if len(byteSlice) >= size {
return byteSlice[:size] return byteSlice[:size]
} }

View File

@@ -1,6 +1,8 @@
package prometheus package prometheus
import ( import (
"errors"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@@ -34,7 +36,10 @@ func NewLogrusCollector() *LogrusCollector {
func (hook *LogrusCollector) Fire(entry *logrus.Entry) error { func (hook *LogrusCollector) Fire(entry *logrus.Entry) error {
prefix := defaultprefix prefix := defaultprefix
if prefixValue, ok := entry.Data[prefixKey]; ok { if prefixValue, ok := entry.Data[prefixKey]; ok {
prefix = prefixValue.(string) prefix, ok = prefixValue.(string)
if !ok {
return errors.New("prefix is not a string")
}
} }
hook.counterVec.WithLabelValues(entry.Level.String(), prefix).Inc() hook.counterVec.WithLabelValues(entry.Level.String(), prefix).Inc()
return nil return nil

View File

@@ -90,10 +90,12 @@ func (s *Service) healthzHandler(w http.ResponseWriter, _ *http.Request) {
func (s *Service) goroutinezHandler(w http.ResponseWriter, _ *http.Request) { func (s *Service) goroutinezHandler(w http.ResponseWriter, _ *http.Request) {
stack := debug.Stack() stack := debug.Stack()
// #nosec G104 if _, err := w.Write(stack); err != nil {
w.Write(stack) log.WithError(err).Error("Failed to write goroutines stack")
// #nosec G104 }
pprof.Lookup("goroutine").WriteTo(w, 2) if err := pprof.Lookup("goroutine").WriteTo(w, 2); err != nil {
log.WithError(err).Error("Failed to write pprof goroutines")
}
} }
// Start the prometheus service. // Start the prometheus service.
@@ -103,7 +105,9 @@ func (s *Service) Start() {
addrParts := strings.Split(s.server.Addr, ":") addrParts := strings.Split(s.server.Addr, ":")
conn, err := net.DialTimeout("tcp", fmt.Sprintf("127.0.0.1:%s", addrParts[1]), time.Second) conn, err := net.DialTimeout("tcp", fmt.Sprintf("127.0.0.1:%s", addrParts[1]), time.Second)
if err == nil { if err == nil {
conn.Close() if err := conn.Close(); err != nil {
log.WithError(err).Error("Failed to close connection")
}
// Something on the port; we cannot use it. // Something on the port; we cannot use it.
log.WithField("address", s.server.Addr).Warn("Port already in use; cannot start prometheus service") log.WithField("address", s.server.Addr).Warn("Port already in use; cannot start prometheus service")
} else { } else {

View File

@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace" "go.opencensus.io/trace"
) )
@@ -52,8 +53,7 @@ func (db *Store) HasBlockHeader(ctx context.Context, epoch uint64, validatorID u
defer span.End() defer span.End()
prefix := encodeEpochValidatorID(epoch, validatorID) prefix := encodeEpochValidatorID(epoch, validatorID)
var hasBlockHeader bool var hasBlockHeader bool
// #nosec G104 if err := db.view(func(tx *bolt.Tx) error {
_ = db.view(func(tx *bolt.Tx) error {
c := tx.Bucket(historicBlockHeadersBucket).Cursor() c := tx.Bucket(historicBlockHeadersBucket).Cursor()
for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = c.Next() { for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = c.Next() {
hasBlockHeader = true hasBlockHeader = true
@@ -61,7 +61,9 @@ func (db *Store) HasBlockHeader(ctx context.Context, epoch uint64, validatorID u
} }
hasBlockHeader = false hasBlockHeader = false
return nil return nil
}) }); err != nil {
logrus.WithError(err).Error("Failed to lookup block header from DB")
}
return hasBlockHeader return hasBlockHeader
} }

View File

@@ -0,0 +1,31 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test", "go_tool_library")
go_library(
name = "go_default_library",
srcs = ["analyzer.go"],
importpath = "github.com/prysmaticlabs/prysm/tools/analyzers/errcheck",
visibility = ["//visibility:public"],
deps = [
"@org_golang_x_tools//go/analysis:go_default_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
"@org_golang_x_tools//go/ast/inspector:go_default_library",
],
)
go_tool_library(
name = "go_tool_library",
srcs = ["analyzer.go"],
importpath = "errcheck",
visibility = ["//visibility:public"],
deps = [
"@org_golang_x_tools//go/analysis:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_tool_library",
"@org_golang_x_tools//go/ast/inspector:go_tool_library",
],
)
go_test(
name = "go_default_test",
srcs = ["embedded_walker_test.go"],
embed = [":go_default_library"],
)

View File

@@ -0,0 +1,442 @@
// Package errcheck implements a static analysis analyzer to ensure that errors are handled in Go
// code. This analyzer was adapted from https://github.com/kisielk/errcheck (MIT License).
package errcheck
import (
"errors"
"fmt"
"go/ast"
"go/token"
"go/types"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
)
// Doc explaining the tool.
const Doc = "This tool enforces all errors must be handled and that type assertions test that " +
"the type implements the given interface to prevent runtime panics."
// Analyzer runs static analysis.
var Analyzer = &analysis.Analyzer{
Name: "errcheck",
Doc: Doc,
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
}
var exclusions = make(map[string]bool)
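// init seeds the exclusion list with call targets whose return values are
// conventionally ignored; for example, a bare fmt.Println(...) call is not
// reported even though its signature returns (int, error).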
func init() {
for _, exc := range [...]string{
// bytes
"(*bytes.Buffer).Write",
"(*bytes.Buffer).WriteByte",
"(*bytes.Buffer).WriteRune",
"(*bytes.Buffer).WriteString",
// fmt
"fmt.Errorf",
"fmt.Print",
"fmt.Printf",
"fmt.Println",
"fmt.Fprint(*bytes.Buffer)",
"fmt.Fprintf(*bytes.Buffer)",
"fmt.Fprintln(*bytes.Buffer)",
"fmt.Fprint(*strings.Builder)",
"fmt.Fprintf(*strings.Builder)",
"fmt.Fprintln(*strings.Builder)",
"fmt.Fprint(os.Stderr)",
"fmt.Fprintf(os.Stderr)",
"fmt.Fprintln(os.Stderr)",
// math/rand
"math/rand.Read",
"(*math/rand.Rand).Read",
// hash
"(hash.Hash).Write",
} {
exclusions[exc] = true
}
}
func run(pass *analysis.Pass) (interface{}, error) {
inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
if !ok {
return nil, errors.New("analyzer is not type *inspector.Inspector")
}
nodeFilter := []ast.Node{
(*ast.CallExpr)(nil),
(*ast.ExprStmt)(nil),
(*ast.GoStmt)(nil),
(*ast.DeferStmt)(nil),
(*ast.AssignStmt)(nil),
}
inspect.Preorder(nodeFilter, func(node ast.Node) {
switch stmt := node.(type) {
case *ast.ExprStmt:
if call, ok := stmt.X.(*ast.CallExpr); ok {
if !ignoreCall(pass, call) && callReturnsError(pass, call) {
reportUnhandledError(pass, call.Lparen, call)
}
}
case *ast.GoStmt:
if !ignoreCall(pass, stmt.Call) && callReturnsError(pass, stmt.Call) {
reportUnhandledError(pass, stmt.Call.Lparen, stmt.Call)
}
case *ast.DeferStmt:
if !ignoreCall(pass, stmt.Call) && callReturnsError(pass, stmt.Call) {
reportUnhandledError(pass, stmt.Call.Lparen, stmt.Call)
}
case *ast.AssignStmt:
if len(stmt.Rhs) == 1 {
// single value on rhs; check against lhs identifiers
if call, ok := stmt.Rhs[0].(*ast.CallExpr); ok {
if ignoreCall(pass, call) {
break
}
isError := errorsByArg(pass, call)
for i := 0; i < len(stmt.Lhs); i++ {
if id, ok := stmt.Lhs[i].(*ast.Ident); ok {
// We shortcut calls to recover() because errorsByArg can't
// check its return types for errors since it returns interface{}.
if id.Name == "_" && (isRecover(pass, call) || isError[i]) {
reportUnhandledError(pass, id.NamePos, call)
}
}
}
} else if assert, ok := stmt.Rhs[0].(*ast.TypeAssertExpr); ok {
if assert.Type == nil {
// type switch
break
}
if len(stmt.Lhs) < 2 {
// assertion result not read
reportUnhandledTypeAssertion(pass, stmt.Rhs[0].Pos())
} else if id, ok := stmt.Lhs[1].(*ast.Ident); ok && id.Name == "_" {
// assertion result ignored
reportUnhandledTypeAssertion(pass, id.NamePos)
}
}
} else {
// multiple values on the rhs; in this case a call can't return
// multiple values. Assume len(stmt.Lhs) == len(stmt.Rhs)
for i := 0; i < len(stmt.Lhs); i++ {
if id, ok := stmt.Lhs[i].(*ast.Ident); ok {
if call, ok := stmt.Rhs[i].(*ast.CallExpr); ok {
if ignoreCall(pass, call) {
continue
}
if id.Name == "_" && callReturnsError(pass, call) {
reportUnhandledError(pass, id.NamePos, call)
}
} else if assert, ok := stmt.Rhs[i].(*ast.TypeAssertExpr); ok {
if assert.Type == nil {
// Shouldn't happen anyway, no multi assignment in type switches
continue
}
reportUnhandledError(pass, id.NamePos, nil)
}
}
}
}
default:
}
})
return nil, nil
}
func reportUnhandledError(pass *analysis.Pass, pos token.Pos, call *ast.CallExpr) {
pass.Reportf(pos, "Unhandled error for function call %s", fullName(pass, call))
}
func reportUnhandledTypeAssertion(pass *analysis.Pass, pos token.Pos) {
pass.Reportf(pos, "Unhandled type assertion check. You must test whether or not an "+
"interface implements the asserted type.")
}
func fullName(pass *analysis.Pass, call *ast.CallExpr) string {
_, fn, ok := selectorAndFunc(pass, call)
if !ok {
return ""
}
return fn.FullName()
}
// selectorAndFunc tries to get the selector and function from call expression.
// For example, given the call expression representing "a.b()", the selector
// is "a.b" and the function is "b" itself.
//
// The final return value will be true if it is able to extract a selector
// from the call and look up the function object it refers to.
//
// If the call does not include a selector (like if it is a plain "f()" function call)
// then the final return value will be false.
func selectorAndFunc(pass *analysis.Pass, call *ast.CallExpr) (*ast.SelectorExpr, *types.Func, bool) {
if call == nil || call.Fun == nil {
return nil, nil, false
}
sel, ok := call.Fun.(*ast.SelectorExpr)
if !ok {
return nil, nil, false
}
fn, ok := pass.TypesInfo.ObjectOf(sel.Sel).(*types.Func)
if !ok {
return nil, nil, false
}
return sel, fn, true
}
func ignoreCall(pass *analysis.Pass, call *ast.CallExpr) bool {
for _, name := range namesForExcludeCheck(pass, call) {
if exclusions[name] {
return true
}
}
return false
}
var errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface)
func isErrorType(t types.Type) bool {
return types.Implements(t, errorType)
}
func callReturnsError(pass *analysis.Pass, call *ast.CallExpr) bool {
if isRecover(pass, call) {
return true
}
for _, isError := range errorsByArg(pass, call) {
if isError {
return true
}
}
return false
}
// errorsByArg returns a slice s such that
// len(s) == number of return types of call
// s[i] == true iff return type at position i from left is an error type
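// For example, for a call to a function with signature func() (int, error),
// errorsByArg returns []bool{false, true}.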
func errorsByArg(pass *analysis.Pass, call *ast.CallExpr) []bool {
switch t := pass.TypesInfo.Types[call].Type.(type) {
case *types.Named:
// Single return
return []bool{isErrorType(t)}
case *types.Pointer:
// Single return via pointer
return []bool{isErrorType(t)}
case *types.Tuple:
// Multiple returns
s := make([]bool, t.Len())
for i := 0; i < t.Len(); i++ {
switch et := t.At(i).Type().(type) {
case *types.Named:
// Single return
s[i] = isErrorType(et)
case *types.Pointer:
// Single return via pointer
s[i] = isErrorType(et)
default:
s[i] = false
}
}
return s
}
return []bool{false}
}
func isRecover(pass *analysis.Pass, call *ast.CallExpr) bool {
if fun, ok := call.Fun.(*ast.Ident); ok {
if _, ok := pass.TypesInfo.Uses[fun].(*types.Builtin); ok {
return fun.Name == "recover"
}
}
return false
}
func namesForExcludeCheck(pass *analysis.Pass, call *ast.CallExpr) []string {
sel, fn, ok := selectorAndFunc(pass, call)
if !ok {
return nil
}
name := fullName(pass, call)
if name == "" {
return nil
}
// This will be missing for functions without a receiver (like fmt.Printf),
// so just fall back to the function's fullName in that case.
selection, ok := pass.TypesInfo.Selections[sel]
if !ok {
return []string{name}
}
// This will return with ok false if the function isn't defined
// on an interface, so just fall back to the fullName.
ts, ok := walkThroughEmbeddedInterfaces(selection)
if !ok {
return []string{name}
}
result := make([]string, len(ts))
for i, t := range ts {
// Like in fullName, vendored packages will have /vendor/ in their name,
// thus not matching vendored standard library packages. If we
// want to support vendored stdlib packages, we need to implement
// additional logic here.
result[i] = fmt.Sprintf("(%s).%s", t.String(), fn.Name())
}
return result
}
// walkThroughEmbeddedInterfaces returns a slice of Interfaces that
// we need to walk through in order to reach the actual definition,
// in an Interface, of the method selected by the given selection.
//
// false will be returned in the second return value if:
// - the right side of the selection is not a function
// - the actual definition of the function is not in an Interface
//
// The returned slice will contain all the interface types that need
// to be walked through to reach the actual definition.
//
// For example, say we have:
//
// type Inner interface {Method()}
// type Middle interface {Inner}
// type Outer interface {Middle}
// type T struct {Outer}
// type U struct {T}
// type V struct {U}
//
// And then the selector:
//
// V.Method
//
// We'll return [Outer, Middle, Inner] by first walking through the embedded structs
// until we reach the Outer interface, then descending through the embedded interfaces
// until we find the one that actually explicitly defines Method.
func walkThroughEmbeddedInterfaces(sel *types.Selection) ([]types.Type, bool) {
fn, ok := sel.Obj().(*types.Func)
if !ok {
return nil, false
}
// Start off at the receiver.
currentT := sel.Recv()
// First, we can walk through any Struct fields provided
// by the selection Index() method. We ignore the last
// index because it would give the method itself.
indexes := sel.Index()
for _, fieldIndex := range indexes[:len(indexes)-1] {
currentT = getTypeAtFieldIndex(currentT, fieldIndex)
}
// Now currentT is either a type implementing the actual function,
// an Invalid type (if the receiver is a package), or an interface.
//
// If it's not an Interface, then we're done, as this function
// only cares about Interface-defined functions.
//
// If it is an Interface, we potentially need to continue digging until
// we find the Interface that actually explicitly defines the function.
interfaceT, ok := maybeUnname(currentT).(*types.Interface)
if !ok {
return nil, false
}
// The first interface we pass through is this one we've found. We return the possibly
// wrapping types.Named because it is more useful to work with for callers.
result := []types.Type{currentT}
// If this interface itself explicitly defines the given method
// then we're done digging.
for !explicitlyDefinesMethod(interfaceT, fn) {
// Otherwise, we find which of the embedded interfaces _does_
// define the method, add it to our list, and loop.
namedInterfaceT, ok := getEmbeddedInterfaceDefiningMethod(interfaceT, fn)
if !ok {
// This should be impossible as long as we type-checked: either the
// interface or one of its embedded ones must implement the method...
panic(fmt.Sprintf("either %v or one of its embedded interfaces must implement %v", currentT, fn))
}
result = append(result, namedInterfaceT)
interfaceT, ok = namedInterfaceT.Underlying().(*types.Interface)
if !ok {
panic(fmt.Sprintf("either %v or one of its embedded interfaces must implement %v", currentT, fn))
}
}
return result, true
}
func getTypeAtFieldIndex(startingAt types.Type, fieldIndex int) types.Type {
t := maybeUnname(maybeDereference(startingAt))
s, ok := t.(*types.Struct)
if !ok {
panic(fmt.Sprintf("cannot get Field of a type that is not a struct, got a %T", t))
}
return s.Field(fieldIndex).Type()
}
// getEmbeddedInterfaceDefiningMethod searches through any embedded interfaces of the
// passed interface searching for one that defines the given function. If found, the
// types.Named wrapping that interface will be returned along with true in the second value.
//
// If no such embedded interface is found, nil and false are returned.
func getEmbeddedInterfaceDefiningMethod(interfaceT *types.Interface, fn *types.Func) (*types.Named, bool) {
for i := 0; i < interfaceT.NumEmbeddeds(); i++ {
embedded := interfaceT.Embedded(i)
if definesMethod(embedded.Underlying().(*types.Interface), fn) {
return embedded, true
}
}
return nil, false
}
func explicitlyDefinesMethod(interfaceT *types.Interface, fn *types.Func) bool {
for i := 0; i < interfaceT.NumExplicitMethods(); i++ {
if interfaceT.ExplicitMethod(i) == fn {
return true
}
}
return false
}
func definesMethod(interfaceT *types.Interface, fn *types.Func) bool {
for i := 0; i < interfaceT.NumMethods(); i++ {
if interfaceT.Method(i) == fn {
return true
}
}
return false
}
func maybeDereference(t types.Type) types.Type {
p, ok := t.(*types.Pointer)
if ok {
return p.Elem()
}
return t
}
func maybeUnname(t types.Type) types.Type {
n, ok := t.(*types.Named)
if ok {
return n.Underlying()
}
return t
}

View File

@@ -0,0 +1,93 @@
package errcheck
import (
"go/ast"
"go/parser"
"go/token"
"go/types"
"testing"
)
const commonSrc = `
package p
type Inner struct {}
func (Inner) Method()
type Outer struct {Inner}
type OuterP struct {*Inner}
type InnerInterface interface {
Method()
}
type OuterInterface interface {InnerInterface}
type MiddleInterfaceStruct struct {OuterInterface}
type OuterInterfaceStruct struct {MiddleInterfaceStruct}
var c = `
type testCase struct {
selector string
expectedOk bool
expected []string
}
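// For instance, the case for "OuterInterface.Method" below expects the walk to
// pass through test.OuterInterface and then test.InnerInterface.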
func TestWalkThroughEmbeddedInterfaces(t *testing.T) {
cases := []testCase{
testCase{"Inner{}.Method", false, nil},
testCase{"(&Inner{}).Method", false, nil},
testCase{"Outer{}.Method", false, nil},
testCase{"InnerInterface.Method", true, []string{"test.InnerInterface"}},
testCase{"OuterInterface.Method", true, []string{"test.OuterInterface", "test.InnerInterface"}},
testCase{"OuterInterfaceStruct.Method", true, []string{"test.OuterInterface", "test.InnerInterface"}},
}
for _, c := range cases {
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, "test", commonSrc+c.selector, 0)
if err != nil {
t.Fatal(err)
}
conf := types.Config{}
info := types.Info{
Selections: make(map[*ast.SelectorExpr]*types.Selection),
}
_, err = conf.Check("test", fset, []*ast.File{f}, &info)
if err != nil {
t.Fatal(err)
}
ast.Inspect(f, func(n ast.Node) bool {
s, ok := n.(*ast.SelectorExpr)
if ok {
selection, ok := info.Selections[s]
if !ok {
t.Fatalf("no Selection!")
}
ts, ok := walkThroughEmbeddedInterfaces(selection)
if ok != c.expectedOk {
t.Errorf("expected ok %v got %v", c.expectedOk, ok)
return false
}
if !ok {
return false
}
if len(ts) != len(c.expected) {
t.Fatalf("expected %d types, got %d", len(c.expected), len(ts))
}
for i, e := range c.expected {
if e != ts[i].String() {
t.Errorf("mismatch at index %d: expected %s got %s", i, e, ts[i])
}
}
}
return true
})
}
}

View File

@@ -1,6 +1,7 @@
package maligned package maligned
import ( import (
"errors"
"go/ast" "go/ast"
"go/types" "go/types"
@@ -21,7 +22,10 @@ var Analyzer = &analysis.Analyzer{
} }
func run(pass *analysis.Pass) (interface{}, error) { func run(pass *analysis.Pass) (interface{}, error) {
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
if !ok {
return nil, errors.New("analyzer is not type *inspector.Inspector")
}
nodeFilter := []ast.Node{ nodeFilter := []ast.Node{
(*ast.StructType)(nil), (*ast.StructType)(nil),

View File

@@ -1,6 +1,7 @@
package roughtime package roughtime
import ( import (
"errors"
"go/ast" "go/ast"
"golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis"
@@ -22,7 +23,10 @@ var Analyzer = &analysis.Analyzer{
} }
func run(pass *analysis.Pass) (interface{}, error) { func run(pass *analysis.Pass) (interface{}, error) {
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
if !ok {
return nil, errors.New("analyzer is not type *inspector.Inspector")
}
nodeFilter := []ast.Node{ nodeFilter := []ast.Node{
(*ast.CallExpr)(nil), (*ast.CallExpr)(nil),

View File

@@ -139,7 +139,9 @@ func generateMarshalledFullStateAndBlock() error {
} }
// Temporarily incrementing the beacon state slot here since BeaconProposerIndex is a // Temporarily incrementing the beacon state slot here since BeaconProposerIndex is a
// function deterministic on beacon state slot. // function deterministic on beacon state slot.
beaconState.SetSlot(beaconState.Slot() + 1) if err := beaconState.SetSlot(beaconState.Slot() + 1); err != nil {
return err
}
proposerIdx, err := helpers.BeaconProposerIndex(beaconState) proposerIdx, err := helpers.BeaconProposerIndex(beaconState)
if err != nil { if err != nil {
return err return err
@@ -149,7 +151,9 @@ func generateMarshalledFullStateAndBlock() error {
return err return err
} }
block.Signature = privs[proposerIdx].Sign(blockRoot[:], domain).Marshal() block.Signature = privs[proposerIdx].Sign(blockRoot[:], domain).Marshal()
beaconState.SetSlot(beaconState.Slot() - 1) if err := beaconState.SetSlot(beaconState.Slot() - 1); err != nil {
return err
}
beaconBytes, err := ssz.Marshal(beaconState) beaconBytes, err := ssz.Marshal(beaconState)
if err != nil { if err != nil {

View File

@@ -16,6 +16,7 @@ import (
"encoding/hex" "encoding/hex"
"flag" "flag"
"fmt" "fmt"
"io"
"net" "net"
"net/http" "net/http"
"os" "os"
@@ -169,17 +170,21 @@ func createListener(ipAddr string, port int, cfg discover.Config) *discover.UDPv
func (h *handler) httpHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) httpHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
allNodes := h.listener.AllNodes() write := func(w io.Writer, b []byte) {
w.Write([]byte("Nodes stored in the table:\n")) if _, err := w.Write(b); err != nil {
for i, n := range allNodes { log.WithError(err).Error("Failed to write to http response")
w.Write([]byte(fmt.Sprintf("Node %d\n", i))) }
w.Write([]byte(n.String() + "\n")) }
w.Write([]byte("Node ID: " + n.ID().String() + "\n")) allNodes := h.listener.AllNodes()
w.Write([]byte("IP: " + n.IP().String() + "\n")) write(w, []byte("Nodes stored in the table:\n"))
w.Write([]byte(fmt.Sprintf("UDP Port: %d", n.UDP()) + "\n")) for i, n := range allNodes {
w.Write([]byte(fmt.Sprintf("TCP Port: %d", n.UDP()) + "\n\n")) write(w, []byte(fmt.Sprintf("Node %d\n", i)))
write(w, []byte(n.String()+"\n"))
write(w, []byte("Node ID: "+n.ID().String()+"\n"))
write(w, []byte("IP: "+n.IP().String()+"\n"))
write(w, []byte(fmt.Sprintf("UDP Port: %d", n.UDP())+"\n"))
write(w, []byte(fmt.Sprintf("TCP Port: %d", n.UDP())+"\n\n"))
} }
} }
func createLocalNode(privKey *ecdsa.PrivateKey, ipAddr net.IP, port int) (*enode.LocalNode, error) { func createLocalNode(privKey *ecdsa.PrivateKey, ipAddr net.IP, port int) (*enode.LocalNode, error) {

View File

@@ -12,6 +12,7 @@ go_library(
"@com_github_ethereum_go_ethereum//common:go_default_library", "@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//ethclient:go_default_library", "@com_github_ethereum_go_ethereum//ethclient:go_default_library",
"@com_github_ethereum_go_ethereum//params:go_default_library", "@com_github_ethereum_go_ethereum//params:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_uber_go_automaxprocs//:go_default_library", "@org_uber_go_automaxprocs//:go_default_library",
], ],
) )
@@ -37,6 +38,7 @@ go_image(
"@com_github_ethereum_go_ethereum//common:go_default_library", "@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//ethclient:go_default_library", "@com_github_ethereum_go_ethereum//ethclient:go_default_library",
"@com_github_ethereum_go_ethereum//params:go_default_library", "@com_github_ethereum_go_ethereum//params:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_uber_go_automaxprocs//:go_default_library", "@org_uber_go_automaxprocs//:go_default_library",
], ],
) )

View File

@@ -17,6 +17,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/sirupsen/logrus"
_ "go.uber.org/automaxprocs" _ "go.uber.org/automaxprocs"
) )
@@ -135,7 +136,9 @@ func MetricsHTTP(w http.ResponseWriter, r *http.Request) {
allOut = append(allOut, fmt.Sprintf("%veth_load_seconds %0.2f", *prefix, loadSeconds)) allOut = append(allOut, fmt.Sprintf("%veth_load_seconds %0.2f", *prefix, loadSeconds))
allOut = append(allOut, fmt.Sprintf("%veth_loaded_addresses %v", *prefix, totalLoaded)) allOut = append(allOut, fmt.Sprintf("%veth_loaded_addresses %v", *prefix, totalLoaded))
allOut = append(allOut, fmt.Sprintf("%veth_total_addresses %v", *prefix, len(allWatching))) allOut = append(allOut, fmt.Sprintf("%veth_total_addresses %v", *prefix, len(allWatching)))
fmt.Fprintln(w, strings.Join(allOut, "\n")) if _, err := fmt.Fprintln(w, strings.Join(allOut, "\n")); err != nil {
logrus.WithError(err).Error("Failed to write metrics")
}
} }
// ReloadHTTP reloads the addresses from disk. // ReloadHTTP reloads the addresses from disk.
@@ -154,7 +157,11 @@ func OpenAddresses(filename string) error {
if err != nil { if err != nil {
return err return err
} }
defer file.Close() defer func() {
if err := file.Close(); err != nil {
panic(err)
}
}()
scanner := bufio.NewScanner(file) scanner := bufio.NewScanner(file)
allWatching = []*Watching{} allWatching = []*Watching{}
for scanner.Scan() { for scanner.Scan() {

View File

@@ -57,7 +57,11 @@ func main() {
if err != nil { if err != nil {
log.Fatalf("Failed to create file at %s: %v", os.Args[2], err) log.Fatalf("Failed to create file at %s: %v", os.Args[2], err)
} }
defer outFile.Close() defer func() {
if err := outFile.Close(); err != nil {
panic(err)
}
}()
if err := keygen.SaveUnencryptedKeysToFile(outFile, out); err != nil { if err := keygen.SaveUnencryptedKeysToFile(outFile, out); err != nil {
log.Fatalf("Failed to save %v", err) log.Fatalf("Failed to save %v", err)
} }

View File

@@ -26,7 +26,11 @@ func main() {
if err != nil { if err != nil {
panic(err) panic(err)
} }
defer d.Close() defer func() {
if err := d.Close(); err != nil {
panic(err)
}
}()
gs, err := d.GenesisState(context.Background()) gs, err := d.GenesisState(context.Background())
if err != nil { if err != nil {
panic(err) panic(err)

View File

@@ -48,7 +48,11 @@ func NewUnencrypted(input string) (*Unencrypted, string, error) {
if err != nil { if err != nil {
return nil, unencryptedOptsHelp, err return nil, unencryptedOptsHelp, err
} }
defer reader.Close() defer func() {
if err := reader.Close(); err != nil {
log.WithError(err).Error("Failed to close file reader")
}
}()
keyMap, err := unencryptedKeysFromReader(reader) keyMap, err := unencryptedKeysFromReader(reader)
if err != nil { if err != nil {