QSP-9 Prevent Casting to Int if Possible (#6349)

* no cast to int

* fix up significant casting issues

* more casting

* even more casting fixes

* more casts

* fix subnets

* back to ints

* final touches

* broken test fix

* add in blocks test fix

* unskip

* revert bytes fixes

* casting fixes

* Update beacon-chain/db/kv/state.go

* Update beacon-chain/db/kv/blocks.go

* fmt

* slash:

* fix val tests

* fix up conf

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Raul Jordan authored on 2020-06-26 11:07:00 -05:00, committed by GitHub
parent 78465e2549
commit 252f758baa
43 changed files with 138 additions and 125 deletions
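The common thread across the hunks below is keeping lengths and configuration limits in `uint64`, widening `len(...)` at the comparison site instead of narrowing the limit with `int(...)`. A minimal sketch of the pattern (the names and values are illustrative, not taken from the PR):

```go
package main

import "fmt"

// withinLimit compares a slice length against a uint64 limit without casting
// the limit down to int. len() is never negative, so uint64(len(items)) is
// always a safe widening, whereas int(maxItems) could truncate or overflow on
// 32-bit platforms when the limit exceeds MaxInt32.
func withinLimit(items []string, maxItems uint64) bool {
	return uint64(len(items)) <= maxItems
}

func main() {
	hugeLimit := uint64(1) << 40 // larger than a 32-bit int can represent
	fmt.Println(withinLimit([]string{"a", "b"}, hugeLimit)) // true
}
```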

View File

@@ -4,7 +4,6 @@ import (
 	"encoding/hex"
 	"fmt"
 	"net/http"
-	"strconv"
 	"github.com/emicklei/dot"
 )
@@ -51,10 +50,10 @@ func (s *Service) TreeHandler(w http.ResponseWriter, _ *http.Request) {
 	for i := len(nodes) - 1; i >= 0; i-- {
 		// Construct label for each node.
-		slot := strconv.Itoa(int(nodes[i].Slot))
-		weight := strconv.Itoa(int(nodes[i].Weight / 1e9)) // Convert unit Gwei to unit ETH.
-		votes := strconv.Itoa(int(nodes[i].Weight / 1e9 / avgBalance))
-		index := strconv.Itoa(i)
+		slot := fmt.Sprintf("%d", nodes[i].Slot)
+		weight := fmt.Sprintf("%d", nodes[i].Weight/1e9) // Convert unit Gwei to unit ETH.
+		votes := fmt.Sprintf("%d", nodes[i].Weight/1e9/avgBalance)
+		index := fmt.Sprintf("%d", i)
 		g := nodes[i].Graffiti[:]
 		graffiti := hex.EncodeToString(g[:8])
 		label := "slot: " + slot + "\n votes: " + votes + "\n weight: " + weight + "\n graffiti: " + graffiti

View File

@@ -222,7 +222,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
 	}
 	// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in the memory.
-	if len(s.getInitSyncBlocks()) > int(initialSyncBlockCacheSize) {
+	if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
 		if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
 			return err
 		}

View File

@@ -304,7 +304,7 @@ func (s *Service) Stop() error {
 // Status always returns nil unless there is an error condition that causes
 // this service to be unhealthy.
 func (s *Service) Status() error {
-	if runtime.NumGoroutine() > int(s.maxRoutines) {
+	if int64(runtime.NumGoroutine()) > s.maxRoutines {
 		return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
 	}
 	return nil

View File

@@ -20,7 +20,7 @@ var (
 	// maxCheckpointStateSize defines the max number of entries check point to state cache can contain.
 	// Choosing 10 to account for multiple forks, this allows 5 forks per epoch boundary with 2 epochs
 	// window to accept attestation based on latest spec.
-	maxCheckpointStateSize = 10
+	maxCheckpointStateSize = uint64(10)
 	// Metrics.
 	checkpointStateMiss = promauto.NewCounter(prometheus.CounterOpts{

View File

@@ -118,12 +118,12 @@ func TestCheckpointStateCache_MaxSize(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	for i := 0; i < maxCheckpointStateSize+100; i++ {
-		if err := st.SetSlot(uint64(i)); err != nil {
+	for i := uint64(0); i < maxCheckpointStateSize+100; i++ {
+		if err := st.SetSlot(i); err != nil {
 			t.Fatal(err)
 		}
 		info := &CheckpointState{
-			Checkpoint: &ethpb.Checkpoint{Epoch: uint64(i)},
+			Checkpoint: &ethpb.Checkpoint{Epoch: i},
 			State: st,
 		}
 		if err := c.AddCheckpointState(info); err != nil {
@@ -131,7 +131,7 @@ func TestCheckpointStateCache_MaxSize(t *testing.T) {
 		}
 	}
-	if len(c.cache.ListKeys()) != maxCheckpointStateSize {
+	if uint64(len(c.cache.ListKeys())) != maxCheckpointStateSize {
 		t.Errorf(
 			"Expected hash cache key size to be %d, got %d",
 			maxCheckpointStateSize,

View File

@@ -19,7 +19,7 @@ var (
 	// maxCommitteesCacheSize defines the max number of shuffled committees on per randao basis can cache.
 	// Due to reorgs, it's good to keep the old cache around for quickly switch over. 10 is a generous
 	// cache size as it considers 3 concurrent branches over 3 epochs.
-	maxCommitteesCacheSize = 10
+	maxCommitteesCacheSize = uint64(10)
 	// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
 	CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
@@ -96,7 +96,7 @@ func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]
 	indexOffSet := index + (slot%params.BeaconConfig().SlotsPerEpoch)*committeeCountPerSlot
 	start, end := startEndIndices(item, indexOffSet)
-	if int(end) > len(item.ShuffledIndices) || end < start {
+	if end > uint64(len(item.ShuffledIndices)) || end < start {
 		return nil, errors.New("requested index out of bound")
 	}

View File

@@ -38,7 +38,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
 		}
 	}
-	if len(cache.CommitteeCache.ListKeys()) != maxCommitteesCacheSize {
+	if uint64(len(cache.CommitteeCache.ListKeys())) != maxCommitteesCacheSize {
 		t.Error("Incorrect key size")
 	}
 }
@@ -62,7 +62,7 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
 		}
 	}
-	if len(cache.CommitteeCache.ListKeys()) != maxCommitteesCacheSize {
+	if uint64(len(cache.CommitteeCache.ListKeys())) != maxCommitteesCacheSize {
 		t.Error("Incorrect key size")
 	}
 }

View File

@@ -182,7 +182,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
 	}
 	k := cache.CommitteeCache.ListKeys()
-	if len(k) != maxCommitteesCacheSize {
+	if uint64(len(k)) != maxCommitteesCacheSize {
 		t.Errorf("wanted: %d, got: %d", maxCommitteesCacheSize, len(k))
 	}

View File

@@ -8,12 +8,12 @@ import (
 var (
 	// maxCacheSize is 4x of the epoch length for additional cache padding.
 	// Requests should be only accessing committees within defined epoch length.
-	maxCacheSize = int(4 * params.BeaconConfig().SlotsPerEpoch)
+	maxCacheSize = 4 * params.BeaconConfig().SlotsPerEpoch
 )
 // trim the FIFO queue to the maxSize.
-func trim(queue *cache.FIFO, maxSize int) {
-	for s := len(queue.ListKeys()); s > maxSize; s-- {
+func trim(queue *cache.FIFO, maxSize uint64) {
+	for s := uint64(len(queue.ListKeys())); s > maxSize; s-- {
 		_, err := queue.Pop(popProcessNoopFunc)
 		if err != nil {
 			// popProcessNoopFunc never returns an error, but we handle this anyway to make linter

View File

@@ -191,7 +191,7 @@ func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t
 	}
 	n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(11))
-	if int(n) != 5 {
+	if n != 5 {
 		t.Errorf("Returned unexpected deposits number %d wanted %d", n, 5)
 	}
@@ -217,7 +217,7 @@ func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLes
 	}
 	n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(2))
-	if int(n) != 0 {
+	if n != 0 {
 		t.Errorf("Returned unexpected deposits number %d wanted %d", n, 0)
 	}

View File

@@ -25,12 +25,12 @@ var SubnetIDs = newSubnetIDs()
 func newSubnetIDs() *subnetIDs {
 	// Given a node can calculate committee assignments of current epoch and next epoch.
 	// Max size is set to 2 epoch length.
-	cacheSize := int(params.BeaconConfig().MaxCommitteesPerSlot * params.BeaconConfig().SlotsPerEpoch * 2)
-	attesterCache, err := lru.New(cacheSize)
+	cacheSize := params.BeaconConfig().MaxCommitteesPerSlot * params.BeaconConfig().SlotsPerEpoch * 2
+	attesterCache, err := lru.New(int(cacheSize))
 	if err != nil {
 		panic(err)
 	}
-	aggregatorCache, err := lru.New(cacheSize)
+	aggregatorCache, err := lru.New(int(cacheSize))
 	if err != nil {
 		panic(err)
 	}
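This subnet cache hunk illustrates a related convention in the change set: the size arithmetic stays in `uint64` and the narrowing happens exactly once, at the call into a library that only accepts `int`. A rough sketch with a stand-in constructor (the `newLRU` helper below is hypothetical, not Prysm code):

```go
package main

import "fmt"

// newLRU stands in for a third-party cache constructor that only takes int.
func newLRU(size int) ([]string, error) {
	return make([]string, 0, size), nil
}

func main() {
	// Keep the size computation in uint64 so the intermediate product is
	// computed in 64 bits even on 32-bit platforms, and cast once at the
	// API boundary.
	maxCommitteesPerSlot, slotsPerEpoch := uint64(64), uint64(32)
	cacheSize := maxCommitteesPerSlot * slotsPerEpoch * 2
	cache, err := newLRU(int(cacheSize))
	if err != nil {
		panic(err)
	}
	fmt.Println(cap(cache)) // 4096
}
```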

View File

@@ -65,7 +65,7 @@ func TestSubnetIDsCache_PersistentCommitteeRoundtrip(t *testing.T) {
 		c.AddPersistentCommittee(pubkey[:], []uint64{uint64(i)}, 0)
 	}
-	for i := 0; i < 20; i++ {
+	for i := uint64(0); i < 20; i++ {
 		pubkey := [48]byte{byte(i)}
 		idxs, ok, _ := c.GetPersistentSubnets(pubkey[:])
@@ -73,7 +73,7 @@ func TestSubnetIDsCache_PersistentCommitteeRoundtrip(t *testing.T) {
 			t.Errorf("Couldn't find entry in cache for pubkey %#x", pubkey)
 			continue
 		}
-		if int(idxs[0]) != i {
+		if idxs[0] != i {
 			t.Fatalf("Wanted index of %d but got %d", i, idxs[0])
 		}
 	}

View File

@@ -1109,7 +1109,7 @@ func ProcessVoluntaryExits(
 		if exit == nil || exit.Exit == nil {
 			return nil, errors.New("nil voluntary exit in block body")
 		}
-		if int(exit.Exit.ValidatorIndex) >= beaconState.NumValidators() {
+		if exit.Exit.ValidatorIndex >= uint64(beaconState.NumValidators()) {
 			return nil, fmt.Errorf(
 				"validator index out of bound %d > %d",
 				exit.Exit.ValidatorIndex,

View File

@@ -11,9 +11,9 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/params"
 )
-func FakeDeposits(n int) []*ethpb.Eth1Data {
+func FakeDeposits(n uint64) []*ethpb.Eth1Data {
 	deposits := make([]*ethpb.Eth1Data, n)
-	for i := 0; i < n; i++ {
+	for i := uint64(0); i < n; i++ {
 		deposits[i] = &ethpb.Eth1Data{
 			DepositCount: 1,
 			DepositRoot: []byte("root"),
@@ -30,7 +30,7 @@ func TestEth1DataHasEnoughSupport(t *testing.T) {
 		votingPeriodLength uint64
 	}{
 		{
-			stateVotes: FakeDeposits(4 * int(params.BeaconConfig().SlotsPerEpoch)),
+			stateVotes: FakeDeposits(4 * params.BeaconConfig().SlotsPerEpoch),
 			data: &ethpb.Eth1Data{
 				DepositCount: 1,
 				DepositRoot: []byte("root"),
@@ -38,7 +38,7 @@ func TestEth1DataHasEnoughSupport(t *testing.T) {
 			hasSupport: true,
 			votingPeriodLength: 7,
 		}, {
-			stateVotes: FakeDeposits(4 * int(params.BeaconConfig().SlotsPerEpoch)),
+			stateVotes: FakeDeposits(4 * params.BeaconConfig().SlotsPerEpoch),
 			data: &ethpb.Eth1Data{
 				DepositCount: 1,
 				DepositRoot: []byte("root"),
@@ -46,7 +46,7 @@ func TestEth1DataHasEnoughSupport(t *testing.T) {
 			hasSupport: false,
 			votingPeriodLength: 8,
 		}, {
-			stateVotes: FakeDeposits(4 * int(params.BeaconConfig().SlotsPerEpoch)),
+			stateVotes: FakeDeposits(4 * params.BeaconConfig().SlotsPerEpoch),
 			data: &ethpb.Eth1Data{
 				DepositCount: 1,
 				DepositRoot: []byte("root"),

View File

@@ -113,7 +113,7 @@ func ProcessRegistryUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconStat
 	sort.Sort(sortableIndices{indices: activationQ, validators: vals})
 	// Only activate just enough validators according to the activation churn limit.
-	limit := len(activationQ)
+	limit := uint64(len(activationQ))
 	activeValidatorCount, err := helpers.ActiveValidatorCount(state, currentEpoch)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not get active validator count")
@@ -125,8 +125,8 @@ func ProcessRegistryUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconStat
 	}
 	// Prevent churn limit cause index out of bound.
-	if int(churnLimit) < limit {
-		limit = int(churnLimit)
+	if churnLimit < limit {
+		limit = churnLimit
 	}
 	activationExitEpoch := helpers.ActivationExitEpoch(currentEpoch)
@@ -274,22 +274,22 @@ func ProcessFinalUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState,
 	// Set total slashed balances.
 	slashedExitLength := params.BeaconConfig().EpochsPerSlashingsVector
-	slashedEpoch := int(nextEpoch % slashedExitLength)
+	slashedEpoch := nextEpoch % slashedExitLength
 	slashings := state.Slashings()
-	if len(slashings) != int(slashedExitLength) {
+	if uint64(len(slashings)) != slashedExitLength {
 		return nil, fmt.Errorf(
 			"state slashing length %d different than EpochsPerHistoricalVector %d",
 			len(slashings),
 			slashedExitLength,
 		)
 	}
-	if err := state.UpdateSlashingsAtIndex(uint64(slashedEpoch) /* index */, 0 /* value */); err != nil {
+	if err := state.UpdateSlashingsAtIndex(slashedEpoch /* index */, 0 /* value */); err != nil {
 		return nil, err
 	}
 	// Set RANDAO mix.
 	randaoMixLength := params.BeaconConfig().EpochsPerHistoricalVector
-	if state.RandaoMixesLength() != int(randaoMixLength) {
+	if uint64(state.RandaoMixesLength()) != randaoMixLength {
 		return nil, fmt.Errorf(
 			"state randao length %d different than EpochsPerHistoricalVector %d",
 			state.RandaoMixesLength(),

View File

@@ -399,7 +399,7 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
 	if err != nil {
 		t.Error(err)
 	}
-	for i := 0; i < int(limit)+10; i++ {
+	for i := uint64(0); i < limit+10; i++ {
 		base.Validators = append(base.Validators, &ethpb.Validator{
 			ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
 			EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
@@ -417,11 +417,11 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
 			t.Errorf("Could not update registry %d, wanted activation eligibility epoch %d got %d",
 				i, currentEpoch, validator.ActivationEligibilityEpoch)
 		}
-		if i < int(limit) && validator.ActivationEpoch != helpers.ActivationExitEpoch(currentEpoch) {
+		if uint64(i) < limit && validator.ActivationEpoch != helpers.ActivationExitEpoch(currentEpoch) {
 			t.Errorf("Could not update registry %d, validators failed to activate: wanted activation epoch %d, got %d",
 				i, helpers.ActivationExitEpoch(currentEpoch), validator.ActivationEpoch)
 		}
-		if i >= int(limit) && validator.ActivationEpoch != params.BeaconConfig().FarFutureEpoch {
+		if uint64(i) >= limit && validator.ActivationEpoch != params.BeaconConfig().FarFutureEpoch {
 			t.Errorf("Could not update registry %d, validators should not have been activated, wanted activation epoch: %d, got %d",
 				i, params.BeaconConfig().FarFutureEpoch, validator.ActivationEpoch)
 		}

View File

@@ -535,14 +535,14 @@ func TestShuffledIndices_ShuffleRightLength(t *testing.T) {
 func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
 	ClearCache()
-	validatorCount := int(params.BeaconConfig().MinGenesisActiveValidatorCount)
+	validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
 	validators := make([]*ethpb.Validator, validatorCount)
 	indices := make([]uint64, validatorCount)
-	for i := 0; i < validatorCount; i++ {
+	for i := uint64(0); i < validatorCount; i++ {
 		validators[i] = &ethpb.Validator{
 			ExitEpoch: params.BeaconConfig().FarFutureEpoch,
 		}
-		indices[i] = uint64(i)
+		indices[i] = i
 	}
 	state, err := beaconstate.InitializeFromProto(&pb.BeaconState{
 		Validators: validators,
@@ -567,7 +567,7 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if len(indices) != int(params.BeaconConfig().TargetCommitteeSize) {
+	if uint64(len(indices)) != params.BeaconConfig().TargetCommitteeSize {
 		t.Errorf("Did not save correct indices lengths, got %d wanted %d", len(indices), params.BeaconConfig().TargetCommitteeSize)
 	}
 }

View File

@@ -53,18 +53,18 @@ func TestShuffleList_OK(t *testing.T) {
 func TestSplitIndices_OK(t *testing.T) {
 	var l []uint64
-	validators := 64000
-	for i := 0; i < validators; i++ {
-		l = append(l, uint64(i))
+	numValidators := uint64(64000)
+	for i := uint64(0); i < numValidators; i++ {
+		l = append(l, i)
 	}
 	split := SplitIndices(l, params.BeaconConfig().SlotsPerEpoch)
-	if len(split) != int(params.BeaconConfig().SlotsPerEpoch) {
+	if uint64(len(split)) != params.BeaconConfig().SlotsPerEpoch {
 		t.Errorf("Split list failed due to incorrect length, wanted:%v, got:%v", params.BeaconConfig().SlotsPerEpoch, len(split))
 	}
 	for _, s := range split {
-		if len(s) != validators/int(params.BeaconConfig().SlotsPerEpoch) {
-			t.Errorf("Split list failed due to incorrect length, wanted:%v, got:%v", validators/int(params.BeaconConfig().SlotsPerEpoch), len(s))
+		if uint64(len(s)) != numValidators/params.BeaconConfig().SlotsPerEpoch {
+			t.Errorf("Split list failed due to incorrect length, wanted:%v, got:%v", numValidators/params.BeaconConfig().SlotsPerEpoch, len(s))
 		}
 	}
 }

View File

@@ -243,7 +243,7 @@ func ComputeProposerIndex(bState *stateTrie.BeaconState, activeIndices []uint64,
 			return 0, err
 		}
 		candidateIndex = activeIndices[candidateIndex]
-		if int(candidateIndex) >= bState.NumValidators() {
+		if candidateIndex >= uint64(bState.NumValidators()) {
 			return 0, errors.New("active index out of range")
 		}
 		b := append(seed[:], bytesutil.Bytes8(i/32)...)
@@ -295,7 +295,7 @@ func ComputeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
 			return 0, err
 		}
 		candidateIndex = activeIndices[candidateIndex]
-		if int(candidateIndex) >= len(validators) {
+		if candidateIndex >= uint64(len(validators)) {
 			return 0, errors.New("active index out of range")
 		}
 		b := append(seed[:], bytesutil.Bytes8(i/32)...)

View File

@@ -29,7 +29,7 @@ func TestGenesisBeaconState_OK(t *testing.T) {
 	if params.BeaconConfig().EpochsPerHistoricalVector != 65536 {
 		t.Error("EpochsPerHistoricalVector should be 8192 for these tests to pass")
 	}
-	latestRandaoMixesLength := int(params.BeaconConfig().EpochsPerHistoricalVector)
+	latestRandaoMixesLength := params.BeaconConfig().EpochsPerHistoricalVector
 	if params.BeaconConfig().HistoricalRootsLimit != 16777216 {
 		t.Error("HistoricalRootsLimit should be 16777216 for these tests to pass")
@@ -90,7 +90,7 @@ func TestGenesisBeaconState_OK(t *testing.T) {
 	}
 	// Randomness and committees fields checks.
-	if len(newState.RandaoMixes()) != latestRandaoMixesLength {
+	if uint64(len(newState.RandaoMixes())) != latestRandaoMixesLength {
 		t.Error("Length of RandaoMixes was not correctly initialized")
 	}
 	mix, err := newState.RandaoMixAtIndex(0)
@@ -175,12 +175,12 @@ func TestGenesisState_InitializesLatestBlockHashes(t *testing.T) {
 	if err != nil {
 		t.Error(err)
 	}
-	got, want := len(s.BlockRoots()), int(params.BeaconConfig().SlotsPerHistoricalRoot)
+	got, want := uint64(len(s.BlockRoots())), params.BeaconConfig().SlotsPerHistoricalRoot
 	if want != got {
 		t.Errorf("Wrong number of recent block hashes. Got: %d Want: %d", got, want)
 	}
-	got = cap(s.BlockRoots())
+	got = uint64(cap(s.BlockRoots()))
 	if want != got {
 		t.Errorf("The slice underlying array capacity is wrong. Got: %d Want: %d", got, want)
 	}

View File

@@ -588,7 +588,7 @@ func verifyOperationLengths(state *stateTrie.BeaconState, body *ethpb.BeaconBloc
 	}
 	maxDeposits := mathutil.Min(params.BeaconConfig().MaxDeposits, eth1Data.DepositCount-state.Eth1DepositIndex())
 	// Verify outstanding deposits are processed up to max number of deposits
-	if len(body.Deposits) != int(maxDeposits) {
+	if uint64(len(body.Deposits)) != maxDeposits {
 		return fmt.Errorf("incorrect outstanding deposits in block body, wanted: %d, got: %d",
 			maxDeposits, len(body.Deposits))
 	}

View File

@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"math"
 	"strconv"
 	"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
@@ -358,8 +357,10 @@ func (kv *Store) blocksAtSlotBitfieldIndex(ctx context.Context, tx *bolt.Tx, ind
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.blocksAtSlotBitfieldIndex")
 	defer span.End()
-	highestSlot := index - 1
-	highestSlot = int(math.Max(0, float64(highestSlot)))
+	highestSlot := uint64(0)
+	if uint64(index) > highestSlot+1 {
+		highestSlot = uint64(index - 1)
+	}
 	if highestSlot == 0 {
 		gBlock, err := kv.GenesisBlock(ctx)
@@ -369,7 +370,7 @@ func (kv *Store) blocksAtSlotBitfieldIndex(ctx context.Context, tx *bolt.Tx, ind
 		return []*ethpb.SignedBeaconBlock{gBlock}, nil
 	}
-	f := filters.NewFilter().SetStartSlot(uint64(highestSlot)).SetEndSlot(uint64(highestSlot))
+	f := filters.NewFilter().SetStartSlot(highestSlot).SetEndSlot(highestSlot)
 	keys, err := getBlockRootsByFilter(ctx, tx, f)
 	if err != nil {
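This hunk and the parallel one in state.go replace a float64 `math.Max` clamp with an unsigned guard. A sketch of the pattern in isolation (the parameter is simplified to uint64 here; the actual function takes the store's bitfield index):

```go
package main

import "fmt"

// highestSlot derives "index - 1" without ever wrapping below zero: the
// subtraction only happens when index is large enough for the result to be
// non-negative, so there is no need to round-trip through float64 and
// math.Max as the old code did.
func highestSlot(index uint64) uint64 {
	highest := uint64(0)
	if index > highest+1 {
		highest = index - 1
	}
	return highest
}

func main() {
	fmt.Println(highestSlot(0)) // 0, not 18446744073709551615
	fmt.Println(highestSlot(5)) // 4
}
```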

View File

@@ -448,6 +448,9 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if len(highestAt) <= 0 {
+		t.Fatal("Got empty highest at slice")
+	}
 	if !proto.Equal(block1, highestAt[0]) {
 		t.Errorf("Wanted %v, received %v", block1, highestAt)
 	}
@@ -455,6 +458,9 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if len(highestAt) <= 0 {
+		t.Fatal("Got empty highest at slice")
+	}
 	if !proto.Equal(block2, highestAt[0]) {
 		t.Errorf("Wanted %v, received %v", block2, highestAt)
 	}
@@ -462,6 +468,9 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if len(highestAt) <= 0 {
+		t.Fatal("Got empty highest at slice")
+	}
 	if !proto.Equal(block3, highestAt[0]) {
 		t.Errorf("Wanted %v, received %v", block3, highestAt)
 	}

View File

@@ -14,7 +14,7 @@ import (
 var genesisBlockRoot = bytesutil.ToBytes32([]byte{'G', 'E', 'N', 'E', 'S', 'I', 'S'})
 func TestStore_IsFinalizedBlock(t *testing.T) {
-	slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
+	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
 	db := setupDB(t)
 	ctx := context.Background()
@@ -48,7 +48,7 @@ func TestStore_IsFinalizedBlock(t *testing.T) {
 	}
 	// All blocks up to slotsPerEpoch*2 should be in the finalized index.
-	for i := 0; i < slotsPerEpoch*2; i++ {
+	for i := uint64(0); i < slotsPerEpoch*2; i++ {
 		root, err := stateutil.BlockRoot(blks[i].Block)
 		if err != nil {
 			t.Fatal(err)
@@ -57,7 +57,7 @@ func TestStore_IsFinalizedBlock(t *testing.T) {
 			t.Errorf("Block at index %d was not considered finalized in the index", i)
 		}
 	}
-	for i := slotsPerEpoch * 3; i < len(blks); i++ {
+	for i := slotsPerEpoch * 3; i < uint64(len(blks)); i++ {
 		root, err := stateutil.BlockRoot(blks[i].Block)
 		if err != nil {
 			t.Fatal(err)
@@ -99,7 +99,7 @@ func TestStore_IsFinalizedBlockGenesis(t *testing.T) {
 // be c, e, and g. In this scenario, c was a finalized checkpoint root but no block built upon it so
 // it should not be considered "final and canonical" in the view at slot 6.
 func TestStore_IsFinalized_ForkEdgeCase(t *testing.T) {
-	slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
+	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
 	blocks0 := makeBlocks(t, slotsPerEpoch*0, slotsPerEpoch, genesisBlockRoot)
 	blocks1 := append(
 		makeBlocks(t, slotsPerEpoch*1, 1, bytesutil.ToBytes32(sszRootOrDie(t, blocks0[len(blocks0)-1]))), // No block builds off of the first block in epoch.
@@ -181,14 +181,14 @@ func sszRootOrDie(t *testing.T, block *ethpb.SignedBeaconBlock) []byte {
 	return root[:]
 }
-func makeBlocks(t *testing.T, i, n int, previousRoot [32]byte) []*ethpb.SignedBeaconBlock {
+func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []*ethpb.SignedBeaconBlock {
 	blocks := make([]*ethpb.SignedBeaconBlock, n)
 	for j := i; j < n+i; j++ {
 		parentRoot := make([]byte, 32)
 		copy(parentRoot, previousRoot[:])
 		blocks[j-i] = &ethpb.SignedBeaconBlock{
 			Block: &ethpb.BeaconBlock{
-				Slot: uint64(j + 1),
+				Slot: j + 1,
 				ParentRoot: parentRoot,
 			},
 		}

View File

@@ -3,7 +3,6 @@ package kv
 import (
 	"bytes"
 	"context"
-	"math"
 	"github.com/pkg/errors"
 	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
@@ -394,8 +393,10 @@ func (kv *Store) statesAtSlotBitfieldIndex(ctx context.Context, tx *bolt.Tx, ind
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.statesAtSlotBitfieldIndex")
 	defer span.End()
-	highestSlot := index - 1
-	highestSlot = int(math.Max(0, float64(highestSlot)))
+	highestSlot := uint64(0)
+	if uint64(index) > highestSlot+1 {
+		highestSlot = uint64(index - 1)
+	}
 	if highestSlot == 0 {
 		gState, err := kv.GenesisState(ctx)
@@ -405,7 +406,7 @@ func (kv *Store) statesAtSlotBitfieldIndex(ctx context.Context, tx *bolt.Tx, ind
 		return []*state.BeaconState{gState}, nil
 	}
-	f := filters.NewFilter().SetStartSlot(uint64(highestSlot)).SetEndSlot(uint64(highestSlot))
+	f := filters.NewFilter().SetStartSlot(highestSlot).SetEndSlot(highestSlot)
 	keys, err := getBlockRootsByFilter(ctx, tx, f)
 	if err != nil {

View File

@@ -39,7 +39,7 @@ func (p *Pool) PendingAttesterSlashings(ctx context.Context, state *beaconstate.
 	pending := make([]*ethpb.AttesterSlashing, 0, params.BeaconConfig().MaxAttesterSlashings)
 	for i := 0; i < len(p.pendingAttesterSlashing); i++ {
 		slashing := p.pendingAttesterSlashing[i]
-		if len(pending) >= int(params.BeaconConfig().MaxAttesterSlashings) {
+		if uint64(len(pending)) >= params.BeaconConfig().MaxAttesterSlashings {
 			break
 		}
 		valid, err := p.validatorSlashingPreconditionCheck(state, slashing.validatorToSlash)
@@ -78,7 +78,7 @@ func (p *Pool) PendingProposerSlashings(ctx context.Context, state *beaconstate.
 	pending := make([]*ethpb.ProposerSlashing, 0, params.BeaconConfig().MaxProposerSlashings)
 	for i := 0; i < len(p.pendingProposerSlashing); i++ {
 		slashing := p.pendingProposerSlashing[i]
-		if len(pending) >= int(params.BeaconConfig().MaxProposerSlashings) {
+		if uint64(len(pending)) >= params.BeaconConfig().MaxProposerSlashings {
 			break
 		}
 		valid, err := p.validatorSlashingPreconditionCheck(state, slashing.Header_1.Header.ProposerIndex)

View File

@@ -42,7 +42,7 @@ func (p *Pool) PendingExits(state *beaconstate.BeaconState, slot uint64) []*ethp
 			pending = append(pending, e)
 		}
 	}
-	if len(pending) > int(params.BeaconConfig().MaxVoluntaryExits) {
+	if uint64(len(pending)) > params.BeaconConfig().MaxVoluntaryExits {
 		pending = pending[:params.BeaconConfig().MaxVoluntaryExits]
 	}
 	return pending

View File

@@ -51,7 +51,7 @@ func (e SszNetworkEncoder) EncodeGossip(w io.Writer, msg interface{}) (int, erro
 	if err != nil {
 		return 0, err
 	}
-	if len(b) > int(MaxGossipSize) {
+	if uint64(len(b)) > MaxGossipSize {
 		return 0, errors.Errorf("gossip message exceeds max gossip size: %d bytes > %d bytes", len(b), MaxGossipSize)
 	}
 	if e.UseSnappyCompression {
@@ -121,7 +121,7 @@ func (e SszNetworkEncoder) DecodeGossip(b []byte, to interface{}) error {
 			return err
 		}
 	}
-	if len(b) > int(MaxGossipSize) {
+	if uint64(len(b)) > MaxGossipSize {
 		return errors.Errorf("gossip message exceeds max gossip size: %d bytes > %d bytes", len(b), MaxGossipSize)
 	}
 	return e.doDecode(b, to)

View File

@@ -21,7 +21,7 @@ var (
 	// maxCacheSize is 2x of the follow distance for additional cache padding.
 	// Requests should be only accessing blocks within recent blocks within the
 	// Eth1FollowDistance.
-	maxCacheSize = int(2 * params.BeaconConfig().Eth1FollowDistance)
+	maxCacheSize = 2 * params.BeaconConfig().Eth1FollowDistance
 	// Metrics
 	blockCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
@@ -169,8 +169,8 @@ func (b *blockCache) AddBlock(blk *gethTypes.Block) error {
 }
 // trim the FIFO queue to the maxSize.
-func trim(queue *cache.FIFO, maxSize int) {
-	for s := len(queue.ListKeys()); s > maxSize; s-- {
+func trim(queue *cache.FIFO, maxSize uint64) {
+	for s := uint64(len(queue.ListKeys())); s > maxSize; s-- {
 		// #nosec G104 popProcessNoopFunc never returns an error
 		if _, err := queue.Pop(popProcessNoopFunc); err != nil { // This never returns an error, but we'll handle anyway for sanity.
 			panic(err)

View File

@@ -148,14 +148,14 @@ func TestBlockCache_maxSize(t *testing.T) {
 		}
 	}
-	if len(cache.hashCache.ListKeys()) != maxCacheSize {
+	if uint64(len(cache.hashCache.ListKeys())) != maxCacheSize {
 		t.Errorf(
 			"Expected hash cache key size to be %d, got %d",
 			maxCacheSize,
 			len(cache.hashCache.ListKeys()),
 		)
 	}
-	if len(cache.heightCache.ListKeys()) != maxCacheSize {
+	if uint64(len(cache.heightCache.ListKeys())) != maxCacheSize {
 		t.Errorf(
 			"Expected height cache key size to be %d, got %d",
 			maxCacheSize,

View File

@@ -732,7 +732,7 @@ func TestConsistentGenesisState(t *testing.T) {
 		testAcc.Backend.Commit()
 	}
-	for i := 0; i < int(params.BeaconConfig().Eth1FollowDistance); i++ {
+	for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ {
 		testAcc.Backend.Commit()
 	}

View File

@@ -483,7 +483,7 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*protodb.Deposit
 	// Only add pending deposits if the container slice length
 	// is more than the current index in state.
-	if len(ctrs) > int(currIndex) {
+	if uint64(len(ctrs)) > currIndex {
 		for _, c := range ctrs[currIndex:] {
 			s.depositCache.InsertPendingDeposit(ctx, c.Deposit, c.Eth1BlockHeight, c.Index, bytesutil.ToBytes32(c.DepositRoot))
 		}

View File

@@ -111,7 +111,7 @@ func (bs *Server) ListValidatorAssignments(
 	}
 	for _, index := range filteredIndices[start:end] {
-		if int(index) >= requestedState.NumValidators() {
+		if index >= uint64(requestedState.NumValidators()) {
 			return nil, status.Errorf(codes.OutOfRange, "Validator index %d >= validator count %d",
 				index, requestedState.NumValidators())
 		}
@@ -235,7 +235,7 @@ func (bs *Server) listValidatorAssignmentsUsingOldArchival(
 	}
 	for _, index := range filteredIndices[start:end] {
-		if int(index) >= headState.NumValidators() {
+		if index >= uint64(headState.NumValidators()) {
 			return nil, status.Errorf(codes.OutOfRange, "Validator index %d >= validator count %d",
 				index, headState.NumValidators())
 		}

View File

@@ -81,7 +81,7 @@ func (bs *Server) ListValidatorBalances(
 			filtered[index] = true
-			if int(index) >= len(balances) {
+			if index >= uint64(len(balances)) {
 				return nil, status.Errorf(codes.OutOfRange, "Validator index %d >= balance list %d",
 					index, len(balances))
 			}
@@ -95,7 +95,7 @@ func (bs *Server) ListValidatorBalances(
 		}
 		for _, index := range req.Indices {
-			if int(index) >= len(balances) {
+			if index >= uint64(len(balances)) {
 				return nil, status.Errorf(codes.OutOfRange, "Validator index %d >= balance list %d",
 					index, len(balances))
 			}
@@ -225,7 +225,7 @@ func (bs *Server) listValidatorsBalancesUsingOldArchival(
 			filtered[index] = true
-			if int(index) >= len(balances) {
+			if index >= uint64(len(balances)) {
 				return nil, status.Errorf(codes.OutOfRange, "Validator index %d >= balance list %d",
 					index, len(balances))
 			}
@@ -239,7 +239,7 @@ func (bs *Server) listValidatorsBalancesUsingOldArchival(
 		}
 		for _, index := range req.Indices {
-			if int(index) >= len(balances) {
+			if index >= uint64(len(balances)) {
 				if epoch <= helpers.CurrentEpoch(headState) {
 					return nil, status.Errorf(codes.OutOfRange, "Validator index %d does not exist in historical balances",
 						index)
@@ -878,7 +878,7 @@ func (bs *Server) GetValidatorQueue(
 	})
 	// Only activate just enough validators according to the activation churn limit.
-	activationQueueChurn := len(activationQ)
+	activationQueueChurn := uint64(len(activationQ))
 	activeValidatorCount, err := helpers.ActiveValidatorCount(headState, helpers.CurrentEpoch(headState))
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not get active validator count: %v", err)
@@ -894,20 +894,20 @@ func (bs *Server) GetValidatorQueue(
 			exitQueueEpoch = i
 		}
 	}
-	exitQueueChurn := 0
+	exitQueueChurn := uint64(0)
 	for _, val := range vals {
 		if val.ExitEpoch == exitQueueEpoch {
 			exitQueueChurn++
 		}
 	}
 	// Prevent churn limit from causing index out of bound issues.
-	if int(churnLimit) < activationQueueChurn {
-		activationQueueChurn = int(churnLimit)
+	if churnLimit < activationQueueChurn {
+		activationQueueChurn = churnLimit
 	}
-	if int(churnLimit) < exitQueueChurn {
+	if churnLimit < exitQueueChurn {
 		// If we are above the churn limit, we simply increase the churn by one.
 		exitQueueEpoch++
-		exitQueueChurn = int(churnLimit)
+		exitQueueChurn = churnLimit
 	}
 	// We use the exit queue churn to determine if we have passed a churn limit.
@@ -1122,7 +1122,7 @@ func (bs *Server) GetIndividualVotes(
 		}
 		vals := requestedState.ValidatorsReadOnly()
 		for _, index := range filteredIndices {
-			if int(index) >= len(v) {
+			if index >= uint64(len(v)) {
 				votes = append(votes, &ethpb.IndividualVotesRespond_IndividualVote{ValidatorIndex: index})
 				continue
 			}

View File

@@ -206,8 +206,8 @@ func assignValidatorToSubnet(pubkey []byte, status ethpb.ValidatorStatus) {
 		assignedIdxs = append(assignedIdxs, uint64(assignedIdx))
 	}
-	assignedDuration := randGen.Intn(int(params.BeaconNetworkConfig().EpochsPerRandomSubnetSubscription))
-	assignedDuration += int(params.BeaconNetworkConfig().EpochsPerRandomSubnetSubscription)
+	assignedDuration := uint64(randGen.Intn(int(params.BeaconNetworkConfig().EpochsPerRandomSubnetSubscription)))
+	assignedDuration += params.BeaconNetworkConfig().EpochsPerRandomSubnetSubscription
 	totalDuration := epochDuration * time.Duration(assignedDuration)
 	cache.SubnetIDs.AddPersistentCommittee(pubkey, assignedIdxs, totalDuration*time.Second)

View File

@@ -320,7 +320,7 @@ func (vs *Server) deposits(ctx context.Context, currentVote *ethpb.Eth1Data) ([]
 	}
 	// Limit the return of pending deposits to not be more than max deposits allowed in block.
 	var pendingDeposits []*ethpb.Deposit
-	for i := 0; i < len(pendingDeps) && i < int(params.BeaconConfig().MaxDeposits); i++ {
+	for i := uint64(0); i < uint64(len(pendingDeps)) && i < params.BeaconConfig().MaxDeposits; i++ {
 		pendingDeposits = append(pendingDeposits, pendingDeps[i].Deposit)
 	}
 	return pendingDeposits, nil
@@ -393,7 +393,7 @@ func (vs *Server) filterAttestationsForBlockInclusion(ctx context.Context, state
 	inValidAtts := make([]*ethpb.Attestation, 0, len(atts))
 	for i, att := range atts {
-		if i == int(params.BeaconConfig().MaxAttestations) {
+		if uint64(i) == params.BeaconConfig().MaxAttestations {
 			break
 		}
@@ -458,11 +458,13 @@ func (vs *Server) packAttestations(ctx context.Context, latestState *stateTrie.B
 	}
 	// If there is any room left in the block, consider unaggregated attestations as well.
-	if len(atts) < int(params.BeaconConfig().MaxAttestations) {
+	numAtts := uint64(len(atts))
+	if numAtts < params.BeaconConfig().MaxAttestations {
 		uAtts := vs.AttPool.UnaggregatedAttestations()
 		uAtts, err = vs.filterAttestationsForBlockInclusion(ctx, latestState, uAtts)
-		if len(uAtts)+len(atts) > int(params.BeaconConfig().MaxAttestations) {
-			uAtts = uAtts[:int(params.BeaconConfig().MaxAttestations)-len(atts)]
+		numUAtts := uint64(len(uAtts))
+		if numUAtts+numAtts > params.BeaconConfig().MaxAttestations {
+			uAtts = uAtts[:params.BeaconConfig().MaxAttestations-numAtts]
 		}
 		atts = append(atts, uAtts...)
 	}

View File

@@ -143,13 +143,13 @@ func TestGetBlock_OK(t *testing.T) {
 	if !bytes.Equal(block.Body.Graffiti, req.Graffiti) {
 		t.Fatal("Expected block to have correct graffiti")
 	}
-	if len(block.Body.ProposerSlashings) != int(params.BeaconConfig().MaxProposerSlashings) {
+	if uint64(len(block.Body.ProposerSlashings)) != params.BeaconConfig().MaxProposerSlashings {
 		t.Fatalf("Wanted %d proposer slashings, got %d", params.BeaconConfig().MaxProposerSlashings, len(block.Body.ProposerSlashings))
 	}
 	if !reflect.DeepEqual(block.Body.ProposerSlashings, proposerSlashings) {
 		t.Errorf("Wanted proposer slashing %v, got %v", proposerSlashings, block.Body.ProposerSlashings)
 	}
-	if len(block.Body.AttesterSlashings) != int(params.BeaconConfig().MaxAttesterSlashings) {
+	if uint64(len(block.Body.AttesterSlashings)) != params.BeaconConfig().MaxAttesterSlashings {
 		t.Fatalf("Wanted %d attester slashings, got %d", params.BeaconConfig().MaxAttesterSlashings, len(block.Body.AttesterSlashings))
 	}
 	if !reflect.DeepEqual(block.Body.AttesterSlashings, attSlashings) {
@@ -266,7 +266,7 @@ func TestGetBlock_AddsUnaggregatedAtts(t *testing.T) {
 	if !bytes.Equal(block.Body.Graffiti, req.Graffiti) {
 		t.Fatal("Expected block to have correct graffiti")
 	}
-	if len(block.Body.Attestations) != int(params.BeaconConfig().MaxAttestations) {
+	if uint64(len(block.Body.Attestations)) != params.BeaconConfig().MaxAttestations {
 		t.Fatalf("Expected a full block of attestations, only received %d", len(block.Body.Attestations))
 	}
 	hasUnaggregatedAtt := false
@@ -994,7 +994,7 @@ func TestPendingDeposits_CantReturnMoreThanMax(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if len(deposits) != int(params.BeaconConfig().MaxDeposits) {
+	if uint64(len(deposits)) != params.BeaconConfig().MaxDeposits {
 		t.Errorf(
 			"Received unexpected number of pending deposits: %d, wanted: %d",
 			len(deposits),

View File

@@ -203,7 +203,7 @@ func retrieveStatusForPubKey(headState *stateTrie.BeaconState, pubKey []byte) (e
 		return ethpb.ValidatorStatus_UNKNOWN_STATUS, 0, errors.New("head state does not exist")
 	}
 	idx, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
-	if !ok || int(idx) >= headState.NumValidators() {
+	if !ok || idx >= uint64(headState.NumValidators()) {
 		return ethpb.ValidatorStatus_UNKNOWN_STATUS, 0, errPubkeyDoesNotExist
 	}
 	return assignmentStatus(headState, idx), idx, nil

View File

@@ -278,7 +278,7 @@ func (b *BeaconState) BlockRootAtIndex(idx uint64) ([]byte, error) {
 	b.lock.RLock()
 	defer b.lock.RUnlock()
-	if len(b.state.BlockRoots) <= int(idx) {
+	if uint64(len(b.state.BlockRoots)) <= idx {
 		return nil, fmt.Errorf("index %d out of range", idx)
 	}
 	root := make([]byte, 32)
@@ -575,7 +575,7 @@ func (b *BeaconState) BalanceAtIndex(idx uint64) (uint64, error) {
 	b.lock.RLock()
 	defer b.lock.RUnlock()
-	if len(b.state.Balances) <= int(idx) {
+	if uint64(len(b.state.Balances)) <= idx {
 		return 0, fmt.Errorf("index of %d does not exist", idx)
 	}
 	return b.state.Balances[idx], nil
@@ -630,7 +630,7 @@ func (b *BeaconState) RandaoMixAtIndex(idx uint64) ([]byte, error) {
 	b.lock.RLock()
 	defer b.lock.RUnlock()
-	if len(b.state.RandaoMixes) <= int(idx) {
+	if uint64(len(b.state.RandaoMixes)) <= idx {
 		return nil, fmt.Errorf("index %d out of range", idx)
 	}
 	root := make([]byte, 32)

View File

@@ -119,7 +119,7 @@ func (b *BeaconState) UpdateBlockRootAtIndex(idx uint64, blockRoot [32]byte) err
 	if !b.HasInnerState() {
 		return ErrNilInnerState
 	}
-	if len(b.state.BlockRoots) <= int(idx) {
+	if uint64(len(b.state.BlockRoots)) <= idx {
 		return fmt.Errorf("invalid index provided %d", idx)
 	}
@@ -170,8 +170,9 @@ func (b *BeaconState) UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) err
 	if !b.HasInnerState() {
 		return ErrNilInnerState
 	}
 	b.lock.RLock()
-	if len(b.state.StateRoots) <= int(idx) {
+	if uint64(len(b.state.StateRoots)) <= idx {
 		b.lock.RUnlock()
 		return errors.Errorf("invalid index provided %d", idx)
 	}
@@ -347,7 +348,7 @@ func (b *BeaconState) UpdateValidatorAtIndex(idx uint64, val *ethpb.Validator) e
 	if !b.HasInnerState() {
 		return ErrNilInnerState
 	}
-	if len(b.state.Validators) <= int(idx) {
+	if uint64(len(b.state.Validators)) <= idx {
 		return errors.Errorf("invalid index provided %d", idx)
 	}
@@ -409,7 +410,7 @@ func (b *BeaconState) UpdateBalancesAtIndex(idx uint64, val uint64) error {
 	if !b.HasInnerState() {
 		return ErrNilInnerState
 	}
-	if len(b.state.Balances) <= int(idx) {
+	if uint64(len(b.state.Balances)) <= idx {
 		return errors.Errorf("invalid index provided %d", idx)
 	}
@@ -455,7 +456,7 @@ func (b *BeaconState) UpdateRandaoMixesAtIndex(idx uint64, val []byte) error {
 	if !b.HasInnerState() {
 		return ErrNilInnerState
 	}
-	if len(b.state.RandaoMixes) <= int(idx) {
+	if uint64(len(b.state.RandaoMixes)) <= idx {
 		return errors.Errorf("invalid index provided %d", idx)
 	}
@@ -504,7 +505,7 @@ func (b *BeaconState) UpdateSlashingsAtIndex(idx uint64, val uint64) error {
 	if !b.HasInnerState() {
 		return ErrNilInnerState
 	}
-	if len(b.state.Slashings) <= int(idx) {
+	if uint64(len(b.state.Slashings)) <= idx {
 		return errors.Errorf("invalid index provided %d", idx)
 	}
 	b.lock.RLock()

View File

@@ -54,7 +54,7 @@ func (s *Service) validateVoluntaryExit(ctx context.Context, pid peer.ID, msg *p
 	}
 	exitedEpochSlot := exit.Exit.Epoch * params.BeaconConfig().SlotsPerEpoch
-	if int(exit.Exit.ValidatorIndex) >= headState.NumValidators() {
+	if exit.Exit.ValidatorIndex >= uint64(headState.NumValidators()) {
 		return pubsub.ValidationReject
 	}
 	val, err := headState.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)

View File

@@ -265,7 +265,7 @@ func isNewAttSlashable(history *slashpb.AttestationHistory, sourceEpoch uint64,
 	wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod
 	// Previously pruned, we should return false.
-	if int(targetEpoch) <= int(history.LatestEpochWritten)-int(wsPeriod) {
+	if targetEpoch+wsPeriod <= history.LatestEpochWritten {
 		return false
 	}
@@ -317,7 +317,7 @@ func markAttestationForTargetEpoch(history *slashpb.AttestationHistory, sourceEp
 // returns the "default" FAR_FUTURE_EPOCH value.
 func safeTargetToSource(history *slashpb.AttestationHistory, targetEpoch uint64) uint64 {
 	wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod
-	if targetEpoch > history.LatestEpochWritten || int(targetEpoch) < int(history.LatestEpochWritten)-int(wsPeriod) {
+	if targetEpoch > history.LatestEpochWritten || targetEpoch+wsPeriod < history.LatestEpochWritten {
 		return params.BeaconConfig().FarFutureEpoch
 	}
 	return history.TargetToSource[targetEpoch%wsPeriod]

View File

@@ -194,7 +194,7 @@ func isNewAttSlashable(history *slashpb.AttestationHistory, sourceEpoch uint64,
 	wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod
 	// Previously pruned, we should return false.
-	if int(targetEpoch) <= int(history.LatestEpochWritten)-int(wsPeriod) {
+	if targetEpoch+wsPeriod <= history.LatestEpochWritten {
 		return false
 	}
@@ -246,7 +246,7 @@ func markAttestationForTargetEpoch(history *slashpb.AttestationHistory, sourceEp
 // returns the "default" FAR_FUTURE_EPOCH value.
 func safeTargetToSource(history *slashpb.AttestationHistory, targetEpoch uint64) uint64 {
 	wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod
-	if targetEpoch > history.LatestEpochWritten || int(targetEpoch) < int(history.LatestEpochWritten)-int(wsPeriod) {
+	if targetEpoch > history.LatestEpochWritten || targetEpoch+wsPeriod < history.LatestEpochWritten {
 		return params.BeaconConfig().FarFutureEpoch
 	}
 	return history.TargetToSource[targetEpoch%wsPeriod]
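The two slashing-protection hunks avoid signed casts by moving `wsPeriod` to the other side of the inequality: `targetEpoch <= latestWritten - wsPeriod` becomes `targetEpoch + wsPeriod <= latestWritten`, which is algebraically the same but never produces a negative or underflowed intermediate. A small illustration with made-up epoch values (not from the PR):

```go
package main

import "fmt"

// previouslyPruned reports whether targetEpoch falls outside the weak
// subjectivity window ending at latestWritten. Adding wsPeriod on the left
// keeps everything unsigned; latestWritten-wsPeriod would wrap around
// whenever latestWritten < wsPeriod.
func previouslyPruned(targetEpoch, latestWritten, wsPeriod uint64) bool {
	return targetEpoch+wsPeriod <= latestWritten
}

func main() {
	const wsPeriod = 54000
	fmt.Println(previouslyPruned(2, 100, wsPeriod))   // false: no wraparound even though 100-54000 would underflow
	fmt.Println(previouslyPruned(2, 60000, wsPeriod)) // true: epoch 2 is outside the window
}
```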