Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-09 13:28:01 -05:00
Add golang.org/x/tools modernize static analyzer and fix violations (#15946)
* Ran gopls modernize to fix everything: `go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...`
* Override rules_go provided dependency for golang.org/x/tools to v0.38.0. To update this, checked out rules_go, then ran `bazel run //go/tools/releaser -- upgrade-dep -mirror=false org_golang_x_tools` and copied the patches.
* Fix buildtag violations and ignore buildtag violations in external
* Introduce modernize analyzer package.
* Add modernize "any" analyzer.
* Fix violations of any analyzer
* Add modernize "appendclipped" analyzer.
* Fix violations of appendclipped
* Add modernize "bloop" analyzer.
* Add modernize "fmtappendf" analyzer.
* Add modernize "forvar" analyzer.
* Add modernize "mapsloop" analyzer.
* Add modernize "minmax" analyzer.
* Fix violations of minmax analyzer
* Add modernize "omitzero" analyzer.
* Add modernize "rangeint" analyzer.
* Fix violations of rangeint.
* Add modernize "reflecttypefor" analyzer.
* Fix violations of reflecttypefor analyzer.
* Add modernize "slicescontains" analyzer.
* Add modernize "slicessort" analyzer.
* Add modernize "slicesdelete" analyzer. This is disabled by default for now. See https://go.dev/issue/73686.
* Add modernize "stringscutprefix" analyzer.
* Add modernize "stringsbuilder" analyzer.
* Fix violations of stringsbuilder analyzer.
* Add modernize "stringsseq" analyzer.
* Add modernize "testingcontext" analyzer.
* Add modernize "waitgroup" analyzer.
* Changelog fragment
* gofmt
* gazelle
* Add modernize "newexpr" analyzer.
* Disable newexpr until go1.26
* Add more details in WORKSPACE on how to update the override
* @nalepae feedback on min()
* gofmt
* Fix violations of forvar
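Not part of the original commit page: a minimal, hand-written sketch (package and function names are invented for illustration, nothing here is copied from the Prysm tree) of the kinds of rewrites these modernize analyzers apply, matching the patterns visible in the diff below.

```go
// Illustrative sketch only; not taken from the Prysm sources.
package modernizesketch

import (
	"reflect"
	"slices"
	"sync"
	"testing"
)

// "any" analyzer: interface{} becomes the alias any (no behavior change).
func echo(v any) any { return v }

// "rangeint" analyzer: counted loops become range-over-int (Go 1.22+).
func squares(n int) []int {
	out := make([]int, n)
	for i := range n { // was: for i := 0; i < n; i++ {
		out[i] = i * i
	}
	return out
}

// "slicessort" analyzer: sort.Slice with a plain less-than becomes slices.Sort.
func sortAscending(xs []uint64) {
	slices.Sort(xs) // was: sort.Slice(xs, func(i, j int) bool { return xs[i] < xs[j] })
}

// "reflecttypefor" analyzer: reflect.TypeOf(T{}) becomes reflect.TypeFor[T]().
func typeName() string {
	return reflect.TypeFor[sync.WaitGroup]().Name() // was: reflect.TypeOf(sync.WaitGroup{}).Name()
}

// "waitgroup" analyzer: wg.Add(1) plus go func(){ ...; wg.Done() }() becomes wg.Go (Go 1.25+).
func runAll(work []func()) {
	var wg sync.WaitGroup
	for _, w := range work {
		wg.Go(w) // was: wg.Add(1); go func() { defer wg.Done(); w() }()
	}
	wg.Wait()
}

// "bloop" analyzer: benchmark loops over b.N become b.Loop() (Go 1.24+),
// which also makes explicit b.ResetTimer/b.StopTimer calls redundant.
func benchSort(b *testing.B, xs []uint64) {
	for b.Loop() { // was: b.ResetTimer(); for i := 0; i < b.N; i++ {
		slices.Sort(xs)
	}
}
```

The slicesdelete and newexpr analyzers listed above are added but left disabled, so no corresponding rewrites appear in the diff.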
@@ -40,7 +40,7 @@ type FieldTrie struct {
// NewFieldTrie is the constructor for the field trie data structure. It creates the corresponding
// trie according to the given parameters. Depending on whether the field is a basic/composite array
// which is either fixed/variable length, it will appropriately determine the trie.
-func NewFieldTrie(field types.FieldIndex, fieldInfo types.DataType, elements interface{}, length uint64) (*FieldTrie, error) {
+func NewFieldTrie(field types.FieldIndex, fieldInfo types.DataType, elements any, length uint64) (*FieldTrie, error) {
if elements == nil {
return &FieldTrie{
field: field,

@@ -92,14 +92,14 @@ func NewFieldTrie(field types.FieldIndex, fieldInfo types.DataType, elements int
numOfElems: numOfElems,
}, nil
default:
-return nil, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(fieldInfo).Name())
+return nil, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeFor[types.DataType]().Name())
}
}

// RecomputeTrie rebuilds the affected branches in the trie according to the provided
// changed indices and elements. This recomputes the trie according to the particular
// field the trie is based on.
-func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]byte, error) {
+func (f *FieldTrie) RecomputeTrie(indices []uint64, elements any) ([32]byte, error) {
f.Lock()
defer f.Unlock()
var fieldRoot [32]byte

@@ -162,7 +162,7 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
}
return stateutil.AddInMixin(fieldRoot, uint64(f.numOfElems))
default:
-return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name())
+return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeFor[types.DataType]().Name())
}
}

@@ -251,7 +251,7 @@ func (f *FieldTrie) TrieRoot() ([32]byte, error) {
trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0]
return stateutil.AddInMixin(trieRoot, uint64(f.numOfElems))
default:
-return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name())
+return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeFor[types.DataType]().Name())
}
}
@@ -31,7 +31,7 @@ func (f *FieldTrie) validateIndices(idxs []uint64) error {
return nil
}

-func validateElements(field types.FieldIndex, fieldInfo types.DataType, elements interface{}, length uint64) error {
+func validateElements(field types.FieldIndex, fieldInfo types.DataType, elements any, length uint64) error {
if fieldInfo == types.CompressedArray {
comLength, err := field.ElemsInChunk()
if err != nil {

@@ -54,7 +54,7 @@ func validateElements(field types.FieldIndex, fieldInfo types.DataType, elements
}

// fieldConverters converts the corresponding field and the provided elements to the appropriate roots.
-func fieldConverters(field types.FieldIndex, indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
+func fieldConverters(field types.FieldIndex, indices []uint64, elements any, convertAll bool) ([][32]byte, error) {
switch field {
case types.BlockRoots, types.StateRoots, types.RandaoMixes:
return convertRoots(indices, elements, convertAll)

@@ -71,7 +71,7 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac
}
}

-func convertRoots(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
+func convertRoots(indices []uint64, elements any, convertAll bool) ([][32]byte, error) {
switch castedType := elements.(type) {
case customtypes.BlockRoots:
return handle32ByteMVslice(multi_value_slice.BuildEmptyCompositeSlice[[32]byte](castedType), indices, convertAll)

@@ -86,7 +86,7 @@ func convertRoots(indices []uint64, elements interface{}, convertAll bool) ([][3
}
}

-func convertEth1DataVotes(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
+func convertEth1DataVotes(indices []uint64, elements any, convertAll bool) ([][32]byte, error) {
val, ok := elements.([]*ethpb.Eth1Data)
if !ok {
return nil, errors.Errorf("Wanted type of %T but got %T", []*ethpb.Eth1Data{}, elements)

@@ -94,7 +94,7 @@ func convertEth1DataVotes(indices []uint64, elements interface{}, convertAll boo
return handleEth1DataSlice(val, indices, convertAll)
}

-func convertValidators(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
+func convertValidators(indices []uint64, elements any, convertAll bool) ([][32]byte, error) {
switch casted := elements.(type) {
case []*ethpb.Validator:
return handleValidatorMVSlice(multi_value_slice.BuildEmptyCompositeSlice[*ethpb.Validator](casted), indices, convertAll)

@@ -105,7 +105,7 @@ func convertValidators(indices []uint64, elements interface{}, convertAll bool)
}
}

-func convertAttestations(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
+func convertAttestations(indices []uint64, elements any, convertAll bool) ([][32]byte, error) {
val, ok := elements.([]*ethpb.PendingAttestation)
if !ok {
return nil, errors.Errorf("Wanted type of %T but got %T", []*ethpb.PendingAttestation{}, elements)

@@ -113,7 +113,7 @@ func convertAttestations(indices []uint64, elements interface{}, convertAll bool
return handlePendingAttestationSlice(val, indices, convertAll)
}

-func convertBalances(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
+func convertBalances(indices []uint64, elements any, convertAll bool) ([][32]byte, error) {
switch casted := elements.(type) {
case []uint64:
return handleBalanceMVSlice(multi_value_slice.BuildEmptyCompositeSlice[uint64](casted), indices, convertAll)
@@ -94,7 +94,7 @@ func TestFieldTrie_NativeState_fieldConvertersNative(t *testing.T) {
type args struct {
field types.FieldIndex
indices []uint64
-elements interface{}
+elements any
convertAll bool
}
tests := []struct {
@@ -63,8 +63,8 @@ type ReadOnlyBeaconState interface {
ReadOnlyDeposits
ReadOnlyConsolidations
ReadOnlyProposerLookahead
-ToProtoUnsafe() interface{}
-ToProto() interface{}
+ToProtoUnsafe() any
+ToProto() any
GenesisTime() time.Time
GenesisValidatorsRoot() []byte
Slot() primitives.Slot
@@ -29,7 +29,7 @@ func (b *BeaconState) previousEpochAttestationsVal() []*ethpb.PendingAttestation
}

res := make([]*ethpb.PendingAttestation, len(b.previousEpochAttestations))
-for i := 0; i < len(res); i++ {
+for i := range res {
res[i] = b.previousEpochAttestations[i].Copy()
}
return res

@@ -59,7 +59,7 @@ func (b *BeaconState) currentEpochAttestationsVal() []*ethpb.PendingAttestation
}

res := make([]*ethpb.PendingAttestation, len(b.currentEpochAttestations))
-for i := 0; i < len(res); i++ {
+for i := range res {
res[i] = b.currentEpochAttestations[i].Copy()
}
return res
@@ -48,7 +48,7 @@ func (b *BeaconState) eth1DataVotesVal() []*ethpb.Eth1Data {
}

res := make([]*ethpb.Eth1Data, len(b.eth1DataVotes))
-for i := 0; i < len(res); i++ {
+for i := range res {
res[i] = b.eth1DataVotes[i].Copy()
}
return res
@@ -11,7 +11,7 @@ import (
func TestState_UnrealizedCheckpointBalances(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
-for i := 0; i < len(validators); i++ {
+for i := range validators {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
@@ -9,7 +9,7 @@ import (

// ToProtoUnsafe returns the pointer value of the underlying
// beacon state proto object, bypassing immutability. Use with care.
-func (b *BeaconState) ToProtoUnsafe() interface{} {
+func (b *BeaconState) ToProtoUnsafe() any {
if b == nil {
return nil
}

@@ -265,7 +265,7 @@ func (b *BeaconState) ToProtoUnsafe() interface{} {
}

// ToProto the beacon state into a protobuf for usage.
-func (b *BeaconState) ToProto() interface{} {
+func (b *BeaconState) ToProto() any {
if b == nil {
return nil
}

@@ -552,7 +552,7 @@ func (b *BeaconState) StateRootAtIndex(idx uint64) ([]byte, error) {

// ProtobufBeaconStatePhase0 transforms an input into beacon state in the form of protobuf.
// Error is returned if the input is not type protobuf beacon state.
-func ProtobufBeaconStatePhase0(s interface{}) (*ethpb.BeaconState, error) {
+func ProtobufBeaconStatePhase0(s any) (*ethpb.BeaconState, error) {
pbState, ok := s.(*ethpb.BeaconState)
if !ok {
return nil, errors.New("input is not type ethpb.BeaconState")

@@ -562,7 +562,7 @@ func ProtobufBeaconStatePhase0(s interface{}) (*ethpb.BeaconState, error) {

// ProtobufBeaconStateAltair transforms an input into beacon state Altair in the form of protobuf.
// Error is returned if the input is not type protobuf beacon state.
-func ProtobufBeaconStateAltair(s interface{}) (*ethpb.BeaconStateAltair, error) {
+func ProtobufBeaconStateAltair(s any) (*ethpb.BeaconStateAltair, error) {
pbState, ok := s.(*ethpb.BeaconStateAltair)
if !ok {
return nil, errors.New("input is not type pb.BeaconStateAltair")

@@ -572,7 +572,7 @@ func ProtobufBeaconStateAltair(s interface{}) (*ethpb.BeaconStateAltair, error)

// ProtobufBeaconStateBellatrix transforms an input into beacon state Bellatrix in the form of protobuf.
// Error is returned if the input is not type protobuf beacon state.
-func ProtobufBeaconStateBellatrix(s interface{}) (*ethpb.BeaconStateBellatrix, error) {
+func ProtobufBeaconStateBellatrix(s any) (*ethpb.BeaconStateBellatrix, error) {
pbState, ok := s.(*ethpb.BeaconStateBellatrix)
if !ok {
return nil, errors.New("input is not type pb.BeaconStateBellatrix")

@@ -582,7 +582,7 @@ func ProtobufBeaconStateBellatrix(s interface{}) (*ethpb.BeaconStateBellatrix, e

// ProtobufBeaconStateCapella transforms an input into beacon state Capella in the form of protobuf.
// Error is returned if the input is not type protobuf beacon state.
-func ProtobufBeaconStateCapella(s interface{}) (*ethpb.BeaconStateCapella, error) {
+func ProtobufBeaconStateCapella(s any) (*ethpb.BeaconStateCapella, error) {
pbState, ok := s.(*ethpb.BeaconStateCapella)
if !ok {
return nil, errors.New("input is not type pb.BeaconStateCapella")

@@ -592,7 +592,7 @@ func ProtobufBeaconStateCapella(s interface{}) (*ethpb.BeaconStateCapella, error

// ProtobufBeaconStateDeneb transforms an input into beacon state Deneb in the form of protobuf.
// Error is returned if the input is not type protobuf beacon state.
-func ProtobufBeaconStateDeneb(s interface{}) (*ethpb.BeaconStateDeneb, error) {
+func ProtobufBeaconStateDeneb(s any) (*ethpb.BeaconStateDeneb, error) {
pbState, ok := s.(*ethpb.BeaconStateDeneb)
if !ok {
return nil, errors.New("input is not type pb.BeaconStateDeneb")

@@ -602,7 +602,7 @@ func ProtobufBeaconStateDeneb(s interface{}) (*ethpb.BeaconStateDeneb, error) {

// ProtobufBeaconStateElectra transforms an input into beacon state Electra in the form of protobuf.
// Error is returned if the input is not type protobuf beacon state.
-func ProtobufBeaconStateElectra(s interface{}) (*ethpb.BeaconStateElectra, error) {
+func ProtobufBeaconStateElectra(s any) (*ethpb.BeaconStateElectra, error) {
pbState, ok := s.(*ethpb.BeaconStateElectra)
if !ok {
return nil, errors.New("input is not type pb.BeaconStateElectra")

@@ -612,7 +612,7 @@ func ProtobufBeaconStateElectra(s interface{}) (*ethpb.BeaconStateElectra, error

// ProtobufBeaconStateFulu transforms an input into beacon state Fulu in the form of protobuf.
// Error is returned if the input is not type protobuf beacon state.
-func ProtobufBeaconStateFulu(s interface{}) (*ethpb.BeaconStateFulu, error) {
+func ProtobufBeaconStateFulu(s any) (*ethpb.BeaconStateFulu, error) {
pbState, ok := s.(*ethpb.BeaconStateFulu)
if !ok {
return nil, errors.New("input is not type pb.BeaconStateFulu")
@@ -34,7 +34,7 @@ func (b *BeaconState) validatorsVal() []*ethpb.Validator {
v = b.validatorsMultiValue.Value(b)

res := make([]*ethpb.Validator, len(v))
-for i := 0; i < len(res); i++ {
+for i := range res {
val := v[i]
if val == nil {
continue

@@ -52,7 +52,7 @@ func (b *BeaconState) validatorsReadOnlyVal() []state.ReadOnlyValidator {

res := make([]state.ReadOnlyValidator, len(v))
var err error
-for i := 0; i < len(res); i++ {
+for i := range res {
val := v[i]
if val == nil {
continue

@@ -172,7 +172,7 @@ func (b *BeaconState) PublicKeys() ([][fieldparams.BLSPubkeyLength]byte, error)

l := b.validatorsLen()
res := make([][fieldparams.BLSPubkeyLength]byte, l)
-for i := 0; i < l; i++ {
+for i := range l {
val, err := b.validatorsMultiValue.At(b, uint64(i))
if err != nil {
return nil, err

@@ -201,7 +201,7 @@ func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyV
return state.ErrNilValidatorsInState
}
l := b.validatorsMultiValue.Len(b)
-for i := 0; i < l; i++ {
+for i := range l {
v, err := b.validatorsMultiValue.At(b, uint64(i))
if err != nil {
return err
@@ -161,7 +161,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err

validatorsLen := b.validatorsLen()
bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
-for i := uint64(0); i < bound; i++ {
+for range bound {
val, err := b.validatorAtIndexReadOnly(validatorIndex)
if err != nil {
return nil, 0, errors.Wrapf(err, "could not retrieve validator at index %d", validatorIndex)

@@ -92,7 +92,7 @@ func TestExpectedWithdrawals(t *testing.T) {

vals := make([]*ethpb.Validator, 100)
balances := make([]uint64, 100)
-for i := 0; i < 100; i++ {
+for i := range 100 {
balances[i] = params.BeaconConfig().MaxEffectiveBalance
val := &ethpb.Validator{
WithdrawalCredentials: make([]byte, 32),

@@ -124,7 +124,7 @@ func TestExpectedWithdrawals(t *testing.T) {

vals := make([]*ethpb.Validator, 100)
balances := make([]uint64, 100)
-for i := 0; i < 100; i++ {
+for i := range 100 {
balances[i] = params.BeaconConfig().MaxEffectiveBalance
val := &ethpb.Validator{
WithdrawalCredentials: make([]byte, 32),
@@ -29,16 +29,16 @@ func TestBeaconState_RotateAttestations(t *testing.T) {
func TestAppendBeyondIndicesLimit(t *testing.T) {
zeroHash := params.BeaconConfig().ZeroHash
mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
-for i := 0; i < len(mockblockRoots); i++ {
+for i := range mockblockRoots {
mockblockRoots[i] = zeroHash[:]
}

mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
-for i := 0; i < len(mockstateRoots); i++ {
+for i := range mockstateRoots {
mockstateRoots[i] = zeroHash[:]
}
mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
-for i := 0; i < len(mockrandaoMixes); i++ {
+for i := range mockrandaoMixes {
mockrandaoMixes[i] = zeroHash[:]
}
st, err := InitializeFromProtoPhase0(&ethpb.BeaconState{

@@ -61,13 +61,13 @@ func TestAppendBeyondIndicesLimit(t *testing.T) {
}
_, err = st.HashTreeRoot(t.Context())
require.NoError(t, err)
-for i := 0; i < 10; i++ {
+for range 10 {
assert.NoError(t, st.AppendValidator(&ethpb.Validator{}))
}
assert.Equal(t, false, s.rebuildTrie[types.Validators])
assert.NotEqual(t, len(s.dirtyIndices[types.Validators]), 0)

-for i := 0; i < indicesLimit; i++ {
+for range indicesLimit {
assert.NoError(t, st.AppendValidator(&ethpb.Validator{}))
}
assert.Equal(t, true, s.rebuildTrie[types.Validators])
@@ -88,10 +88,8 @@ func BenchmarkAppendPreviousEpochAttestations(b *testing.B) {
require.NoError(b, err)
}

-b.ResetTimer()
-
ref := st.Copy()
-for i := 0; i < b.N; i++ {
+for i := 0; b.Loop(); i++ {
err := ref.AppendPreviousEpochAttestations(&ethpb.PendingAttestation{Data: &ethpb.AttestationData{Slot: primitives.Slot(i)}})
require.NoError(b, err)
ref = st.Copy()

@@ -30,7 +30,7 @@ func BenchmarkAppendEth1DataVotes(b *testing.B) {

ref := st.Copy()

-for i := 0; i < b.N; i++ {
+for i := 0; b.Loop(); i++ {
err := ref.AppendEth1DataVotes(&ethpb.Eth1Data{DepositCount: uint64(i)})
require.NoError(b, err)
ref = st.Copy()

@@ -27,9 +27,7 @@ func BenchmarkAppendHistoricalRoots(b *testing.B) {

ref := st.Copy()

-b.ResetTimer()
-
-for i := 0; i < b.N; i++ {
+for b.Loop() {
err := ref.AppendHistoricalRoots(root)
require.NoError(b, err)
ref = st.Copy()

@@ -52,9 +50,7 @@ func BenchmarkAppendHistoricalSummaries(b *testing.B) {

ref := st.Copy()

-b.ResetTimer()
-
-for i := 0; i < b.N; i++ {
+for b.Loop() {
err := ref.AppendHistoricalSummaries(&ethpb.HistoricalSummary{})
require.NoError(b, err)
ref = st.Copy()

@@ -18,9 +18,8 @@ func BenchmarkParticipationBits(b *testing.B) {
}

ref := st.Copy()
-b.ResetTimer()

-for i := 0; i < b.N; i++ {
+for b.Loop() {
require.NoError(b, ref.AppendCurrentParticipationBits(byte(2)))
ref = st.Copy()
}
@@ -88,8 +88,8 @@ func TestSetLatestExecutionPayloadHeader(t *testing.T) {

t.Run("mismatched type version returns error", func(t *testing.T) {
require.Equal(t, len(payloads), len(payloadHeaders), "This test will fail if the payloads and payload headers are not same length")
-for i := 0; i < len(payloads); i++ {
-for j := 0; j < len(payloads); j++ {
+for i := range payloads {
+for j := range payloads {
if i == j {
continue
}

@@ -33,7 +33,7 @@ func (b *BeaconState) SetValidators(val []*ethpb.Validator) error {
func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val state.ReadOnlyValidator) (*ethpb.Validator, error)) error {
var changedVals []uint64
l := b.validatorsMultiValue.Len(b)
-for i := 0; i < l; i++ {
+for i := range l {
v, err := b.validatorsMultiValue.At(b, uint64(i))
if err != nil {
return err
@@ -18,9 +18,8 @@ func BenchmarkAppendBalance(b *testing.B) {
}

ref := st.Copy()
-b.ResetTimer()

-for i := 0; i < b.N; i++ {
+for i := 0; b.Loop(); i++ {
require.NoError(b, ref.AppendBalance(uint64(i)))
ref = st.Copy()
}

@@ -36,9 +35,8 @@ func BenchmarkAppendInactivityScore(b *testing.B) {
}

ref := st.Copy()
-b.ResetTimer()

-for i := 0; i < b.N; i++ {
+for i := 0; b.Loop(); i++ {
require.NoError(b, ref.AppendInactivityScore(uint64(i)))
ref = st.Copy()
}
@@ -47,11 +47,10 @@ func TestBeaconState_NoDeadlock_Phase0(t *testing.T) {

wg := new(sync.WaitGroup)

-wg.Add(1)
-go func() {
+wg.Go(func() {
// Continuously lock and unlock the state
// by acquiring the lock.
-for i := 0; i < 1000; i++ {
+for range 1000 {
for _, f := range st.stateFieldLeaves {
f.Lock()
if f.Empty() {

@@ -61,12 +60,11 @@ func TestBeaconState_NoDeadlock_Phase0(t *testing.T) {
f.FieldReference().AddRef()
}
}
-wg.Done()
-}()
+})
// Constantly read from the offending portion
// of the code to ensure there is no possible
// recursive read locking.
-for i := 0; i < 1000; i++ {
+for range 1000 {
go func() {
_ = st.FieldReferencesCount()
}()

@@ -103,11 +101,10 @@ func TestBeaconState_NoDeadlock_Altair(t *testing.T) {

wg := new(sync.WaitGroup)

-wg.Add(1)
-go func() {
+wg.Go(func() {
// Continuously lock and unlock the state
// by acquiring the lock.
-for i := 0; i < 1000; i++ {
+for range 1000 {
for _, f := range s.stateFieldLeaves {
f.Lock()
if f.Empty() {

@@ -117,12 +114,11 @@ func TestBeaconState_NoDeadlock_Altair(t *testing.T) {
f.FieldReference().AddRef()
}
}
-wg.Done()
-}()
+})
// Constantly read from the offending portion
// of the code to ensure there is no possible
// recursive read locking.
-for i := 0; i < 1000; i++ {
+for range 1000 {
go func() {
_ = st.FieldReferencesCount()
}()

@@ -159,11 +155,10 @@ func TestBeaconState_NoDeadlock_Bellatrix(t *testing.T) {

wg := new(sync.WaitGroup)

-wg.Add(1)
-go func() {
+wg.Go(func() {
// Continuously lock and unlock the state
// by acquiring the lock.
-for i := 0; i < 1000; i++ {
+for range 1000 {
for _, f := range s.stateFieldLeaves {
f.Lock()
if f.Empty() {

@@ -173,12 +168,11 @@ func TestBeaconState_NoDeadlock_Bellatrix(t *testing.T) {
f.FieldReference().AddRef()
}
}
-wg.Done()
-}()
+})
// Constantly read from the offending portion
// of the code to ensure there is no possible
// recursive read locking.
-for i := 0; i < 1000; i++ {
+for range 1000 {
go func() {
_ = st.FieldReferencesCount()
}()

@@ -215,11 +209,10 @@ func TestBeaconState_NoDeadlock_Capella(t *testing.T) {

wg := new(sync.WaitGroup)

-wg.Add(1)
-go func() {
+wg.Go(func() {
// Continuously lock and unlock the state
// by acquiring the lock.
-for i := 0; i < 1000; i++ {
+for range 1000 {
for _, f := range s.stateFieldLeaves {
f.Lock()
if f.Empty() {

@@ -229,12 +222,11 @@ func TestBeaconState_NoDeadlock_Capella(t *testing.T) {
f.FieldReference().AddRef()
}
}
-wg.Done()
-}()
+})
// Constantly read from the offending portion
// of the code to ensure there is no possible
// recursive read locking.
-for i := 0; i < 1000; i++ {
+for range 1000 {
go func() {
_ = st.FieldReferencesCount()
}()

@@ -271,11 +263,10 @@ func TestBeaconState_NoDeadlock_Deneb(t *testing.T) {

wg := new(sync.WaitGroup)

-wg.Add(1)
-go func() {
+wg.Go(func() {
// Continuously lock and unlock the state
// by acquiring the lock.
-for i := 0; i < 1000; i++ {
+for range 1000 {
for _, f := range s.stateFieldLeaves {
f.Lock()
if f.Empty() {

@@ -285,12 +276,11 @@ func TestBeaconState_NoDeadlock_Deneb(t *testing.T) {
f.FieldReference().AddRef()
}
}
-wg.Done()
-}()
+})
// Constantly read from the offending portion
// of the code to ensure there is no possible
// recursive read locking.
-for i := 0; i < 1000; i++ {
+for range 1000 {
go func() {
_ = st.FieldReferencesCount()
}()

@@ -307,7 +297,7 @@ func TestBeaconState_AppendBalanceWithTrie(t *testing.T) {
_, err := st.HashTreeRoot(t.Context())
assert.NoError(t, err)

-for i := 0; i < 100; i++ {
+for i := range 100 {
if i%2 == 0 {
assert.NoError(t, st.UpdateBalancesAtIndex(primitives.ValidatorIndex(i), 1000))
}
@@ -392,7 +382,7 @@ func TestDuplicateDirtyIndices(t *testing.T) {
rebuildTrie: make(map[types.FieldIndex]bool),
dirtyIndices: make(map[types.FieldIndex][]uint64),
}
-for i := uint64(0); i < indicesLimit-5; i++ {
+for i := range uint64(indicesLimit - 5) {
newState.dirtyIndices[types.Balances] = append(newState.dirtyIndices[types.Balances], i)
}
// Append duplicates

@@ -430,16 +420,16 @@ func generateState(t *testing.T) state.BeaconState {
}
zeroHash := params.BeaconConfig().ZeroHash
mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
-for i := 0; i < len(mockblockRoots); i++ {
+for i := range mockblockRoots {
mockblockRoots[i] = zeroHash[:]
}

mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
-for i := 0; i < len(mockstateRoots); i++ {
+for i := range mockstateRoots {
mockstateRoots[i] = zeroHash[:]
}
mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
-for i := 0; i < len(mockrandaoMixes); i++ {
+for i := range mockrandaoMixes {
mockrandaoMixes[i] = zeroHash[:]
}
newState, err := InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
"runtime"
-"sort"
+"slices"

"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/fieldtrie"

@@ -1204,7 +1204,7 @@ func (b *BeaconState) CopyAllTries() {
}
}

-func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interface{}) ([32]byte, error) {
+func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements any) ([32]byte, error) {
fTrie := b.stateFieldLeaves[index]
fTrieMutex := fTrie.RWMutex
// We can't lock the trie directly because the trie's variable gets reassigned,

@@ -1241,9 +1241,7 @@ func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interf
// remove duplicate indexes
b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index])
// sort indexes again
-sort.Slice(b.dirtyIndices[index], func(i int, j int) bool {
-return b.dirtyIndices[index][i] < b.dirtyIndices[index][j]
-})
+slices.Sort(b.dirtyIndices[index])
root, err := fTrie.RecomputeTrie(b.dirtyIndices[index], elements)
if err != nil {
return [32]byte{}, err

@@ -1252,7 +1250,7 @@ func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interf
return root, nil
}

-func (b *BeaconState) resetFieldTrie(index types.FieldIndex, elements interface{}, length uint64) error {
+func (b *BeaconState) resetFieldTrie(index types.FieldIndex, elements any, length uint64) error {
fTrie, err := fieldtrie.NewFieldTrie(index, fieldMap[index], elements, length)
if err != nil {
return err
@@ -74,11 +74,11 @@ func setupGenesisState(t testing.TB, count uint64) *ethpb.BeaconState {
}

func BenchmarkCloneValidators_Proto(b *testing.B) {
-b.StopTimer()
+
validators := make([]*ethpb.Validator, 16384)
somePubKey := [fieldparams.BLSPubkeyLength]byte{1, 2, 3}
someRoot := [32]byte{3, 4, 5}
-for i := 0; i < len(validators); i++ {
+for i := range validators {
validators[i] = &ethpb.Validator{
PublicKey: somePubKey[:],
WithdrawalCredentials: someRoot[:],

@@ -90,18 +90,18 @@ func BenchmarkCloneValidators_Proto(b *testing.B) {
WithdrawableEpoch: 5,
}
}
-b.StartTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
cloneValidatorsWithProto(validators)
}
}

func BenchmarkCloneValidators_Manual(b *testing.B) {
-b.StopTimer()
+
validators := make([]*ethpb.Validator, 16384)
somePubKey := [fieldparams.BLSPubkeyLength]byte{1, 2, 3}
someRoot := [32]byte{3, 4, 5}
-for i := 0; i < len(validators); i++ {
+for i := range validators {
validators[i] = &ethpb.Validator{
PublicKey: somePubKey[:],
WithdrawalCredentials: someRoot[:],

@@ -113,33 +113,33 @@ func BenchmarkCloneValidators_Manual(b *testing.B) {
WithdrawableEpoch: 5,
}
}
-b.StartTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
cloneValidatorsManually(validators)
}
}

func BenchmarkStateClone_Proto(b *testing.B) {
-b.StopTimer()
+
params.SetupTestConfigCleanup(b)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
genesis := setupGenesisState(b, 64)
-b.StartTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
_, ok := proto.Clone(genesis).(*ethpb.BeaconState)
assert.Equal(b, true, ok, "Entity is not of type *ethpb.BeaconState")
}
}

func BenchmarkStateClone_Manual(b *testing.B) {
-b.StopTimer()
+
params.SetupTestConfigCleanup(b)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
genesis := setupGenesisState(b, 64)
st, err := statenative.InitializeFromProtoPhase0(genesis)
require.NoError(b, err)
-b.StartTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
_ = st.ToProto()
}
}

@@ -147,7 +147,7 @@ func BenchmarkStateClone_Manual(b *testing.B) {
func cloneValidatorsWithProto(vals []*ethpb.Validator) []*ethpb.Validator {
var ok bool
res := make([]*ethpb.Validator, len(vals))
-for i := 0; i < len(res); i++ {
+for i := range res {
res[i], ok = proto.Clone(vals[i]).(*ethpb.Validator)
if !ok {
log.Debug("Entity is not of type *ethpb.Validator")

@@ -158,7 +158,7 @@ func cloneValidatorsWithProto(vals []*ethpb.Validator) []*ethpb.Validator {

func cloneValidatorsManually(vals []*ethpb.Validator) []*ethpb.Validator {
res := make([]*ethpb.Validator, len(vals))
-for i := 0; i < len(res); i++ {
+for i := range res {
val := vals[i]
res[i] = &ethpb.Validator{
PublicKey: val.PublicKey,
@@ -26,7 +26,7 @@ type slotRootInfo struct {

// slotKeyFn takes the string representation of the slot to be used as key
// to retrieve root.
-func slotKeyFn(obj interface{}) (string, error) {
+func slotKeyFn(obj any) (string, error) {
s, ok := obj.(*slotRootInfo)
if !ok {
return "", errNotSlotRootInfo

@@ -42,7 +42,7 @@ type rootStateInfo struct {

// rootKeyFn takes the string representation of the block root to be used as key
// to retrieve epoch boundary state.
-func rootKeyFn(obj interface{}) (string, error) {
+func rootKeyFn(obj any) (string, error) {
s, ok := obj.(*rootStateInfo)
if !ok {
return "", errNotRootStateInfo

@@ -184,7 +184,7 @@ func trim(queue *cache.FIFO, maxSize uint64) {
}

// popProcessNoopFunc is a no-op function that never returns an error.
-func popProcessNoopFunc(_ interface{}, _ bool) error {
+func popProcessNoopFunc(_ any, _ bool) error {
return nil
}

@@ -188,7 +188,7 @@ func (c *CanonicalHistory) ancestorChain(ctx context.Context, tail interfaces.Re
func reverseChain(c []interfaces.ReadOnlySignedBeaconBlock) {
last := len(c) - 1
swaps := (last + 1) / 2
-for i := 0; i < swaps; i++ {
+for i := range swaps {
c[i], c[last-i] = c[last-i], c[i]
}
}

@@ -530,7 +530,7 @@ func (m *mockCanonicalChecker) IsCanonical(_ context.Context, root [32]byte) (bo

func TestReverseChain(t *testing.T) {
// test 0,1,2,3 elements to handle: zero case; single element; even number; odd number
-for i := 0; i < 4; i++ {
+for i := range 4 {
t.Run(fmt.Sprintf("reverseChain with %d elements", i), func(t *testing.T) {
actual := mockBlocks(i, incrFwd)
expected := mockBlocks(i, incrBwd)

@@ -538,7 +538,7 @@ func TestReverseChain(t *testing.T) {
if len(actual) != len(expected) {
t.Errorf("different list lengths")
}
-for i := 0; i < len(actual); i++ {
+for i := range actual {
sblockA, ok := actual[i].(*mock.SignedBeaconBlock)
require.Equal(t, true, ok)
blockA, ok := sblockA.BeaconBlock.(*mock.BeaconBlock)

@@ -561,7 +561,7 @@ func incrBwd(n int, c chan uint32) {
}

func incrFwd(n int, c chan uint32) {
-for i := 0; i < n; i++ {
+for i := range n {
c <- uint32(i)
}
close(c)
@@ -86,7 +86,7 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
// you can just remove it from the hot state cache as it becomes redundant.
s.saveHotStateDB.lock.Lock()
roots := s.saveHotStateDB.blockRootsOfSavedStates
-for i := 0; i < len(roots); i++ {
+for i := range roots {
if aRoot == roots[i] {
s.saveHotStateDB.blockRootsOfSavedStates = append(roots[:i], roots[i+1:]...)
// There shouldn't be duplicated roots in `blockRootsOfSavedStates`.

@@ -10,7 +10,7 @@ import (

func BenchmarkMerkleize_Buffered(b *testing.B) {
roots := make([][32]byte, 8192)
-for i := 0; i < 8192; i++ {
+for i := range 8192 {
roots[0] = [32]byte{byte(i)}
}

@@ -21,9 +21,8 @@ func BenchmarkMerkleize_Buffered(b *testing.B) {
return ssz.Merkleize(ssz.NewHasherFunc(hash.CustomSHA256Hasher()), count, limit, leafIndexer), nil
}

-b.ResetTimer()
b.ReportAllocs()
-for i := 0; i < b.N; i++ {
+for b.Loop() {
_, err := newMerkleize(roots, 8192, 8192)
require.NoError(b, err)
}
@@ -18,7 +18,7 @@ func Eth1DataRootWithHasher(eth1Data *ethpb.Eth1Data) ([32]byte, error) {
}

fieldRoots := make([][32]byte, 3)
-for i := 0; i < len(fieldRoots); i++ {
+for i := range fieldRoots {
fieldRoots[i] = [32]byte{}
}

@@ -42,7 +42,7 @@ func Eth1DataRootWithHasher(eth1Data *ethpb.Eth1Data) ([32]byte, error) {
// Eth1DatasRoot returns the hash tree root of input `eth1Datas`.
func Eth1DatasRoot(eth1Datas []*ethpb.Eth1Data) ([32]byte, error) {
eth1VotesRoots := make([][32]byte, 0, len(eth1Datas))
-for i := 0; i < len(eth1Datas); i++ {
+for i := range eth1Datas {
eth1, err := Eth1DataRootWithHasher(eth1Datas[i])
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute eth1data merkleization")

@@ -24,7 +24,7 @@ func EpochAttestationsRoot(atts []*ethpb.PendingAttestation) ([32]byte, error) {
}

roots := make([][32]byte, len(atts))
-for i := 0; i < len(atts); i++ {
+for i := range atts {
pendingRoot, err := pendingAttestationRoot(atts[i])
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not attestation merkleization")

@@ -56,7 +56,7 @@ func validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {

func hashValidatorHelper(validators []*ethpb.Validator, roots [][32]byte, j int, groupSize int, wg *sync.WaitGroup) {
defer wg.Done()
-for i := 0; i < groupSize; i++ {
+for i := range groupSize {
fRoots, err := ValidatorFieldRoots(validators[j*groupSize+i])
if err != nil {
logrus.WithError(err).Error("Could not get validator field roots")

@@ -98,7 +98,7 @@ func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error)
// A validator's tree can represented with a depth of 3. As log2(8) = 3
// Using this property we can lay out all the individual fields of a
// validator and hash them in single level using our vectorized routine.
-for i := 0; i < validatorTreeDepth; i++ {
+for range validatorTreeDepth {
// Overwrite input lists as we are hashing by level
// and only need the highest level to proceed.
roots = htr.VectorizedSha256(roots)
@@ -18,7 +18,7 @@ func TestValidatorConstants(t *testing.T) {
numFields := refV.NumField()
numOfValFields := 0

-for i := 0; i < numFields; i++ {
+for i := range numFields {
if strings.Contains(refV.Type().Field(i).Name, "state") ||
strings.Contains(refV.Type().Field(i).Name, "sizeCache") ||
strings.Contains(refV.Type().Field(i).Name, "unknownFields") {

@@ -43,13 +43,13 @@ func TestHashValidatorHelper(t *testing.T) {
}
roots := make([][32]byte, len(valList))
hashValidatorHelper(valList, roots, 2, 2, &wg)
-for i := 0; i < 4*validatorFieldRoots; i++ {
+for i := range 4 * validatorFieldRoots {
require.Equal(t, [32]byte{}, roots[i])
}
emptyValRoots, err := ValidatorFieldRoots(v)
require.NoError(t, err)
for i := 4; i < 6; i++ {
-for j := 0; j < validatorFieldRoots; j++ {
+for j := range validatorFieldRoots {
require.Equal(t, emptyValRoots[j], roots[i*validatorFieldRoots+j])
}
}

@@ -34,12 +34,10 @@ func packParticipationBits(bytes []byte) ([][32]byte, error) {
numItems := len(bytes)
chunks := make([][32]byte, 0, numItems/32)
for i := 0; i < numItems; i += 32 {
-j := i + 32
-// We create our upper bound index of the chunk, if it is greater than numItems,
-// we set it as numItems itself.
-if j > numItems {
-j = numItems
-}
+j := min(
+// We create our upper bound index of the chunk, if it is greater than numItems,
+// we set it as numItems itself.
+i+32, numItems)
// We create chunks from the list of items based on the
// indices determined above.
var chunk [32]byte
@@ -9,7 +9,7 @@ func BenchmarkReference_MinusRef(b *testing.B) {
ref := &Reference{
refs: math.MaxUint64,
}
-for i := 0; i < b.N; i++ {
+for b.Loop() {
ref.MinusRef()
}
}

@@ -15,7 +15,7 @@ import (

func TestState_FieldCount(t *testing.T) {
count := params.BeaconConfig().BeaconStateFieldCount
-typ := reflect.TypeOf(ethpb.BeaconState{})
+typ := reflect.TypeFor[ethpb.BeaconState]()
numFields := 0
for i := 0; i < typ.NumField(); i++ {
if typ.Field(i).Name == "state" ||

@@ -29,30 +29,30 @@ func TestState_FieldCount(t *testing.T) {
}

func BenchmarkHashTreeRoot_Generic_512(b *testing.B) {
-b.StopTimer()
+
genesisState := setupGenesisState(b, 512)
-b.StartTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
_, err := genesisState.HashTreeRoot()
require.NoError(b, err)
}
}

func BenchmarkHashTreeRoot_Generic_16384(b *testing.B) {
-b.StopTimer()
+
genesisState := setupGenesisState(b, 16384)
-b.StartTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
_, err := genesisState.HashTreeRoot()
require.NoError(b, err)
}
}

func BenchmarkHashTreeRoot_Generic_300000(b *testing.B) {
-b.StopTimer()
+
genesisState := setupGenesisState(b, 300000)
-b.StartTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
_, err := genesisState.HashTreeRoot()
require.NoError(b, err)
}
@@ -61,7 +61,7 @@ func ReturnTrieLayerVariable(elements [][32]byte, length uint64) [][]*[32]byte {
buffer := bytes.NewBuffer([]byte{})
buffer.Grow(64)

-for i := uint8(0); i < depth; i++ {
+for i := range depth {
layerLen := len(layers[i])
oddNodeLength := layerLen%2 == 1
if oddNodeLength {

@@ -36,8 +36,7 @@ func BenchmarkReturnTrieLayer_NormalAlgorithm(b *testing.B) {
require.NoError(b, err)
roots := retrieveBlockRoots(newState)

-b.ResetTimer()
-for i := 0; i < b.N; i++ {
+for b.Loop() {
layers, err := stateutil.ReturnTrieLayer(roots, uint64(len(roots)))
assert.NoError(b, err)
newRoot := *layers[len(layers)-1][0]

@@ -51,8 +50,7 @@ func BenchmarkReturnTrieLayer_VectorizedAlgorithm(b *testing.B) {
require.NoError(b, err)
roots := retrieveBlockRoots(newState)

-b.ResetTimer()
-for i := 0; i < b.N; i++ {
+for b.Loop() {
layers, err := stateutil.ReturnTrieLayer(roots, uint64(len(roots)))
assert.NoError(b, err)
newRoot := *layers[len(layers)-1][0]

@@ -96,8 +94,8 @@ func BenchmarkReturnTrieLayerVariable_NormalAlgorithm(b *testing.B) {
require.NoError(b, err)
roots = append(roots, rt)
}
-b.ResetTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
layers := stateutil.ReturnTrieLayerVariable(roots, params.BeaconConfig().ValidatorRegistryLimit)
newRoot := *layers[len(layers)-1][0]
newRoot, err = stateutil.AddInMixin(newRoot, uint64(len(validators)))

@@ -118,8 +116,8 @@ func BenchmarkReturnTrieLayerVariable_VectorizedAlgorithm(b *testing.B) {
require.NoError(b, err)
roots = append(roots, rt)
}
-b.ResetTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
layers := stateutil.ReturnTrieLayerVariable(roots, params.BeaconConfig().ValidatorRegistryLimit)
newRoot := *layers[len(layers)-1][0]
newRoot, err = stateutil.AddInMixin(newRoot, uint64(len(validators)))
@@ -22,7 +22,7 @@ func UnrealizedCheckpointBalances(cp, pp []byte, validators ValReader, currentEp
}

valLength := validators.Len()
-for i := 0; i < valLength; i++ {
+for i := range valLength {
v, err := validators.At(i)
if err != nil {
return 0, 0, 0, err

@@ -15,7 +15,7 @@ func TestState_UnrealizedCheckpointBalances(t *testing.T) {
expectedActive := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance

balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
-for i := 0; i < len(validators); i++ {
+for i := range validators {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,

@@ -100,7 +100,7 @@ func TestState_MVSlice_UnrealizedCheckpointBalances(t *testing.T) {
expectedActive := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance

balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
-for i := 0; i < len(validators); i++ {
+for i := range validators {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,

@@ -8,11 +8,11 @@ import (

func BenchmarkUint64ListRootWithRegistryLimit(b *testing.B) {
balances := make([]uint64, 100000)
-for i := 0; i < len(balances); i++ {
+for i := range balances {
balances[i] = uint64(i)
}
b.Run("100k balances", func(b *testing.B) {
-for i := 0; i < b.N; i++ {
+for b.Loop() {
_, err := stateutil.Uint64ListRootWithRegistryLimit(balances)
if err != nil {
b.Fatal(err)