Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-05 03:24:14 -05:00
Add golang.org/x/tools modernize static analyzer and fix violations (#15946)
* Ran gopls modernize to fix everything: `go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...`
* Override the rules_go-provided dependency for golang.org/x/tools to v0.38.0. To update this, checked out rules_go, then ran `bazel run //go/tools/releaser -- upgrade-dep -mirror=false org_golang_x_tools` and copied the patches.
* Fix buildtag violations and ignore buildtag violations in external
* Introduce modernize analyzer package.
* Add modernize "any" analyzer.
* Fix violations of any analyzer
* Add modernize "appendclipped" analyzer.
* Fix violations of appendclipped
* Add modernize "bloop" analyzer.
* Add modernize "fmtappendf" analyzer.
* Add modernize "forvar" analyzer.
* Add modernize "mapsloop" analyzer.
* Add modernize "minmax" analyzer.
* Fix violations of minmax analyzer
* Add modernize "omitzero" analyzer.
* Add modernize "rangeint" analyzer.
* Fix violations of rangeint.
* Add modernize "reflecttypefor" analyzer.
* Fix violations of reflecttypefor analyzer.
* Add modernize "slicescontains" analyzer.
* Add modernize "slicessort" analyzer.
* Add modernize "slicesdelete" analyzer. This is disabled by default for now. See https://go.dev/issue/73686.
* Add modernize "stringscutprefix" analyzer.
* Add modernize "stringsbuilder" analyzer.
* Fix violations of stringsbuilder analyzer.
* Add modernize "stringsseq" analyzer.
* Add modernize "testingcontext" analyzer.
* Add modernize "waitgroup" analyzer.
* Changelog fragment
* gofmt
* gazelle
* Add modernize "newexpr" analyzer.
* Disable newexpr until go1.26
* Add more details in WORKSPACE on how to update the override
* @nalepae feedback on min()
* gofmt
* Fix violations of forvar
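For reference, a minimal, self-contained sketch of the kinds of rewrites these analyzers enforce; the values below are illustrative and not taken from Prysm code (testing-only rewrites such as b.Loop() and wg.Go() appear in the diff hunks that follow):

```go
package main

import "fmt"

func main() {
	vals := []int{1, 2, 3}

	// rangeint/forvar: `for i := 0; i < len(vals); i++` becomes a range loop.
	for i := range vals {
		vals[i] *= 2
	}

	// any: `interface{}` becomes the `any` alias.
	var x any = vals
	_ = x

	// fmtappendf: `[]byte(fmt.Sprintf(...))` becomes `fmt.Appendf(nil, ...)`.
	buf := fmt.Appendf(nil, "%d", 42)

	// minmax: a hand-rolled clamp becomes the built-in min().
	limit := min(len(buf), 8)

	fmt.Println(vals, string(buf[:limit]))
}
```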
BUILD.bazel (19 lines changed)
@@ -197,6 +197,25 @@ nogo(
        "//tools/analyzers/logcapitalization:go_default_library",
        "//tools/analyzers/logruswitherror:go_default_library",
        "//tools/analyzers/maligned:go_default_library",
+       "//tools/analyzers/modernize/any:go_default_library",
+       "//tools/analyzers/modernize/appendclipped:go_default_library",
+       "//tools/analyzers/modernize/bloop:go_default_library",
+       "//tools/analyzers/modernize/fmtappendf:go_default_library",
+       "//tools/analyzers/modernize/forvar:go_default_library",
+       "//tools/analyzers/modernize/mapsloop:go_default_library",
+       "//tools/analyzers/modernize/minmax:go_default_library",
+       #"//tools/analyzers/modernize/newexpr:go_default_library",  # Disabled until go 1.26.
+       "//tools/analyzers/modernize/omitzero:go_default_library",
+       "//tools/analyzers/modernize/rangeint:go_default_library",
+       "//tools/analyzers/modernize/reflecttypefor:go_default_library",
+       "//tools/analyzers/modernize/slicescontains:go_default_library",
+       #"//tools/analyzers/modernize/slicesdelete:go_default_library",  # Disabled, see https://go.dev/issue/73686
+       "//tools/analyzers/modernize/slicessort:go_default_library",
+       "//tools/analyzers/modernize/stringsbuilder:go_default_library",
+       "//tools/analyzers/modernize/stringscutprefix:go_default_library",
+       "//tools/analyzers/modernize/stringsseq:go_default_library",
+       "//tools/analyzers/modernize/testingcontext:go_default_library",
+       "//tools/analyzers/modernize/waitgroup:go_default_library",
        "//tools/analyzers/nop:go_default_library",
        "//tools/analyzers/nopanic:go_default_library",
        "//tools/analyzers/properpermissions:go_default_library",
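The new //tools/analyzers/modernize/* packages are wired into nogo like the existing analyzers, i.e. each exposes a golang.org/x/tools/go/analysis Analyzer. The snippet below is only an illustrative sketch of that plumbing, not Prysm's actual wrapper (which delegates to the patched x/tools modernize analyzers); it shows a toy check in the spirit of the "any" analyzer:

```go
package anycheck

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
)

// Analyzer reports empty interface literals that could be written as `any`.
var Analyzer = &analysis.Analyzer{
	Name: "anycheck",
	Doc:  "suggests replacing interface{} with the any alias",
	Run:  run,
}

func run(pass *analysis.Pass) (any, error) {
	for _, file := range pass.Files {
		ast.Inspect(file, func(n ast.Node) bool {
			iface, ok := n.(*ast.InterfaceType)
			// Only flag the empty interface; interfaces with methods are left alone.
			if ok && iface.Methods != nil && len(iface.Methods.List) == 0 {
				pass.Reportf(iface.Pos(), "interface{} can be replaced by any")
			}
			return true
		})
	}
	return nil, nil
}
```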
WORKSPACE (20 lines changed)
@@ -205,6 +205,26 @@ prysm_image_deps()

load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")

+# Override golang.org/x/tools to use v0.38.0 instead of v0.30.0
+# This is necessary as this dependency is required by rules_go and they do not accept dependency
+# update PRs. Instead, they ask downstream projects to override the dependency. To generate the
+# patches or update this dependency again, check out the rules_go repo then run the releaser tool.
+# bazel run //go/tools/releaser -- upgrade-dep -mirror=false org_golang_x_tools
+# Copy the patches and http_archive updates from rules_go here.
+http_archive(
+    name = "org_golang_x_tools",
+    patch_args = ["-p1"],
+    patches = [
+        "//third_party:org_golang_x_tools-deletegopls.patch",
+        "//third_party:org_golang_x_tools-gazelle.patch",
+    ],
+    sha256 = "8509908cd7fc35aa09ff49d8494e4fd25bab9e6239fbf57e0d8344f6bec5802b",
+    strip_prefix = "tools-0.38.0",
+    urls = [
+        "https://github.com/golang/tools/archive/refs/tags/v0.38.0.zip",
+    ],
+)
+
go_rules_dependencies()

go_register_toolchains(
@@ -56,7 +56,7 @@ func ParseAccept(header string) []mediaRange {
	}

	var out []mediaRange
-	for _, field := range strings.Split(header, ",") {
+	for field := range strings.SplitSeq(header, ",") {
		if r, ok := parseMediaRange(field); ok {
			out = append(out, r)
		}
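strings.SplitSeq (added in the Go 1.24 standard library) returns an iterator over the substrings instead of allocating the whole slice, which is why the stringsseq analyzer prefers it when the result is only ranged over. A minimal usage sketch, unrelated to the Prysm code above:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// SplitSeq yields each field lazily; no []string is built.
	for field := range strings.SplitSeq("text/html, application/json", ",") {
		fmt.Println(strings.TrimSpace(field))
	}
}
```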
@@ -421,7 +421,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid

func jsonValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]byte, error) {
	vs := make([]*structs.SignedValidatorRegistration, len(svr))
-	for i := 0; i < len(svr); i++ {
+	for i := range svr {
		vs[i] = structs.SignedValidatorRegistrationFromConsensus(svr[i])
	}
	body, err := json.Marshal(vs)
@@ -121,7 +121,7 @@ func (s *Uint64String) UnmarshalText(t []byte) error {

// MarshalText returns a byte representation of the text from Uint64String.
func (s Uint64String) MarshalText() ([]byte, error) {
-	return []byte(fmt.Sprintf("%d", s)), nil
+	return fmt.Appendf(nil, "%d", s), nil
}

// VersionResponse is a JSON representation of a field in the builder API header response.
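fmt.Appendf (Go 1.19+) formats directly into a byte slice, so the fmtappendf analyzer uses it to drop the intermediate string that []byte(fmt.Sprintf(...)) allocates. A small standalone sketch:

```go
package main

import "fmt"

func main() {
	// Appends the formatted text to the provided slice (nil starts a new one).
	buf := fmt.Appendf(nil, "%d-%s", 7, "ok")
	fmt.Println(string(buf)) // 7-ok
}
```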
@@ -15,7 +15,7 @@ import (
func LogRequests(
	ctx context.Context,
	method string, req,
-	reply interface{},
+	reply any,
	cc *grpc.ClientConn,
	invoker grpc.UnaryInvoker,
	opts ...grpc.CallOption,
@@ -14,5 +14,5 @@ type GetForkScheduleResponse struct {
}

type GetSpecResponse struct {
-	Data interface{} `json:"data"`
+	Data any `json:"data"`
}
@@ -93,9 +93,9 @@ func TestToggleMultipleTimes(t *testing.T) {
|
||||
|
||||
v := New()
|
||||
pre := !v.IsSet()
|
||||
for i := 0; i < 100; i++ {
|
||||
for i := range 100 {
|
||||
v.SetTo(false)
|
||||
for j := 0; j < i; j++ {
|
||||
for range i {
|
||||
pre = v.Toggle()
|
||||
}
|
||||
|
||||
@@ -149,7 +149,7 @@ func TestRace(t *testing.T) {
|
||||
|
||||
// Writer
|
||||
go func() {
|
||||
for i := 0; i < repeat; i++ {
|
||||
for range repeat {
|
||||
v.Set()
|
||||
wg.Done()
|
||||
}
|
||||
@@ -157,7 +157,7 @@ func TestRace(t *testing.T) {
|
||||
|
||||
// Reader
|
||||
go func() {
|
||||
for i := 0; i < repeat; i++ {
|
||||
for range repeat {
|
||||
v.IsSet()
|
||||
wg.Done()
|
||||
}
|
||||
@@ -165,7 +165,7 @@ func TestRace(t *testing.T) {
|
||||
|
||||
// Writer
|
||||
go func() {
|
||||
for i := 0; i < repeat; i++ {
|
||||
for range repeat {
|
||||
v.UnSet()
|
||||
wg.Done()
|
||||
}
|
||||
@@ -173,7 +173,7 @@ func TestRace(t *testing.T) {
|
||||
|
||||
// Reader And Writer
|
||||
go func() {
|
||||
for i := 0; i < repeat; i++ {
|
||||
for range repeat {
|
||||
v.Toggle()
|
||||
wg.Done()
|
||||
}
|
||||
@@ -198,8 +198,8 @@ func ExampleAtomicBool() {
|
||||
func BenchmarkMutexRead(b *testing.B) {
|
||||
var m sync.RWMutex
|
||||
var v bool
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
m.RLock()
|
||||
_ = v
|
||||
m.RUnlock()
|
||||
@@ -208,16 +208,16 @@ func BenchmarkMutexRead(b *testing.B) {
|
||||
|
||||
func BenchmarkAtomicValueRead(b *testing.B) {
|
||||
var v atomic.Value
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
_ = v.Load() != nil
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAtomicBoolRead(b *testing.B) {
|
||||
v := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
_ = v.IsSet()
|
||||
}
|
||||
}
|
||||
@@ -227,8 +227,8 @@ func BenchmarkAtomicBoolRead(b *testing.B) {
|
||||
func BenchmarkMutexWrite(b *testing.B) {
|
||||
var m sync.RWMutex
|
||||
var v bool
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
m.RLock()
|
||||
v = true
|
||||
m.RUnlock()
|
||||
@@ -239,16 +239,16 @@ func BenchmarkMutexWrite(b *testing.B) {
|
||||
|
||||
func BenchmarkAtomicValueWrite(b *testing.B) {
|
||||
var v atomic.Value
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
v.Store(true)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAtomicBoolWrite(b *testing.B) {
|
||||
v := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
v.Set()
|
||||
}
|
||||
}
|
||||
@@ -258,8 +258,8 @@ func BenchmarkAtomicBoolWrite(b *testing.B) {
|
||||
func BenchmarkMutexCAS(b *testing.B) {
|
||||
var m sync.RWMutex
|
||||
var v bool
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
m.Lock()
|
||||
if !v {
|
||||
v = true
|
||||
@@ -270,8 +270,8 @@ func BenchmarkMutexCAS(b *testing.B) {
|
||||
|
||||
func BenchmarkAtomicBoolCAS(b *testing.B) {
|
||||
v := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
v.SetToIf(false, true)
|
||||
}
|
||||
}
|
||||
@@ -281,8 +281,8 @@ func BenchmarkAtomicBoolCAS(b *testing.B) {
|
||||
func BenchmarkMutexToggle(b *testing.B) {
|
||||
var m sync.RWMutex
|
||||
var v bool
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
m.Lock()
|
||||
v = !v
|
||||
m.Unlock()
|
||||
@@ -291,8 +291,8 @@ func BenchmarkMutexToggle(b *testing.B) {
|
||||
|
||||
func BenchmarkAtomicBoolToggle(b *testing.B) {
|
||||
v := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
v.Toggle()
|
||||
}
|
||||
}
|
||||
|
||||
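The benchmark rewrites above come from the bloop analyzer and rely on testing.B.Loop (Go 1.24), which replaces the classic `for i := 0; i < b.N; i++` pattern: the testing package chooses the iteration count, times only the loop body, and keeps results alive so the compiler cannot optimize the body away. A minimal sketch of the modernized shape (the benchmarked work here is a stand-in, not Prysm code):

```go
package bench_test

import "testing"

func work() int {
	s := 0
	for i := range 1000 {
		s += i
	}
	return s
}

func BenchmarkWork(b *testing.B) {
	// for b.Loop() replaces `for i := 0; i < b.N; i++`.
	for b.Loop() {
		_ = work()
	}
}
```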
@@ -21,7 +21,7 @@ const (
|
||||
|
||||
func init() {
|
||||
input = make([][]byte, benchmarkElements)
|
||||
for i := 0; i < benchmarkElements; i++ {
|
||||
for i := range benchmarkElements {
|
||||
input[i] = make([]byte, benchmarkElementSize)
|
||||
_, err := rand.Read(input[i])
|
||||
if err != nil {
|
||||
@@ -35,7 +35,7 @@ func hash(input [][]byte) [][]byte {
|
||||
output := make([][]byte, len(input))
|
||||
for i := range input {
|
||||
copy(output, input)
|
||||
for j := 0; j < benchmarkHashRuns; j++ {
|
||||
for range benchmarkHashRuns {
|
||||
hash := sha256.Sum256(output[i])
|
||||
output[i] = hash[:]
|
||||
}
|
||||
@@ -44,15 +44,15 @@ func hash(input [][]byte) [][]byte {
|
||||
}
|
||||
|
||||
func BenchmarkHash(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
hash(input)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkHashMP(b *testing.B) {
|
||||
output := make([][]byte, len(input))
|
||||
for i := 0; i < b.N; i++ {
|
||||
workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
|
||||
for b.Loop() {
|
||||
workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (any, error) {
|
||||
return hash(input[offset : offset+entries]), nil
|
||||
})
|
||||
require.NoError(b, err)
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
|
||||
// Debounce events fired over a channel by a specified duration, ensuring no events
|
||||
// are handled until a certain interval of time has passed.
|
||||
func Debounce(ctx context.Context, interval time.Duration, eventsChan <-chan interface{}, handler func(interface{})) {
|
||||
func Debounce(ctx context.Context, interval time.Duration, eventsChan <-chan any, handler func(any)) {
|
||||
var timer *time.Timer
|
||||
defer func() {
|
||||
if timer != nil {
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
)
|
||||
|
||||
func TestDebounce_NoEvents(t *testing.T) {
|
||||
eventsChan := make(chan interface{}, 100)
|
||||
eventsChan := make(chan any, 100)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
interval := time.Second
|
||||
timesHandled := int32(0)
|
||||
@@ -26,7 +26,7 @@ func TestDebounce_NoEvents(t *testing.T) {
|
||||
})
|
||||
}()
|
||||
go func() {
|
||||
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
async.Debounce(ctx, interval, eventsChan, func(event any) {
|
||||
atomic.AddInt32(×Handled, 1)
|
||||
})
|
||||
wg.Done()
|
||||
@@ -38,7 +38,7 @@ func TestDebounce_NoEvents(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDebounce_CtxClosing(t *testing.T) {
|
||||
eventsChan := make(chan interface{}, 100)
|
||||
eventsChan := make(chan any, 100)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
interval := time.Second
|
||||
timesHandled := int32(0)
|
||||
@@ -62,7 +62,7 @@ func TestDebounce_CtxClosing(t *testing.T) {
|
||||
})
|
||||
}()
|
||||
go func() {
|
||||
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
async.Debounce(ctx, interval, eventsChan, func(event any) {
|
||||
atomic.AddInt32(×Handled, 1)
|
||||
})
|
||||
wg.Done()
|
||||
@@ -74,14 +74,14 @@ func TestDebounce_CtxClosing(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDebounce_SingleHandlerInvocation(t *testing.T) {
|
||||
eventsChan := make(chan interface{}, 100)
|
||||
eventsChan := make(chan any, 100)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
interval := time.Second
|
||||
timesHandled := int32(0)
|
||||
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
go async.Debounce(ctx, interval, eventsChan, func(event any) {
|
||||
atomic.AddInt32(×Handled, 1)
|
||||
})
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
eventsChan <- struct{}{}
|
||||
}
|
||||
// We should expect 100 rapid fire changes to only have caused
|
||||
@@ -92,14 +92,14 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
|
||||
eventsChan := make(chan interface{}, 100)
|
||||
eventsChan := make(chan any, 100)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
interval := time.Second
|
||||
timesHandled := int32(0)
|
||||
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
go async.Debounce(ctx, interval, eventsChan, func(event any) {
|
||||
atomic.AddInt32(×Handled, 1)
|
||||
})
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
eventsChan <- struct{}{}
|
||||
}
|
||||
require.Equal(t, int32(0), atomic.LoadInt32(×Handled), "Events must prevent from handler execution")
|
||||
|
||||
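Several of the test hunks above also use t.Context() (Go 1.24), which the testingcontext analyzer substitutes for context.Background(): it returns a Context that is canceled just before the test's cleanup functions run. A standalone sketch, not tied to the Prysm tests:

```go
package example_test

import "testing"

func TestWithContext(t *testing.T) {
	ctx := t.Context() // canceled automatically when the test finishes

	select {
	case <-ctx.Done():
		t.Fatal("context should still be live while the test runs")
	default:
		// ctx can be passed to code under test instead of context.Background().
	}
}
```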
@@ -93,9 +93,7 @@ func ExampleSubscriptionScope() {
	// Run a subscriber in the background.
	divsub := app.SubscribeResults('/', divs)
	mulsub := app.SubscribeResults('*', muls)
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
+	wg.Go(func() {
		defer fmt.Println("subscriber exited")
		defer divsub.Unsubscribe()
		defer mulsub.Unsubscribe()
@@ -111,7 +109,7 @@ func ExampleSubscriptionScope() {
				return
			}
		}
-	}()
+	})

	// Interact with the app.
	app.Calc('/', 22, 11)
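The waitgroup analyzer uses sync.WaitGroup.Go (Go 1.25), which bundles the Add(1) / go func() / defer Done() dance into one call, as in the hunk above. A minimal sketch, independent of the example:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := range 3 {
		// Go increments the counter, runs the function in a new goroutine,
		// and calls Done when it returns.
		wg.Go(func() {
			fmt.Println("worker", i)
		})
	}
	wg.Wait()
}
```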
@@ -26,7 +26,7 @@ func ExampleNewSubscription() {
|
||||
// Create a subscription that sends 10 integers on ch.
|
||||
ch := make(chan int)
|
||||
sub := event.NewSubscription(func(quit <-chan struct{}) error {
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
select {
|
||||
case ch <- i:
|
||||
case <-quit:
|
||||
|
||||
@@ -3,6 +3,6 @@ package event
|
||||
// SubscriberSender is an abstract representation of an *event.Feed
|
||||
// to use in describing types that accept or return an *event.Feed.
|
||||
type SubscriberSender interface {
|
||||
Subscribe(channel interface{}) Subscription
|
||||
Send(value interface{}) (nsent int)
|
||||
Subscribe(channel any) Subscription
|
||||
Send(value any) (nsent int)
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ var errInts = errors.New("error in subscribeInts")
|
||||
|
||||
func subscribeInts(max, fail int, c chan<- int) Subscription {
|
||||
return NewSubscription(func(quit <-chan struct{}) error {
|
||||
for i := 0; i < max; i++ {
|
||||
for i := range max {
|
||||
if i >= fail {
|
||||
return errInts
|
||||
}
|
||||
@@ -50,7 +50,7 @@ func TestNewSubscriptionError(t *testing.T) {
|
||||
channel := make(chan int)
|
||||
sub := subscribeInts(10, 2, channel)
|
||||
loop:
|
||||
for want := 0; want < 10; want++ {
|
||||
for want := range 10 {
|
||||
select {
|
||||
case got := <-channel:
|
||||
require.Equal(t, want, got)
|
||||
|
||||
@@ -107,15 +107,13 @@ func TestLockUnlock(_ *testing.T) {
|
||||
|
||||
func TestLockUnlock_CleansUnused(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
wg.Go(func() {
|
||||
lock := NewMultilock("dog", "cat", "owl")
|
||||
lock.Lock()
|
||||
assert.Equal(t, 3, len(locks.list))
|
||||
lock.Unlock()
|
||||
|
||||
wg.Done()
|
||||
}()
|
||||
})
|
||||
wg.Wait()
|
||||
// We expect that unlocking completely cleared the locks list
|
||||
// given all 3 lock keys were unused at time of unlock.
|
||||
|
||||
@@ -9,14 +9,14 @@ import (
|
||||
// WorkerResults are the results of a scatter worker.
|
||||
type WorkerResults struct {
|
||||
Offset int
|
||||
Extent interface{}
|
||||
Extent any
|
||||
}
|
||||
|
||||
// Scatter scatters a computation across multiple goroutines.
|
||||
// This breaks the task in to a number of chunks and executes those chunks in parallel with the function provided.
|
||||
// Results returned are collected and presented as a set of WorkerResults, which can be reassembled by the calling function.
|
||||
// Any error that occurs in the workers will be passed back to the calling function.
|
||||
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, error)) ([]*WorkerResults, error) {
|
||||
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (any, error)) ([]*WorkerResults, error) {
|
||||
if inputLen <= 0 {
|
||||
return nil, errors.New("input length must be greater than 0")
|
||||
}
|
||||
|
||||
@@ -46,9 +46,9 @@ func TestDouble(t *testing.T) {
|
||||
inValues[i] = i
|
||||
}
|
||||
outValues := make([]int, test.inValues)
|
||||
workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
|
||||
workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (any, error) {
|
||||
extent := make([]int, entries)
|
||||
for i := 0; i < entries; i++ {
|
||||
for i := range entries {
|
||||
extent[i] = inValues[offset+i] * 2
|
||||
}
|
||||
return extent, nil
|
||||
@@ -72,8 +72,8 @@ func TestDouble(t *testing.T) {
|
||||
func TestMutex(t *testing.T) {
|
||||
totalRuns := 1048576
|
||||
val := 0
|
||||
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
|
||||
for i := 0; i < entries; i++ {
|
||||
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (any, error) {
|
||||
for range entries {
|
||||
mu.Lock()
|
||||
val++
|
||||
mu.Unlock()
|
||||
@@ -90,8 +90,8 @@ func TestMutex(t *testing.T) {
|
||||
func TestError(t *testing.T) {
|
||||
totalRuns := 1024
|
||||
val := 0
|
||||
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
|
||||
for i := 0; i < entries; i++ {
|
||||
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (any, error) {
|
||||
for range entries {
|
||||
mu.Lock()
|
||||
val++
|
||||
if val == 1011 {
|
||||
|
||||
@@ -70,7 +70,7 @@ func TestVerifyBlobKZGProofBatch(t *testing.T) {
|
||||
commitments := make([][]byte, blobCount)
|
||||
proofs := make([][]byte, blobCount)
|
||||
|
||||
for i := 0; i < blobCount; i++ {
|
||||
for i := range blobCount {
|
||||
blob := random.GetRandBlob(int64(i))
|
||||
commitment, proof, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
@@ -432,8 +432,8 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
|
||||
commitments[1] = make([]byte, 32) // Wrong size
|
||||
|
||||
// Add cell proofs for both blobs
|
||||
for i := 0; i < blobCount; i++ {
|
||||
for j := uint64(0); j < numberOfColumns; j++ {
|
||||
for range blobCount {
|
||||
for range numberOfColumns {
|
||||
allCellProofs = append(allCellProofs, make([]byte, 48))
|
||||
}
|
||||
}
|
||||
@@ -450,7 +450,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
|
||||
commitments := make([][]byte, blobCount)
|
||||
var allCellProofs [][]byte
|
||||
|
||||
for i := 0; i < blobCount; i++ {
|
||||
for i := range blobCount {
|
||||
randBlob := random.GetRandBlob(int64(i))
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
@@ -461,7 +461,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
|
||||
commitments[i] = commitment[:]
|
||||
|
||||
// Add cell proofs - make some invalid in the second blob
|
||||
for j := uint64(0); j < numberOfColumns; j++ {
|
||||
for j := range numberOfColumns {
|
||||
if i == 1 && j == 64 {
|
||||
// Invalid proof size in middle of second blob's proofs
|
||||
allCellProofs = append(allCellProofs, make([]byte, 20))
|
||||
|
||||
@@ -209,16 +209,14 @@ func TestService_GetAttPreState_Concurrency(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
errChan := make(chan error, 1000)
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for range 1000 {
|
||||
wg.Go(func() {
|
||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: ckRoot}
|
||||
_, err := service.getAttPreState(ctx, cp1)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
}
|
||||
}()
|
||||
})
|
||||
}
|
||||
|
||||
go func() {
|
||||
|
||||
@@ -817,7 +817,7 @@ func (s *Service) areDataColumnsAvailable(
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
var missingIndices interface{} = "all"
|
||||
var missingIndices any = "all"
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
missingIndicesCount := uint64(len(missing))
|
||||
|
||||
|
||||
@@ -147,7 +147,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
bState := st.Copy()
|
||||
|
||||
var blks []consensusblocks.ROBlock
|
||||
for i := 0; i < 97; i++ {
|
||||
for i := range 97 {
|
||||
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
@@ -1323,7 +1323,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
logHook := logTest.NewGlobal()
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
fc := ðpb.Checkpoint{}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, wsb1.Block().ParentRoot(), [32]byte{}, [32]byte{}, fc, fc)
|
||||
require.NoError(t, err)
|
||||
@@ -1949,7 +1949,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.Equal(t, true, optimistic)
|
||||
|
||||
// Check that the invalid blocks are not in database
|
||||
for i := 0; i < 19-13; i++ {
|
||||
for i := range 19 - 13 {
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, invalidRoots[i]))
|
||||
}
|
||||
|
||||
@@ -2879,7 +2879,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
|
||||
|
||||
// set a better sync aggregate
|
||||
scb := make([]byte, 64)
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := range 5 {
|
||||
scb[i] = 0x01
|
||||
}
|
||||
oldUpdate.SetSyncAggregate(ðpb.SyncAggregate{
|
||||
|
||||
@@ -216,13 +216,11 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
wg.Go(func() {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil))
|
||||
wg.Done()
|
||||
}()
|
||||
})
|
||||
wg.Wait()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
|
||||
@@ -412,8 +412,7 @@ func BenchmarkHasBlockDB(b *testing.B) {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
require.Equal(b, true, s.cfg.BeaconDB.HasBlock(ctx, r), "Block is not in DB")
|
||||
}
|
||||
}
|
||||
@@ -432,8 +431,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,7 +106,7 @@ type EventFeedWrapper struct {
|
||||
subscribed chan struct{} // this channel is closed once a subscription is made
|
||||
}
|
||||
|
||||
func (w *EventFeedWrapper) Subscribe(channel interface{}) event.Subscription {
|
||||
func (w *EventFeedWrapper) Subscribe(channel any) event.Subscription {
|
||||
select {
|
||||
case <-w.subscribed:
|
||||
break // already closed
|
||||
@@ -116,7 +116,7 @@ func (w *EventFeedWrapper) Subscribe(channel interface{}) event.Subscription {
|
||||
return w.feed.Subscribe(channel)
|
||||
}
|
||||
|
||||
func (w *EventFeedWrapper) Send(value interface{}) int {
|
||||
func (w *EventFeedWrapper) Send(value any) int {
|
||||
return w.feed.Send(value)
|
||||
}
|
||||
|
||||
|
||||
@@ -166,7 +166,7 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
|
||||
indexToRegistration := make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1)
|
||||
|
||||
valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
|
||||
for i := 0; i < len(reg); i++ {
|
||||
for i := range reg {
|
||||
r := reg[i]
|
||||
nx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(r.Message.Pubkey))
|
||||
if !exists {
|
||||
|
||||
beacon-chain/cache/active_balance_test.go (vendored, 4 lines changed)
@@ -17,7 +17,7 @@ import (
|
||||
|
||||
func TestBalanceCache_AddGetBalance(t *testing.T) {
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, uint64(i))
|
||||
blockRoots[i] = b
|
||||
@@ -61,7 +61,7 @@ func TestBalanceCache_AddGetBalance(t *testing.T) {
|
||||
|
||||
func TestBalanceCache_BalanceKey(t *testing.T) {
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, uint64(i))
|
||||
blockRoots[i] = b
|
||||
|
||||
beacon-chain/cache/committee.go (vendored, 2 lines changed)
@@ -51,7 +51,7 @@ type CommitteeCache struct {
|
||||
}
|
||||
|
||||
// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
|
||||
func committeeKeyFn(obj interface{}) (string, error) {
|
||||
func committeeKeyFn(obj any) (string, error) {
|
||||
info, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return "", ErrNotCommittee
|
||||
|
||||
beacon-chain/cache/committee_fuzz_test.go (vendored, 6 lines changed)
@@ -14,7 +14,7 @@ func TestCommitteeKeyFuzz_OK(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
c := &Committees{}
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
for range 100000 {
|
||||
fuzzer.Fuzz(c)
|
||||
k, err := committeeKeyFn(c)
|
||||
require.NoError(t, err)
|
||||
@@ -27,7 +27,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
c := &Committees{}
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
for range 100000 {
|
||||
fuzzer.Fuzz(c)
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
|
||||
_, err := cache.Committee(t.Context(), 0, c.Seed, 0)
|
||||
@@ -42,7 +42,7 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
c := &Committees{}
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
for range 100000 {
|
||||
fuzzer.Fuzz(c)
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
|
||||
|
||||
|
||||
beacon-chain/cache/common.go (vendored, 2 lines changed)
@@ -17,6 +17,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
|
||||
}
|
||||
|
||||
// popProcessNoopFunc is a no-op function that never returns an error.
|
||||
func popProcessNoopFunc(_ interface{}, _ bool) error {
|
||||
func popProcessNoopFunc(_ any, _ bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -769,7 +769,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
|
||||
}
|
||||
|
||||
var ctrs []*ethpb.DepositContainer
|
||||
for i := 0; i < 2000; i++ {
|
||||
for i := range 2000 {
|
||||
ctrs = append(ctrs, generateCtr(uint64(10+(i/2)), int64(i)))
|
||||
}
|
||||
|
||||
@@ -948,9 +948,9 @@ func rootCreator(rn byte) []byte {
|
||||
func BenchmarkDepositTree_InsertNewImplementation(b *testing.B) {
|
||||
totalDeposits := 10000
|
||||
input := bytesutil.ToBytes32([]byte("foo"))
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
dt := NewDepositTree()
|
||||
for j := 0; j < totalDeposits; j++ {
|
||||
for range totalDeposits {
|
||||
err := dt.Insert(input[:], 0)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -959,10 +959,10 @@ func BenchmarkDepositTree_InsertNewImplementation(b *testing.B) {
|
||||
func BenchmarkDepositTree_InsertOldImplementation(b *testing.B) {
|
||||
totalDeposits := 10000
|
||||
input := bytesutil.ToBytes32([]byte("foo"))
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
dt, err := trie.NewTrie(33)
|
||||
require.NoError(b, err)
|
||||
for j := 0; j < totalDeposits; j++ {
|
||||
for range totalDeposits {
|
||||
err := dt.Insert(input[:], 0)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -980,8 +980,8 @@ func BenchmarkDepositTree_HashTreeRootNewImplementation(b *testing.B) {
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
_, err = tr.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -999,8 +999,8 @@ func BenchmarkDepositTree_HashTreeRootOldImplementation(b *testing.B) {
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
_, err = dt.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ func (ds *DepositTreeSnapshot) CalculateRoot() ([32]byte, error) {
|
||||
size := ds.depositCount
|
||||
index := len(ds.finalized)
|
||||
root := trie.ZeroHashes[0]
|
||||
for i := 0; i < DepositContractDepth; i++ {
|
||||
for i := range DepositContractDepth {
|
||||
if (size & 1) == 1 {
|
||||
if index == 0 {
|
||||
break
|
||||
|
||||
beacon-chain/cache/skip_slot_cache_test.go (vendored, 6 lines changed)
@@ -47,15 +47,13 @@ func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
|
||||
|
||||
c.Enable()
|
||||
wg := new(sync.WaitGroup)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
wg.Go(func() {
|
||||
// Get call will only terminate when
|
||||
// it is not longer in progress.
|
||||
obj, err := c.Get(ctx, r)
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, obj)
|
||||
wg.Done()
|
||||
}()
|
||||
})
|
||||
|
||||
c.MarkNotInProgress(r)
|
||||
wg.Wait()
|
||||
|
||||
beacon-chain/cache/sync_committee.go (vendored, 2 lines changed)
@@ -236,7 +236,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
|
||||
// Given the `syncCommitteeIndexPosition` object, this returns the key of the object.
|
||||
// The key is the `currentSyncCommitteeRoot` within the field.
|
||||
// Error gets returned if input does not comply with `currentSyncCommitteeRoot` object.
|
||||
func keyFn(obj interface{}) (string, error) {
|
||||
func keyFn(obj any) (string, error) {
|
||||
info, ok := obj.(*syncCommitteeIndexPosition)
|
||||
if !ok {
|
||||
return "", errNotSyncCommitteeIndexPosition
|
||||
|
||||
beacon-chain/cache/sync_subnet_ids_test.go (vendored, 8 lines changed)
@@ -12,12 +12,12 @@ import (
|
||||
func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
|
||||
c := newSyncSubnetIDs()
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
for i := range 20 {
|
||||
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
|
||||
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
|
||||
}
|
||||
|
||||
for i := uint64(0); i < 20; i++ {
|
||||
for i := range uint64(20) {
|
||||
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
|
||||
|
||||
idxs, _, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
|
||||
@@ -34,7 +34,7 @@ func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
|
||||
func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
|
||||
c := newSyncSubnetIDs()
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
for i := range 20 {
|
||||
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
|
||||
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
|
||||
}
|
||||
@@ -42,7 +42,7 @@ func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
|
||||
coms := c.GetAllSubnets(50)
|
||||
assert.Equal(t, 0, len(coms))
|
||||
|
||||
for i := uint64(0); i < 20; i++ {
|
||||
for i := range uint64(20) {
|
||||
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
|
||||
|
||||
_, jEpoch, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
|
||||
|
||||
@@ -461,7 +461,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
st := ðpb.BeaconStateAltair{}
|
||||
b := ðpb.SignedBeaconBlockAltair{Block: ðpb.BeaconBlockAltair{}}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(st)
|
||||
fuzzer.Fuzz(b)
|
||||
if b.Block == nil {
|
||||
|
||||
@@ -240,7 +240,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
|
||||
proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), beaconState)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < len(syncBits); i++ {
|
||||
for i := range syncBits {
|
||||
if syncBits.BitAt(uint64(i)) {
|
||||
pk := bytesutil.ToBytes48(committeeKeys[i])
|
||||
require.DeepEqual(t, true, votedMap[pk])
|
||||
|
||||
@@ -195,10 +195,7 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
//	withdrawable_epoch=FAR_FUTURE_EPOCH,
//	)
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
-	effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
-	if params.BeaconConfig().MaxEffectiveBalance < effectiveBalance {
-		effectiveBalance = params.BeaconConfig().MaxEffectiveBalance
-	}
+	effectiveBalance := min(params.BeaconConfig().MaxEffectiveBalance, amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement))

	return &ethpb.Validator{
		PublicKey: pubKey,
@@ -16,7 +16,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
state := ðpb.BeaconStateAltair{}
|
||||
deposits := make([]*ethpb.Deposit, 100)
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
for i := range deposits {
|
||||
fuzzer.Fuzz(deposits[i])
|
||||
@@ -37,7 +37,7 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
|
||||
deposit := ðpb.Deposit{}
|
||||
ctx := t.Context()
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafeAltair(state)
|
||||
@@ -56,7 +56,7 @@ func TestFuzzProcessPreGenesisDeposit_Phase0_10000(t *testing.T) {
|
||||
deposit := ðpb.Deposit{}
|
||||
ctx := t.Context()
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -74,7 +74,7 @@ func TestFuzzProcessDeposit_Phase0_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
deposit := ðpb.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -92,7 +92,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
state := ðpb.BeaconStateAltair{}
|
||||
deposit := ðpb.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafeAltair(state)
|
||||
|
||||
@@ -122,11 +122,8 @@ func ProcessInactivityScores(
	}

	if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
-		score := recoveryRate
-		// Prevents underflow below 0.
-		if score > v.InactivityScore {
-			score = v.InactivityScore
-		}
+		score := min(recoveryRate, v.InactivityScore)
		v.InactivityScore -= score
	}
	inactivityScores[i] = v.InactivityScore
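The minmax analyzer collapses hand-written comparisons like the one removed above into the generic min()/max() builtins introduced in Go 1.21, which return the smaller or larger of their ordered arguments. A tiny example with arbitrary values:

```go
package main

import "fmt"

func main() {
	recoveryRate, score := uint64(16), uint64(9)
	// min returns the smaller argument, replacing the explicit underflow guard.
	fmt.Println(min(recoveryRate, score)) // 9
}
```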
@@ -242,7 +239,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
|
||||
}
|
||||
|
||||
balances := beaconState.Balances()
|
||||
for i := 0; i < numOfVals; i++ {
|
||||
for i := range numOfVals {
|
||||
vals[i].BeforeEpochTransitionBalance = balances[i]
|
||||
|
||||
// Compute the post balance of the validator after accounting for the
|
||||
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
getState := func(t *testing.T, count uint64, vers int) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
@@ -113,7 +113,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
getState := func(t *testing.T, count uint64) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
@@ -147,7 +147,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
|
||||
func TestSyncCommittee_CanGet(t *testing.T) {
|
||||
getState := func(t *testing.T, count uint64) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
blsKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -394,7 +394,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
|
||||
|
||||
func getState(t *testing.T, count uint64) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
blsKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
validators[i] = ðpb.Validator{
|
||||
|
||||
@@ -33,7 +33,7 @@ func TestTranslateParticipation(t *testing.T) {
|
||||
r, err := helpers.BlockRootAtSlot(s, 0)
|
||||
require.NoError(t, err)
|
||||
var pendingAtts []*ethpb.PendingAttestation
|
||||
for i := 0; i < 3; i++ {
|
||||
for i := range 3 {
|
||||
pendingAtts = append(pendingAtts, ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: primitives.CommitteeIndex(i),
|
||||
|
||||
@@ -257,7 +257,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
|
||||
}
|
||||
indices := indexedAtt.GetAttestingIndices()
|
||||
var pubkeys []bls.PublicKey
|
||||
for i := 0; i < len(indices); i++ {
|
||||
for i := range indices {
|
||||
pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))
|
||||
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:])
|
||||
if err != nil {
|
||||
|
||||
@@ -317,7 +317,7 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
|
||||
func TestConvertToIndexed_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -373,7 +373,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
@@ -481,7 +481,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
|
||||
list := bitfield.Bitlist{0b11111}
|
||||
var atts []ethpb.Att
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
for range uint64(1000) {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 1,
|
||||
@@ -498,7 +498,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
|
||||
atts = []ethpb.Att{}
|
||||
list = bitfield.Bitlist{0b10000}
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
for range uint64(1000) {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 1,
|
||||
@@ -524,7 +524,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
@@ -588,7 +588,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
@@ -707,7 +707,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
|
||||
@@ -21,7 +21,7 @@ func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
att := ðpb.Attestation{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(att)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -37,7 +37,7 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
block := ðpb.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
|
||||
@@ -63,7 +63,7 @@ func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
|
||||
var p []byte
|
||||
var s []byte
|
||||
var d []byte
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(&ba)
|
||||
fuzzer.Fuzz(&pubkey)
|
||||
fuzzer.Fuzz(&sig)
|
||||
@@ -83,7 +83,7 @@ func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
|
||||
e := ðpb.Eth1Data{}
|
||||
state, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(e)
|
||||
s, err := ProcessEth1DataInBlock(t.Context(), state, e)
|
||||
@@ -98,7 +98,7 @@ func TestFuzzareEth1DataEqual_10000(_ *testing.T) {
|
||||
eth1data := ðpb.Eth1Data{}
|
||||
eth1data2 := ðpb.Eth1Data{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(eth1data)
|
||||
fuzzer.Fuzz(eth1data2)
|
||||
AreEth1DataEqual(eth1data, eth1data2)
|
||||
@@ -110,7 +110,7 @@ func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
eth1data := ðpb.Eth1Data{}
|
||||
var stateVotes []*ethpb.Eth1Data
|
||||
for i := 0; i < 100000; i++ {
|
||||
for i := range 100000 {
|
||||
fuzzer.Fuzz(eth1data)
|
||||
fuzzer.Fuzz(&stateVotes)
|
||||
s, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
@@ -129,7 +129,7 @@ func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
block := ðpb.BeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -145,7 +145,7 @@ func TestFuzzProcessRandao_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
b := ðpb.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -168,7 +168,7 @@ func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
blockBody := ðpb.BeaconBlockBody{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -186,7 +186,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
p := ðpb.ProposerSlashing{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(p)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -203,7 +203,7 @@ func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconState{}
|
||||
proposerSlashing := ðpb.ProposerSlashing{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(proposerSlashing)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -219,7 +219,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
a := ðpb.AttesterSlashing{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(a)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -237,7 +237,7 @@ func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
attesterSlashing := ðpb.AttesterSlashing{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attesterSlashing)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -253,7 +253,7 @@ func TestFuzzIsSlashableAttestationData_10000(_ *testing.T) {
|
||||
attestationData := ðpb.AttestationData{}
|
||||
attestationData2 := ðpb.AttestationData{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(attestationData)
|
||||
fuzzer.Fuzz(attestationData2)
|
||||
IsSlashableAttestationData(attestationData, attestationData2)
|
||||
@@ -264,7 +264,7 @@ func TestFuzzslashableAttesterIndices_10000(_ *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
attesterSlashing := ðpb.AttesterSlashing{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(attesterSlashing)
|
||||
SlashableAttesterIndices(attesterSlashing)
|
||||
}
|
||||
@@ -275,7 +275,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
b := ðpb.SignedBeaconBlock{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -298,7 +298,7 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
idxAttestation := ðpb.IndexedAttestation{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(idxAttestation)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -313,7 +313,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconState{}
|
||||
deposit := ðpb.Deposit{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -329,7 +329,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
e := ðpb.SignedVoluntaryExit{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(e)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -346,7 +346,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconState{}
|
||||
e := ðpb.SignedVoluntaryExit{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(e)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -366,7 +366,7 @@ func TestFuzzVerifyExit_10000(t *testing.T) {
|
||||
fork := ðpb.Fork{}
|
||||
var slot primitives.Slot
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(ve)
|
||||
fuzzer.Fuzz(rawVal)
|
||||
fuzzer.Fuzz(fork)
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
|
||||
func FakeDeposits(n uint64) []*ethpb.Eth1Data {
|
||||
deposits := make([]*ethpb.Eth1Data, n)
|
||||
for i := uint64(0); i < n; i++ {
|
||||
for i := range n {
|
||||
deposits[i] = ðpb.Eth1Data{
|
||||
DepositCount: 1,
|
||||
DepositRoot: bytesutil.PadTo([]byte("root"), 32),
|
||||
@@ -175,7 +175,7 @@ func TestProcessEth1Data_SetsCorrectly(t *testing.T) {
|
||||
}
|
||||
|
||||
period := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))
|
||||
for i := uint64(0); i < period; i++ {
|
||||
for range period {
|
||||
processedState, err := blocks.ProcessEth1DataInBlock(t.Context(), beaconState, b.Block.Body.Eth1Data)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, processedState.Version() == version.Phase0)
|
||||
|
||||
@@ -27,7 +27,7 @@ func init() {
|
||||
|
||||
func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -104,7 +104,7 @@ func TestProcessBlockHeader_WrongProposerSig(t *testing.T) {
|
||||
|
||||
func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -148,7 +148,7 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
|
||||
|
||||
func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -189,7 +189,7 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
|
||||
|
||||
func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -233,7 +233,7 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
|
||||
|
||||
func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -293,7 +293,7 @@ func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
|
||||
func TestBlockSignatureSet_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
|
||||
@@ -851,8 +851,7 @@ func BenchmarkBellatrixComplete(b *testing.B) {
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, st.SetLatestExecutionPayloadHeader(h))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
_, err := blocks.IsMergeTransitionComplete(st)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Val
|
||||
ActivationEpoch: primitives.Epoch(0),
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: []byte(fmt.Sprintf("val_%d", i)),
|
||||
PublicKey: fmt.Appendf(nil, "val_%d", i),
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawalCredentials: wd,
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
state := ðpb.BeaconStateElectra{}
|
||||
deposits := make([]*ethpb.Deposit, 100)
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
for i := range deposits {
|
||||
fuzzer.Fuzz(deposits[i])
|
||||
@@ -36,7 +36,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
state := ðpb.BeaconStateElectra{}
|
||||
deposit := ðpb.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(state)
|
||||
|
||||
@@ -95,7 +95,7 @@ func TestProcessPendingDeposits(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(100), res)
|
||||
// Validators 0..9 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 10; i++ {
|
||||
for i := range primitives.ValidatorIndex(10) {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/10, b)
|
||||
@@ -122,7 +122,7 @@ func TestProcessPendingDeposits(t *testing.T) {
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
// Validators 0..9 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 2; i++ {
|
||||
for i := range primitives.ValidatorIndex(2) {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing), b)
|
||||
@@ -149,7 +149,7 @@ func TestProcessPendingDeposits(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(0), res)
|
||||
// Validators 0..4 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 4; i++ {
|
||||
for i := range primitives.ValidatorIndex(4) {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/5, b)
|
||||
@@ -528,7 +528,7 @@ func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
|
||||
|
||||
vals := make([]*eth.Validator, numVals)
|
||||
bals := make([]uint64, numVals)
|
||||
for i := uint64(0); i < numVals; i++ {
|
||||
for i := range numVals {
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(i)
|
||||
|
||||
@@ -56,7 +56,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ð.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
for range uint64(10) {
|
||||
base.Validators = append(base.Validators, ð.Validator{
|
||||
ActivationEligibilityEpoch: finalizedEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
@@ -82,7 +82,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ð.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
for range uint64(10) {
|
||||
base.Validators = append(base.Validators, ð.Validator{
|
||||
EffectiveBalance: params.BeaconConfig().EjectionBalance - 1,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -108,7 +108,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ð.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
for range uint64(10) {
|
||||
base.Validators = append(base.Validators, ð.Validator{
|
||||
EffectiveBalance: params.BeaconConfig().EjectionBalance - 1,
|
||||
ExitEpoch: 10,
|
||||
@@ -157,7 +157,7 @@ func Benchmark_ProcessRegistryUpdates_MassEjection(b *testing.B) {
|
||||
st, err := util.NewBeaconStateElectra()
|
||||
require.NoError(b, err)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
b.StopTimer()
|
||||
if err := st.SetValidators(genValidators(100000)); err != nil {
|
||||
panic(err)
|
||||
|
||||
@@ -329,10 +329,7 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) (state.BeaconState, er
|
||||
balance := bals[idx]
|
||||
|
||||
if balance+downwardThreshold < val.EffectiveBalance() || val.EffectiveBalance()+upwardThreshold < balance {
|
||||
effectiveBal := maxEffBalance
|
||||
if effectiveBal > balance-balance%effBalanceInc {
|
||||
effectiveBal = balance - balance%effBalanceInc
|
||||
}
|
||||
effectiveBal := min(maxEffBalance, balance-balance%effBalanceInc)
|
||||
if effectiveBal != val.EffectiveBalance() {
|
||||
newVal = val.Copy()
|
||||
newVal.EffectiveBalance = effectiveBal
|
||||
|
||||
@@ -14,7 +14,7 @@ func TestFuzzFinalUpdates_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
base := ðpb.BeaconState{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(base)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -218,7 +218,7 @@ func TestProcessRegistryUpdates_EligibleToActivate_Cancun(t *testing.T) {
|
||||
cfg.ChurnLimitQuotient = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
for range uint64(10) {
|
||||
base.Validators = append(base.Validators, ðpb.Validator{
|
||||
ActivationEligibilityEpoch: finalizedEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
@@ -314,28 +314,28 @@ func TestProcessRegistryUpdates_CanExits(t *testing.T) {
|
||||
|
||||
func buildState(t testing.TB, slot primitives.Slot, validatorCount uint64) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
}
|
||||
validatorBalances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
for i := range validatorBalances {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
latestActiveIndexRoots := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestActiveIndexRoots); i++ {
|
||||
for i := range latestActiveIndexRoots {
|
||||
latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
latestRandaoMixes := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestRandaoMixes); i++ {
|
||||
for i := range latestRandaoMixes {
|
||||
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
s, err := util.NewBeaconState()
|
||||
|
||||
@@ -19,7 +19,7 @@ func TestProcessJustificationAndFinalizationPreCompute_ConsecutiveEpochs(t *test
|
||||
e := params.BeaconConfig().FarFutureEpoch
|
||||
a := params.BeaconConfig().MaxEffectiveBalance
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = []byte{byte(i)}
|
||||
}
|
||||
base := ðpb.BeaconState{
|
||||
@@ -56,7 +56,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyCurrentEpoch(t *te
|
||||
e := params.BeaconConfig().FarFutureEpoch
|
||||
a := params.BeaconConfig().MaxEffectiveBalance
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = []byte{byte(i)}
|
||||
}
|
||||
base := ðpb.BeaconState{
|
||||
@@ -93,7 +93,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
|
||||
e := params.BeaconConfig().FarFutureEpoch
|
||||
a := params.BeaconConfig().MaxEffectiveBalance
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = []byte{byte(i)}
|
||||
}
|
||||
base := ðpb.BeaconState{
|
||||
@@ -128,7 +128,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
|
||||
func TestUnrealizedCheckpoints(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
balances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
|
||||
@@ -42,7 +42,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
|
||||
return nil, errors.Wrap(err, "could not get proposer attestation delta")
|
||||
}
|
||||
validatorBals := state.Balances()
|
||||
for i := 0; i < numOfVals; i++ {
|
||||
for i := range numOfVals {
|
||||
vp[i].BeforeEpochTransitionBalance = validatorBals[i]
|
||||
|
||||
// Compute the post balance of the validator after accounting for the
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
|
||||
validatorCount := uint64(2048)
|
||||
base := buildState(e+3, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
for i := 0; i < len(atts); i++ {
|
||||
for i := range atts {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
@@ -63,7 +63,7 @@ func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
|
||||
base := buildState(e+2, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
var emptyRoot [32]byte
|
||||
for i := 0; i < len(atts); i++ {
|
||||
for i := range atts {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
@@ -99,7 +99,7 @@ func TestAttestationDeltas_ZeroInclusionDelay(t *testing.T) {
|
||||
base := buildState(e+2, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
var emptyRoot [32]byte
|
||||
for i := 0; i < len(atts); i++ {
|
||||
for i := range atts {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
@@ -131,7 +131,7 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
|
||||
validatorCount := uint64(2048)
|
||||
base := buildState(e+3, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
for i := 0; i < len(atts); i++ {
|
||||
for i := range atts {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
@@ -176,28 +176,28 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
|
||||
|
||||
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
}
|
||||
validatorBalances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
for i := range validatorBalances {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
latestActiveIndexRoots := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestActiveIndexRoots); i++ {
|
||||
for i := range latestActiveIndexRoots {
|
||||
latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
latestRandaoMixes := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestRandaoMixes); i++ {
|
||||
for i := range latestRandaoMixes {
|
||||
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
return ðpb.BeaconState{
|
||||
|
||||
@@ -17,5 +17,5 @@ type Event struct {
|
||||
// Type is the type of event.
|
||||
Type EventType
|
||||
// Data is event-specific data.
|
||||
Data interface{}
|
||||
Data any
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
|
||||
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
|
||||
@@ -5,7 +5,7 @@ package helpers
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
@@ -515,9 +515,7 @@ func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState,
|
||||
// used for failing verify signature fallback.
|
||||
sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
|
||||
copy(sortedIndices, shuffledIndices)
|
||||
sort.Slice(sortedIndices, func(i, j int) bool {
|
||||
return sortedIndices[i] < sortedIndices[j]
|
||||
})
|
||||
slices.Sort(sortedIndices)
|
||||
|
||||
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
|
||||
ShuffledIndices: shuffledIndices,
|
||||
|
||||
@@ -29,7 +29,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
|
||||
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -122,7 +122,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
var activationEpoch primitives.Epoch
|
||||
if i >= len(validators)/2 {
|
||||
activationEpoch = 3
|
||||
@@ -151,7 +151,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
validatorIndices := make([]primitives.ValidatorIndex, len(validators))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
// First 2 epochs only half validators are activated.
|
||||
var activationEpoch primitives.Epoch
|
||||
if i >= len(validators)/2 {
|
||||
@@ -234,7 +234,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
|
||||
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
// First 2 epochs only half validators are activated.
|
||||
var activationEpoch primitives.Epoch
|
||||
if i >= len(validators)/2 {
|
||||
@@ -266,7 +266,7 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
|
||||
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -287,7 +287,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
|
||||
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -323,7 +323,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
|
||||
func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
activeRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -489,7 +489,7 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
|
||||
|
||||
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 300000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -512,8 +512,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
for b.Loop() {
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -523,7 +522,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
|
||||
|
||||
func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 3000000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -546,8 +545,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
for b.Loop() {
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -557,7 +555,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
|
||||
|
||||
func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 128000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -576,8 +574,8 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
|
||||
|
||||
i := uint64(0)
|
||||
index := uint64(0)
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
|
||||
for b.Loop() {
|
||||
i++
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
@@ -592,7 +590,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
|
||||
|
||||
func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 1000000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -611,8 +609,8 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
|
||||
|
||||
i := uint64(0)
|
||||
index := uint64(0)
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
|
||||
for b.Loop() {
|
||||
i++
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
@@ -627,7 +625,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
|
||||
|
||||
func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 4000000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -646,8 +644,8 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
|
||||
|
||||
i := uint64(0)
|
||||
index := uint64(0)
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
|
||||
for b.Loop() {
|
||||
i++
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
@@ -663,7 +661,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
|
||||
func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
|
||||
committeeSize := uint64(16)
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(committeeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -688,7 +686,7 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
|
||||
|
||||
func TestPrecomputeProposerIndices_Ok(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -732,7 +730,7 @@ func TestAttestationCommitteesFromState(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -768,7 +766,7 @@ func TestAttestationCommitteesFromCache(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -934,7 +932,7 @@ func TestInitializeProposerLookahead_RegressionTest(t *testing.T) {
|
||||
proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, state, epoch)
|
||||
require.NoError(t, err)
|
||||
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
|
||||
for epochOffset := primitives.Epoch(0); epochOffset < 2; epochOffset++ {
|
||||
for epochOffset := range primitives.Epoch(2) {
|
||||
targetEpoch := epoch + epochOffset
|
||||
|
||||
activeIndices, err := helpers.ActiveValidatorIndices(ctx, state, targetEpoch)
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
|
||||
func TestRandaoMix_OK(t *testing.T) {
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
intInBytes := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(intInBytes, uint64(i))
|
||||
randaoMixes[i] = intInBytes
|
||||
@@ -52,7 +52,7 @@ func TestRandaoMix_OK(t *testing.T) {
|
||||
|
||||
func TestRandaoMix_CopyOK(t *testing.T) {
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
intInBytes := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(intInBytes, uint64(i))
|
||||
randaoMixes[i] = intInBytes
|
||||
@@ -96,7 +96,7 @@ func TestGenerateSeed_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
intInBytes := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(intInBytes, uint64(i))
|
||||
randaoMixes[i] = intInBytes
|
||||
|
||||
@@ -239,28 +239,28 @@ func TestIsInInactivityLeak(t *testing.T) {
|
||||
|
||||
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
}
|
||||
validatorBalances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
for i := range validatorBalances {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
latestActiveIndexRoots := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestActiveIndexRoots); i++ {
|
||||
for i := range latestActiveIndexRoots {
|
||||
latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
latestRandaoMixes := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestRandaoMixes); i++ {
|
||||
for i := range latestRandaoMixes {
|
||||
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
return ðpb.BeaconState{
|
||||
|
||||
@@ -23,7 +23,7 @@ var maxShuffleListSize uint64 = 1 << 40
|
||||
func SplitIndices(l []uint64, n uint64) [][]uint64 {
|
||||
var divided [][]uint64
|
||||
var lSize = uint64(len(l))
|
||||
for i := uint64(0); i < n; i++ {
|
||||
for i := range n {
|
||||
start := slice.SplitOffset(lSize, n, i)
|
||||
end := slice.SplitOffset(lSize, n, i+1)
|
||||
divided = append(divided, l[start:end])
|
||||
@@ -103,10 +103,7 @@ func ComputeShuffledIndex(index primitives.ValidatorIndex, indexCount uint64, se
|
||||
pivot := hash8Int % indexCount
|
||||
flip := (pivot + indexCount - uint64(index)) % indexCount
|
||||
// Consider every pair only once by picking the highest pair index to retrieve randomness.
|
||||
position := uint64(index)
|
||||
if flip > position {
|
||||
position = flip
|
||||
}
|
||||
position := max(flip, uint64(index))
|
||||
// Add position except its last byte to []buf for randomness,
|
||||
// it will be used later to select a bit from the resulting hash.
|
||||
binary.LittleEndian.PutUint64(posBuffer[:8], position>>8)
|
||||
|
||||
@@ -30,7 +30,7 @@ func TestShuffleList_OK(t *testing.T) {
|
||||
var list1 []primitives.ValidatorIndex
|
||||
seed1 := [32]byte{1, 128, 12}
|
||||
seed2 := [32]byte{2, 128, 12}
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
list1 = append(list1, primitives.ValidatorIndex(i))
|
||||
}
|
||||
|
||||
@@ -55,7 +55,7 @@ func TestSplitIndices_OK(t *testing.T) {
|
||||
|
||||
var l []uint64
|
||||
numValidators := uint64(64000)
|
||||
for i := uint64(0); i < numValidators; i++ {
|
||||
for i := range numValidators {
|
||||
l = append(l, i)
|
||||
}
|
||||
split := SplitIndices(l, uint64(params.BeaconConfig().SlotsPerEpoch))
|
||||
@@ -104,7 +104,7 @@ func BenchmarkIndexComparison(b *testing.B) {
|
||||
seed := [32]byte{123, 42}
|
||||
for _, listSize := range listSizes {
|
||||
b.Run(fmt.Sprintf("Indexwise_ShuffleList_%d", listSize), func(ib *testing.B) {
|
||||
for i := 0; i < ib.N; i++ {
|
||||
for ib.Loop() {
|
||||
// Simulate a list-shuffle by running shuffle-index listSize times.
|
||||
for j := primitives.ValidatorIndex(0); uint64(j) < listSize; j++ {
|
||||
_, err := ShuffledIndex(j, listSize, seed)
|
||||
@@ -120,11 +120,11 @@ func BenchmarkShuffleList(b *testing.B) {
|
||||
seed := [32]byte{123, 42}
|
||||
for _, listSize := range listSizes {
|
||||
testIndices := make([]primitives.ValidatorIndex, listSize)
|
||||
for i := uint64(0); i < listSize; i++ {
|
||||
for i := range listSize {
|
||||
testIndices[i] = primitives.ValidatorIndex(i)
|
||||
}
|
||||
b.Run(fmt.Sprintf("ShuffleList_%d", listSize), func(ib *testing.B) {
|
||||
for i := 0; i < ib.N; i++ {
|
||||
for ib.Loop() {
|
||||
_, err := ShuffleList(testIndices, seed)
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
@@ -161,12 +161,12 @@ func TestSplitIndicesAndOffset_OK(t *testing.T) {
|
||||
|
||||
var l []uint64
|
||||
validators := uint64(64000)
|
||||
for i := uint64(0); i < validators; i++ {
|
||||
for i := range validators {
|
||||
l = append(l, i)
|
||||
}
|
||||
chunks := uint64(6)
|
||||
split := SplitIndices(l, chunks)
|
||||
for i := uint64(0); i < chunks; i++ {
|
||||
for i := range chunks {
|
||||
if !reflect.DeepEqual(split[i], l[slice.SplitOffset(uint64(len(l)), chunks, i):slice.SplitOffset(uint64(len(l)), chunks, i+1)]) {
|
||||
t.Errorf("Want: %v got: %v", l[slice.SplitOffset(uint64(len(l)), chunks, i):slice.SplitOffset(uint64(len(l)), chunks, i+1)], split[i])
|
||||
break
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestCurrentPeriodPositions(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -56,7 +56,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -87,7 +87,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -116,7 +116,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -144,7 +144,7 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -175,7 +175,7 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -203,7 +203,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -231,7 +231,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -262,7 +262,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -304,7 +304,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -332,7 +332,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -363,7 +363,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -391,7 +391,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -449,7 +449,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
|
||||
@@ -184,7 +184,7 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
|
||||
c.MinGenesisActiveValidatorCount = 16384
|
||||
params.OverrideBeaconConfig(c)
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/8)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -241,7 +241,7 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
|
||||
c.MinGenesisActiveValidatorCount = 16384
|
||||
params.OverrideBeaconConfig(c)
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/8)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -270,7 +270,7 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -322,7 +322,7 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
|
||||
|
||||
c := 1000
|
||||
validators := make([]*ethpb.Validator, c)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -357,7 +357,7 @@ func TestChurnLimit_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, test.validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -861,7 +861,7 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
|
||||
|
||||
validators := make([]*ethpb.Validator, 4)
|
||||
balances := make([]uint64, len(validators))
|
||||
for i := uint64(0); i < 4; i++ {
|
||||
for i := range uint64(4) {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, params.BeaconConfig().BLSPubkeyLength),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
|
||||
@@ -270,7 +270,7 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {
|
||||
|
||||
validators := make([]*ethpb.Validator, valCount)
|
||||
balances := make([]uint64, len(validators))
|
||||
for i := uint64(0); i < valCount; i++ {
|
||||
for i := range valCount {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, params.BeaconConfig().BLSPubkeyLength),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
|
||||
@@ -100,7 +100,7 @@ func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
|
||||
// Generate random KZG commitments `blobCount` blobs.
|
||||
kzgCommitments := make([][]byte, blobCount)
|
||||
|
||||
for i := 0; i < blobCount; i++ {
|
||||
for i := range blobCount {
|
||||
kzgCommitments[i] = make([]byte, 48)
|
||||
_, err := rand.Read(kzgCommitments[i])
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -216,7 +216,7 @@ func rotateRowsToCols(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, nu
|
||||
if len(cells) != len(proofs) {
|
||||
return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough proofs")
|
||||
}
|
||||
for j := uint64(0); j < numCols; j++ {
|
||||
for j := range numCols {
|
||||
if i == 0 {
|
||||
cellCols[j] = make([][]byte, len(cellsPerBlob))
|
||||
proofCols[j] = make([][]byte, len(cellsPerBlob))
|
||||
|
||||
@@ -119,7 +119,7 @@ func TestFuzzverifySigningRoot_10000(_ *testing.T) {
|
||||
var p []byte
|
||||
var s []byte
|
||||
var d []byte
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(st)
|
||||
fuzzer.Fuzz(&pubkey)
|
||||
fuzzer.Fuzz(&sig)
|
||||
|
||||
@@ -28,8 +28,7 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
|
||||
block, err := benchmark.PreGenFullBlock()
|
||||
require.NoError(b, err)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for i := 0; b.Loop(); i++ {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(b, err)
|
||||
_, err = coreState.ExecuteStateTransition(b.Context(), cleanStates[i], wsb)
|
||||
@@ -60,8 +59,7 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
|
||||
_, err = coreState.ExecuteStateTransition(b.Context(), beaconState, wsb)
|
||||
require.NoError(b, err, "Failed to process block, benchmarks will fail")
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for i := 0; b.Loop(); i++ {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(b, err)
|
||||
_, err = coreState.ExecuteStateTransition(b.Context(), cleanStates[i], wsb)
|
||||
@@ -83,8 +81,7 @@ func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(b.Context(), beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, beaconState.SetSlot(currentSlot))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
// ProcessEpochPrecompute is the optimized version of process epoch. It's enabled by default
|
||||
// at run time.
|
||||
_, err := coreState.ProcessEpochPrecompute(b.Context(), beaconState.Copy())
|
||||
@@ -96,8 +93,7 @@ func BenchmarkHashTreeRoot_FullState(b *testing.B) {
|
||||
beaconState, err := benchmark.PreGenstateFullEpochs()
|
||||
require.NoError(b, err)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
_, err := beaconState.HashTreeRoot(b.Context())
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -113,8 +109,7 @@ func BenchmarkHashTreeRootState_FullState(b *testing.B) {
|
||||
_, err = beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(b, err)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
_, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -128,7 +123,7 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
|
||||
b.Run("Proto_Marshal", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
_, err := proto.Marshal(natState)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -137,7 +132,7 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
|
||||
b.Run("Fast_SSZ_Marshal", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
_, err := natState.MarshalSSZ()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -157,7 +152,7 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
|
||||
b.Run("Proto_Unmarshal", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
require.NoError(b, proto.Unmarshal(protoObject, ðpb.BeaconState{}))
|
||||
}
|
||||
})
|
||||
@@ -165,7 +160,7 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
|
||||
b.Run("Fast_SSZ_Unmarshal", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
sszState := ðpb.BeaconState{}
|
||||
require.NoError(b, sszState.UnmarshalSSZ(sszObject))
|
||||
}
|
||||
@@ -174,7 +169,7 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
|
||||
|
||||
func clonedStates(beaconState state.BeaconState) []state.BeaconState {
|
||||
clonedStates := make([]state.BeaconState, runAmount)
|
||||
for i := 0; i < runAmount; i++ {
|
||||
for i := range runAmount {
|
||||
clonedStates[i] = beaconState.Copy()
|
||||
}
|
||||
return clonedStates
|
||||
|
||||
@@ -108,7 +108,7 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
|
||||
|
||||
// prepare copies for both states
|
||||
var setups []state.BeaconState
|
||||
for i := uint64(0); i < 300; i++ {
|
||||
for i := range uint64(300) {
|
||||
var st state.BeaconState
|
||||
if i%2 == 0 {
|
||||
st = s1
|
||||
|
||||
@@ -95,7 +95,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
|
||||
}
|
||||
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
h := make([]byte, 32)
|
||||
copy(h, eth1Data.BlockHash)
|
||||
randaoMixes[i] = h
|
||||
@@ -104,17 +104,17 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
|
||||
zeroHash := params.BeaconConfig().ZeroHash[:]
|
||||
|
||||
activeIndexRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(activeIndexRoots); i++ {
|
||||
for i := range activeIndexRoots {
|
||||
activeIndexRoots[i] = zeroHash
|
||||
}
|
||||
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = zeroHash
|
||||
}
|
||||
|
||||
stateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(stateRoots); i++ {
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = zeroHash
|
||||
}
|
||||
|
||||
@@ -131,7 +131,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
|
||||
}
|
||||
scoresMissing := len(preState.Validators()) - len(scores)
|
||||
if scoresMissing > 0 {
|
||||
for i := 0; i < scoresMissing; i++ {
|
||||
for range scoresMissing {
|
||||
scores = append(scores, 0)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,7 +122,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
}
|
||||
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
h := make([]byte, 32)
|
||||
copy(h, eth1Data.BlockHash)
|
||||
randaoMixes[i] = h
|
||||
@@ -131,17 +131,17 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
zeroHash := params.BeaconConfig().ZeroHash[:]
|
||||
|
||||
activeIndexRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(activeIndexRoots); i++ {
|
||||
for i := range activeIndexRoots {
|
||||
activeIndexRoots[i] = zeroHash
|
||||
}
|
||||
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = zeroHash
|
||||
}
|
||||
|
||||
stateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(stateRoots); i++ {
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = zeroHash
|
||||
}
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ func TestGenesisBeaconState_1000(t *testing.T) {
|
||||
deposits := make([]*ethpb.Deposit, 300000)
|
||||
var genesisTime uint64
|
||||
eth1Data := ðpb.Eth1Data{}
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(&deposits)
|
||||
fuzzer.Fuzz(&genesisTime)
|
||||
fuzzer.Fuzz(eth1Data)
|
||||
@@ -40,7 +40,7 @@ func TestOptimizedGenesisBeaconState_1000(t *testing.T) {
|
||||
preState, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
eth1Data := ðpb.Eth1Data{}
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(&genesisTime)
|
||||
fuzzer.Fuzz(eth1Data)
|
||||
fuzzer.Fuzz(preState)
|
||||
@@ -60,7 +60,7 @@ func TestIsValidGenesisState_100000(_ *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
var chainStartDepositCount, currentTime uint64
|
||||
for i := 0; i < 100000; i++ {
|
||||
for range 100000 {
|
||||
fuzzer.Fuzz(&chainStartDepositCount)
|
||||
fuzzer.Fuzz(¤tTime)
|
||||
IsValidGenesisState(chainStartDepositCount, currentTime)
|
||||
|
||||
@@ -21,7 +21,7 @@ func TestFuzzExecuteStateTransition_1000(t *testing.T) {
|
||||
sb := ðpb.SignedBeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(sb)
|
||||
if sb.Block == nil || sb.Block.Body == nil {
|
||||
@@ -45,7 +45,7 @@ func TestFuzzCalculateStateRoot_1000(t *testing.T) {
|
||||
sb := ðpb.SignedBeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(sb)
|
||||
if sb.Block == nil || sb.Block.Body == nil {
|
||||
@@ -68,7 +68,7 @@ func TestFuzzProcessSlot_1000(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
s, err := ProcessSlot(ctx, state)
|
||||
if err != nil && s != nil {
|
||||
@@ -86,7 +86,7 @@ func TestFuzzProcessSlots_1000(t *testing.T) {
|
||||
slot := primitives.Slot(0)
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(&slot)
|
||||
s, err := ProcessSlots(ctx, state, slot)
|
||||
@@ -105,7 +105,7 @@ func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
|
||||
bb := ðpb.BeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(bb)
|
||||
if bb.Body == nil {
|
||||
@@ -128,7 +128,7 @@ func TestFuzzverifyOperationLengths_10000(t *testing.T) {
|
||||
bb := ðpb.BeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(bb)
|
||||
if bb.Body == nil {
|
||||
@@ -148,7 +148,7 @@ func TestFuzzCanProcessEpoch_10000(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
time.CanProcessEpoch(state)
|
||||
}
|
||||
@@ -162,7 +162,7 @@ func TestFuzzProcessEpochPrecompute_1000(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
s, err := ProcessEpochPrecompute(ctx, state)
|
||||
if err != nil && s != nil {
|
||||
@@ -180,7 +180,7 @@ func TestFuzzProcessBlockForStateRoot_1000(t *testing.T) {
|
||||
sb := ðpb.SignedBeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(sb)
|
||||
if sb.Block == nil || sb.Block.Body == nil || sb.Block.Body.Eth1Data == nil {
|
||||
|
||||
@@ -754,8 +754,7 @@ func BenchmarkProcessSlots_Capella(b *testing.B) {
|
||||
|
||||
var err error
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to process slot %v", err)
|
||||
@@ -768,8 +767,7 @@ func BenchmarkProcessSlots_Deneb(b *testing.B) {
|
||||
|
||||
var err error
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to process slot %v", err)
|
||||
@@ -782,8 +780,7 @@ func BenchmarkProcessSlots_Electra(b *testing.B) {
|
||||
|
||||
var err error
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to process slot %v", err)
|
||||
|
||||
@@ -307,7 +307,7 @@ func SlashValidator(
|
||||
// ActivatedValidatorIndices determines the indices activated during the given epoch.
|
||||
func ActivatedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) []primitives.ValidatorIndex {
|
||||
activations := make([]primitives.ValidatorIndex, 0)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
val := validators[i]
|
||||
if val.ActivationEpoch <= epoch && epoch < val.ExitEpoch {
|
||||
activations = append(activations, primitives.ValidatorIndex(i))
|
||||
@@ -319,7 +319,7 @@ func ActivatedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Valid
|
||||
// SlashedValidatorIndices determines the indices slashed during the given epoch.
|
||||
func SlashedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) []primitives.ValidatorIndex {
|
||||
slashed := make([]primitives.ValidatorIndex, 0)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
val := validators[i]
|
||||
maxWithdrawableEpoch := primitives.MaxEpoch(val.WithdrawableEpoch, epoch+params.BeaconConfig().EpochsPerSlashingsVector)
|
||||
if val.WithdrawableEpoch == maxWithdrawableEpoch && val.Slashed {
|
||||
|
||||
@@ -172,7 +172,7 @@ func TestSlashValidator_OK(t *testing.T) {
|
||||
validatorCount := 100
|
||||
registry := make([]*ethpb.Validator, 0, validatorCount)
|
||||
balances := make([]uint64, 0, validatorCount)
|
||||
for i := 0; i < validatorCount; i++ {
|
||||
for range validatorCount {
|
||||
registry = append(registry, ðpb.Validator{
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -226,7 +226,7 @@ func TestSlashValidator_Electra(t *testing.T) {
|
||||
validatorCount := 100
|
||||
registry := make([]*ethpb.Validator, 0, validatorCount)
|
||||
balances := make([]uint64, 0, validatorCount)
|
||||
for i := 0; i < validatorCount; i++ {
|
||||
for range validatorCount {
|
||||
registry = append(registry, ðpb.Validator{
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
|
||||
@@ -26,7 +26,7 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
windowSlots = windowSlots + primitives.Slot(params.BeaconConfig().FuluForkEpoch)
|
||||
maxBlobs := params.LastNetworkScheduleEntry().MaxBlobsPerBlock
|
||||
commits := make([][]byte, maxBlobs+1)
|
||||
for i := 0; i < len(commits); i++ {
|
||||
for i := range commits {
|
||||
commits[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
|
||||
}
|
||||
cases := []struct {
|
||||
|
||||
@@ -44,7 +44,7 @@ func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpe
|
||||
entry.setDiskSummary(sum)
|
||||
}
|
||||
expected := make([]blocks.ROBlob, 0, nBlobs)
|
||||
for i := 0; i < len(commits); i++ {
|
||||
for i := range commits {
|
||||
if entry.diskSummary.HasIndex(uint64(i)) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -112,12 +112,10 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
|
||||
blob := testSidecars[0]
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 100; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for range 100 {
|
||||
wg.Go(func() {
|
||||
require.NoError(t, b.Save(blob))
|
||||
}()
|
||||
})
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
@@ -32,7 +32,7 @@ func (s BlobStorageSummary) AllAvailable(count int) bool {
|
||||
if count > len(s.mask) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < count; i++ {
|
||||
for i := range count {
|
||||
if !s.mask[i] {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -74,7 +74,7 @@ func (aq AncestryQuery) Span() primitives.Slot {
|
||||
// QueryFilter defines a generic interface for type-asserting
|
||||
// specific filters to use in querying DB objects.
|
||||
type QueryFilter struct {
|
||||
queries map[FilterType]interface{}
|
||||
queries map[FilterType]any
|
||||
ancestry AncestryQuery
|
||||
}
|
||||
|
||||
@@ -82,14 +82,14 @@ type QueryFilter struct {
|
||||
// certain Ethereum data types by attribute.
|
||||
func NewFilter() *QueryFilter {
|
||||
return &QueryFilter{
|
||||
queries: make(map[FilterType]interface{}),
|
||||
queries: make(map[FilterType]any),
|
||||
}
|
||||
}
|
||||
|
||||
// Filters returns and underlying map of FilterType to interface{}, giving us
|
||||
// a copy of the currently set filters which can then be iterated over and type
|
||||
// asserted for use anywhere.
|
||||
func (q *QueryFilter) Filters() map[FilterType]interface{} {
|
||||
func (q *QueryFilter) Filters() map[FilterType]any {
|
||||
return q.queries
|
||||
}
|
||||
|
||||
|
||||
@@ -215,7 +215,7 @@ func (s *Store) Blocks(ctx context.Context, f *filters.QueryFilter) ([]interface
return err
}

-for i := 0; i < len(keys); i++ {
+for i := range keys {
encoded := bkt.Get(keys[i])
blk, err := unmarshalBlock(ctx, encoded)
if err != nil {

@@ -307,7 +307,7 @@ func (s *Store) BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]b
return err
}

-for i := 0; i < len(keys); i++ {
+for i := range keys {
blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
}
return nil

@@ -1063,7 +1063,7 @@ func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter
func blockRootsBySlotRange(
ctx context.Context,
bkt *bolt.Bucket,
-startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
+startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded any,
) ([][]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
defer span.End()

@@ -172,7 +172,7 @@ func TestStore_SaveBlock_NoDuplicates(t *testing.T) {

// Even with a full cache, saving new blocks should not cause
// duplicated blocks in the DB.
-for i := 0; i < 100; i++ {
+for range 100 {
require.NoError(t, db.SaveBlock(ctx, blk))
}

@@ -255,7 +255,7 @@ func TestStore_BlocksHandleZeroCase(t *testing.T) {
ctx := t.Context()
numBlocks := 10
totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, numBlocks)
-for i := 0; i < len(totalBlocks); i++ {
+for i := range totalBlocks {
b, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)
totalBlocks[i] = b

@@ -279,7 +279,7 @@ func TestStore_BlocksHandleInvalidEndSlot(t *testing.T) {
numBlocks := 10
totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, numBlocks)
// Save blocks from slot 1 onwards.
-for i := 0; i < len(totalBlocks); i++ {
+for i := range totalBlocks {
b, err := tt.newBlock(primitives.Slot(i+1), bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)
totalBlocks[i] = b

@@ -927,7 +927,7 @@ func TestStore_Blocks_Retrieve_SlotRange(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, 500)
-for i := 0; i < 500; i++ {
+for i := range 500 {
b, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)
totalBlocks[i] = b

@@ -947,7 +947,7 @@ func TestStore_Blocks_Retrieve_Epoch(t *testing.T) {
db := setupDB(t)
slots := params.BeaconConfig().SlotsPerEpoch.Mul(7)
totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, slots)
-for i := primitives.Slot(0); i < slots; i++ {
+for i := range slots {
b, err := tt.newBlock(i, bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)
totalBlocks[i] = b
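The same range-over-int rewrite also covers loops whose bound is a named integer type such as primitives.Slot or primitives.Epoch: in for i := range n, the loop variable takes n's type, so indexing and calls in the body compile unchanged. A tiny sketch with a stand-in type (the real primitives.Slot is defined elsewhere in the repo):

package main

import "fmt"

// Slot stands in for the kind of named uint64 type used in the hunks above.
type Slot uint64

func describe(s Slot) string { return fmt.Sprintf("slot %d", s) }

func main() {
	slots := Slot(4)

	// Before: for i := Slot(0); i < slots; i++ { ... }
	// After: range over the integer; i has type Slot, so it can be passed
	// straight to functions expecting a Slot.
	for i := range slots {
		fmt.Println(describe(i))
	}
}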
@@ -971,7 +971,7 @@ func TestStore_Blocks_Retrieve_SlotRangeWithStep(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, 500)
-for i := 0; i < 500; i++ {
+for i := range 500 {
b, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)
totalBlocks[i] = b

@@ -1140,7 +1140,7 @@ func TestStore_SaveBlocks_HasCachedBlocks(t *testing.T) {
ctx := t.Context()

b := make([]interfaces.ReadOnlySignedBeaconBlock, 500)
-for i := 0; i < 500; i++ {
+for i := range 500 {
blk, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)
b[i] = blk

@@ -1164,7 +1164,7 @@ func TestStore_SaveBlocks_HasRootsMatched(t *testing.T) {
ctx := t.Context()

b := make([]interfaces.ReadOnlySignedBeaconBlock, 500)
-for i := 0; i < 500; i++ {
+for i := range 500 {
blk, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)
b[i] = blk

@@ -58,7 +58,7 @@ func encode(ctx context.Context, msg proto.Message) ([]byte, error) {
}

// isSSZStorageFormat returns true if the object type should be saved in SSZ encoded format.
-func isSSZStorageFormat(obj interface{}) bool {
+func isSSZStorageFormat(obj any) bool {
switch obj.(type) {
case *ethpb.BeaconState:
return true

@@ -161,7 +161,7 @@ func TestStore_IsFinalizedChildBlock(t *testing.T) {
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))

// All blocks up to slotsPerEpoch should have a finalized child block.
-for i := uint64(0); i < slotsPerEpoch; i++ {
+for i := range slotsPerEpoch {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized in the index", i)

@@ -29,14 +29,14 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
var err error

sampleRoot := make([]byte, 32)
-for i := 0; i < 32; i++ {
+for i := range 32 {
sampleRoot[i] = byte(i)
}

sampleExecutionBranch := make([][]byte, fieldparams.ExecutionBranchDepth)
-for i := 0; i < 4; i++ {
+for i := range 4 {
sampleExecutionBranch[i] = make([]byte, 32)
-for j := 0; j < 32; j++ {
+for j := range 32 {
sampleExecutionBranch[i][j] = byte(i + j)
}
}

@@ -460,7 +460,7 @@ func createDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s

syncCommitteeSize := params.BeaconConfig().SyncCommitteeSize
pubKeys := make([][]byte, syncCommitteeSize)
-for i := uint64(0); i < syncCommitteeSize; i++ {
+for i := range syncCommitteeSize {
pubKeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
nextSyncCommittee := &pb.SyncCommittee{

@@ -479,7 +479,7 @@ func createDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
}

executionBranch := make([][]byte, fieldparams.ExecutionBranchDepth)
-for i := 0; i < fieldparams.ExecutionBranchDepth; i++ {
+for i := range fieldparams.ExecutionBranchDepth {
executionBranch[i] = make([]byte, 32)
}

@@ -731,7 +731,7 @@ func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.
currentEpoch := slots.ToEpoch(currentSlot)
syncCommitteeSize := params.BeaconConfig().SyncCommitteeSize
pubKeys := make([][]byte, syncCommitteeSize)
-for i := uint64(0); i < syncCommitteeSize; i++ {
+for i := range syncCommitteeSize {
pubKeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
currentSyncCommittee := &pb.SyncCommittee{

@@ -750,7 +750,7 @@ func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.
}

executionBranch := make([][]byte, fieldparams.ExecutionBranchDepth)
-for i := 0; i < fieldparams.ExecutionBranchDepth; i++ {
+for i := range fieldparams.ExecutionBranchDepth {
executionBranch[i] = make([]byte, 32)
}

@@ -922,7 +922,7 @@ func createStateIndicesFromStateSlot(ctx context.Context, slot primitives.Slot)
indices := [][]byte{
bytesutil.SlotToBytesBigEndian(slot),
}
-for i := 0; i < len(buckets); i++ {
+for i := range buckets {
indicesByBucket[string(buckets[i])] = indices[i]
}
return indicesByBucket

@@ -318,7 +318,7 @@ func TestState_CanSaveRetrieveValidatorEntriesFromCache(t *testing.T) {
assert.Equal(t, true, db.HasState(t.Context(), r))

// check if the state is in cache
-for i := 0; i < len(stateValidators); i++ {
+for i := range stateValidators {
hash, hashErr := stateValidators[i].HashTreeRoot()
assert.NoError(t, hashErr)

@@ -505,7 +505,7 @@ func TestStore_StatesBatchDelete(t *testing.T) {
totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, numBlocks)
blockRoots := make([][32]byte, 0)
evenBlockRoots := make([][32]byte, 0)
-for i := 0; i < len(totalBlocks); i++ {
+for i := range totalBlocks {
b := util.NewBeaconBlock()
b.Block.Slot = primitives.Slot(i)
var err error

@@ -874,7 +874,7 @@ func TestAltairState_CanDelete(t *testing.T) {

func validators(limit int) []*ethpb.Validator {
var vals []*ethpb.Validator
-for i := 0; i < limit; i++ {
+for i := range limit {
pubKey := make([]byte, params.BeaconConfig().BLSPubkeyLength)
binary.LittleEndian.PutUint64(pubKey, mathRand.Uint64())
val := &ethpb.Validator{
@@ -893,13 +893,12 @@ func validators(limit int) []*ethpb.Validator {
}

func checkStateSaveTime(b *testing.B, saveCount int) {
-b.StopTimer()

db := setupDB(b)
initialSetOfValidators := validators(100000)

// construct some states and save to randomize benchmark.
-for i := 0; i < saveCount; i++ {
+for range saveCount {
key := make([]byte, 32)
_, err := rand.Read(key)
require.NoError(b, err)

@@ -925,14 +924,13 @@ func checkStateSaveTime(b *testing.B, saveCount int) {
require.NoError(b, st.SetValidators(initialSetOfValidators))

b.ReportAllocs()
-b.StartTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
require.NoError(b, db.SaveState(b.Context(), st, r))
}
}

func checkStateReadTime(b *testing.B, saveCount int) {
-b.StopTimer()

db := setupDB(b)
initialSetOfValidators := validators(100000)

@@ -945,7 +943,7 @@ func checkStateReadTime(b *testing.B, saveCount int) {
require.NoError(b, db.SaveState(b.Context(), st, r))

// construct some states and save to randomize benchmark.
-for i := 0; i < saveCount; i++ {
+for range saveCount {
key := make([]byte, 32)
_, err := rand.Read(key)
require.NoError(b, err)

@@ -965,8 +963,8 @@ func checkStateReadTime(b *testing.B, saveCount int) {
}

b.ReportAllocs()
-b.StartTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
_, err := db.State(b.Context(), r)
require.NoError(b, err)
}
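A minimal sketch of the bloop rewrite used in these benchmark hunks, assuming Go 1.24+ where testing.B gained the Loop method: for b.Loop() replaces for i := 0; i < b.N; i++, and only the loop body is measured, which is why the manual b.StopTimer()/b.StartTimer()/b.ResetTimer() calls around setup can simply be dropped. The benchmark body below is illustrative:

package example_test

import "testing"

func doWork() int {
	s := 0
	for i := range 1000 {
		s += i
	}
	return s
}

func BenchmarkDoWork(b *testing.B) {
	b.ReportAllocs()
	// No b.ResetTimer()/b.StartTimer() needed: b.Loop keeps any setup before
	// the loop outside the measured region (Go 1.24+).
	for b.Loop() {
		_ = doWork()
	}
}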
@@ -151,7 +151,7 @@ func TestSplitRoots(t *testing.T) {
bt := make([][32]byte, 0)
for _, x := range []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} {
var b [32]byte
-for i := 0; i < 32; i++ {
+for i := range 32 {
b[i] = x
}
bt = append(bt, b)

@@ -64,8 +64,7 @@ func BenchmarkStore_SaveLastValidatedCheckpoint(b *testing.B) {
require.NoError(b, db.SaveState(ctx, st, root))
db.stateSummaryCache.clear()

-b.ResetTimer()
-for i := 0; i < b.N; i++ {
+for b.Loop() {
require.NoError(b, db.SaveLastValidatedCheckpoint(ctx, cp))
}
}

@@ -221,7 +221,7 @@ func (p *Service) pruneBatches(pruneUpto primitives.Slot) (int, error) {
case <-ctx.Done():
return numBatches, nil
default:
-for i := 0; i < defaultNumBatchesToPrune; i++ {
+for range defaultNumBatchesToPrune {
slotsDeleted, err := p.db.DeleteHistoricalDataBeforeSlot(ctx, pruneUpto, defaultPrunableBatchSize)
if err != nil {
return 0, errors.Wrapf(err, "could not delete upto slot %d", pruneUpto)

@@ -66,7 +66,7 @@ func TestStore_PruneProposalsAtEpoch(t *testing.T) {
expectedNumPruned := 2 * uint(pruningLimitEpoch+1) * uint(slotsPerEpoch)

proposals := make([]*slashertypes.SignedBlockHeaderWrapper, 0, uint64(currentEpoch)*uint64(slotsPerEpoch)*2)
-for i := primitives.Epoch(0); i < currentEpoch; i++ {
+for i := range currentEpoch {
startSlot, err := slots.EpochStart(i)
require.NoError(t, err)
endSlot, err := slots.EpochStart(i + 1)

@@ -86,7 +86,7 @@ func TestStore_PruneProposalsAtEpoch(t *testing.T) {
require.Equal(t, expectedNumPruned, actualNumPruned)

// Everything before epoch 10 should be deleted.
-for i := primitives.Epoch(0); i < pruningLimitEpoch; i++ {
+for i := range pruningLimitEpoch {
err = beaconDB.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(proposalRecordsBucket)
startSlot, err := slots.EpochStart(i)

@@ -164,7 +164,7 @@ func TestStore_PruneAttestations_OK(t *testing.T) {
expectedNumPruned := 2 * uint(pruningLimitEpoch+1) * uint(slotsPerEpoch)

attestations := make([]*slashertypes.IndexedAttestationWrapper, 0, uint64(currentEpoch)*uint64(slotsPerEpoch)*2)
-for i := primitives.Epoch(0); i < currentEpoch; i++ {
+for i := range currentEpoch {
startSlot, err := slots.EpochStart(i)
require.NoError(t, err)
endSlot, err := slots.EpochStart(i + 1)

@@ -191,7 +191,7 @@ func TestStore_PruneAttestations_OK(t *testing.T) {
require.Equal(t, expectedNumPruned, actualNumPruned)

// Everything before epoch 10 should be deleted.
-for i := primitives.Epoch(0); i < pruningLimitEpoch; i++ {
+for i := range pruningLimitEpoch {
err = beaconDB.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(attestationDataRootsBucket)
startSlot, err := slots.EpochStart(i)

@@ -429,7 +429,7 @@ func (s *Store) SaveSlasherChunks(
encodedKeys := make([][]byte, chunksCount)
encodedChunks := make([][]byte, chunksCount)

-for i := 0; i < chunksCount; i++ {
+for i := range chunksCount {
chunkKey, chunk := chunkKeys[i], chunks[i]
encodedKey := append(encodedKind, chunkKey...)

@@ -452,7 +452,7 @@ func (s *Store) SaveSlasherChunks(
if err := s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slasherChunksBucket)

-for i := 0; i < batchSize; i++ {
+for i := range batchSize {
if err := bkt.Put(encodedKeysBatch[i], encodedChunksBatch[i]); err != nil {
return err
}

@@ -617,7 +617,7 @@ func (s *Store) HighestAttestations(
err = s.db.View(func(tx *bolt.Tx) error {
signingRootsBkt := tx.Bucket(attestationDataRootsBucket)
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
-for i := 0; i < len(encodedIndices); i++ {
+for i := range encodedIndices {
c := signingRootsBkt.Cursor()
for k, v := c.Last(); k != nil; k, v = c.Prev() {
if suffixForAttestationRecordsKey(k, encodedIndices[i]) {

@@ -659,7 +659,7 @@ func keyForValidatorProposal(slot primitives.Slot, proposerIndex primitives.Vali

func encodeSlasherChunk(chunk []uint16) ([]byte, error) {
val := make([]byte, 0)
-for i := 0; i < len(chunk); i++ {
+for i := range chunk {
val = append(val, ssz.MarshalUint16(make([]byte, 0), chunk[i])...)
}
if len(val) == 0 {

@@ -26,7 +26,7 @@ func TestStore_AttestationRecordForValidator_SaveRetrieve(t *testing.T) {

// Defines attestations to save and retrieve.
attWrappers := make([]*slashertypes.IndexedAttestationWrapper, attestationsCount)
-for i := 0; i < attestationsCount; i++ {
+for i := range attestationsCount {
var dataRoot [32]byte
binary.LittleEndian.PutUint64(dataRoot[:], uint64(i))

@@ -41,7 +41,7 @@ func TestStore_AttestationRecordForValidator_SaveRetrieve(t *testing.T) {
attWrappers[i] = attWrapper
}
attWrappersElectra := make([]*slashertypes.IndexedAttestationWrapper, attestationsCount)
-for i := 0; i < attestationsCount; i++ {
+for i := range attestationsCount {
var dataRoot [32]byte
binary.LittleEndian.PutUint64(dataRoot[:], uint64(i))

@@ -107,13 +107,13 @@ func TestStore_LastEpochWrittenForValidators(t *testing.T) {
indices := make([]primitives.ValidatorIndex, validatorsCount)
epochs := make([]primitives.Epoch, validatorsCount)

-for i := 0; i < validatorsCount; i++ {
+for i := range validatorsCount {
indices[i] = primitives.ValidatorIndex(i)
epochs[i] = primitives.Epoch(i)
}

epochsByValidator := make(map[primitives.ValidatorIndex]primitives.Epoch, validatorsCount)
-for i := 0; i < validatorsCount; i++ {
+for i := range validatorsCount {
epochsByValidator[indices[i]] = epochs[i]
}

@@ -215,7 +215,7 @@ func TestStore_SlasherChunk_SaveRetrieve(t *testing.T) {
minChunkKeys := make([][]byte, totalChunks)
minChunks := make([][]uint16, totalChunks)

-for i := 0; i < totalChunks; i++ {
+for i := range totalChunks {
// Create chunk key.
chunkKey := ssz.MarshalUint64(make([]byte, 0), uint64(i))
minChunkKeys[i] = chunkKey

@@ -223,7 +223,7 @@ func TestStore_SlasherChunk_SaveRetrieve(t *testing.T) {
// Create chunk.
chunk := make([]uint16, elemsPerChunk)

-for j := 0; j < len(chunk); j++ {
+for j := range chunk {
chunk[j] = uint16(i + j)
}

@@ -234,7 +234,7 @@ func TestStore_SlasherChunk_SaveRetrieve(t *testing.T) {
maxChunkKeys := make([][]byte, totalChunks)
maxChunks := make([][]uint16, totalChunks)

-for i := 0; i < totalChunks; i++ {
+for i := range totalChunks {
// Create chunk key.
chunkKey := ssz.MarshalUint64(make([]byte, 0), uint64(i+1))
maxChunkKeys[i] = chunkKey

@@ -242,7 +242,7 @@ func TestStore_SlasherChunk_SaveRetrieve(t *testing.T) {
// Create chunk.
chunk := make([]uint16, elemsPerChunk)

-for j := 0; j < len(chunk); j++ {
+for j := range chunk {
chunk[j] = uint16(i + j + 1)
}

@@ -315,7 +315,7 @@ func TestStore_SlasherChunk_PreventsSavingWrongLength(t *testing.T) {
totalChunks := 64
chunkKeys := make([][]byte, totalChunks)
chunks := make([][]uint16, totalChunks)
-for i := 0; i < totalChunks; i++ {
+for i := range totalChunks {
chunks[i] = []uint16{}
chunkKeys[i] = ssz.MarshalUint64(make([]byte, 0), uint64(i))
}
@@ -566,11 +566,11 @@ func TestStore_HighestAttestations(t *testing.T) {
}

func BenchmarkHighestAttestations(b *testing.B) {
-b.StopTimer()
+
count := 10000
valsPerAtt := 100
indicesPerAtt := make([][]uint64, count)
-for i := 0; i < count; i++ {
+for i := range count {
indicesForAtt := make([]uint64, valsPerAtt)
for r := i * count; r < valsPerAtt*(i+1); r++ {
indicesForAtt[i] = uint64(r)

@@ -578,7 +578,7 @@ func BenchmarkHighestAttestations(b *testing.B) {
indicesPerAtt[i] = indicesForAtt
}
atts := make([]*slashertypes.IndexedAttestationWrapper, count)
-for i := 0; i < count; i++ {
+for i := range count {
atts[i] = createAttestationWrapper(version.Phase0, primitives.Epoch(i), primitives.Epoch(i+2), indicesPerAtt[i], []byte{})
}

@@ -587,27 +587,27 @@ func BenchmarkHighestAttestations(b *testing.B) {
require.NoError(b, beaconDB.SaveAttestationRecordsForValidators(ctx, atts))

allIndices := make([]primitives.ValidatorIndex, 0, valsPerAtt*count)
-for i := 0; i < count; i++ {
+for i := range count {
indicesForAtt := make([]primitives.ValidatorIndex, valsPerAtt)
-for r := 0; r < valsPerAtt; r++ {
+for r := range valsPerAtt {
indicesForAtt[r] = primitives.ValidatorIndex(atts[i].IndexedAttestation.GetAttestingIndices()[r])
}
allIndices = append(allIndices, indicesForAtt...)
}
b.ReportAllocs()
-b.StartTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
_, err := beaconDB.HighestAttestations(ctx, allIndices)
require.NoError(b, err)
}
}

func BenchmarkStore_CheckDoubleBlockProposals(b *testing.B) {
-b.StopTimer()
+
count := 10000
valsPerAtt := 100
indicesPerAtt := make([][]uint64, count)
-for i := 0; i < count; i++ {
+for i := range count {
indicesForAtt := make([]uint64, valsPerAtt)
for r := i * count; r < valsPerAtt*(i+1); r++ {
indicesForAtt[i] = uint64(r)

@@ -615,7 +615,7 @@ func BenchmarkStore_CheckDoubleBlockProposals(b *testing.B) {
indicesPerAtt[i] = indicesForAtt
}
atts := make([]*slashertypes.IndexedAttestationWrapper, count)
-for i := 0; i < count; i++ {
+for i := range count {
atts[i] = createAttestationWrapper(version.Phase0, primitives.Epoch(i), primitives.Epoch(i+2), indicesPerAtt[i], []byte{})
}

@@ -627,8 +627,8 @@ func BenchmarkStore_CheckDoubleBlockProposals(b *testing.B) {
rand.Shuffle(count, func(i, j int) { atts[i], atts[j] = atts[j], atts[i] })

b.ReportAllocs()
-b.StartTimer()
-for i := 0; i < b.N; i++ {
+
+for b.Loop() {
_, err := beaconDB.CheckAttesterDoubleVotes(ctx, atts)
require.NoError(b, err)
}

@@ -39,7 +39,7 @@ var (
)

// hashKeyFn takes the hex string representation as the key for a headerInfo.
-func hashKeyFn(obj interface{}) (string, error) {
+func hashKeyFn(obj any) (string, error) {
hInfo, ok := obj.(*types.HeaderInfo)
if !ok {
return "", ErrNotAHeaderInfo

@@ -50,7 +50,7 @@ func hashKeyFn(obj interface{}) (string, error) {

// heightKeyFn takes the string representation of the block header number as the key
// for a headerInfo.
-func heightKeyFn(obj interface{}) (string, error) {
+func heightKeyFn(obj any) (string, error) {
hInfo, ok := obj.(*types.HeaderInfo)
if !ok {
return "", ErrNotAHeaderInfo

@@ -164,6 +164,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
}

// popProcessNoopFunc is a no-op function that never returns an error.
-func popProcessNoopFunc(_ interface{}, _ bool) error {
+func popProcessNoopFunc(_ any, _ bool) error {
return nil
}
Some files were not shown because too many files have changed in this diff.