Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 15:37:56 -05:00)
Commit: Merge branch 'develop' into eas-bug

BUILD.bazel (19 changes)
@@ -197,6 +197,25 @@ nogo(
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/modernize/any:go_default_library",
"//tools/analyzers/modernize/appendclipped:go_default_library",
"//tools/analyzers/modernize/bloop:go_default_library",
"//tools/analyzers/modernize/fmtappendf:go_default_library",
"//tools/analyzers/modernize/forvar:go_default_library",
"//tools/analyzers/modernize/mapsloop:go_default_library",
"//tools/analyzers/modernize/minmax:go_default_library",
#"//tools/analyzers/modernize/newexpr:go_default_library", # Disabled until go 1.26.
"//tools/analyzers/modernize/omitzero:go_default_library",
"//tools/analyzers/modernize/rangeint:go_default_library",
"//tools/analyzers/modernize/reflecttypefor:go_default_library",
"//tools/analyzers/modernize/slicescontains:go_default_library",
#"//tools/analyzers/modernize/slicesdelete:go_default_library", # Disabled, see https://go.dev/issue/73686
"//tools/analyzers/modernize/slicessort:go_default_library",
"//tools/analyzers/modernize/stringsbuilder:go_default_library",
"//tools/analyzers/modernize/stringscutprefix:go_default_library",
"//tools/analyzers/modernize/stringsseq:go_default_library",
"//tools/analyzers/modernize/testingcontext:go_default_library",
"//tools/analyzers/modernize/waitgroup:go_default_library",
"//tools/analyzers/nop:go_default_library",
"//tools/analyzers/nopanic:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",
WORKSPACE (20 changes)

@@ -205,6 +205,26 @@ prysm_image_deps()

load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")

# Override golang.org/x/tools to use v0.38.0 instead of v0.30.0
# This is necessary as this dependency is required by rules_go and they do not accept dependency
# update PRs. Instead, they ask downstream projects to override the dependency. To generate the
# patches or update this dependency again, check out the rules_go repo then run the releaser tool.
# bazel run //go/tools/releaser -- upgrade-dep -mirror=false org_golang_x_tools
# Copy the patches and http_archive updates from rules_go here.
http_archive(
name = "org_golang_x_tools",
patch_args = ["-p1"],
patches = [
"//third_party:org_golang_x_tools-deletegopls.patch",
"//third_party:org_golang_x_tools-gazelle.patch",
],
sha256 = "8509908cd7fc35aa09ff49d8494e4fd25bab9e6239fbf57e0d8344f6bec5802b",
strip_prefix = "tools-0.38.0",
urls = [
"https://github.com/golang/tools/archive/refs/tags/v0.38.0.zip",
],
)

go_rules_dependencies()

go_register_toolchains(
@@ -56,7 +56,7 @@ func ParseAccept(header string) []mediaRange {
}

var out []mediaRange
for _, field := range strings.Split(header, ",") {
for field := range strings.SplitSeq(header, ",") {
if r, ok := parseMediaRange(field); ok {
out = append(out, r)
}
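Note: strings.SplitSeq, used in the replacement line above, was added to the standard library in Go 1.24 and returns an iterator over the substrings instead of allocating a []string. A minimal, self-contained sketch of the idiom (illustration only, not part of this commit):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	header := "text/html, application/json;q=0.9"
	// SplitSeq yields each field lazily; no intermediate slice is allocated.
	for field := range strings.SplitSeq(header, ",") {
		fmt.Println(strings.TrimSpace(field))
	}
}
```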
@@ -421,7 +421,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid

func jsonValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]byte, error) {
vs := make([]*structs.SignedValidatorRegistration, len(svr))
for i := 0; i < len(svr); i++ {
for i := range svr {
vs[i] = structs.SignedValidatorRegistrationFromConsensus(svr[i])
}
body, err := json.Marshal(vs)
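Note: the rewrite above uses Go's index-only range over a slice, and many hunks below use range over an integer (Go 1.22+), which is what the modernize/rangeint analyzer enforces. A standalone sketch of both forms:

```go
package main

import "fmt"

func main() {
	regs := []string{"a", "b", "c"}
	// Ranging over a slice with a single variable yields just the index.
	for i := range regs {
		fmt.Println(i, regs[i])
	}
	// Since Go 1.22, ranging over an integer n yields 0, 1, ..., n-1.
	for i := range 3 {
		fmt.Println("iteration", i)
	}
}
```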
@@ -121,7 +121,7 @@ func (s *Uint64String) UnmarshalText(t []byte) error {

// MarshalText returns a byte representation of the text from Uint64String.
func (s Uint64String) MarshalText() ([]byte, error) {
return []byte(fmt.Sprintf("%d", s)), nil
return fmt.Appendf(nil, "%d", s), nil
}

// VersionResponse is a JSON representation of a field in the builder API header response.
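Note: fmt.Appendf (Go 1.19+) formats straight into a byte slice, avoiding the extra allocation of []byte(fmt.Sprintf(...)). A minimal sketch of the equivalence:

```go
package main

import "fmt"

func main() {
	var s uint64 = 42
	b1 := []byte(fmt.Sprintf("%d", s)) // formats to a string, then copies into a []byte
	b2 := fmt.Appendf(nil, "%d", s)    // formats directly into the returned []byte
	fmt.Println(string(b1) == string(b2)) // true
}
```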
@@ -15,7 +15,7 @@ import (

func LogRequests(
ctx context.Context,
method string, req,
reply interface{},
reply any,
cc *grpc.ClientConn,
invoker grpc.UnaryInvoker,
opts ...grpc.CallOption,

@@ -14,5 +14,5 @@ type GetForkScheduleResponse struct {
}

type GetSpecResponse struct {
Data interface{} `json:"data"`
Data any `json:"data"`
}
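Note: `any` has been a predeclared alias for `interface{}` since Go 1.18, so these replacements (enforced by the modernize/any analyzer added to BUILD.bazel above) do not change behavior. A short sketch:

```go
package main

import "fmt"

// any and interface{} are the same type, so either spelling may appear in a signature.
func describe(v any) string {
	return fmt.Sprintf("%T: %v", v, v)
}

func main() {
	values := []any{1, "two", 3.0}
	for _, v := range values {
		fmt.Println(describe(v))
	}
}
```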
@@ -93,9 +93,9 @@ func TestToggleMultipleTimes(t *testing.T) {

v := New()
pre := !v.IsSet()
for i := 0; i < 100; i++ {
for i := range 100 {
v.SetTo(false)
for j := 0; j < i; j++ {
for range i {
pre = v.Toggle()
}

@@ -149,7 +149,7 @@ func TestRace(t *testing.T) {

// Writer
go func() {
for i := 0; i < repeat; i++ {
for range repeat {
v.Set()
wg.Done()
}

@@ -157,7 +157,7 @@ func TestRace(t *testing.T) {

// Reader
go func() {
for i := 0; i < repeat; i++ {
for range repeat {
v.IsSet()
wg.Done()
}

@@ -165,7 +165,7 @@ func TestRace(t *testing.T) {

// Writer
go func() {
for i := 0; i < repeat; i++ {
for range repeat {
v.UnSet()
wg.Done()
}

@@ -173,7 +173,7 @@ func TestRace(t *testing.T) {

// Reader And Writer
go func() {
for i := 0; i < repeat; i++ {
for range repeat {
v.Toggle()
wg.Done()
}
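Note: the tests above exercise an atomic boolean type. The standard library's sync/atomic.Bool (Go 1.19+) provides the same core operations; a standalone analogue (not the package under test) looks like this:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var v atomic.Bool
	v.Store(true)                              // like Set()
	fmt.Println(v.Load())                      // like IsSet(): true
	fmt.Println(v.CompareAndSwap(true, false)) // like SetToIf(true, false): true
	fmt.Println(v.Load())                      // false
}
```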
@@ -198,8 +198,8 @@ func ExampleAtomicBool() {
|
||||
func BenchmarkMutexRead(b *testing.B) {
|
||||
var m sync.RWMutex
|
||||
var v bool
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
m.RLock()
|
||||
_ = v
|
||||
m.RUnlock()
|
||||
@@ -208,16 +208,16 @@ func BenchmarkMutexRead(b *testing.B) {
|
||||
|
||||
func BenchmarkAtomicValueRead(b *testing.B) {
|
||||
var v atomic.Value
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
_ = v.Load() != nil
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAtomicBoolRead(b *testing.B) {
|
||||
v := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
_ = v.IsSet()
|
||||
}
|
||||
}
|
||||
@@ -227,8 +227,8 @@ func BenchmarkAtomicBoolRead(b *testing.B) {
|
||||
func BenchmarkMutexWrite(b *testing.B) {
|
||||
var m sync.RWMutex
|
||||
var v bool
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
m.RLock()
|
||||
v = true
|
||||
m.RUnlock()
|
||||
@@ -239,16 +239,16 @@ func BenchmarkMutexWrite(b *testing.B) {
|
||||
|
||||
func BenchmarkAtomicValueWrite(b *testing.B) {
|
||||
var v atomic.Value
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
v.Store(true)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAtomicBoolWrite(b *testing.B) {
|
||||
v := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
v.Set()
|
||||
}
|
||||
}
|
||||
@@ -258,8 +258,8 @@ func BenchmarkAtomicBoolWrite(b *testing.B) {
|
||||
func BenchmarkMutexCAS(b *testing.B) {
|
||||
var m sync.RWMutex
|
||||
var v bool
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
m.Lock()
|
||||
if !v {
|
||||
v = true
|
||||
@@ -270,8 +270,8 @@ func BenchmarkMutexCAS(b *testing.B) {
|
||||
|
||||
func BenchmarkAtomicBoolCAS(b *testing.B) {
|
||||
v := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
v.SetToIf(false, true)
|
||||
}
|
||||
}
|
||||
@@ -281,8 +281,8 @@ func BenchmarkAtomicBoolCAS(b *testing.B) {
|
||||
func BenchmarkMutexToggle(b *testing.B) {
|
||||
var m sync.RWMutex
|
||||
var v bool
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
m.Lock()
|
||||
v = !v
|
||||
m.Unlock()
|
||||
@@ -291,8 +291,8 @@ func BenchmarkMutexToggle(b *testing.B) {
|
||||
|
||||
func BenchmarkAtomicBoolToggle(b *testing.B) {
|
||||
v := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
v.Toggle()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ const (

func init() {
input = make([][]byte, benchmarkElements)
for i := 0; i < benchmarkElements; i++ {
for i := range benchmarkElements {
input[i] = make([]byte, benchmarkElementSize)
_, err := rand.Read(input[i])
if err != nil {

@@ -35,7 +35,7 @@ func hash(input [][]byte) [][]byte {
output := make([][]byte, len(input))
for i := range input {
copy(output, input)
for j := 0; j < benchmarkHashRuns; j++ {
for range benchmarkHashRuns {
hash := sha256.Sum256(output[i])
output[i] = hash[:]
}

@@ -44,15 +44,15 @@ func hash(input [][]byte) [][]byte {
}

func BenchmarkHash(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
hash(input)
}
}

func BenchmarkHashMP(b *testing.B) {
output := make([][]byte, len(input))
for i := 0; i < b.N; i++ {
workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
for b.Loop() {
workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (any, error) {
return hash(input[offset : offset+entries]), nil
})
require.NoError(b, err)
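Note: the benchmark rewrites in this commit replace the classic `for i := 0; i < b.N; i++` loop with testing.B.Loop, added in Go 1.24; it also makes a preceding b.ResetTimer call unnecessary, which is why some hunks delete it. A minimal sketch:

```go
package bench

import "testing"

func work() int {
	s := 0
	for i := range 1000 {
		s += i
	}
	return s
}

func BenchmarkWork(b *testing.B) {
	// b.Loop runs the body for the required number of iterations, keeps the result
	// from being optimized away, and handles benchmark timer management itself.
	for b.Loop() {
		_ = work()
	}
}
```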
@@ -7,7 +7,7 @@ import (

// Debounce events fired over a channel by a specified duration, ensuring no events
// are handled until a certain interval of time has passed.
func Debounce(ctx context.Context, interval time.Duration, eventsChan <-chan interface{}, handler func(interface{})) {
func Debounce(ctx context.Context, interval time.Duration, eventsChan <-chan any, handler func(any)) {
var timer *time.Timer
defer func() {
if timer != nil {

@@ -14,7 +14,7 @@ import (
)

func TestDebounce_NoEvents(t *testing.T) {
eventsChan := make(chan interface{}, 100)
eventsChan := make(chan any, 100)
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)

@@ -26,7 +26,7 @@ func TestDebounce_NoEvents(t *testing.T) {
})
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
async.Debounce(ctx, interval, eventsChan, func(event any) {
atomic.AddInt32(&timesHandled, 1)
})
wg.Done()

@@ -38,7 +38,7 @@ func TestDebounce_NoEvents(t *testing.T) {
}

func TestDebounce_CtxClosing(t *testing.T) {
eventsChan := make(chan interface{}, 100)
eventsChan := make(chan any, 100)
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)

@@ -62,7 +62,7 @@ func TestDebounce_CtxClosing(t *testing.T) {
})
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
async.Debounce(ctx, interval, eventsChan, func(event any) {
atomic.AddInt32(&timesHandled, 1)
})
wg.Done()

@@ -74,14 +74,14 @@ func TestDebounce_CtxClosing(t *testing.T) {
}

func TestDebounce_SingleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
eventsChan := make(chan any, 100)
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
go async.Debounce(ctx, interval, eventsChan, func(event any) {
atomic.AddInt32(&timesHandled, 1)
})
for i := 0; i < 100; i++ {
for range 100 {
eventsChan <- struct{}{}
}
// We should expect 100 rapid fire changes to only have caused

@@ -92,14 +92,14 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
}

func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
eventsChan := make(chan any, 100)
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
go async.Debounce(ctx, interval, eventsChan, func(event any) {
atomic.AddInt32(&timesHandled, 1)
})
for i := 0; i < 100; i++ {
for range 100 {
eventsChan <- struct{}{}
}
require.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Events must prevent from handler execution")
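For context, a hedged usage sketch of async.Debounce as it looks after this change (the import path is assumed from the v7 module paths seen elsewhere in this diff; illustration only, not part of the commit):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/OffchainLabs/prysm/v7/async" // assumed import path
)

func main() {
	events := make(chan any, 100)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go async.Debounce(ctx, time.Second, events, func(event any) {
		// Invoked at most once per quiet interval, no matter how many events arrive.
		fmt.Println("handled:", event)
	})

	for range 100 {
		events <- struct{}{}
	}
	time.Sleep(2 * time.Second) // give the debounced handler a chance to run
}
```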
@@ -93,9 +93,7 @@ func ExampleSubscriptionScope() {
// Run a subscriber in the background.
divsub := app.SubscribeResults('/', divs)
mulsub := app.SubscribeResults('*', muls)
wg.Add(1)
go func() {
defer wg.Done()
wg.Go(func() {
defer fmt.Println("subscriber exited")
defer divsub.Unsubscribe()
defer mulsub.Unsubscribe()

@@ -111,7 +109,7 @@ func ExampleSubscriptionScope() {
return
}
}
}()
})

// Interact with the app.
app.Calc('/', 22, 11)
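Note: sync.WaitGroup.Go, used in the rewrite above, was added in Go 1.25 and wraps the Add(1) / go func() / defer Done() boilerplate. A standalone sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := range 3 {
		// wg.Go increments the counter, runs the function in a new goroutine,
		// and marks it done when the function returns.
		wg.Go(func() {
			fmt.Println("worker", i)
		})
	}
	wg.Wait()
}
```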
@@ -26,7 +26,7 @@ func ExampleNewSubscription() {
// Create a subscription that sends 10 integers on ch.
ch := make(chan int)
sub := event.NewSubscription(func(quit <-chan struct{}) error {
for i := 0; i < 10; i++ {
for i := range 10 {
select {
case ch <- i:
case <-quit:

@@ -3,6 +3,6 @@ package event
// SubscriberSender is an abstract representation of an *event.Feed
// to use in describing types that accept or return an *event.Feed.
type SubscriberSender interface {
Subscribe(channel interface{}) Subscription
Send(value interface{}) (nsent int)
Subscribe(channel any) Subscription
Send(value any) (nsent int)
}

@@ -30,7 +30,7 @@ var errInts = errors.New("error in subscribeInts")

func subscribeInts(max, fail int, c chan<- int) Subscription {
return NewSubscription(func(quit <-chan struct{}) error {
for i := 0; i < max; i++ {
for i := range max {
if i >= fail {
return errInts
}

@@ -50,7 +50,7 @@ func TestNewSubscriptionError(t *testing.T) {
channel := make(chan int)
sub := subscribeInts(10, 2, channel)
loop:
for want := 0; want < 10; want++ {
for want := range 10 {
select {
case got := <-channel:
require.Equal(t, want, got)

@@ -107,15 +107,13 @@ func TestLockUnlock(_ *testing.T) {

func TestLockUnlock_CleansUnused(t *testing.T) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
wg.Go(func() {
lock := NewMultilock("dog", "cat", "owl")
lock.Lock()
assert.Equal(t, 3, len(locks.list))
lock.Unlock()

wg.Done()
}()
})
wg.Wait()
// We expect that unlocking completely cleared the locks list
// given all 3 lock keys were unused at time of unlock.
@@ -9,14 +9,14 @@ import (
// WorkerResults are the results of a scatter worker.
type WorkerResults struct {
Offset int
Extent interface{}
Extent any
}

// Scatter scatters a computation across multiple goroutines.
// This breaks the task in to a number of chunks and executes those chunks in parallel with the function provided.
// Results returned are collected and presented as a set of WorkerResults, which can be reassembled by the calling function.
// Any error that occurs in the workers will be passed back to the calling function.
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, error)) ([]*WorkerResults, error) {
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (any, error)) ([]*WorkerResults, error) {
if inputLen <= 0 {
return nil, errors.New("input length must be greater than 0")
}

@@ -46,9 +46,9 @@ func TestDouble(t *testing.T) {
inValues[i] = i
}
outValues := make([]int, test.inValues)
workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (any, error) {
extent := make([]int, entries)
for i := 0; i < entries; i++ {
for i := range entries {
extent[i] = inValues[offset+i] * 2
}
return extent, nil

@@ -72,8 +72,8 @@ func TestDouble(t *testing.T) {
func TestMutex(t *testing.T) {
totalRuns := 1048576
val := 0
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
for i := 0; i < entries; i++ {
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (any, error) {
for range entries {
mu.Lock()
val++
mu.Unlock()

@@ -90,8 +90,8 @@ func TestMutex(t *testing.T) {
func TestError(t *testing.T) {
totalRuns := 1024
val := 0
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
for i := 0; i < entries; i++ {
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (any, error) {
for range entries {
mu.Lock()
val++
if val == 1011 {
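For context, a hedged sketch of calling async.Scatter with the new `any`-based signature (shapes taken from the Scatter declaration and the tests above; the import path is assumed; reassembly of results is illustrative only):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/OffchainLabs/prysm/v7/async" // assumed import path
)

func main() {
	in := make([]int, 1024)
	for i := range in {
		in[i] = i
	}
	out := make([]int, len(in))
	results, err := async.Scatter(len(in), func(offset, entries int, _ *sync.RWMutex) (any, error) {
		extent := make([]int, entries)
		for i := range entries {
			extent[i] = in[offset+i] * 2 // each worker doubles its chunk
		}
		return extent, nil
	})
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		copy(out[r.Offset:], r.Extent.([]int))
	}
	fmt.Println(out[0], out[1023])
}
```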
@@ -70,7 +70,7 @@ func TestVerifyBlobKZGProofBatch(t *testing.T) {
commitments := make([][]byte, blobCount)
proofs := make([][]byte, blobCount)

for i := 0; i < blobCount; i++ {
for i := range blobCount {
blob := random.GetRandBlob(int64(i))
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)

@@ -432,8 +432,8 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
commitments[1] = make([]byte, 32) // Wrong size

// Add cell proofs for both blobs
for i := 0; i < blobCount; i++ {
for j := uint64(0); j < numberOfColumns; j++ {
for range blobCount {
for range numberOfColumns {
allCellProofs = append(allCellProofs, make([]byte, 48))
}
}

@@ -450,7 +450,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte

for i := 0; i < blobCount; i++ {
for i := range blobCount {
randBlob := random.GetRandBlob(int64(i))
var blob Blob
copy(blob[:], randBlob[:])

@@ -461,7 +461,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
commitments[i] = commitment[:]

// Add cell proofs - make some invalid in the second blob
for j := uint64(0); j < numberOfColumns; j++ {
for j := range numberOfColumns {
if i == 1 && j == 64 {
// Invalid proof size in middle of second blob's proofs
allCellProofs = append(allCellProofs, make([]byte, 20))
@@ -22,10 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
if c.Epoch < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.

@@ -33,11 +30,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch)
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
if err != nil {
return nil
}

@@ -53,7 +50,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}
return st
}
// Otherwise we need to advance the head state to the start of the target epoch.
// At this point we can only have c.Epoch > headEpoch.
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
// Advance the head state to the start of the target epoch.
// This point can only be reached if c.Root == headRoot and c.Epoch > headEpoch.
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
@@ -181,6 +181,123 @@ func TestService_GetRecentPreState(t *testing.T) {
|
||||
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Epoch_0(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
err = s.SetFinalizedCheckpoint(cp0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
|
||||
service.head = &head{
|
||||
root: [32]byte(ckRoot),
|
||||
state: s,
|
||||
slot: 33,
|
||||
}
|
||||
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
|
||||
// Create a fork 31 <-- 32 <--- 64
|
||||
// \---------33
|
||||
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
|
||||
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
cpRoot := blk.Root()
|
||||
|
||||
service.head = &head{
|
||||
root: [32]byte{'T'},
|
||||
state: s,
|
||||
slot: 64,
|
||||
}
|
||||
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
|
||||
// Create a fork 30 <-- 31 <-- 32 <--- 64
|
||||
// \---------33
|
||||
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
|
||||
st, blk, err := prepareForkchoiceState(ctx, 30, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 31, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
cpRoot := blk.Root()
|
||||
|
||||
service.head = &head{
|
||||
root: [32]byte{'T'},
|
||||
state: s,
|
||||
slot: 64,
|
||||
}
|
||||
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Different(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
err = s.SetFinalizedCheckpoint(cp0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
|
||||
service.head = &head{
|
||||
root: [32]byte(ckRoot),
|
||||
state: s,
|
||||
slot: 33,
|
||||
}
|
||||
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
|
||||
}
|
||||
|
||||
func TestService_GetAttPreState_Concurrency(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
@@ -209,16 +326,14 @@ func TestService_GetAttPreState_Concurrency(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
errChan := make(chan error, 1000)
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for range 1000 {
|
||||
wg.Go(func() {
|
||||
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}
|
||||
_, err := service.getAttPreState(ctx, cp1)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
}
|
||||
}()
|
||||
})
|
||||
}
|
||||
|
||||
go func() {
|
||||
|
||||
@@ -134,7 +134,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
|
||||
return preStateVersion, preStateHeader, nil
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error {
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
@@ -306,7 +306,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
|
||||
}
|
||||
|
||||
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
|
||||
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
|
||||
blockVersion := roBlock.Version()
|
||||
block := roBlock.Block()
|
||||
slot := block.Slot()
|
||||
@@ -634,9 +634,7 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
if uint64(len(expected)) > numberOfColumns {
|
||||
if len(expected) > fieldparams.NumberOfColumns {
|
||||
return nil, errMaxDataColumnsExceeded
|
||||
}
|
||||
|
||||
@@ -817,11 +815,10 @@ func (s *Service) areDataColumnsAvailable(
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
var missingIndices interface{} = "all"
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
missingIndicesCount := uint64(len(missing))
|
||||
var missingIndices any = "all"
|
||||
missingIndicesCount := len(missing)
|
||||
|
||||
if missingIndicesCount < numberOfColumns {
|
||||
if missingIndicesCount < fieldparams.NumberOfColumns {
|
||||
missingIndices = helpers.SortedPrettySliceFromMap(missing)
|
||||
}
|
||||
|
||||
@@ -948,13 +945,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
|
||||
// return early if we are not proposing next slot
|
||||
if attribute.IsEmpty() {
|
||||
headBlock, err := s.headBlock()
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
|
||||
}
|
||||
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
|
||||
// call notifyForkchoiceUpdate, so the event is fired here.
|
||||
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), headBlock, headRoot, s.CurrentSlot()+1)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -147,7 +147,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
bState := st.Copy()
|
||||
|
||||
var blks []consensusblocks.ROBlock
|
||||
for i := 0; i < 97; i++ {
|
||||
for i := range 97 {
|
||||
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
@@ -1323,7 +1323,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
logHook := logTest.NewGlobal()
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
fc := &ethpb.Checkpoint{}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, wsb1.Block().ParentRoot(), [32]byte{}, [32]byte{}, fc, fc)
|
||||
require.NoError(t, err)
|
||||
@@ -1949,7 +1949,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.Equal(t, true, optimistic)
|
||||
|
||||
// Check that the invalid blocks are not in database
|
||||
for i := 0; i < 19-13; i++ {
|
||||
for i := range 19 - 13 {
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, invalidRoots[i]))
|
||||
}
|
||||
|
||||
@@ -2495,7 +2495,8 @@ func TestMissingBlobIndices(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMissingDataColumnIndices(t *testing.T) {
|
||||
countPlusOne := params.BeaconConfig().NumberOfColumns + 1
|
||||
const countPlusOne = fieldparams.NumberOfColumns + 1
|
||||
|
||||
tooManyColumns := make(map[uint64]bool, countPlusOne)
|
||||
for i := range countPlusOne {
|
||||
tooManyColumns[uint64(i)] = true
|
||||
@@ -2805,6 +2806,10 @@ func TestProcessLightClientUpdate(t *testing.T) {
|
||||
require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
|
||||
|
||||
for _, testVersion := range version.All()[1:] {
|
||||
if testVersion == version.Gloas {
|
||||
// TODO(16027): Unskip light client tests for Gloas
|
||||
continue
|
||||
}
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, testVersion)
|
||||
|
||||
@@ -2879,7 +2884,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
|
||||
|
||||
// set a better sync aggregate
|
||||
scb := make([]byte, 64)
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := range 5 {
|
||||
scb[i] = 0x01
|
||||
}
|
||||
oldUpdate.SetSyncAggregate(&ethpb.SyncAggregate{
|
||||
|
||||
@@ -39,8 +39,8 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)
|
||||
|
||||
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
|
||||
type BlockReceiver interface {
|
||||
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
|
||||
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
|
||||
HasBlock(ctx context.Context, root [32]byte) bool
|
||||
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
|
||||
BlockBeingSynced([32]byte) bool
|
||||
@@ -69,7 +69,7 @@ type SlashingReceiver interface {
|
||||
// 1. Validate block, apply state transition and update checkpoints
|
||||
// 2. Apply fork choice to the processed block
|
||||
// 3. Save latest head info
|
||||
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
|
||||
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
|
||||
defer span.End()
|
||||
// Return early if the block is blacklisted
|
||||
@@ -242,7 +242,7 @@ func (s *Service) validateExecutionAndConsensus(
|
||||
return postState, isValidPayload, nil
|
||||
}
|
||||
|
||||
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
|
||||
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityChecker, block blocks.ROBlock) (time.Duration, error) {
|
||||
var err error
|
||||
start := time.Now()
|
||||
if avs != nil {
|
||||
@@ -332,7 +332,7 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
|
||||
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning
|
||||
// the state, performing batch verification of all collected signatures and then performing the appropriate
|
||||
// actions for a block post-transition.
|
||||
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error {
|
||||
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
|
||||
@@ -216,13 +216,11 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
wg.Go(func() {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil))
|
||||
wg.Done()
|
||||
}()
|
||||
})
|
||||
wg.Wait()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"

@@ -470,30 +471,35 @@ func (s *Service) removeStartupState() {
// UpdateCustodyInfoInDB updates the custody information in the database.
// It returns the (potentially updated) custody group count and the earliest available slot.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
isSupernode := flags.Get().Supernode
isSemiSupernode := flags.Get().SemiSupernode

cfg := params.BeaconConfig()
custodyRequirement := cfg.CustodyRequirement

// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
if err != nil {
log.WithError(err).Error("Could not update subscription status to all data subnets")
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
}

// Warn the user if the node was previously subscribed to all data subnets and is not any more.
if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
log.Warnf(
"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
flags.SubscribeAllDataSubnets.Name,
)
// Compute the target custody group count based on current flag configuration.
targetCustodyGroupCount := custodyRequirement

// Supernode: custody all groups (either currently set or previously enabled)
if isSupernode {
targetCustodyGroupCount = cfg.NumberOfCustodyGroups
}

// Compute the custody group count.
custodyGroupCount := custodyRequirement
if isSubscribedToAllDataSubnets {
custodyGroupCount = cfg.NumberOfCustodyGroups
// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
if isSemiSupernode {
semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
if err != nil {
return 0, 0, errors.Wrap(err, "minimum custody group count")
}

targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
}

// Safely compute the fulu fork slot.

@@ -510,12 +516,23 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot,
}
}

earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
if err != nil {
return 0, 0, errors.Wrap(err, "update custody info")
}

return earliestAvailableSlot, custodyGroupCount, nil
if isSupernode {
log.WithFields(logrus.Fields{
"current": actualCustodyGroupCount,
"target": cfg.NumberOfCustodyGroups,
}).Info("Supernode mode enabled. Will custody all data columns going forward.")
}

if wasSupernode && !isSupernode {
log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a super node.", flags.Supernode.Name)
}

return earliestAvailableSlot, actualCustodyGroupCount, nil
}

func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
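Note: the custody logic above relies on the built-in max function (and other hunks in this commit use the matching min), both added in Go 1.21, replacing hand-written comparison blocks such as the one removed from GetValidatorFromDeposit later in this diff. A short sketch:

```go
package main

import "fmt"

func main() {
	custodyRequirement := uint64(4)
	semiSupernodeCustody := uint64(64)
	// max picks the larger operand, mirroring targetCustodyGroupCount above.
	target := max(custodyRequirement, semiSupernodeCustody)
	// min clamps a value, mirroring the effective-balance computation later in the diff.
	effective := min(uint64(32), uint64(48))
	fmt.Println(target, effective) // 64 32
}
```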
@@ -412,8 +412,7 @@ func BenchmarkHasBlockDB(b *testing.B) {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
require.Equal(b, true, s.cfg.BeaconDB.HasBlock(ctx, r), "Block is not in DB")
|
||||
}
|
||||
}
|
||||
@@ -432,8 +431,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
|
||||
}
|
||||
}
|
||||
@@ -605,7 +603,6 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
custodyRequirement = uint64(4)
|
||||
earliestStoredSlot = primitives.Slot(12)
|
||||
numberOfCustodyGroups = uint64(64)
|
||||
numberOfColumns = uint64(128)
|
||||
)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
@@ -613,7 +610,6 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
cfg.FuluForkEpoch = fuluForkEpoch
|
||||
cfg.CustodyRequirement = custodyRequirement
|
||||
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
|
||||
cfg.NumberOfColumns = numberOfColumns
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := t.Context()
|
||||
@@ -644,7 +640,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
@@ -682,7 +678,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
// ----------
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
@@ -697,4 +693,121 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
})
|
||||
|
||||
t.Run("Supernode downgrade prevented", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
|
||||
// Try to downgrade by removing flag
|
||||
gFlags.Supernode = false
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Should still be supernode
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
|
||||
|
||||
// Try to downgrade by removing flag
|
||||
gFlags.SemiSupernode = false
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// UpdateCustodyInfo should prevent downgrade - custody count should remain at 64
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc) // Still 64 due to downgrade prevention by UpdateCustodyInfo
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Start with semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
|
||||
|
||||
// Upgrade to full supernode
|
||||
gFlags.SemiSupernode = false
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Should upgrade to full supernode
|
||||
upgradeSlot := slot + 2
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, upgradeSlot, actualEas) // Earliest slot updates when upgrading
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to 128
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Mock a high custody requirement (simulating many validators)
|
||||
// We need to override the custody requirement calculation
|
||||
// For this test, we'll verify the logic by checking if custodyRequirement > 64
|
||||
// Since custodyRequirement in minimalTestService is 4, we can't test the high case here
|
||||
// This would require a different test setup with actual validators
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
|
||||
// With low validator requirements (4), should use semi-supernode minimum (64)
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -106,7 +106,7 @@ type EventFeedWrapper struct {
|
||||
subscribed chan struct{} // this channel is closed once a subscription is made
|
||||
}
|
||||
|
||||
func (w *EventFeedWrapper) Subscribe(channel interface{}) event.Subscription {
|
||||
func (w *EventFeedWrapper) Subscribe(channel any) event.Subscription {
|
||||
select {
|
||||
case <-w.subscribed:
|
||||
break // already closed
|
||||
@@ -116,7 +116,7 @@ func (w *EventFeedWrapper) Subscribe(channel interface{}) event.Subscription {
|
||||
return w.feed.Subscribe(channel)
|
||||
}
|
||||
|
||||
func (w *EventFeedWrapper) Send(value interface{}) int {
|
||||
func (w *EventFeedWrapper) Send(value any) int {
|
||||
return w.feed.Send(value)
|
||||
}
|
||||
|
||||
@@ -275,7 +275,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes blocks in batches from initial-sync.
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityStore) error {
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
|
||||
if s.State == nil {
|
||||
return ErrNilState
|
||||
}
|
||||
@@ -305,7 +305,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
|
||||
}
|
||||
|
||||
// ReceiveBlock mocks ReceiveBlock method in chain service.
|
||||
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityStore) error {
|
||||
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityChecker) error {
|
||||
if s.ReceiveBlockMockErr != nil {
|
||||
return s.ReceiveBlockMockErr
|
||||
}
|
||||
|
||||
@@ -166,7 +166,7 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
|
||||
indexToRegistration := make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1)
|
||||
|
||||
valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
|
||||
for i := 0; i < len(reg); i++ {
|
||||
for i := range reg {
|
||||
r := reg[i]
|
||||
nx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(r.Message.Pubkey))
|
||||
if !exists {
|
||||
|
||||
4
beacon-chain/cache/active_balance_test.go
vendored
4
beacon-chain/cache/active_balance_test.go
vendored
@@ -17,7 +17,7 @@ import (
|
||||
|
||||
func TestBalanceCache_AddGetBalance(t *testing.T) {
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, uint64(i))
|
||||
blockRoots[i] = b
|
||||
@@ -61,7 +61,7 @@ func TestBalanceCache_AddGetBalance(t *testing.T) {
|
||||
|
||||
func TestBalanceCache_BalanceKey(t *testing.T) {
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, uint64(i))
|
||||
blockRoots[i] = b
|
||||
|
||||
2
beacon-chain/cache/committee.go
vendored
2
beacon-chain/cache/committee.go
vendored
@@ -51,7 +51,7 @@ type CommitteeCache struct {
|
||||
}
|
||||
|
||||
// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
|
||||
func committeeKeyFn(obj interface{}) (string, error) {
|
||||
func committeeKeyFn(obj any) (string, error) {
|
||||
info, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return "", ErrNotCommittee
|
||||
|
||||
6
beacon-chain/cache/committee_fuzz_test.go
vendored
6
beacon-chain/cache/committee_fuzz_test.go
vendored
@@ -14,7 +14,7 @@ func TestCommitteeKeyFuzz_OK(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
c := &Committees{}
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
for range 100000 {
|
||||
fuzzer.Fuzz(c)
|
||||
k, err := committeeKeyFn(c)
|
||||
require.NoError(t, err)
|
||||
@@ -27,7 +27,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
c := &Committees{}
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
for range 100000 {
|
||||
fuzzer.Fuzz(c)
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
|
||||
_, err := cache.Committee(t.Context(), 0, c.Seed, 0)
|
||||
@@ -42,7 +42,7 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
c := &Committees{}
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
for range 100000 {
|
||||
fuzzer.Fuzz(c)
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
|
||||
|
||||
|
||||
2
beacon-chain/cache/common.go
vendored
2
beacon-chain/cache/common.go
vendored
@@ -17,6 +17,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
|
||||
}
|
||||
|
||||
// popProcessNoopFunc is a no-op function that never returns an error.
|
||||
func popProcessNoopFunc(_ interface{}, _ bool) error {
|
||||
func popProcessNoopFunc(_ any, _ bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -769,7 +769,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
|
||||
}
|
||||
|
||||
var ctrs []*ethpb.DepositContainer
|
||||
for i := 0; i < 2000; i++ {
|
||||
for i := range 2000 {
|
||||
ctrs = append(ctrs, generateCtr(uint64(10+(i/2)), int64(i)))
|
||||
}
|
||||
|
||||
@@ -948,9 +948,9 @@ func rootCreator(rn byte) []byte {
|
||||
func BenchmarkDepositTree_InsertNewImplementation(b *testing.B) {
|
||||
totalDeposits := 10000
|
||||
input := bytesutil.ToBytes32([]byte("foo"))
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
dt := NewDepositTree()
|
||||
for j := 0; j < totalDeposits; j++ {
|
||||
for range totalDeposits {
|
||||
err := dt.Insert(input[:], 0)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -959,10 +959,10 @@ func BenchmarkDepositTree_InsertNewImplementation(b *testing.B) {
|
||||
func BenchmarkDepositTree_InsertOldImplementation(b *testing.B) {
|
||||
totalDeposits := 10000
|
||||
input := bytesutil.ToBytes32([]byte("foo"))
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
dt, err := trie.NewTrie(33)
|
||||
require.NoError(b, err)
|
||||
for j := 0; j < totalDeposits; j++ {
|
||||
for range totalDeposits {
|
||||
err := dt.Insert(input[:], 0)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -980,8 +980,8 @@ func BenchmarkDepositTree_HashTreeRootNewImplementation(b *testing.B) {
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
_, err = tr.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -999,8 +999,8 @@ func BenchmarkDepositTree_HashTreeRootOldImplementation(b *testing.B) {
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
_, err = dt.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ func (ds *DepositTreeSnapshot) CalculateRoot() ([32]byte, error) {
|
||||
size := ds.depositCount
|
||||
index := len(ds.finalized)
|
||||
root := trie.ZeroHashes[0]
|
||||
for i := 0; i < DepositContractDepth; i++ {
|
||||
for i := range DepositContractDepth {
|
||||
if (size & 1) == 1 {
|
||||
if index == 0 {
|
||||
break
|
||||
|
||||
6
beacon-chain/cache/skip_slot_cache_test.go
vendored
6
beacon-chain/cache/skip_slot_cache_test.go
vendored
@@ -47,15 +47,13 @@ func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
|
||||
|
||||
c.Enable()
|
||||
wg := new(sync.WaitGroup)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
wg.Go(func() {
|
||||
// Get call will only terminate when
|
||||
// it is not longer in progress.
|
||||
obj, err := c.Get(ctx, r)
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, obj)
|
||||
wg.Done()
|
||||
}()
|
||||
})
|
||||
|
||||
c.MarkNotInProgress(r)
|
||||
wg.Wait()
|
||||
|
||||
2
beacon-chain/cache/sync_committee.go
vendored
2
beacon-chain/cache/sync_committee.go
vendored
@@ -236,7 +236,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
|
||||
// Given the `syncCommitteeIndexPosition` object, this returns the key of the object.
|
||||
// The key is the `currentSyncCommitteeRoot` within the field.
|
||||
// Error gets returned if input does not comply with `currentSyncCommitteeRoot` object.
|
||||
func keyFn(obj interface{}) (string, error) {
|
||||
func keyFn(obj any) (string, error) {
|
||||
info, ok := obj.(*syncCommitteeIndexPosition)
|
||||
if !ok {
|
||||
return "", errNotSyncCommitteeIndexPosition
|
||||
|
||||
8
beacon-chain/cache/sync_subnet_ids_test.go
vendored
8
beacon-chain/cache/sync_subnet_ids_test.go
vendored
@@ -12,12 +12,12 @@ import (
|
||||
func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
|
||||
c := newSyncSubnetIDs()
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
for i := range 20 {
|
||||
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
|
||||
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
|
||||
}
|
||||
|
||||
for i := uint64(0); i < 20; i++ {
|
||||
for i := range uint64(20) {
|
||||
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
|
||||
|
||||
idxs, _, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
|
||||
@@ -34,7 +34,7 @@ func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
|
||||
func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
|
||||
c := newSyncSubnetIDs()
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
for i := range 20 {
|
||||
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
|
||||
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
|
||||
}
|
||||
@@ -42,7 +42,7 @@ func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
|
||||
coms := c.GetAllSubnets(50)
|
||||
assert.Equal(t, 0, len(coms))
|
||||
|
||||
for i := uint64(0); i < 20; i++ {
|
||||
for i := range uint64(20) {
|
||||
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
|
||||
|
||||
_, jEpoch, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
|
||||
|
||||
@@ -461,7 +461,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
st := &ethpb.BeaconStateAltair{}
|
||||
b := &ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{}}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(st)
|
||||
fuzzer.Fuzz(b)
|
||||
if b.Block == nil {
|
||||
|
||||
@@ -240,7 +240,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
|
||||
proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), beaconState)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < len(syncBits); i++ {
|
||||
for i := range syncBits {
|
||||
if syncBits.BitAt(uint64(i)) {
|
||||
pk := bytesutil.ToBytes48(committeeKeys[i])
|
||||
require.DeepEqual(t, true, votedMap[pk])
|
||||
|
||||
@@ -195,10 +195,7 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
|
||||
// withdrawable_epoch=FAR_FUTURE_EPOCH,
|
||||
// )
|
||||
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
|
||||
effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
|
||||
if params.BeaconConfig().MaxEffectiveBalance < effectiveBalance {
|
||||
effectiveBalance = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
effectiveBalance := min(params.BeaconConfig().MaxEffectiveBalance, amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement))
|
||||
|
||||
return ðpb.Validator{
|
||||
PublicKey: pubKey,
|
||||
|
||||
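Note (illustrative sketch, not part of this commit): the effective-balance change above uses the min builtin from Go 1.21 instead of a manual clamp. A standalone example with made-up numbers showing the two forms agree:

package main

import "fmt"

func main() {
	const maxEffectiveBalance uint64 = 32_000_000_000 // illustrative value only
	const increment uint64 = 1_000_000_000
	amount := uint64(33_500_000_000)

	// Before: compute, then clamp with an if statement.
	old := amount - amount%increment
	if old > maxEffectiveBalance {
		old = maxEffectiveBalance
	}

	// After: min (Go 1.21+) expresses the same clamp in one expression.
	clamped := min(maxEffectiveBalance, amount-amount%increment)
	fmt.Println(old == clamped) // true
}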
@@ -16,7 +16,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconStateAltair{}
|
||||
deposits := make([]*ethpb.Deposit, 100)
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
for i := range deposits {
|
||||
fuzzer.Fuzz(deposits[i])
|
||||
@@ -37,7 +37,7 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
|
||||
deposit := &ethpb.Deposit{}
|
||||
ctx := t.Context()
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafeAltair(state)
|
||||
@@ -56,7 +56,7 @@ func TestFuzzProcessPreGenesisDeposit_Phase0_10000(t *testing.T) {
|
||||
deposit := &ethpb.Deposit{}
|
||||
ctx := t.Context()
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -74,7 +74,7 @@ func TestFuzzProcessDeposit_Phase0_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconState{}
|
||||
deposit := &ethpb.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -92,7 +92,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconStateAltair{}
|
||||
deposit := &ethpb.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafeAltair(state)
|
||||
|
||||
@@ -122,11 +122,8 @@ func ProcessInactivityScores(
|
||||
}
|
||||
|
||||
if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
|
||||
score := recoveryRate
|
||||
// Prevents underflow below 0.
|
||||
if score > v.InactivityScore {
|
||||
score = v.InactivityScore
|
||||
}
|
||||
score := min(recoveryRate, v.InactivityScore)
|
||||
v.InactivityScore -= score
|
||||
}
|
||||
inactivityScores[i] = v.InactivityScore
|
||||
@@ -242,7 +239,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
|
||||
}
|
||||
|
||||
balances := beaconState.Balances()
|
||||
for i := 0; i < numOfVals; i++ {
|
||||
for i := range numOfVals {
|
||||
vals[i].BeforeEpochTransitionBalance = balances[i]
|
||||
|
||||
// Compute the post balance of the validator after accounting for the
|
||||
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
getState := func(t *testing.T, count uint64, vers int) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
@@ -113,7 +113,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
getState := func(t *testing.T, count uint64) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
@@ -147,7 +147,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
|
||||
func TestSyncCommittee_CanGet(t *testing.T) {
|
||||
getState := func(t *testing.T, count uint64) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
blsKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -394,7 +394,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
|
||||
|
||||
func getState(t *testing.T, count uint64) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
blsKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
validators[i] = ðpb.Validator{
|
||||
|
||||
@@ -33,7 +33,7 @@ func TestTranslateParticipation(t *testing.T) {
|
||||
r, err := helpers.BlockRootAtSlot(s, 0)
|
||||
require.NoError(t, err)
|
||||
var pendingAtts []*ethpb.PendingAttestation
|
||||
for i := 0; i < 3; i++ {
|
||||
for i := range 3 {
|
||||
pendingAtts = append(pendingAtts, &ethpb.PendingAttestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
CommitteeIndex: primitives.CommitteeIndex(i),
|
||||
|
||||
@@ -257,7 +257,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
|
||||
}
|
||||
indices := indexedAtt.GetAttestingIndices()
|
||||
var pubkeys []bls.PublicKey
|
||||
for i := 0; i < len(indices); i++ {
|
||||
for i := range indices {
|
||||
pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))
|
||||
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:])
|
||||
if err != nil {
|
||||
|
||||
@@ -317,7 +317,7 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
|
||||
func TestConvertToIndexed_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -373,7 +373,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
@@ -481,7 +481,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
|
||||
list := bitfield.Bitlist{0b11111}
|
||||
var atts []ethpb.Att
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
for range uint64(1000) {
|
||||
atts = append(atts, &ethpb.Attestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
CommitteeIndex: 1,
|
||||
@@ -498,7 +498,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
|
||||
atts = []ethpb.Att{}
|
||||
list = bitfield.Bitlist{0b10000}
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
for range uint64(1000) {
|
||||
atts = append(atts, &ethpb.Attestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
CommitteeIndex: 1,
|
||||
@@ -524,7 +524,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
@@ -588,7 +588,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
@@ -707,7 +707,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
|
||||
@@ -21,7 +21,7 @@ func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconState{}
|
||||
att := &ethpb.Attestation{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(att)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -37,7 +37,7 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconState{}
|
||||
block := &ethpb.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
|
||||
@@ -63,7 +63,7 @@ func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
|
||||
var p []byte
|
||||
var s []byte
|
||||
var d []byte
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(&ba)
|
||||
fuzzer.Fuzz(&pubkey)
|
||||
fuzzer.Fuzz(&sig)
|
||||
@@ -83,7 +83,7 @@ func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
|
||||
e := &ethpb.Eth1Data{}
|
||||
state, err := state_native.InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(e)
|
||||
s, err := ProcessEth1DataInBlock(t.Context(), state, e)
|
||||
@@ -98,7 +98,7 @@ func TestFuzzareEth1DataEqual_10000(_ *testing.T) {
|
||||
eth1data := &ethpb.Eth1Data{}
|
||||
eth1data2 := &ethpb.Eth1Data{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(eth1data)
|
||||
fuzzer.Fuzz(eth1data2)
|
||||
AreEth1DataEqual(eth1data, eth1data2)
|
||||
@@ -110,7 +110,7 @@ func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
eth1data := &ethpb.Eth1Data{}
|
||||
var stateVotes []*ethpb.Eth1Data
|
||||
for i := 0; i < 100000; i++ {
|
||||
for i := range 100000 {
|
||||
fuzzer.Fuzz(eth1data)
|
||||
fuzzer.Fuzz(&stateVotes)
|
||||
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
|
||||
@@ -129,7 +129,7 @@ func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconState{}
|
||||
block := &ethpb.BeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -145,7 +145,7 @@ func TestFuzzProcessRandao_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconState{}
|
||||
b := &ethpb.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -168,7 +168,7 @@ func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconState{}
|
||||
blockBody := &ethpb.BeaconBlockBody{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -186,7 +186,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconState{}
|
||||
p := &ethpb.ProposerSlashing{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(p)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -203,7 +203,7 @@ func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
state := &ethpb.BeaconState{}
|
||||
proposerSlashing := &ethpb.ProposerSlashing{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(proposerSlashing)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -219,7 +219,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconState{}
|
||||
a := &ethpb.AttesterSlashing{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(a)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -237,7 +237,7 @@ func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconState{}
|
||||
attesterSlashing := &ethpb.AttesterSlashing{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attesterSlashing)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -253,7 +253,7 @@ func TestFuzzIsSlashableAttestationData_10000(_ *testing.T) {
|
||||
attestationData := &ethpb.AttestationData{}
|
||||
attestationData2 := &ethpb.AttestationData{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(attestationData)
|
||||
fuzzer.Fuzz(attestationData2)
|
||||
IsSlashableAttestationData(attestationData, attestationData2)
|
||||
@@ -264,7 +264,7 @@ func TestFuzzslashableAttesterIndices_10000(_ *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
attesterSlashing := &ethpb.AttesterSlashing{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(attesterSlashing)
|
||||
SlashableAttesterIndices(attesterSlashing)
|
||||
}
|
||||
@@ -275,7 +275,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconState{}
|
||||
b := &ethpb.SignedBeaconBlock{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -298,7 +298,7 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconState{}
|
||||
idxAttestation := &ethpb.IndexedAttestation{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(idxAttestation)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -313,7 +313,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
state := &ethpb.BeaconState{}
|
||||
deposit := &ethpb.Deposit{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -329,7 +329,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconState{}
|
||||
e := &ethpb.SignedVoluntaryExit{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(e)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -346,7 +346,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
state := &ethpb.BeaconState{}
|
||||
e := &ethpb.SignedVoluntaryExit{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(e)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -366,7 +366,7 @@ func TestFuzzVerifyExit_10000(t *testing.T) {
|
||||
fork := &ethpb.Fork{}
|
||||
var slot primitives.Slot
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(ve)
|
||||
fuzzer.Fuzz(rawVal)
|
||||
fuzzer.Fuzz(fork)
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
|
||||
func FakeDeposits(n uint64) []*ethpb.Eth1Data {
|
||||
deposits := make([]*ethpb.Eth1Data, n)
|
||||
for i := uint64(0); i < n; i++ {
|
||||
for i := range n {
|
||||
deposits[i] = &ethpb.Eth1Data{
|
||||
DepositCount: 1,
|
||||
DepositRoot: bytesutil.PadTo([]byte("root"), 32),
|
||||
@@ -175,7 +175,7 @@ func TestProcessEth1Data_SetsCorrectly(t *testing.T) {
|
||||
}
|
||||
|
||||
period := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))
|
||||
for i := uint64(0); i < period; i++ {
|
||||
for range period {
|
||||
processedState, err := blocks.ProcessEth1DataInBlock(t.Context(), beaconState, b.Block.Body.Eth1Data)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, processedState.Version() == version.Phase0)
|
||||
|
||||
@@ -27,7 +27,7 @@ func init() {
|
||||
|
||||
func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -104,7 +104,7 @@ func TestProcessBlockHeader_WrongProposerSig(t *testing.T) {
|
||||
|
||||
func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -148,7 +148,7 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
|
||||
|
||||
func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -189,7 +189,7 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
|
||||
|
||||
func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -233,7 +233,7 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
|
||||
|
||||
func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -293,7 +293,7 @@ func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
|
||||
func TestBlockSignatureSet_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
|
||||
@@ -90,6 +90,9 @@ func IsExecutionEnabled(st state.ReadOnlyBeaconState, body interfaces.ReadOnlyBe
|
||||
if st == nil || body == nil {
|
||||
return false, errors.New("nil state or block body")
|
||||
}
|
||||
if st.Version() >= version.Capella {
|
||||
return true, nil
|
||||
}
|
||||
if IsPreBellatrixVersion(st.Version()) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -260,11 +260,12 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
|
||||
|
||||
func Test_IsExecutionEnabled(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header interfaces.ExecutionData
|
||||
useAltairSt bool
|
||||
want bool
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header interfaces.ExecutionData
|
||||
useAltairSt bool
|
||||
useCapellaSt bool
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "use older than bellatrix state",
|
||||
@@ -331,6 +332,17 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
}(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "capella state always enabled",
|
||||
payload: emptyPayload(),
|
||||
header: func() interfaces.ExecutionData {
|
||||
h, err := emptyPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
return h
|
||||
}(),
|
||||
useCapellaSt: true,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
@@ -342,6 +354,8 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
if tt.useAltairSt {
|
||||
st, _ = util.DeterministicGenesisStateAltair(t, 1)
|
||||
} else if tt.useCapellaSt {
|
||||
st, _ = util.DeterministicGenesisStateCapella(t, 1)
|
||||
}
|
||||
got, err := blocks.IsExecutionEnabled(st, body)
|
||||
require.NoError(t, err)
|
||||
@@ -851,8 +865,7 @@ func BenchmarkBellatrixComplete(b *testing.B) {
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, st.SetLatestExecutionPayloadHeader(h))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
_, err := blocks.IsMergeTransitionComplete(st)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Val
|
||||
ActivationEpoch: primitives.Epoch(0),
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: []byte(fmt.Sprintf("val_%d", i)),
|
||||
PublicKey: fmt.Appendf(nil, "val_%d", i),
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawalCredentials: wd,
|
||||
}
|
||||
|
||||
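Note (illustrative sketch, not part of this commit): fmt.Appendf (Go 1.19) formats directly into a byte slice, avoiding the intermediate string allocated by []byte(fmt.Sprintf(...)), which is what the hunk above switches to. A small standalone example:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	i := 7
	a := []byte(fmt.Sprintf("val_%d", i)) // before: string, then copy to []byte
	b := fmt.Appendf(nil, "val_%d", i)    // after: append formatted bytes directly
	fmt.Println(bytes.Equal(a, b))        // true
}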
@@ -16,7 +16,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconStateElectra{}
|
||||
deposits := make([]*ethpb.Deposit, 100)
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
for i := range deposits {
|
||||
fuzzer.Fuzz(deposits[i])
|
||||
@@ -36,7 +36,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
state := &ethpb.BeaconStateElectra{}
|
||||
deposit := &ethpb.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(state)
|
||||
|
||||
@@ -95,7 +95,7 @@ func TestProcessPendingDeposits(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(100), res)
|
||||
// Validators 0..9 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 10; i++ {
|
||||
for i := range primitives.ValidatorIndex(10) {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/10, b)
|
||||
@@ -122,7 +122,7 @@ func TestProcessPendingDeposits(t *testing.T) {
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
// Validators 0..1 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 2; i++ {
|
||||
for i := range primitives.ValidatorIndex(2) {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing), b)
|
||||
@@ -149,7 +149,7 @@ func TestProcessPendingDeposits(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(0), res)
|
||||
// Validators 0..3 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 4; i++ {
|
||||
for i := range primitives.ValidatorIndex(4) {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/5, b)
|
||||
@@ -528,7 +528,7 @@ func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
|
||||
|
||||
vals := make([]*eth.Validator, numVals)
|
||||
bals := make([]uint64, numVals)
|
||||
for i := uint64(0); i < numVals; i++ {
|
||||
for i := range numVals {
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(i)
|
||||
|
||||
@@ -56,7 +56,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: &eth.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
for range uint64(10) {
|
||||
base.Validators = append(base.Validators, &eth.Validator{
|
||||
ActivationEligibilityEpoch: finalizedEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
@@ -82,7 +82,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: &eth.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
for range uint64(10) {
|
||||
base.Validators = append(base.Validators, &eth.Validator{
|
||||
EffectiveBalance: params.BeaconConfig().EjectionBalance - 1,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -108,7 +108,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: &eth.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
for range uint64(10) {
|
||||
base.Validators = append(base.Validators, &eth.Validator{
|
||||
EffectiveBalance: params.BeaconConfig().EjectionBalance - 1,
|
||||
ExitEpoch: 10,
|
||||
@@ -157,7 +157,7 @@ func Benchmark_ProcessRegistryUpdates_MassEjection(b *testing.B) {
|
||||
st, err := util.NewBeaconStateElectra()
|
||||
require.NoError(b, err)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
b.StopTimer()
|
||||
if err := st.SetValidators(genValidators(100000)); err != nil {
|
||||
panic(err)
|
||||
|
||||
@@ -329,10 +329,7 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) (state.BeaconState, er
|
||||
balance := bals[idx]
|
||||
|
||||
if balance+downwardThreshold < val.EffectiveBalance() || val.EffectiveBalance()+upwardThreshold < balance {
|
||||
effectiveBal := maxEffBalance
|
||||
if effectiveBal > balance-balance%effBalanceInc {
|
||||
effectiveBal = balance - balance%effBalanceInc
|
||||
}
|
||||
effectiveBal := min(maxEffBalance, balance-balance%effBalanceInc)
|
||||
if effectiveBal != val.EffectiveBalance() {
|
||||
newVal = val.Copy()
|
||||
newVal.EffectiveBalance = effectiveBal
|
||||
|
||||
@@ -14,7 +14,7 @@ func TestFuzzFinalUpdates_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
base := ðpb.BeaconState{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(base)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -218,7 +218,7 @@ func TestProcessRegistryUpdates_EligibleToActivate_Cancun(t *testing.T) {
|
||||
cfg.ChurnLimitQuotient = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
for range uint64(10) {
|
||||
base.Validators = append(base.Validators, ðpb.Validator{
|
||||
ActivationEligibilityEpoch: finalizedEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
@@ -314,28 +314,28 @@ func TestProcessRegistryUpdates_CanExits(t *testing.T) {
|
||||
|
||||
func buildState(t testing.TB, slot primitives.Slot, validatorCount uint64) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
}
|
||||
validatorBalances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
for i := range validatorBalances {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
latestActiveIndexRoots := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestActiveIndexRoots); i++ {
|
||||
for i := range latestActiveIndexRoots {
|
||||
latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
latestRandaoMixes := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestRandaoMixes); i++ {
|
||||
for i := range latestRandaoMixes {
|
||||
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
s, err := util.NewBeaconState()
|
||||
|
||||
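Note (illustrative sketch, not part of this commit): many hunks in this commit, like buildState above, iterate slice indices with a range loop instead of a counted loop over len(s); both forms visit the same indices. A minimal standalone example:

package main

import "fmt"

func main() {
	vals := make([]string, 3)
	// Before: counted loop over the slice length.
	for i := 0; i < len(vals); i++ {
		vals[i] = fmt.Sprintf("old_%d", i)
	}
	// After: ranging over the slice yields the same indices.
	for i := range vals {
		vals[i] = fmt.Sprintf("new_%d", i)
	}
	fmt.Println(vals) // [new_0 new_1 new_2]
}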
@@ -19,7 +19,7 @@ func TestProcessJustificationAndFinalizationPreCompute_ConsecutiveEpochs(t *test
|
||||
e := params.BeaconConfig().FarFutureEpoch
|
||||
a := params.BeaconConfig().MaxEffectiveBalance
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = []byte{byte(i)}
|
||||
}
|
||||
base := ðpb.BeaconState{
|
||||
@@ -56,7 +56,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyCurrentEpoch(t *te
|
||||
e := params.BeaconConfig().FarFutureEpoch
|
||||
a := params.BeaconConfig().MaxEffectiveBalance
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = []byte{byte(i)}
|
||||
}
|
||||
base := ðpb.BeaconState{
|
||||
@@ -93,7 +93,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
|
||||
e := params.BeaconConfig().FarFutureEpoch
|
||||
a := params.BeaconConfig().MaxEffectiveBalance
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = []byte{byte(i)}
|
||||
}
|
||||
base := ðpb.BeaconState{
|
||||
@@ -128,7 +128,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
|
||||
func TestUnrealizedCheckpoints(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
balances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
|
||||
@@ -42,7 +42,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
|
||||
return nil, errors.Wrap(err, "could not get proposer attestation delta")
|
||||
}
|
||||
validatorBals := state.Balances()
|
||||
for i := 0; i < numOfVals; i++ {
|
||||
for i := range numOfVals {
|
||||
vp[i].BeforeEpochTransitionBalance = validatorBals[i]
|
||||
|
||||
// Compute the post balance of the validator after accounting for the
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
|
||||
validatorCount := uint64(2048)
|
||||
base := buildState(e+3, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
for i := 0; i < len(atts); i++ {
|
||||
for i := range atts {
|
||||
atts[i] = &ethpb.PendingAttestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
Target: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
@@ -63,7 +63,7 @@ func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
|
||||
base := buildState(e+2, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
var emptyRoot [32]byte
|
||||
for i := 0; i < len(atts); i++ {
|
||||
for i := range atts {
|
||||
atts[i] = &ethpb.PendingAttestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
Target: &ethpb.Checkpoint{
|
||||
@@ -99,7 +99,7 @@ func TestAttestationDeltas_ZeroInclusionDelay(t *testing.T) {
|
||||
base := buildState(e+2, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
var emptyRoot [32]byte
|
||||
for i := 0; i < len(atts); i++ {
|
||||
for i := range atts {
|
||||
atts[i] = &ethpb.PendingAttestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
Target: &ethpb.Checkpoint{
|
||||
@@ -131,7 +131,7 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
|
||||
validatorCount := uint64(2048)
|
||||
base := buildState(e+3, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
for i := 0; i < len(atts); i++ {
|
||||
for i := range atts {
|
||||
atts[i] = &ethpb.PendingAttestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
Target: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
@@ -176,28 +176,28 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
|
||||
|
||||
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
}
|
||||
validatorBalances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
for i := range validatorBalances {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
latestActiveIndexRoots := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestActiveIndexRoots); i++ {
|
||||
for i := range latestActiveIndexRoots {
|
||||
latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
latestRandaoMixes := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestRandaoMixes); i++ {
|
||||
for i := range latestRandaoMixes {
|
||||
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
return &ethpb.BeaconState{
|
||||
|
||||
@@ -17,5 +17,5 @@ type Event struct {
|
||||
// Type is the type of event.
|
||||
Type EventType
|
||||
// Data is event-specific data.
|
||||
Data interface{}
|
||||
Data any
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
|
||||
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
|
||||
@@ -5,7 +5,7 @@ package helpers
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
@@ -515,9 +515,7 @@ func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState,
|
||||
// used for failing verify signature fallback.
|
||||
sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
|
||||
copy(sortedIndices, shuffledIndices)
|
||||
sort.Slice(sortedIndices, func(i, j int) bool {
|
||||
return sortedIndices[i] < sortedIndices[j]
|
||||
})
|
||||
slices.Sort(sortedIndices)
|
||||
|
||||
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
|
||||
ShuffledIndices: shuffledIndices,
|
||||
|
||||
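Note (illustrative sketch, not part of this commit): the hunk above drops sort.Slice with a comparison closure in favor of slices.Sort (Go 1.21), which sorts any slice of an ordered element type directly. A small standalone example:

package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	a := []uint64{3, 1, 2}
	b := []uint64{3, 1, 2}

	sort.Slice(a, func(i, j int) bool { return a[i] < a[j] }) // before
	slices.Sort(b)                                            // after (Go 1.21+)

	fmt.Println(a, b) // [1 2 3] [1 2 3]
}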
@@ -29,7 +29,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
|
||||
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -122,7 +122,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
var activationEpoch primitives.Epoch
|
||||
if i >= len(validators)/2 {
|
||||
activationEpoch = 3
|
||||
@@ -151,7 +151,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
validatorIndices := make([]primitives.ValidatorIndex, len(validators))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
// First 2 epochs only half validators are activated.
|
||||
var activationEpoch primitives.Epoch
|
||||
if i >= len(validators)/2 {
|
||||
@@ -234,7 +234,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
|
||||
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
// First 2 epochs only half validators are activated.
|
||||
var activationEpoch primitives.Epoch
|
||||
if i >= len(validators)/2 {
|
||||
@@ -266,7 +266,7 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
|
||||
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -287,7 +287,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
|
||||
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -323,7 +323,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
|
||||
func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
activeRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -489,7 +489,7 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
|
||||
|
||||
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 300000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -512,8 +512,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
for b.Loop() {
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -523,7 +522,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
|
||||
|
||||
func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 3000000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -546,8 +545,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
for b.Loop() {
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -557,7 +555,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
|
||||
|
||||
func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 128000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -576,8 +574,8 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
|
||||
|
||||
i := uint64(0)
|
||||
index := uint64(0)
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
|
||||
for b.Loop() {
|
||||
i++
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
@@ -592,7 +590,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
|
||||
|
||||
func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 1000000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -611,8 +609,8 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
|
||||
|
||||
i := uint64(0)
|
||||
index := uint64(0)
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
|
||||
for b.Loop() {
|
||||
i++
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
@@ -627,7 +625,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
|
||||
|
||||
func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 4000000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -646,8 +644,8 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
|
||||
|
||||
i := uint64(0)
|
||||
index := uint64(0)
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
|
||||
for b.Loop() {
|
||||
i++
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
@@ -663,7 +661,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
|
||||
func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
|
||||
committeeSize := uint64(16)
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(committeeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -688,7 +686,7 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
|
||||
|
||||
func TestPrecomputeProposerIndices_Ok(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -732,7 +730,7 @@ func TestAttestationCommitteesFromState(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -768,7 +766,7 @@ func TestAttestationCommitteesFromCache(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -934,7 +932,7 @@ func TestInitializeProposerLookahead_RegressionTest(t *testing.T) {
|
||||
proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, state, epoch)
|
||||
require.NoError(t, err)
|
||||
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
|
||||
for epochOffset := primitives.Epoch(0); epochOffset < 2; epochOffset++ {
|
||||
for epochOffset := range primitives.Epoch(2) {
|
||||
targetEpoch := epoch + epochOffset
|
||||
|
||||
activeIndices, err := helpers.ActiveValidatorIndices(ctx, state, targetEpoch)
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
|
||||
func TestRandaoMix_OK(t *testing.T) {
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
intInBytes := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(intInBytes, uint64(i))
|
||||
randaoMixes[i] = intInBytes
|
||||
@@ -52,7 +52,7 @@ func TestRandaoMix_OK(t *testing.T) {
|
||||
|
||||
func TestRandaoMix_CopyOK(t *testing.T) {
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
intInBytes := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(intInBytes, uint64(i))
|
||||
randaoMixes[i] = intInBytes
|
||||
@@ -96,7 +96,7 @@ func TestGenerateSeed_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
intInBytes := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(intInBytes, uint64(i))
|
||||
randaoMixes[i] = intInBytes
|
||||
|
||||
@@ -239,28 +239,28 @@ func TestIsInInactivityLeak(t *testing.T) {
|
||||
|
||||
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
}
|
||||
validatorBalances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
for i := range validatorBalances {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
latestActiveIndexRoots := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestActiveIndexRoots); i++ {
|
||||
for i := range latestActiveIndexRoots {
|
||||
latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
latestRandaoMixes := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestRandaoMixes); i++ {
|
||||
for i := range latestRandaoMixes {
|
||||
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
return &ethpb.BeaconState{
|
||||
|
||||
@@ -23,7 +23,7 @@ var maxShuffleListSize uint64 = 1 << 40
|
||||
func SplitIndices(l []uint64, n uint64) [][]uint64 {
|
||||
var divided [][]uint64
|
||||
var lSize = uint64(len(l))
|
||||
for i := uint64(0); i < n; i++ {
|
||||
for i := range n {
|
||||
start := slice.SplitOffset(lSize, n, i)
|
||||
end := slice.SplitOffset(lSize, n, i+1)
|
||||
divided = append(divided, l[start:end])
|
||||
@@ -103,10 +103,7 @@ func ComputeShuffledIndex(index primitives.ValidatorIndex, indexCount uint64, se
|
||||
pivot := hash8Int % indexCount
|
||||
flip := (pivot + indexCount - uint64(index)) % indexCount
|
||||
// Consider every pair only once by picking the highest pair index to retrieve randomness.
|
||||
position := uint64(index)
|
||||
if flip > position {
|
||||
position = flip
|
||||
}
|
||||
position := max(flip, uint64(index))
|
||||
// Add position except its last byte to []buf for randomness,
|
||||
// it will be used later to select a bit from the resulting hash.
|
||||
binary.LittleEndian.PutUint64(posBuffer[:8], position>>8)
|
||||
|
||||
@@ -30,7 +30,7 @@ func TestShuffleList_OK(t *testing.T) {
|
||||
var list1 []primitives.ValidatorIndex
|
||||
seed1 := [32]byte{1, 128, 12}
|
||||
seed2 := [32]byte{2, 128, 12}
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
list1 = append(list1, primitives.ValidatorIndex(i))
|
||||
}
|
||||
|
||||
@@ -55,7 +55,7 @@ func TestSplitIndices_OK(t *testing.T) {
|
||||
|
||||
var l []uint64
|
||||
numValidators := uint64(64000)
|
||||
for i := uint64(0); i < numValidators; i++ {
|
||||
for i := range numValidators {
|
||||
l = append(l, i)
|
||||
}
|
||||
split := SplitIndices(l, uint64(params.BeaconConfig().SlotsPerEpoch))
|
||||
@@ -104,7 +104,7 @@ func BenchmarkIndexComparison(b *testing.B) {
|
||||
seed := [32]byte{123, 42}
|
||||
for _, listSize := range listSizes {
|
||||
b.Run(fmt.Sprintf("Indexwise_ShuffleList_%d", listSize), func(ib *testing.B) {
|
||||
for i := 0; i < ib.N; i++ {
|
||||
for ib.Loop() {
|
||||
// Simulate a list-shuffle by running shuffle-index listSize times.
|
||||
for j := primitives.ValidatorIndex(0); uint64(j) < listSize; j++ {
|
||||
_, err := ShuffledIndex(j, listSize, seed)
|
||||
@@ -120,11 +120,11 @@ func BenchmarkShuffleList(b *testing.B) {
|
||||
seed := [32]byte{123, 42}
|
||||
for _, listSize := range listSizes {
|
||||
testIndices := make([]primitives.ValidatorIndex, listSize)
|
||||
for i := uint64(0); i < listSize; i++ {
|
||||
for i := range listSize {
|
||||
testIndices[i] = primitives.ValidatorIndex(i)
|
||||
}
|
||||
b.Run(fmt.Sprintf("ShuffleList_%d", listSize), func(ib *testing.B) {
|
||||
for i := 0; i < ib.N; i++ {
|
||||
for ib.Loop() {
|
||||
_, err := ShuffleList(testIndices, seed)
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
@@ -161,12 +161,12 @@ func TestSplitIndicesAndOffset_OK(t *testing.T) {
|
||||
|
||||
var l []uint64
|
||||
validators := uint64(64000)
|
||||
for i := uint64(0); i < validators; i++ {
|
||||
for i := range validators {
|
||||
l = append(l, i)
|
||||
}
|
||||
chunks := uint64(6)
|
||||
split := SplitIndices(l, chunks)
|
||||
for i := uint64(0); i < chunks; i++ {
|
||||
for i := range chunks {
|
||||
if !reflect.DeepEqual(split[i], l[slice.SplitOffset(uint64(len(l)), chunks, i):slice.SplitOffset(uint64(len(l)), chunks, i+1)]) {
|
||||
t.Errorf("Want: %v got: %v", l[slice.SplitOffset(uint64(len(l)), chunks, i):slice.SplitOffset(uint64(len(l)), chunks, i+1)], split[i])
|
||||
break
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestCurrentPeriodPositions(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -56,7 +56,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -87,7 +87,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -116,7 +116,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -144,7 +144,7 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -175,7 +175,7 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -203,7 +203,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -231,7 +231,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -262,7 +262,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -304,7 +304,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -332,7 +332,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -363,7 +363,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -391,7 +391,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -449,7 +449,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
	syncCommittee := &ethpb.SyncCommittee{
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		k := make([]byte, 48)
		copy(k, strconv.Itoa(i))
		validators[i] = &ethpb.Validator{
@@ -184,7 +184,7 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
	c.MinGenesisActiveValidatorCount = 16384
	params.OverrideBeaconConfig(c)
	validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/8)
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		validators[i] = &ethpb.Validator{
			ExitEpoch: params.BeaconConfig().FarFutureEpoch,
		}
@@ -241,7 +241,7 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
	c.MinGenesisActiveValidatorCount = 16384
	params.OverrideBeaconConfig(c)
	validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/8)
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		validators[i] = &ethpb.Validator{
			ExitEpoch: params.BeaconConfig().FarFutureEpoch,
		}
@@ -270,7 +270,7 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
	helpers.ClearCache()

	validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		validators[i] = &ethpb.Validator{
			ExitEpoch: params.BeaconConfig().FarFutureEpoch,
		}
@@ -322,7 +322,7 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {

	c := 1000
	validators := make([]*ethpb.Validator, c)
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		validators[i] = &ethpb.Validator{
			ExitEpoch: params.BeaconConfig().FarFutureEpoch,
		}
@@ -357,7 +357,7 @@ func TestChurnLimit_OK(t *testing.T) {
	helpers.ClearCache()

	validators := make([]*ethpb.Validator, test.validatorCount)
	for i := 0; i < len(validators); i++ {
	for i := range validators {
		validators[i] = &ethpb.Validator{
			ExitEpoch: params.BeaconConfig().FarFutureEpoch,
		}
@@ -861,7 +861,7 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {

	validators := make([]*ethpb.Validator, 4)
	balances := make([]uint64, len(validators))
	for i := uint64(0); i < 4; i++ {
	for i := range uint64(4) {
		validators[i] = &ethpb.Validator{
			PublicKey: make([]byte, params.BeaconConfig().BLSPubkeyLength),
			WithdrawalCredentials: make([]byte, 32),
@@ -270,7 +270,7 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {

	validators := make([]*ethpb.Validator, valCount)
	balances := make([]uint64, len(validators))
	for i := uint64(0); i < valCount; i++ {
	for i := range valCount {
		validators[i] = &ethpb.Validator{
			PublicKey: make([]byte, params.BeaconConfig().BLSPubkeyLength),
			WithdrawalCredentials: make([]byte, 32),
@@ -45,12 +45,13 @@ go_test(
        "p2p_interface_test.go",
        "reconstruction_helpers_test.go",
        "reconstruction_test.go",
        "semi_supernode_test.go",
        "utils_test.go",
        "validator_test.go",
        "verification_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        ":go_default_library",
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/fieldparams:go_default_library",
@@ -5,6 +5,7 @@ import (
	"math"
	"slices"

	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/crypto/hash"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -96,8 +97,7 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
		return nil, ErrCustodyGroupTooLarge
	}

	numberOfColumns := cfg.NumberOfColumns
	numberOfColumns := uint64(fieldparams.NumberOfColumns)
	columnsPerGroup := numberOfColumns / numberOfCustodyGroups

	columns := make([]uint64, 0, columnsPerGroup)
@@ -112,8 +112,9 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
	const numberOfColumns = fieldparams.NumberOfColumns

	cfg := params.BeaconConfig()
	numberOfColumns := cfg.NumberOfColumns
	numberOfCustodyGroups := cfg.NumberOfCustodyGroups

	if columnIndex >= numberOfColumns {

@@ -30,7 +30,6 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
func TestComputeCustodyGroupForColumn(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	config := params.BeaconConfig()
	config.NumberOfColumns = 128
	config.NumberOfCustodyGroups = 64
	params.OverrideBeaconConfig(config)

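The comment above calls ComputeCustodyGroupForColumn the reciprocal of ComputeColumnsForCustodyGroup; a hedged test sketch of that round trip (it assumes the same imports and in-package layout as the surrounding peerdas tests, and is not part of the diff):

func TestCustodyGroupColumnRoundTrip(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	for group := uint64(0); group < params.BeaconConfig().NumberOfCustodyGroups; group++ {
		columns, err := ComputeColumnsForCustodyGroup(group)
		require.NoError(t, err)
		for _, column := range columns {
			got, err := ComputeCustodyGroupForColumn(column)
			require.NoError(t, err)
			// Each column assigned to a group must map back to that same group.
			require.Equal(t, group, got)
		}
	}
}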
@@ -2,6 +2,7 @@ package peerdas

import (
	"encoding/binary"
	"maps"
	"sync"

	"github.com/ethereum/go-ethereum/p2p/enode"
@@ -107,3 +108,102 @@ func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCac

	return key
}

// ColumnIndices represents a set of column indices. This could be the set of indices that a node is required to custody,
// the set that a peer custodies, missing indices for a given block, indices that are present on disk, etc.
type ColumnIndices map[uint64]struct{}

// Has returns true if the index is present in the ColumnIndices.
func (ci ColumnIndices) Has(index uint64) bool {
	_, ok := ci[index]
	return ok
}

// Count returns the number of indices present in the ColumnIndices.
func (ci ColumnIndices) Count() int {
	return len(ci)
}

// Set sets the index in the ColumnIndices.
func (ci ColumnIndices) Set(index uint64) {
	ci[index] = struct{}{}
}

// Unset removes the index from the ColumnIndices.
func (ci ColumnIndices) Unset(index uint64) {
	delete(ci, index)
}

// Copy creates a copy of the ColumnIndices.
func (ci ColumnIndices) Copy() ColumnIndices {
	newCi := make(ColumnIndices, len(ci))
	maps.Copy(newCi, ci)
	return newCi
}

// Intersection returns a new ColumnIndices that contains only the indices that are present in both ColumnIndices.
func (ci ColumnIndices) Intersection(other ColumnIndices) ColumnIndices {
	result := make(ColumnIndices)
	for index := range ci {
		if other.Has(index) {
			result.Set(index)
		}
	}
	return result
}

// Merge mutates the receiver so that any index that is set in either of
// the two ColumnIndices is set in the receiver after the function finishes.
// It does not mutate the other ColumnIndices given as a function argument.
func (ci ColumnIndices) Merge(other ColumnIndices) {
	for index := range other {
		ci.Set(index)
	}
}

// ToMap converts a ColumnIndices into a map[uint64]struct{}.
// In the future ColumnIndices may be changed to a bit map, so using
// ToMap will ensure forwards-compatibility.
func (ci ColumnIndices) ToMap() map[uint64]struct{} {
	return ci.Copy()
}

// ToSlice converts a ColumnIndices into a slice of uint64 indices.
func (ci ColumnIndices) ToSlice() []uint64 {
	indices := make([]uint64, 0, len(ci))
	for index := range ci {
		indices = append(indices, index)
	}
	return indices
}

// NewColumnIndicesFromSlice creates a ColumnIndices from a slice of uint64.
func NewColumnIndicesFromSlice(indices []uint64) ColumnIndices {
	ci := make(ColumnIndices, len(indices))
	for _, index := range indices {
		ci[index] = struct{}{}
	}
	return ci
}

// NewColumnIndicesFromMap creates a ColumnIndices from a map[uint64]bool. This kind of map
// is used in several places in peerdas code. Converting from this map type to ColumnIndices
// will allow us to move the ColumnIndices underlying type to a bitmap in the future and avoid
// lots of loops for things like intersections/unions or copies.
func NewColumnIndicesFromMap(indices map[uint64]bool) ColumnIndices {
	ci := make(ColumnIndices, len(indices))
	for index, set := range indices {
		if !set {
			continue
		}
		ci[index] = struct{}{}
	}
	return ci
}

// NewColumnIndices creates an empty ColumnIndices.
// In the future ColumnIndices may change from a reference type to a value type,
// so using this constructor will ensure forwards-compatibility.
func NewColumnIndices() ColumnIndices {
	return make(ColumnIndices)
}

@@ -25,3 +25,10 @@ func TestInfo(t *testing.T) {
		require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
	}
}

func TestNewColumnIndicesFromMap(t *testing.T) {
	t.Run("nil map", func(t *testing.T) {
		ci := peerdas.NewColumnIndicesFromMap(nil)
		require.Equal(t, 0, ci.Count())
	})
}
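A short usage sketch for the new set type, with illustrative indices only; every call is one of the methods defined above, and the import path comes from this repository:

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
)

func main() {
	// Illustrative values; in practice these would come from custody computation
	// and from the on-disk column summary.
	required := peerdas.NewColumnIndicesFromSlice([]uint64{0, 1, 2, 3})
	onDisk := peerdas.NewColumnIndicesFromMap(map[uint64]bool{1: true, 3: true, 7: false})

	// Columns still to fetch: start from a copy so the original set is untouched.
	missing := required.Copy()
	for index := range onDisk.ToMap() {
		missing.Unset(index)
	}

	fmt.Println(missing.ToSlice())                     // 0 and 2, in map iteration order
	fmt.Println(required.Intersection(onDisk).Count()) // 2
}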
@@ -33,8 +33,7 @@ func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCou
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar
func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
	// The sidecar index must be within the valid range.
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	if sidecar.Index >= numberOfColumns {
	if sidecar.Index >= fieldparams.NumberOfColumns {
		return ErrIndexTooLarge
	}

@@ -100,7 +100,7 @@ func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
	// Generate random KZG commitments for `blobCount` blobs.
	kzgCommitments := make([][]byte, blobCount)

	for i := 0; i < blobCount; i++ {
	for i := range blobCount {
		kzgCommitments[i] = make([]byte, 48)
		_, err := rand.Read(kzgCommitments[i])
		require.NoError(t, err)
@@ -281,8 +281,11 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
}

func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.B) {
	const blobCount = 12
	numberOfColumns := int64(params.BeaconConfig().NumberOfColumns)
	const (
		blobCount       = 12
		numberOfColumns = fieldparams.NumberOfColumns
	)

	err := kzg.Start()
	require.NoError(b, err)
@@ -26,7 +26,40 @@ var (
func MinimumColumnCountToReconstruct() uint64 {
	// If the number of columns is odd, then we need total / 2 + 1 columns to reconstruct.
	// If the number of columns is even, then we need total / 2 columns to reconstruct.
	return (params.BeaconConfig().NumberOfColumns + 1) / 2
	return (fieldparams.NumberOfColumns + 1) / 2
}

// MinimumCustodyGroupCountToReconstruct returns the minimum number of custody groups needed to
// custody enough data columns for reconstruction. This accounts for the relationship between
// custody groups and columns, making it future-proof if these values change.
// Returns an error if the configuration values are invalid (zero or would cause division by zero).
func MinimumCustodyGroupCountToReconstruct() (uint64, error) {
	const numberOfColumns = fieldparams.NumberOfColumns
	cfg := params.BeaconConfig()

	// Validate configuration values
	if numberOfColumns == 0 {
		return 0, errors.New("NumberOfColumns cannot be zero")
	}
	if cfg.NumberOfCustodyGroups == 0 {
		return 0, errors.New("NumberOfCustodyGroups cannot be zero")
	}

	minimumColumnCount := MinimumColumnCountToReconstruct()

	// Calculate how many columns each custody group represents
	columnsPerGroup := numberOfColumns / cfg.NumberOfCustodyGroups

	// If there are more groups than columns (columnsPerGroup = 0), this is an invalid configuration
	// for reconstruction purposes as we cannot determine a meaningful custody group count
	if columnsPerGroup == 0 {
		return 0, errors.Errorf("invalid configuration: NumberOfCustodyGroups (%d) exceeds NumberOfColumns (%d)",
			cfg.NumberOfCustodyGroups, numberOfColumns)
	}

	// Use ceiling division to ensure we have enough groups to cover the minimum columns
	// ceiling(a/b) = (a + b - 1) / b
	return (minimumColumnCount + columnsPerGroup - 1) / columnsPerGroup, nil
}

// recoverCellsForBlobs reconstructs cells for specified blobs from the given data column sidecars.
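A worked example of the arithmetic above, using the same numbers as the "4 columns per group" test case added later in this diff (a minimal sketch, not new behaviour):

package main

import "fmt"

func main() {
	// Mirrors the test case: 128 columns, 32 custody groups.
	const numberOfColumns, numberOfGroups = uint64(128), uint64(32)
	minColumns := (numberOfColumns + 1) / 2             // 64 columns needed to reconstruct
	columnsPerGroup := numberOfColumns / numberOfGroups // 4 columns per custody group
	minGroups := (minColumns + columnsPerGroup - 1) / columnsPerGroup
	fmt.Println(minGroups) // 16, as asserted in TestMinimumCustodyGroupCountToReconstruct
}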
@@ -253,7 +286,8 @@ func ReconstructBlobSidecars(block blocks.ROBlock, verifiedDataColumnSidecars []

// ComputeCellsAndProofsFromFlat computes the cells and proofs from blobs and cell flat proofs.
func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) {
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	const numberOfColumns = fieldparams.NumberOfColumns

	blobCount := uint64(len(blobs))
	cellProofsCount := uint64(len(cellProofs))

@@ -295,8 +329,6 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg

// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
	numberOfColumns := params.BeaconConfig().NumberOfColumns

	cellsPerBlob := make([][]kzg.Cell, 0, len(blobsAndProofs))
	proofsPerBlob := make([][]kzg.Proof, 0, len(blobsAndProofs))
	for _, blobAndProof := range blobsAndProofs {
@@ -315,7 +347,7 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
			return nil, nil, errors.Wrap(err, "compute cells")
		}

		kzgProofs := make([]kzg.Proof, 0, numberOfColumns)
		kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
		for _, kzgProofBytes := range blobAndProof.KzgProofs {
			if len(kzgProofBytes) != kzg.BytesPerProof {
				return nil, nil, errors.New("wrong KZG proof size - should never happen")
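For the flat variant, the layout of cellProofs is inferred from how the tests later in this diff flatten proofs (blob by blob, in column order); treat the indexing helper below as an assumption rather than a documented contract:

// proofIndex returns where the cell proof for (blobIndex, columnIndex) would sit in the
// flattened slice passed to ComputeCellsAndProofsFromFlat, assuming blobs are laid out
// back to back with fieldparams.NumberOfColumns proofs each.
func proofIndex(blobIndex, columnIndex uint64) uint64 {
	return blobIndex*uint64(fieldparams.NumberOfColumns) + columnIndex
}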
@@ -16,11 +16,11 @@ import (

// testBlobSetup holds common test data for blob reconstruction tests.
type testBlobSetup struct {
	blobCount int
	blobs []kzg.Blob
	roBlock blocks.ROBlock
	roDataColumnSidecars []blocks.RODataColumn
	verifiedRoDataColumnSidecars []blocks.VerifiedRODataColumn
	blobCount                    int
	blobs                        []kzg.Blob
	roBlock                      blocks.ROBlock
	roDataColumnSidecars         []blocks.RODataColumn
	verifiedRoDataColumnSidecars []blocks.VerifiedRODataColumn
}

// setupTestBlobs creates a complete test setup with blobs, cells, proofs, and data column sidecars.
@@ -17,41 +17,9 @@
)

func TestMinimumColumnsCountToReconstruct(t *testing.T) {
	testCases := []struct {
		name string
		numberOfColumns uint64
		expected uint64
	}{
		{
			name: "numberOfColumns=128",
			numberOfColumns: 128,
			expected: 64,
		},
		{
			name: "numberOfColumns=129",
			numberOfColumns: 129,
			expected: 65,
		},
		{
			name: "numberOfColumns=130",
			numberOfColumns: 130,
			expected: 65,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Set the total number of columns.
			params.SetupTestConfigCleanup(t)
			cfg := params.BeaconConfig().Copy()
			cfg.NumberOfColumns = tc.numberOfColumns
			params.OverrideBeaconConfig(cfg)

			// Compute the minimum number of columns needed to reconstruct.
			actual := peerdas.MinimumColumnCountToReconstruct()
			require.Equal(t, tc.expected, actual)
		})
	}
	const expected = uint64(64)
	actual := peerdas.MinimumColumnCountToReconstruct()
	require.Equal(t, expected, actual)
}

func TestReconstructDataColumnSidecars(t *testing.T) {
@@ -200,7 +168,6 @@ func TestReconstructBlobSidecars(t *testing.T) {

	t.Run("nominal", func(t *testing.T) {
		const blobCount = 3
		numberOfColumns := params.BeaconConfig().NumberOfColumns

		roBlock, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)

@@ -236,7 +203,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
		require.NoError(t, err)

		// Flatten proofs.
		cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
		cellProofs := make([][]byte, 0, blobCount*fieldparams.NumberOfColumns)
		for _, proofs := range inputProofsPerBlob {
			for _, proof := range proofs {
				cellProofs = append(cellProofs, proof[:])
@@ -428,13 +395,12 @@ func TestReconstructBlobs(t *testing.T) {
}

func TestComputeCellsAndProofsFromFlat(t *testing.T) {
	const numberOfColumns = fieldparams.NumberOfColumns
	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)

	t.Run("mismatched blob and proof counts", func(t *testing.T) {
		numberOfColumns := params.BeaconConfig().NumberOfColumns

		// Create one blob but proofs for two blobs
		blobs := [][]byte{{}}

@@ -447,7 +413,6 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {

	t.Run("nominal", func(t *testing.T) {
		const blobCount = 2
		numberOfColumns := params.BeaconConfig().NumberOfColumns

		// Generate test blobs
		_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
beacon-chain/core/peerdas/semi_supernode_test.go (new file, 137 lines added)
@@ -0,0 +1,137 @@
package peerdas

import (
	"testing"

	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func TestSemiSupernodeCustody(t *testing.T) {
	const numberOfColumns = fieldparams.NumberOfColumns

	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig()
	cfg.NumberOfCustodyGroups = 128
	params.OverrideBeaconConfig(cfg)

	// Create a test node ID
	nodeID := enode.ID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32})

	t.Run("semi-supernode custodies exactly 64 columns", func(t *testing.T) {
		// Semi-supernode uses 64 custody groups (half of 128)
		const semiSupernodeCustodyGroupCount = 64

		// Get custody groups for semi-supernode
		custodyGroups, err := CustodyGroups(nodeID, semiSupernodeCustodyGroupCount)
		require.NoError(t, err)
		require.Equal(t, semiSupernodeCustodyGroupCount, len(custodyGroups))

		// Verify we get exactly 64 custody columns
		custodyColumns, err := CustodyColumns(custodyGroups)
		require.NoError(t, err)
		require.Equal(t, semiSupernodeCustodyGroupCount, len(custodyColumns))

		// Verify the columns are valid (within 0-127 range)
		for columnIndex := range custodyColumns {
			if columnIndex >= numberOfColumns {
				t.Fatalf("Invalid column index %d, should be less than %d", columnIndex, numberOfColumns)
			}
		}
	})

	t.Run("64 columns is exactly the minimum for reconstruction", func(t *testing.T) {
		minimumCount := MinimumColumnCountToReconstruct()
		require.Equal(t, uint64(64), minimumCount)
	})

	t.Run("semi-supernode vs supernode custody", func(t *testing.T) {
		// Semi-supernode (64 custody groups)
		semiSupernodeGroups, err := CustodyGroups(nodeID, 64)
		require.NoError(t, err)
		semiSupernodeColumns, err := CustodyColumns(semiSupernodeGroups)
		require.NoError(t, err)

		// Supernode (128 custody groups = all groups)
		supernodeGroups, err := CustodyGroups(nodeID, 128)
		require.NoError(t, err)
		supernodeColumns, err := CustodyColumns(supernodeGroups)
		require.NoError(t, err)

		// Verify semi-supernode has exactly half the columns of supernode
		require.Equal(t, 64, len(semiSupernodeColumns))
		require.Equal(t, 128, len(supernodeColumns))
		require.Equal(t, len(supernodeColumns)/2, len(semiSupernodeColumns))

		// Verify all semi-supernode columns are a subset of supernode columns
		for columnIndex := range semiSupernodeColumns {
			if !supernodeColumns[columnIndex] {
				t.Fatalf("Semi-supernode column %d not found in supernode columns", columnIndex)
			}
		}
	})
}

func TestMinimumCustodyGroupCountToReconstruct(t *testing.T) {
	tests := []struct {
		name           string
		numberOfGroups uint64
		expectedResult uint64
	}{
		{
			name:           "Standard 1:1 ratio (128 columns, 128 groups)",
			numberOfGroups: 128,
			expectedResult: 64, // Need half of 128 groups
		},
		{
			name:           "2 columns per group (128 columns, 64 groups)",
			numberOfGroups: 64,
			expectedResult: 32, // Need 64 columns, which is 32 groups (64/2)
		},
		{
			name:           "4 columns per group (128 columns, 32 groups)",
			numberOfGroups: 32,
			expectedResult: 16, // Need 64 columns, which is 16 groups (64/4)
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			params.SetupTestConfigCleanup(t)
			cfg := params.BeaconConfig()
			cfg.NumberOfCustodyGroups = tt.numberOfGroups
			params.OverrideBeaconConfig(cfg)

			result, err := MinimumCustodyGroupCountToReconstruct()
			require.NoError(t, err)
			require.Equal(t, tt.expectedResult, result)
		})
	}
}

func TestMinimumCustodyGroupCountToReconstruct_ErrorCases(t *testing.T) {
	t.Run("Returns error when NumberOfCustodyGroups is zero", func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig()
		cfg.NumberOfCustodyGroups = 0
		params.OverrideBeaconConfig(cfg)

		_, err := MinimumCustodyGroupCountToReconstruct()
		require.NotNil(t, err)
		require.Equal(t, true, err.Error() == "NumberOfCustodyGroups cannot be zero")
	})

	t.Run("Returns error when NumberOfCustodyGroups exceeds NumberOfColumns", func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig()
		cfg.NumberOfCustodyGroups = 256
		params.OverrideBeaconConfig(cfg)

		_, err := MinimumCustodyGroupCountToReconstruct()
		require.NotNil(t, err)
		// Just check that we got an error about the configuration
		require.Equal(t, true, len(err.Error()) > 0)
	})
}
@@ -102,11 +102,13 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block and
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_column_sidecar
|
||||
func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator) ([]blocks.RODataColumn, error) {
|
||||
const numberOfColumns = uint64(fieldparams.NumberOfColumns)
|
||||
|
||||
if len(cellsPerBlob) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
start := time.Now()
|
||||
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, params.BeaconConfig().NumberOfColumns)
|
||||
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rotate cells and proofs")
|
||||
}
|
||||
@@ -115,9 +117,8 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
|
||||
return nil, errors.Wrap(err, "extract block info")
|
||||
}
|
||||
|
||||
maxIdx := params.BeaconConfig().NumberOfColumns
|
||||
roSidecars := make([]blocks.RODataColumn, 0, maxIdx)
|
||||
for idx := range maxIdx {
|
||||
roSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
|
||||
for idx := range numberOfColumns {
|
||||
sidecar := &ethpb.DataColumnSidecar{
|
||||
Index: idx,
|
||||
Column: cells[idx],
|
||||
@@ -216,7 +217,7 @@ func rotateRowsToCols(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, nu
|
||||
if len(cells) != len(proofs) {
|
||||
return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough proofs")
|
||||
}
|
||||
for j := uint64(0); j < numCols; j++ {
|
||||
for j := range numCols {
|
||||
if i == 0 {
|
||||
cellCols[j] = make([][]byte, len(cellsPerBlob))
|
||||
proofCols[j] = make([][]byte, len(cellsPerBlob))
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
@@ -59,6 +59,8 @@ func TestValidatorsCustodyRequirement(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataColumnSidecars(t *testing.T) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
|
||||
t.Run("sizes mismatch", func(t *testing.T) {
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
@@ -69,10 +71,10 @@ func TestDataColumnSidecars(t *testing.T) {
|
||||
|
||||
// Create cells and proofs.
|
||||
cellsPerBlob := [][]kzg.Cell{
|
||||
make([]kzg.Cell, params.BeaconConfig().NumberOfColumns),
|
||||
make([]kzg.Cell, numberOfColumns),
|
||||
}
|
||||
proofsPerBlob := [][]kzg.Proof{
|
||||
make([]kzg.Proof, params.BeaconConfig().NumberOfColumns),
|
||||
make([]kzg.Proof, numberOfColumns),
|
||||
}
|
||||
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
@@ -117,7 +119,6 @@ func TestDataColumnSidecars(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with sufficient cells but insufficient proofs.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsPerBlob := [][]kzg.Cell{
|
||||
make([]kzg.Cell, numberOfColumns),
|
||||
}
|
||||
@@ -149,7 +150,6 @@ func TestDataColumnSidecars(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with correct dimensions.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsPerBlob := [][]kzg.Cell{
|
||||
make([]kzg.Cell, numberOfColumns),
|
||||
make([]kzg.Cell, numberOfColumns),
|
||||
@@ -197,6 +197,7 @@ func TestDataColumnSidecars(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReconstructionSource(t *testing.T) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
// Create a Fulu block with blob commitments.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
commitment1 := make([]byte, 48)
|
||||
@@ -212,7 +213,6 @@ func TestReconstructionSource(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with correct dimensions.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsPerBlob := [][]kzg.Cell{
|
||||
make([]kzg.Cell, numberOfColumns),
|
||||
make([]kzg.Cell, numberOfColumns),
|
||||
|
||||
@@ -119,7 +119,7 @@ func TestFuzzverifySigningRoot_10000(_ *testing.T) {
|
||||
var p []byte
|
||||
var s []byte
|
||||
var d []byte
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(st)
|
||||
fuzzer.Fuzz(&pubkey)
|
||||
fuzzer.Fuzz(&sig)
|
||||
|
||||
@@ -28,8 +28,7 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
	block, err := benchmark.PreGenFullBlock()
	require.NoError(b, err)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
	for i := 0; b.Loop(); i++ {
		wsb, err := blocks.NewSignedBeaconBlock(block)
		require.NoError(b, err)
		_, err = coreState.ExecuteStateTransition(b.Context(), cleanStates[i], wsb)
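The benchmark hunks here and below adopt testing.B.Loop from Go 1.24; a minimal standalone sketch of the pattern, with a hypothetical workload that is not from the diff:

package bench

import "testing"

// doWork stands in for the real body being measured; it is not from the diff.
func doWork(i int) { _ = i * i }

func BenchmarkWork(b *testing.B) {
	// b.Loop manages the benchmark timer itself, so the explicit b.ResetTimer()
	// call used by the old loops can be dropped. The index is only kept where the
	// body still needs it, e.g. to pick cleanStates[i] in the hunk above.
	for i := 0; b.Loop(); i++ {
		doWork(i)
	}
}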
@@ -60,8 +59,7 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
|
||||
_, err = coreState.ExecuteStateTransition(b.Context(), beaconState, wsb)
|
||||
require.NoError(b, err, "Failed to process block, benchmarks will fail")
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for i := 0; b.Loop(); i++ {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(b, err)
|
||||
_, err = coreState.ExecuteStateTransition(b.Context(), cleanStates[i], wsb)
|
||||
@@ -83,8 +81,7 @@ func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(b.Context(), beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, beaconState.SetSlot(currentSlot))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
// ProcessEpochPrecompute is the optimized version of process epoch. It's enabled by default
|
||||
// at run time.
|
||||
_, err := coreState.ProcessEpochPrecompute(b.Context(), beaconState.Copy())
|
||||
@@ -96,8 +93,7 @@ func BenchmarkHashTreeRoot_FullState(b *testing.B) {
|
||||
beaconState, err := benchmark.PreGenstateFullEpochs()
|
||||
require.NoError(b, err)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
_, err := beaconState.HashTreeRoot(b.Context())
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -113,8 +109,7 @@ func BenchmarkHashTreeRootState_FullState(b *testing.B) {
|
||||
_, err = beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(b, err)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
_, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -128,7 +123,7 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
|
||||
b.Run("Proto_Marshal", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
_, err := proto.Marshal(natState)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -137,7 +132,7 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
|
||||
b.Run("Fast_SSZ_Marshal", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
_, err := natState.MarshalSSZ()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -157,7 +152,7 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
|
||||
b.Run("Proto_Unmarshal", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
require.NoError(b, proto.Unmarshal(protoObject, &ethpb.BeaconState{}))
|
||||
}
|
||||
})
|
||||
@@ -165,7 +160,7 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
|
||||
b.Run("Fast_SSZ_Unmarshal", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
sszState := &ethpb.BeaconState{}
|
||||
require.NoError(b, sszState.UnmarshalSSZ(sszObject))
|
||||
}
|
||||
@@ -174,7 +169,7 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
|
||||
|
||||
func clonedStates(beaconState state.BeaconState) []state.BeaconState {
|
||||
clonedStates := make([]state.BeaconState, runAmount)
|
||||
for i := 0; i < runAmount; i++ {
|
||||
for i := range runAmount {
|
||||
clonedStates[i] = beaconState.Copy()
|
||||
}
|
||||
return clonedStates
|
||||
|
||||
@@ -108,7 +108,7 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
|
||||
|
||||
// prepare copies for both states
|
||||
var setups []state.BeaconState
|
||||
for i := uint64(0); i < 300; i++ {
|
||||
for i := range uint64(300) {
|
||||
var st state.BeaconState
|
||||
if i%2 == 0 {
|
||||
st = s1
|
||||
|
||||
@@ -95,7 +95,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
|
||||
}
|
||||
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
h := make([]byte, 32)
|
||||
copy(h, eth1Data.BlockHash)
|
||||
randaoMixes[i] = h
|
||||
@@ -104,17 +104,17 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
|
||||
zeroHash := params.BeaconConfig().ZeroHash[:]
|
||||
|
||||
activeIndexRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(activeIndexRoots); i++ {
|
||||
for i := range activeIndexRoots {
|
||||
activeIndexRoots[i] = zeroHash
|
||||
}
|
||||
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = zeroHash
|
||||
}
|
||||
|
||||
stateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(stateRoots); i++ {
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = zeroHash
|
||||
}
|
||||
|
||||
@@ -131,7 +131,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
|
||||
}
|
||||
scoresMissing := len(preState.Validators()) - len(scores)
|
||||
if scoresMissing > 0 {
|
||||
for i := 0; i < scoresMissing; i++ {
|
||||
for range scoresMissing {
|
||||
scores = append(scores, 0)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,7 +122,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
}
|
||||
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
h := make([]byte, 32)
|
||||
copy(h, eth1Data.BlockHash)
|
||||
randaoMixes[i] = h
|
||||
@@ -131,17 +131,17 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
zeroHash := params.BeaconConfig().ZeroHash[:]
|
||||
|
||||
activeIndexRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(activeIndexRoots); i++ {
|
||||
for i := range activeIndexRoots {
|
||||
activeIndexRoots[i] = zeroHash
|
||||
}
|
||||
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = zeroHash
|
||||
}
|
||||
|
||||
stateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(stateRoots); i++ {
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = zeroHash
|
||||
}
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ func TestGenesisBeaconState_1000(t *testing.T) {
|
||||
deposits := make([]*ethpb.Deposit, 300000)
|
||||
var genesisTime uint64
|
||||
eth1Data := &ethpb.Eth1Data{}
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(&deposits)
|
||||
fuzzer.Fuzz(&genesisTime)
|
||||
fuzzer.Fuzz(eth1Data)
|
||||
@@ -40,7 +40,7 @@ func TestOptimizedGenesisBeaconState_1000(t *testing.T) {
|
||||
preState, err := state_native.InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
eth1Data := &ethpb.Eth1Data{}
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(&genesisTime)
|
||||
fuzzer.Fuzz(eth1Data)
|
||||
fuzzer.Fuzz(preState)
|
||||
@@ -60,7 +60,7 @@ func TestIsValidGenesisState_100000(_ *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
var chainStartDepositCount, currentTime uint64
|
||||
for i := 0; i < 100000; i++ {
|
||||
for range 100000 {
|
||||
fuzzer.Fuzz(&chainStartDepositCount)
|
||||
fuzzer.Fuzz(&currentTime)
|
||||
IsValidGenesisState(chainStartDepositCount, currentTime)
|
||||
|
||||
@@ -21,7 +21,7 @@ func TestFuzzExecuteStateTransition_1000(t *testing.T) {
|
||||
sb := &ethpb.SignedBeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(sb)
|
||||
if sb.Block == nil || sb.Block.Body == nil {
|
||||
@@ -45,7 +45,7 @@ func TestFuzzCalculateStateRoot_1000(t *testing.T) {
|
||||
sb := &ethpb.SignedBeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(sb)
|
||||
if sb.Block == nil || sb.Block.Body == nil {
|
||||
@@ -68,7 +68,7 @@ func TestFuzzProcessSlot_1000(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
s, err := ProcessSlot(ctx, state)
|
||||
if err != nil && s != nil {
|
||||
@@ -86,7 +86,7 @@ func TestFuzzProcessSlots_1000(t *testing.T) {
|
||||
slot := primitives.Slot(0)
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(&slot)
|
||||
s, err := ProcessSlots(ctx, state, slot)
|
||||
@@ -105,7 +105,7 @@ func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
|
||||
bb := &ethpb.BeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(bb)
|
||||
if bb.Body == nil {
|
||||
@@ -128,7 +128,7 @@ func TestFuzzverifyOperationLengths_10000(t *testing.T) {
|
||||
bb := &ethpb.BeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(bb)
|
||||
if bb.Body == nil {
|
||||
@@ -148,7 +148,7 @@ func TestFuzzCanProcessEpoch_10000(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
time.CanProcessEpoch(state)
|
||||
}
|
||||
@@ -162,7 +162,7 @@ func TestFuzzProcessEpochPrecompute_1000(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
s, err := ProcessEpochPrecompute(ctx, state)
|
||||
if err != nil && s != nil {
|
||||
@@ -180,7 +180,7 @@ func TestFuzzProcessBlockForStateRoot_1000(t *testing.T) {
|
||||
sb := &ethpb.SignedBeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(sb)
|
||||
if sb.Block == nil || sb.Block.Body == nil || sb.Block.Body.Eth1Data == nil {
|
||||
|
||||
@@ -754,8 +754,7 @@ func BenchmarkProcessSlots_Capella(b *testing.B) {
|
||||
|
||||
var err error
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to process slot %v", err)
|
||||
@@ -768,8 +767,7 @@ func BenchmarkProcessSlots_Deneb(b *testing.B) {
|
||||
|
||||
var err error
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to process slot %v", err)
|
||||
@@ -782,8 +780,7 @@ func BenchmarkProcessSlots_Electra(b *testing.B) {
|
||||
|
||||
var err error
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to process slot %v", err)
|
||||
|
||||
@@ -307,7 +307,7 @@ func SlashValidator(
|
||||
// ActivatedValidatorIndices determines the indices activated during the given epoch.
|
||||
func ActivatedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) []primitives.ValidatorIndex {
|
||||
activations := make([]primitives.ValidatorIndex, 0)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
val := validators[i]
|
||||
if val.ActivationEpoch <= epoch && epoch < val.ExitEpoch {
|
||||
activations = append(activations, primitives.ValidatorIndex(i))
|
||||
@@ -319,7 +319,7 @@ func ActivatedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Valid
|
||||
// SlashedValidatorIndices determines the indices slashed during the given epoch.
|
||||
func SlashedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) []primitives.ValidatorIndex {
|
||||
slashed := make([]primitives.ValidatorIndex, 0)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
val := validators[i]
|
||||
maxWithdrawableEpoch := primitives.MaxEpoch(val.WithdrawableEpoch, epoch+params.BeaconConfig().EpochsPerSlashingsVector)
|
||||
if val.WithdrawableEpoch == maxWithdrawableEpoch && val.Slashed {
|
||||
|
||||
@@ -172,7 +172,7 @@ func TestSlashValidator_OK(t *testing.T) {
|
||||
validatorCount := 100
|
||||
registry := make([]*ethpb.Validator, 0, validatorCount)
|
||||
balances := make([]uint64, 0, validatorCount)
|
||||
for i := 0; i < validatorCount; i++ {
|
||||
for range validatorCount {
|
||||
registry = append(registry, &ethpb.Validator{
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -226,7 +226,7 @@ func TestSlashValidator_Electra(t *testing.T) {
|
||||
validatorCount := 100
|
||||
registry := make([]*ethpb.Validator, 0, validatorCount)
|
||||
balances := make([]uint64, 0, validatorCount)
|
||||
for i := 0; i < validatorCount; i++ {
|
||||
for range validatorCount {
|
||||
registry = append(registry, &ethpb.Validator{
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
|
||||
@@ -4,14 +4,19 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"availability_blobs.go",
|
||||
"availability_columns.go",
|
||||
"bisect.go",
|
||||
"blob_cache.go",
|
||||
"data_column_cache.go",
|
||||
"iface.go",
|
||||
"log.go",
|
||||
"mock.go",
|
||||
"needs.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/das",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -21,6 +26,7 @@ go_library(
|
||||
"//runtime/logging:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
@@ -30,11 +36,14 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"availability_blobs_test.go",
|
||||
"availability_columns_test.go",
|
||||
"blob_cache_test.go",
|
||||
"data_column_cache_test.go",
|
||||
"needs_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -45,6 +54,7 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -11,9 +11,8 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/logging"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -24,12 +23,13 @@ var (
|
||||
// This implementation will hold any blobs passed to Persist until the IsDataAvailable is called for their
|
||||
// block, at which time they will undergo full verification and be saved to the disk.
|
||||
type LazilyPersistentStoreBlob struct {
|
||||
store *filesystem.BlobStorage
|
||||
cache *blobCache
|
||||
verifier BlobBatchVerifier
|
||||
store *filesystem.BlobStorage
|
||||
cache *blobCache
|
||||
verifier BlobBatchVerifier
|
||||
shouldRetain RetentionChecker
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &LazilyPersistentStoreBlob{}
|
||||
var _ AvailabilityChecker = &LazilyPersistentStoreBlob{}
|
||||
|
||||
// BlobBatchVerifier enables LazyAvailabilityStore to manage the verification process
|
||||
// going from ROBlob->VerifiedROBlob, while avoiding the decision of which individual verifications
|
||||
@@ -42,11 +42,12 @@ type BlobBatchVerifier interface {

// NewLazilyPersistentStore creates a new LazilyPersistentStore. This constructor should always be used
// when creating a LazilyPersistentStore because it needs to initialize the cache under the hood.
func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchVerifier) *LazilyPersistentStoreBlob {
func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchVerifier, shouldRetain RetentionChecker) *LazilyPersistentStoreBlob {
	return &LazilyPersistentStoreBlob{
		store: store,
		cache: newBlobCache(),
		verifier: verifier,
		store:        store,
		cache:        newBlobCache(),
		verifier:     verifier,
		shouldRetain: shouldRetain,
	}
}
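A hedged wiring sketch for the new shouldRetain parameter; the closure shape and currentSlot() are assumptions (they stand in for however a caller tracks the head slot), while params.WithinDAPeriod and slots.ToEpoch are the helpers the old inline check used:

// Illustrative only: reproduce the previous within-DA-period behaviour as a RetentionChecker.
shouldRetain := func(s primitives.Slot) bool {
	return params.WithinDAPeriod(slots.ToEpoch(s), slots.ToEpoch(currentSlot()))
}
store := das.NewLazilyPersistentStore(blobStorage, verifier, shouldRetain)
_ = store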
@@ -66,9 +67,6 @@ func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ..
|
||||
}
|
||||
}
|
||||
}
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(sidecars[0].Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
key := keyFromSidecar(sidecars[0])
|
||||
entry := s.cache.ensure(key)
|
||||
for _, blobSidecar := range sidecars {
|
||||
@@ -81,8 +79,17 @@ func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ..

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// BlobSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
	blockCommitments, err := commitmentsToCheck(b, current)
func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current primitives.Slot, blks ...blocks.ROBlock) error {
	for _, b := range blks {
		if err := s.checkOne(ctx, current, b); err != nil {
			return err
		}
	}
	return nil
}

func (s *LazilyPersistentStoreBlob) checkOne(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
	blockCommitments, err := commitmentsToCheck(b, s.shouldRetain)
	if err != nil {
		return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())
	}
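The checker now accepts a batch of blocks; a minimal call-site sketch, where ctx, currentSlot, blk1 and blk2 are placeholders for values the caller already holds:

// One call now covers a whole batch of previously constructed blocks.ROBlock values.
if err := store.IsDataAvailable(ctx, currentSlot, blk1, blk2); err != nil {
	return errors.Wrap(err, "batch not data-available")
}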
@@ -100,7 +107,7 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
|
||||
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
|
||||
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
|
||||
// ignore their response and decrease their peer score.
|
||||
sidecars, err := entry.filter(root, blockCommitments, b.Block().Slot())
|
||||
sidecars, err := entry.filter(root, blockCommitments)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "incomplete BlobSidecar batch")
|
||||
}
|
||||
@@ -112,7 +119,7 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
|
||||
ok := errors.As(err, &me)
|
||||
if ok {
|
||||
fails := me.Failures()
|
||||
lf := make(log.Fields, len(fails))
|
||||
lf := make(logrus.Fields, len(fails))
|
||||
for i := range fails {
|
||||
lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
|
||||
}
|
||||
@@ -131,13 +138,12 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
|
||||
return nil
|
||||
}
|
||||
|
||||
func commitmentsToCheck(b blocks.ROBlock, current primitives.Slot) ([][]byte, error) {
|
||||
func commitmentsToCheck(b blocks.ROBlock, shouldRetain RetentionChecker) ([][]byte, error) {
|
||||
if b.Version() < version.Deneb {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUEST
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
|
||||
if !shouldRetain(b.Block().Slot()) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -17,6 +17,10 @@ import (
|
||||
errors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func testShouldRetainAlways(s primitives.Slot) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func Test_commitmentsToCheck(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
@@ -26,15 +30,16 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
windowSlots = windowSlots + primitives.Slot(params.BeaconConfig().FuluForkEpoch)
|
||||
maxBlobs := params.LastNetworkScheduleEntry().MaxBlobsPerBlock
|
||||
commits := make([][]byte, maxBlobs+1)
|
||||
for i := 0; i < len(commits); i++ {
|
||||
for i := range commits {
|
||||
commits[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
commits [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
err error
|
||||
name string
|
||||
commits [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
err error
|
||||
shouldRetain RetentionChecker
|
||||
}{
|
||||
{
|
||||
name: "pre deneb",
|
||||
@@ -60,6 +65,7 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
shouldRetain: testShouldRetainAlways,
|
||||
commits: func() [][]byte {
|
||||
mb := params.GetNetworkScheduleEntry(slots.ToEpoch(fulu + 100)).MaxBlobsPerBlock
|
||||
return commits[:mb]
|
||||
@@ -79,7 +85,8 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
slot: fulu + windowSlots + 1,
|
||||
shouldRetain: func(s primitives.Slot) bool { return false },
|
||||
slot: fulu + windowSlots + 1,
|
||||
},
|
||||
{
|
||||
name: "excessive commitments",
|
||||
@@ -97,14 +104,15 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
require.Equal(t, true, len(c) > params.BeaconConfig().MaxBlobsPerBlock(sb.Block().Slot()))
|
||||
return rb
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
err: errIndexOutOfBounds,
|
||||
shouldRetain: testShouldRetainAlways,
|
||||
slot: windowSlots + 1,
|
||||
err: errIndexOutOfBounds,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
b := c.block(t)
|
||||
co, err := commitmentsToCheck(b, c.slot)
|
||||
co, err := commitmentsToCheck(b, c.shouldRetain)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
} else {
|
||||
@@ -126,7 +134,7 @@ func TestLazilyPersistent_Missing(t *testing.T) {
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)

mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
as := NewLazilyPersistentStore(store, mbv)
as := NewLazilyPersistentStore(store, mbv, testShouldRetainAlways)

// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(ds, blobSidecars[2]))
@@ -153,7 +161,7 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {

mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)
as := NewLazilyPersistentStore(store, mbv, testShouldRetainAlways)

// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(ds, blobSidecars[0]))
@@ -166,11 +174,11 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 6)

as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{}, testShouldRetainAlways)
// stashes as expected
require.NoError(t, as.Persist(ds, blobSidecars...))
// ignores duplicates
require.ErrorIs(t, as.Persist(ds, blobSidecars...), ErrDuplicateSidecar)
require.ErrorIs(t, as.Persist(ds, blobSidecars...), errDuplicateSidecar)

// ignores index out of bound
blobSidecars[0].Index = 6
@@ -183,7 +191,7 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
require.NoError(t, as.Persist(slotOOB, moreBlobSidecars[0]))

// doesn't ignore new sidecars with a different block root
require.NoError(t, as.Persist(ds, moreBlobSidecars...))
require.NoError(t, as.Persist(ds, moreBlobSidecars[1:]...))
}

type mockBlobBatchVerifier struct {

244
beacon-chain/das/availability_columns.go
Normal file
@@ -0,0 +1,244 @@
package das

import (
"context"
"io"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/ethereum/go-ethereum/p2p/enode"
errors "github.com/pkg/errors"
)

// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
store *filesystem.DataColumnStorage
cache *dataColumnCache
newDataColumnsVerifier verification.NewDataColumnsVerifier
custody *custodyRequirement
bisector Bisector
shouldRetain RetentionChecker
}

var _ AvailabilityChecker = &LazilyPersistentStoreColumn{}

// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
// to run and in what order. Since LazilyPersistentStoreColumn only verifies and saves data columns once
// they are all available, the interface takes a slice of data column sidecars.
type DataColumnsVerifier interface {
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}

// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
func NewLazilyPersistentStoreColumn(
store *filesystem.DataColumnStorage,
newDataColumnsVerifier verification.NewDataColumnsVerifier,
nodeID enode.ID,
cgc uint64,
bisector Bisector,
shouldRetain RetentionChecker,
) *LazilyPersistentStoreColumn {
return &LazilyPersistentStoreColumn{
store: store,
cache: newDataColumnCache(),
newDataColumnsVerifier: newDataColumnsVerifier,
custody: &custodyRequirement{nodeID: nodeID, cgc: cgc},
bisector: bisector,
shouldRetain: shouldRetain,
}
}

// Persist adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) Persist(_ primitives.Slot, sidecars ...blocks.RODataColumn) error {
for _, sidecar := range sidecars {
if err := s.cache.stash(sidecar); err != nil {
return errors.Wrap(err, "stash DataColumnSidecar")
}
}

return nil
}

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, _ primitives.Slot, blks ...blocks.ROBlock) error {
toVerify := make([]blocks.RODataColumn, 0)
for _, block := range blks {
indices, err := s.required(block)
if err != nil {
return errors.Wrapf(err, "full commitments to check with block root `%#x`", block.Root())
}
if indices.Count() == 0 {
continue
}

key := keyFromBlock(block)
entry := s.cache.entry(key)
toVerify, err = entry.append(toVerify, IndicesNotStored(s.store.Summary(block.Root()), indices))
if err != nil {
return errors.Wrap(err, "entry filter")
}
}

if err := s.verifyAndSave(toVerify); err != nil {
log.Warn("Batch verification failed, bisecting columns by peer")
if err := s.bisectVerification(toVerify); err != nil {
return errors.Wrap(err, "bisect verification")
}
}

s.cache.cleanup(blks)
return nil
}

// required returns the set of column indices to check for a given block.
func (s *LazilyPersistentStoreColumn) required(block blocks.ROBlock) (peerdas.ColumnIndices, error) {
if !s.shouldRetain(block.Block().Slot()) {
return peerdas.NewColumnIndices(), nil
}

// If there are any commitments in the block, there are blobs,
// and if there are blobs, we need the data columns derived from those blobs.
commitments, err := block.Block().Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}
// No DA check needed if the block has no blobs.
if len(commitments) == 0 {
return peerdas.NewColumnIndices(), nil
}

return s.custody.required()
}

// verifyAndSave calls Save on the column store if the columns pass verification.
func (s *LazilyPersistentStoreColumn) verifyAndSave(columns []blocks.RODataColumn) error {
if len(columns) == 0 {
return nil
}
verified, err := s.verifyColumns(columns)
if err != nil {
return errors.Wrap(err, "verify columns")
}
if err := s.store.Save(verified); err != nil {
return errors.Wrap(err, "save data column sidecars")
}

return nil
}

func (s *LazilyPersistentStoreColumn) verifyColumns(columns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
if len(columns) == 0 {
return nil, nil
}
verifier := s.newDataColumnsVerifier(columns, verification.ByRangeRequestDataColumnSidecarRequirements)
if err := verifier.ValidFields(); err != nil {
return nil, errors.Wrap(err, "valid fields")
}
if err := verifier.SidecarInclusionProven(); err != nil {
return nil, errors.Wrap(err, "sidecar inclusion proven")
}
if err := verifier.SidecarKzgProofVerified(); err != nil {
return nil, errors.Wrap(err, "sidecar KZG proof verified")
}

return verifier.VerifiedRODataColumns()
}

// bisectVerification is used when verification of a batch of columns fails. Since the batch could
// span multiple blocks or have been fetched from multiple peers, this pattern enables code using the
// store to break the verification into smaller units and learn the results, in order to plan to retry
// retrieval of the unusable columns.
func (s *LazilyPersistentStoreColumn) bisectVerification(columns []blocks.RODataColumn) error {
if len(columns) == 0 {
return nil
}
if s.bisector == nil {
return errors.New("bisector not initialized")
}
iter, err := s.bisector.Bisect(columns)
if err != nil {
return errors.Wrap(err, "Bisector.Bisect")
}
// It's up to the bisector how to chunk up columns for verification,
// which could be by block, or by peer, or any other strategy.
// For the purposes of range syncing or backfill this will be by peer,
// so that the node can learn which peer is giving us bad data and downscore them.
for columns, err := iter.Next(); columns != nil; columns, err = iter.Next() {
if err != nil {
if !errors.Is(err, io.EOF) {
return errors.Wrap(err, "Bisector.Next")
}
break // io.EOF signals end of iteration
}
// We save the parts of the batch that have been verified successfully even though we don't know
// if all columns for the block will be available until the block is imported.
if err := s.verifyAndSave(s.columnsNotStored(columns)); err != nil {
iter.OnError(err)
continue
}
}
// This should give us a single error representing any unresolved errors seen via OnError.
return iter.Error()
}

// columnsNotStored filters the list of ROColumnSidecars to only include those that are not found in the storage summary.
func (s *LazilyPersistentStoreColumn) columnsNotStored(sidecars []blocks.RODataColumn) []blocks.RODataColumn {
// We use this method to filter a set of sidecars that were previously seen to be unavailable on disk. So our base assumption
// is that they are still not stored and we don't need to copy the list. Instead we make a set of any indices that are unexpectedly
// stored, and only when we find that the storage view has changed do we need to shrink the slice in place.
stored := make(map[int]struct{}, 0)
lastRoot := [32]byte{}
var sum filesystem.DataColumnStorageSummary
for i, sc := range sidecars {
if sc.BlockRoot() != lastRoot {
sum = s.store.Summary(sc.BlockRoot())
lastRoot = sc.BlockRoot()
}
if sum.HasIndex(sc.Index) {
stored[i] = struct{}{}
}
}
// If the view on storage hasn't changed, return the original list.
if len(stored) == 0 {
return sidecars
}
shift := 0
for i := range sidecars {
if _, ok := stored[i]; ok {
// If the index is stored, skip it; a later unstored sidecar will overwrite this position.
// Track how many positions the remaining unstored sidecars need to shift down.
shift++
continue
}
if shift > 0 {
// If the index is not stored and we have seen stored indices,
// we need to shift the current element down.
sidecars[i-shift] = sidecars[i]
}
}
return sidecars[:len(sidecars)-shift]
}

type custodyRequirement struct {
nodeID enode.ID
cgc uint64 // custody group count
indices peerdas.ColumnIndices
}

func (c *custodyRequirement) required() (peerdas.ColumnIndices, error) {
peerInfo, _, err := peerdas.Info(c.nodeID, c.cgc)
if err != nil {
return peerdas.NewColumnIndices(), errors.Wrap(err, "peer info")
}
return peerdas.NewColumnIndicesFromMap(peerInfo.CustodyColumns), nil
}
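
As a rough usage sketch of the new store (all variables below, such as columnStore, newVerifier, nodeID, cgc, bisector, shouldRetain, sidecars, and block, are placeholders assumed to be provided by the surrounding sync code and are not defined in this diff), batch sync stashes sidecars as they arrive and then gates block import on IsDataAvailable:

// Illustrative only; inputs are placeholders supplied by the caller.
avs := NewLazilyPersistentStoreColumn(columnStore, newVerifier, nodeID, cgc, bisector, shouldRetain)
if err := avs.Persist(block.Block().Slot(), sidecars...); err != nil {
	return err
}
// Later, when the block is ready to be imported:
if err := avs.IsDataAvailable(ctx, currentSlot, block); err != nil {
	return err // required columns are missing or failed verification
}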
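
The Bisector interface itself is not shown in this diff; only its use in bisectVerification is visible (Bisect returns an iterator exposing Next, OnError, and Error, with io.EOF ending iteration). Under those assumptions, a toy bisector that chunks columns by block root could look like the sketch below. The real bisector used by range sync and backfill groups columns by peer, per the comments above, and the concrete return type of Bisect may differ.

// Hypothetical bisector grouping columns by block root. Type and field names
// are illustrative; only the method shapes are inferred from this diff.
type byRootBisector struct{}

type byRootIterator struct {
	groups [][]blocks.RODataColumn
	next   int
	failed int
}

func (byRootBisector) Bisect(columns []blocks.RODataColumn) (*byRootIterator, error) {
	it := &byRootIterator{}
	index := make(map[[32]byte]int)
	for _, col := range columns {
		root := col.BlockRoot()
		i, ok := index[root]
		if !ok {
			i = len(it.groups)
			index[root] = i
			it.groups = append(it.groups, nil)
		}
		it.groups[i] = append(it.groups[i], col)
	}
	return it, nil
}

// Next returns the next group of columns, or io.EOF once all groups are consumed.
func (it *byRootIterator) Next() ([]blocks.RODataColumn, error) {
	if it.next >= len(it.groups) {
		return nil, io.EOF
	}
	group := it.groups[it.next]
	it.next++
	return group, nil
}

// OnError records that the most recently returned group failed verification.
func (it *byRootIterator) OnError(_ error) { it.failed++ }

// Error reports a single summary error for any failures seen via OnError.
func (it *byRootIterator) Error() error {
	if it.failed == 0 {
		return nil
	}
	return errors.Errorf("%d column group(s) failed verification", it.failed)
}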